22template<
class Tgr
idVec>
24 using Tgridscal = shambase::VecComponent<TgridVec>;
// ---- constructor-body fragment (signature on lines not visible in this extract) ----
// Cache the per-axis cell counts and the derived strides used to
// linearize / de-linearize cell indices during iteration.
40 cnt_x = cell_count[0];
41 cnt_y = cell_count[1];
42 cnt_z = cell_count[2];
// Stride of one z-slab: x is the fastest-varying axis, then y, then z.
43 cnt_xy = cnt_x * cnt_y;
45 tot_count = cnt_x * cnt_y * cnt_z;
// An empty grid is immediately done. Presumably current_iterator starts
// at 0 — its initializer is not visible in this extract; TODO confirm.
48 done = (current_iterator == tot_count);
// Produce the integer bounds [acc_min, acc_max) of the next cell in the
// traversal and advance the cursor.
// NOTE(review): several original lines are missing from this extract
// (including, presumably, the cursor increment and the `done` update near
// the end of the body) — the comments below cover only what is visible.
53 std::pair<TgridVec, TgridVec> next() {
// De-linearize the cursor: x fastest, then y, then z (matches cnt_xy stride).
55 u32 idx = current_iterator % cnt_x;
56 u32 idy = (current_iterator / cnt_x) % cnt_y;
57 u32 idz = current_iterator / cnt_xy;
59 u64 id_a = idx + cnt_x * idy + cnt_xy * idz;
// Sanity checks: recomposed linear index is in range and round-trips the cursor.
61 assert(id_a < tot_count);
62 assert(idx + cnt_x * idy + cnt_xy * idz == current_iterator);
// Cell bounds = cell index scaled by `sz` (assumed to be the per-cell size
// vector set elsewhere in the class — TODO confirm).
64 TgridVec acc_min = sz * TgridVec{idx, idy, idz};
65 TgridVec acc_max = sz * TgridVec{idx + 1, idy + 1, idz + 1};
// Mark completion once the cursor has covered every cell.
68 if (current_iterator == tot_count) {
72 return {acc_min, acc_max};
// Collect the bounds of the next @p n cells into a vector by repeated
// calls to next().
// NOTE(review): the return statement and closing brace fall outside the
// visible lines of this extract. Consider res.reserve(n) before the loop
// to avoid repeated reallocations.
75 std::vector<std::pair<TgridVec, TgridVec>> next_n(
u32 n) {
77 std::vector<std::pair<TgridVec, TgridVec>> res;
78 for (
u32 i = 0; i < n; i++) {
82 res.push_back(next());
87 void skip(
u32 n) { next_n(n); }
89 bool is_done() {
return done; }
// ---- AMR base-grid / patch-scheduler setup (heavily gapped extract) ----
// Visible logic: computes the upper domain corner as
// bmin + cell_size * cell_count per axis, warns when cells are not cubic,
// derives the patch grid by dividing the per-axis cell counts by their
// common power-of-two GCD, then generates cell bounds cooperatively
// across ranks and inserts them into the patch scheduler.
// NOTE(review): many original lines are missing between the visible
// ones; comments are limited to what is shown.
92template<
class Tvec,
class Tgr
idVec>
// Function return type and name are on a line not visible in this extract.
94 TgridVec bmin, TgridVec cell_size, std::array<u32, dim> cell_count) {
// Upper corner of the domain bounding box (one term per axis).
99 bmin.x() + cell_size.x() * (cell_count[0]),
100 bmin.y() + cell_size.y() * (cell_count[1]),
101 bmin.z() + cell_size.z() * (cell_count[2])};
// Non-cubic cells are allowed but flagged (log emitted on rank 0 only).
105 if ((cell_size.x() != cell_size.y()) || (cell_size.y() != cell_size.z())) {
106 ON_RANK_0(logger::warn_ln(
"AMR Grid",
"your cells aren't cube"));
109 static_assert(dim == 3,
"this is not implemented for dim != 3");
111 std::array<u32, dim> patch_count;
// Largest power-of-two u32; gcd with it below clamps gcd_cell_count to
// the largest power of two dividing all three axis counts.
113 constexpr u32 gcd_pow2 = 1U << 31U;
116 gcd_cell_count = std::gcd(cell_count[0], cell_count[1]);
117 gcd_cell_count = std::gcd(gcd_cell_count, cell_count[2]);
118 gcd_cell_count = std::gcd(gcd_cell_count, gcd_pow2);
// Patch grid dimensions: cell counts reduced by the power-of-two GCD.
124 cell_count[0] / gcd_cell_count,
125 cell_count[1] / gcd_cell_count,
126 cell_count[2] / gcd_cell_count);
128 sched.make_patch_base_grid<3>(
129 {{cell_count[0] / gcd_cell_count,
130 cell_count[1] / gcd_cell_count,
131 cell_count[2] / gcd_cell_count}});
// NOTE(review): u32 product may overflow for grids with >= 2^32 cells —
// consider computing in u64; verify against expected grid sizes.
137 u32 cell_tot_count = cell_count[0] * cell_count[1] * cell_count[2];
139 auto has_pdat = [&]() {
140 using namespace shamrock::patch;
// Generate up to crit_patch_split cells per call on ranks that own patch
// data; fetch_view presumably computes each rank's offset into the global
// cell sequence — TODO confirm semantics of head_offset/total_byte_count.
150 auto next_n_patch = [&]() {
151 u32 nmax = scheduler().crit_patch_split;
153 u64 loc_gen_count = (has_pdat()) ? nmax : 0;
155 auto gen_info = shamalgs::collective::fetch_view(loc_gen_count);
157 u64 skip_start = gen_info.head_offset;
158 u64 gen_cnt = loc_gen_count;
159 u64 skip_end = gen_info.total_byte_count - loc_gen_count - gen_info.head_offset;
168 skip_start + gen_cnt + skip_end);
// Every rank walks the full iterator (skip / generate / skip) so all
// ranks keep identical cursor state.
// NOTE(review): skip takes u32 but skip_start/skip_end are u64 —
// narrowing conversion; verify counts fit in u32.
169 cell_gen_iter.skip(skip_start);
170 auto tmp_out = cell_gen_iter.next_n(gen_cnt);
171 cell_gen_iter.skip(skip_end);
// NOTE(review): these locals shadow the function parameter `bmin` (and
// presumably a `bmax` local above) — consider renaming for clarity.
173 std::vector<TgridVec> bmin;
174 std::vector<TgridVec> bmax;
// Split the generated (min, max) pairs into the two field vectors.
176 for (
auto [m, M] : tmp_out) {
183 if (!tmp_out.empty()) {
184 tmp.resize(tmp_out.size());
// Fields 0 and 1 are overwritten with the generated min/max coordinates.
187 tmp.get_field<TgridVec>(0).
override(bmin, tmp_out.size());
188 tmp.get_field<TgridVec>(1).
override(bmax, tmp_out.size());
// Drain the iterator, pushing generated cell data into the scheduler
// until every cell has been produced.
195 u32 nmax = scheduler().crit_patch_split;
196 while (!cell_gen_iter.is_done()) {
200 inserter.push_patch_data<TgridVec>(pdat,
"cell_min", sched.
crit_patch_split * 8, [&]() {
202 return scheduler().patch_data.owned_data.get(p.id_patch).get_obj_cnt();
// Re-sort AMR blocks after insertion so block ordering is canonical.
208 AMRSortBlocks block_sorter(context, solver_config, storage);
209 block_sorter.reorder_amr_blocks();
std::uint32_t u32
32-bit unsigned integer
std::uint64_t u64
64-bit unsigned integer
u64 crit_patch_split
splitting limit (if a patch's load value exceeds crit_patch_split, the patch is split)
void set_coord_domain_bound(vectype bmin, vectype bmax)
modify the bounding box of the patch domain
Class to insert data in the PatchScheduler.
PatchDataLayer container class, the layout is described in patchdata_layout.
Patch object that contain generic patch information.
#define ON_RANK_0(x)
Macro to execute code only on rank 0.