Shamrock 2025.10.0
Astrophysical Code
Loading...
Searching...
No Matches
AMRSetup.cpp
Go to the documentation of this file.
1// -------------------------------------------------------//
2//
3// SHAMROCK code for hydrodynamics
4// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
5// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
6// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
7//
8// -------------------------------------------------------//
9
21
22template<class TgridVec>
24 using Tgridscal = shambase::VecComponent<TgridVec>;
26
27 private:
28 u32 cnt_x;
29 u32 cnt_y;
30 u32 cnt_z;
31 u32 cnt_xy;
32 u32 tot_count;
33 TgridVec sz;
34 bool done;
35 u32 current_iterator;
36
37 public:
38 CellGenIterator(std::array<u32, dim> cell_count, TgridVec cell_size) {
39
40 cnt_x = cell_count[0];
41 cnt_y = cell_count[1];
42 cnt_z = cell_count[2];
43 cnt_xy = cnt_x * cnt_y;
44
45 tot_count = cnt_x * cnt_y * cnt_z;
46
47 current_iterator = 0;
48 done = (current_iterator == tot_count);
49
50 sz = cell_size;
51 }
52
53 std::pair<TgridVec, TgridVec> next() {
54
55 u32 idx = current_iterator % cnt_x;
56 u32 idy = (current_iterator / cnt_x) % cnt_y;
57 u32 idz = current_iterator / cnt_xy;
58
59 u64 id_a = idx + cnt_x * idy + cnt_xy * idz;
60
61 assert(id_a < tot_count);
62 assert(idx + cnt_x * idy + cnt_xy * idz == current_iterator);
63
64 TgridVec acc_min = sz * TgridVec{idx, idy, idz};
65 TgridVec acc_max = sz * TgridVec{idx + 1, idy + 1, idz + 1};
66
67 current_iterator++;
68 if (current_iterator == tot_count) {
69 done = true;
70 }
71
72 return {acc_min, acc_max};
73 }
74
75 std::vector<std::pair<TgridVec, TgridVec>> next_n(u32 n) {
76
77 std::vector<std::pair<TgridVec, TgridVec>> res;
78 for (u32 i = 0; i < n; i++) {
79 if (done) {
80 break;
81 }
82 res.push_back(next());
83 }
84 return res;
85 }
86
87 void skip(u32 n) { next_n(n); }
88
89 bool is_done() { return done; }
90};
91
// NOTE(review): doc-site extraction — each line below keeps its original
// source-line-number prefix, and the function declarator (original line 93)
// is missing from this capture; confirm the signature against upstream.
//
// Builds the AMR base grid over [bmin, bmin + cell_size*cell_count]:
// sets the scheduler domain bounds, builds the patch base grid, then
// collectively generates every cell AABB in batches, inserts them as patch
// data, and finally reorders the AMR blocks inside each patch.
92template<class Tvec, class TgridVec>
94 TgridVec bmin, TgridVec cell_size, std::array<u32, dim> cell_count) {
95
96 PatchScheduler &sched = scheduler();
97
// upper domain corner: lower corner plus per-axis extent (cell size * count)
98 TgridVec bmax{
99 bmin.x() + cell_size.x() * (cell_count[0]),
100 bmin.y() + cell_size.y() * (cell_count[1]),
101 bmin.z() + cell_size.z() * (cell_count[2])};
102
103 sched.set_coord_domain_bound(bmin, bmax);
104
// non-cubic cells are allowed but unusual — warn on rank 0 only
105 if ((cell_size.x() != cell_size.y()) || (cell_size.y() != cell_size.z())) {
106 ON_RANK_0(logger::warn_ln("AMR Grid", "your cells aren't cube"));
107 }
108
109 static_assert(dim == 3, "this is not implemented for dim != 3");
110
// NOTE(review): patch_count is declared but never used in this function
111 std::array<u32, dim> patch_count;
112
// gcd with 2^31 keeps only the power-of-two part of the common divisor, so
// each base patch spans 2^k cells per axis with k maximal
113 constexpr u32 gcd_pow2 = 1U << 31U;
114 u32 gcd_cell_count;
115 {
116 gcd_cell_count = std::gcd(cell_count[0], cell_count[1]);
117 gcd_cell_count = std::gcd(gcd_cell_count, cell_count[2]);
118 gcd_cell_count = std::gcd(gcd_cell_count, gcd_pow2);
119 }
120
121 shamlog_debug_ln(
122 "AMRGrid",
123 "patch grid :",
124 cell_count[0] / gcd_cell_count,
125 cell_count[1] / gcd_cell_count,
126 cell_count[2] / gcd_cell_count);
127
// one base patch per gcd_cell_count cells on each axis
128 sched.make_patch_base_grid<3>(
129 {{cell_count[0] / gcd_cell_count,
130 cell_count[1] / gcd_cell_count,
131 cell_count[2] / gcd_cell_count}});
132
133 sched.for_each_patch([](u64 id_patch, const shamrock::patch::Patch &p) {
134 // TODO implement check to verify that patches are cubes of size 2^n
135 });
136
// NOTE(review): cell_tot_count is computed but never used afterwards
137 u32 cell_tot_count = cell_count[0] * cell_count[1] * cell_count[2];
138
// true iff this rank currently owns at least one local patchdata layer
139 auto has_pdat = [&]() {
140 using namespace shamrock::patch;
141 bool ret = false;
142 sched.for_each_local_patchdata([&](const Patch &p, PatchDataLayer &pdat) {
143 ret = true;
144 });
145 return ret;
146 };
147
// sequential generator over all cell AABBs of the grid
148 CellGenIterator cell_gen_iter(cell_count, cell_size);
149
// Collectively produce the next batch of cells as a PatchDataLayer: each
// rank generates its own window of the global cell sequence (fetch_view
// provides the rank's offset) and skips the cells handled by other ranks
// before/after that window, keeping the local iterator globally in sync.
150 auto next_n_patch = [&]() {
151 u32 nmax = scheduler().crit_patch_split;
152
// ranks with no local patchdata generate nothing this round
153 u64 loc_gen_count = (has_pdat()) ? nmax : 0;
154
155 auto gen_info = shamalgs::collective::fetch_view(loc_gen_count);
156
157 u64 skip_start = gen_info.head_offset;
158 u64 gen_cnt = loc_gen_count;
// NOTE(review): total_byte_count is used here as a total *element* count —
// verify fetch_view's field semantics
159 u64 skip_end = gen_info.total_byte_count - loc_gen_count - gen_info.head_offset;
160
161 shamlog_debug_ln(
162 "AMRSetup",
163 "generate : ",
164 skip_start,
165 gen_cnt,
166 skip_end,
167 "total",
168 skip_start + gen_cnt + skip_end);
// NOTE(review): skip()/next_n() take u32 while these counts are u64 —
// possible narrowing on very large grids, confirm upstream
169 cell_gen_iter.skip(skip_start);
170 auto tmp_out = cell_gen_iter.next_n(gen_cnt);
171 cell_gen_iter.skip(skip_end);
172
// split the generated (min, max) pairs into two per-field columns
173 std::vector<TgridVec> bmin;
174 std::vector<TgridVec> bmax;
175
176 for (auto [m, M] : tmp_out) {
177 bmin.push_back(m);
178 bmax.push_back(M);
179 }
180
181 // Make a patchdata from pos_data
182 shamrock::patch::PatchDataLayer tmp(sched.get_layout_ptr_old());
183 if (!tmp_out.empty()) {
184 tmp.resize(tmp_out.size());
185 tmp.fields_raz();
186
// fields 0 and 1 are presumably the cell-min / cell-max fields of the layout
// (cf. "cell_min" in push_patch_data below) — confirm the layout ordering
187 tmp.get_field<TgridVec>(0).override(bmin, tmp_out.size());
188 tmp.get_field<TgridVec>(1).override(bmax, tmp_out.size());
189 }
190 return tmp;
191 };
192
193 // multi step injection routine
194 shamrock::DataInserterUtility inserter(sched);
// NOTE(review): nmax is unused below (next_n_patch reads crit_patch_split itself)
195 u32 nmax = scheduler().crit_patch_split;
196 while (!cell_gen_iter.is_done()) {
197
198 shamrock::patch::PatchDataLayer pdat = next_n_patch();
199
// insert the batch, splitting patches above 8x the split criterion, then
// refresh the scheduler load values from the owned object counts
200 inserter.push_patch_data<TgridVec>(pdat, "cell_min", sched.crit_patch_split * 8, [&]() {
201 scheduler().update_local_load_value([&](shamrock::patch::Patch p) {
202 return scheduler().patch_data.owned_data.get(p.id_patch).get_obj_cnt();
203 });
204 });
205 }
206
// Ensure that the blocks are sorted in each patch
207 AMRSortBlocks block_sorter(context, solver_config, storage);
208 block_sorter.reorder_amr_blocks();
209}
211
std::uint32_t u32
32 bit unsigned integer
std::uint64_t u64
64 bit unsigned integer
The MPI scheduler.
u64 crit_patch_split
splitting limit (if load value > crit_patch_split => patch split)
void set_coord_domain_bound(vectype bmin, vectype bmax)
modify the bounding box of the patch domain
Class to insert data in the PatchScheduler.
PatchDataLayer container class, the layout is described in patchdata_layout.
Patch object that contain generic patch information.
Definition Patch.hpp:33
#define ON_RANK_0(x)
Macro to execute code only on rank 0.
Definition worldInfo.hpp:73