Shamrock 2025.10.0 - Astrophysical Code
scheduler_patch_list.cpp
// -------------------------------------------------------//
//
// SHAMROCK code for hydrodynamics
// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
//
// -------------------------------------------------------//

/**
 * @file scheduler_patch_list.cpp
 * @brief Handle the patch list of the mpi scheduler.
 */

#include "nlohmann/json_fwd.hpp"
#include "shamrock/scheduler/scheduler_patch_list.hpp" // class header (assumed path)
#include <random>
#include <vector>

void SchedulerPatchList::build_global() {
    StackEntry stack_loc{};

    using namespace shamrock::patch;

    // gather every rank's local patch list into the shared global list
    shamalgs::collective::vector_allgatherv(
        local, get_patch_mpi_type<3>(), global, get_patch_mpi_type<3>(), MPI_COMM_WORLD);
}

std::unordered_set<u64> SchedulerPatchList::build_local() {
    StackEntry stack_loc{};

    std::unordered_set<u64> out_ids;

    local.clear();
    for (const shamrock::patch::Patch &p : global) {
        // TODO add check node_owner_id valid
        if (i32(p.node_owner_id) == shamcomm::world_rank()) {
            local.push_back(p);
            out_ids.insert(p.id_patch);
        }
    }

    return out_ids;
}

void SchedulerPatchList::build_local_differantial(
    std::unordered_set<u64> &patch_id_lst,
    std::vector<u64> &to_send_idx,
    std::vector<u64> &to_recv_idx) {

    local.clear();

    for (u64 i = 0; i < global.size(); i++) {
        const shamrock::patch::Patch &p = global[i];

        bool was_owned = (patch_id_lst.find(p.id_patch) != patch_id_lst.end());

        // TODO add check node_owner_id valid
        if (i32(p.node_owner_id) == shamcomm::world_rank()) {
            local.push_back(p);

            // newly owned patch: its data has to be received
            if (!was_owned) {
                to_recv_idx.push_back(i);
                patch_id_lst.insert(p.id_patch);
            }
        } else {
            // patch lost by this rank: its data has to be sent away
            if (was_owned) {
                to_send_idx.push_back(i);
                patch_id_lst.erase(p.id_patch);
            }
        }
    }
}

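// Worked example for build_local_differantial (hypothetical ids): if this
// rank previously owned patches {4, 7} and the refreshed `global` now assigns
// patch 7 to another rank and patch 9 to this rank, the call leaves
// patch_id_lst == {4, 9}, pushes the global index of patch 9 into to_recv_idx,
// and pushes the global index of patch 7 into to_send_idx.
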
void SchedulerPatchList::build_global_idx_map() {
    StackEntry stack_loc{};
    id_patch_to_global_idx.clear();

    u64 idx = 0;
    for (const shamrock::patch::Patch &p : global) {
        id_patch_to_global_idx[p.id_patch] = idx;
        idx++;
    }
}

void SchedulerPatchList::build_local_idx_map() {
    StackEntry stack_loc{};
    id_patch_to_local_idx.clear();

    u64 idx = 0;
    for (const shamrock::patch::Patch &p : local) {
        id_patch_to_local_idx[p.id_patch] = idx;
        idx++;
    }
}

void SchedulerPatchList::reset_local_pack_index() {
    StackEntry stack_loc{};
    for (shamrock::patch::Patch &p : local) {
        p.pack_node_index = u64_max;
    }
}

std::tuple<u64, u64, u64, u64, u64, u64, u64, u64> SchedulerPatchList::split_patch(u64 id_patch) {

    using namespace shamrock::patch;

    Patch &p0 = global[id_patch_to_global_idx[id_patch]];

    std::array<Patch, 8> splts = p0.get_split();

    p0 = splts[0]; // override existing patch

    // give each of the 7 new sub-patches a fresh id
    splts[1].id_patch = _next_patch_id;
    _next_patch_id++;
    splts[2].id_patch = _next_patch_id;
    _next_patch_id++;
    splts[3].id_patch = _next_patch_id;
    _next_patch_id++;
    splts[4].id_patch = _next_patch_id;
    _next_patch_id++;
    splts[5].id_patch = _next_patch_id;
    _next_patch_id++;
    splts[6].id_patch = _next_patch_id;
    _next_patch_id++;
    splts[7].id_patch = _next_patch_id;
    _next_patch_id++;

    // TODO use emplace_back instead
    u64 idx_p1 = global.size();
    global.push_back(splts[1]);

    u64 idx_p2 = idx_p1 + 1;
    global.push_back(splts[2]);

    u64 idx_p3 = idx_p2 + 1;
    global.push_back(splts[3]);

    u64 idx_p4 = idx_p3 + 1;
    global.push_back(splts[4]);

    u64 idx_p5 = idx_p4 + 1;
    global.push_back(splts[5]);

    u64 idx_p6 = idx_p5 + 1;
    global.push_back(splts[6]);

    u64 idx_p7 = idx_p6 + 1;
    global.push_back(splts[7]);

    return {
        id_patch_to_global_idx[id_patch], idx_p1, idx_p2, idx_p3, idx_p4, idx_p5, idx_p6, idx_p7};
}

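// Example use of split_patch (sketch, `patch_list` being a hypothetical
// SchedulerPatchList instance):
//
//   auto [i0, i1, i2, i3, i4, i5, i6, i7] = patch_list.split_patch(pid);
//   shamrock::patch::Patch &first_child = patch_list.global[i0];
//
// The same eight indices can later be handed back to merge_patch below.
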
void SchedulerPatchList::merge_patch(
    u64 idx0, u64 idx1, u64 idx2, u64 idx3, u64 idx4, u64 idx5, u64 idx6, u64 idx7) {

    using namespace shamrock::patch;

    Patch p = Patch::merge_patch(
        {global[idx0],
         global[idx1],
         global[idx2],
         global[idx3],
         global[idx4],
         global[idx5],
         global[idx6],
         global[idx7]});

    // the merged patch replaces the first entry, the 7 others are flagged as invalid
    global[idx0] = p;
    global[idx1].set_err_mode();
    global[idx2].set_err_mode();
    global[idx3].set_err_mode();
    global[idx4].set_err_mode();
    global[idx5].set_err_mode();
    global[idx6].set_err_mode();
    global[idx7].set_err_mode();
}

namespace shamrock::patch {

    /// Serializes a Patch object to a JSON object
    inline void to_json(nlohmann::json &j, const Patch &p) {

        // u64 id_patch;
        // u64 pack_node_index;
        // u64 load_value;
        // std::array<u64,dim> coord_min;
        // std::array<u64,dim> coord_max;
        // u32 node_owner_id;

        j = nlohmann::json{
            {"id_patch", p.id_patch},
            {"pack_node_index", p.pack_node_index},
            {"load_value", p.load_value},
            {"coord_min", p.coord_min},
            {"coord_max", p.coord_max},
            {"node_owner_id", p.node_owner_id},
        };
    }

    /// Deserializes a JSON object into a Patch object
    inline void from_json(const nlohmann::json &j, Patch &p) {
        j.at("id_patch").get_to(p.id_patch);
        j.at("pack_node_index").get_to(p.pack_node_index);
        j.at("load_value").get_to(p.load_value);
        j.at("coord_min").get_to(p.coord_min);
        j.at("coord_max").get_to(p.coord_max);
        j.at("node_owner_id").get_to(p.node_owner_id);
    }

} // namespace shamrock::patch

void to_json(nlohmann::json &j, const SchedulerPatchList &p) {
    j = nlohmann::json{
        {"_next_patch_id", p._next_patch_id},
        {"global", p.global},
        //{"local", p.local}, // must be disabled to avoid differences between ranks
        {"is_load_values_up_to_date", p.is_load_values_up_to_date},
    };
}

void from_json(const nlohmann::json &j, SchedulerPatchList &p) {
    j.at("_next_patch_id").get_to(p._next_patch_id);
    j.at("global").get_to(p.global);
    // j.at("local").get_to(p.local); // must be disabled to avoid differences between ranks
    j.at("is_load_values_up_to_date").get_to(p.is_load_values_up_to_date);
}

// TODO move in a separate file
std::vector<shamrock::patch::Patch> make_fake_patch_list(u32 total_dtcnt, u64 div_limit) {

    using namespace shamrock::patch;

    std::vector<Patch> plist;

    std::mt19937 eng(0x1111);
    std::uniform_real_distribution<f32> split_val(0, 1);

    using namespace shamrock::scheduler;

    // root patch: carries the whole load and spans the full load-balancing box
    // (the three coord_max entries are assumed to be HilbertLB::max_box_sz,
    // they are elided in the listing)
    plist.push_back(
        Patch{
            0,
            u64_max,
            total_dtcnt,
            0,
            0,
            0,
            HilbertLB::max_box_sz,
            HilbertLB::max_box_sz,
            HilbertLB::max_box_sz,
            0,
        });

    bool listchanged = true;

    // keep splitting until no patch exceeds div_limit
    u64 id_cnt = 0;
    while (listchanged) {
        listchanged = false;

        std::vector<Patch> to_add;

        for (Patch &p : plist) {
            if (p.load_value > div_limit) {

                /*
                std::cout << "splitting : ( " <<
                "[" << p.x_min << "," << p.x_max << "] " <<
                "[" << p.y_min << "," << p.y_max << "] " <<
                "[" << p.z_min << "," << p.z_max << "] " <<
                " ) " << p.load_value << std::endl;
                */

                u64 min_x = p.coord_min[0];
                u64 min_y = p.coord_min[1];
                u64 min_z = p.coord_min[2];

                u64 split_x = (((p.coord_max[0] - p.coord_min[0]) + 1) / 2) - 1 + min_x;
                u64 split_y = (((p.coord_max[1] - p.coord_min[1]) + 1) / 2) - 1 + min_y;
                u64 split_z = (((p.coord_max[2] - p.coord_min[2]) + 1) / 2) - 1 + min_z;

                u64 max_x = p.coord_max[0];
                u64 max_y = p.coord_max[1];
                u64 max_z = p.coord_max[2];

                // randomly partition the parent load among the 8 children;
                // each stage of the cascade is exact (the two parts sum to the parent)
                u32 qte_m = split_val(eng) * p.load_value;
                u32 qte_p = p.load_value - qte_m;

                u32 qte_mm = split_val(eng) * qte_m;
                u32 qte_mp = qte_m - qte_mm;

                u32 qte_pm = split_val(eng) * qte_p;
                u32 qte_pp = qte_p - qte_pm;

                u32 qte_mmm = split_val(eng) * qte_mm;
                u32 qte_mmp = qte_mm - qte_mmm;

                u32 qte_mpm = split_val(eng) * qte_mp;
                u32 qte_mpp = qte_mp - qte_mpm;

                u32 qte_pmm = split_val(eng) * qte_pm;
                u32 qte_pmp = qte_pm - qte_pmm;

                u32 qte_ppm = split_val(eng) * qte_pp;
                u32 qte_ppp = qte_pp - qte_ppm;

                // the 8 octants: m/p in a suffix mean lower/upper half along x, y, z
                Patch child_mmm = Patch{
                    id_cnt,
                    u64_max,
                    qte_mmm,
                    min_x,
                    min_y,
                    min_z,
                    split_x,
                    split_y,
                    split_z,
                    0,
                };
                id_cnt++;

                Patch child_mmp = Patch{
                    id_cnt,
                    u64_max,
                    qte_mmp,
                    min_x,
                    min_y,
                    split_z + 1,
                    split_x,
                    split_y,
                    max_z,
                    0,
                };
                id_cnt++;

                Patch child_mpm = Patch{
                    id_cnt,
                    u64_max,
                    qte_mpm,
                    min_x,
                    split_y + 1,
                    min_z,
                    split_x,
                    max_y,
                    split_z,
                    0,
                };
                id_cnt++;

                Patch child_mpp = Patch{
                    id_cnt,
                    u64_max,
                    qte_mpp,
                    min_x,
                    split_y + 1,
                    split_z + 1,
                    split_x,
                    max_y,
                    max_z,
                    0,
                };
                id_cnt++;

                Patch child_pmm = Patch{
                    id_cnt,
                    u64_max,
                    qte_pmm,
                    split_x + 1,
                    min_y,
                    min_z,
                    max_x,
                    split_y,
                    split_z,
                    0,
                };
                id_cnt++;

                Patch child_pmp = Patch{
                    id_cnt,
                    u64_max,
                    qte_pmp,
                    split_x + 1,
                    min_y,
                    split_z + 1,
                    max_x,
                    split_y,
                    max_z,
                    0,
                };
                id_cnt++;

                Patch child_ppm = Patch{
                    id_cnt,
                    u64_max,
                    qte_ppm,
                    split_x + 1,
                    split_y + 1,
                    min_z,
                    max_x,
                    max_y,
                    split_z,
                    0,
                };
                id_cnt++;

                Patch child_ppp = Patch{
                    id_cnt,
                    u64_max,
                    qte_ppp,
                    split_x + 1,
                    split_y + 1,
                    split_z + 1,
                    max_x,
                    max_y,
                    max_z,
                    0,
                };
                id_cnt++;

                // the parent is replaced in place by the first child,
                // the 7 others are appended after the sweep
                p = child_mmm;
                to_add.push_back(child_mmp);
                to_add.push_back(child_mpm);
                to_add.push_back(child_mpp);
                to_add.push_back(child_pmm);
                to_add.push_back(child_pmp);
                to_add.push_back(child_ppm);
                to_add.push_back(child_ppp);
            }
        }

        if (!to_add.empty()) {
            listchanged = true;

            plist.insert(plist.end(), to_add.begin(), to_add.end());
        }

        /*
        for(Patch & p : plist){
            std::cout << "( " <<
            "[" << p.x_min << "," << p.x_max << "] " <<
            "[" << p.y_min << "," << p.y_max << "] " <<
            "[" << p.z_min << "," << p.z_max << "] " <<
            " ) " << p.load_value << std::endl;
        }

        std::cout << "----- end cycle -----" << std::endl;
        */
    }

    return plist;
}
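
The to_json / from_json pairs defined in this file follow the nlohmann::json ADL conventions, so a SchedulerPatchList converts to and from JSON implicitly. A minimal round-trip sketch, assuming SchedulerPatchList is default-constructible (the roundtrip helper is hypothetical):

#include "nlohmann/json.hpp"
#include <cassert>

// Hypothetical helper: serialize a scheduler patch list, restore it, and
// rebuild the rank-local view that is deliberately excluded from the JSON.
void roundtrip(SchedulerPatchList &sched) {
    nlohmann::json j = sched; // dispatches to to_json(j, sched)

    auto restored = j.get<SchedulerPatchList>(); // dispatches to from_json
    assert(restored._next_patch_id == sched._next_patch_id);

    // `local` is not serialized (to keep the JSON identical on every rank),
    // so it has to be rebuilt from `global` after loading.
    restored.build_local();
}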
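
Each splitting step in make_fake_patch_list hands qte_m + qte_p = load_value down to the children, so the recursion conserves the total load exactly and terminates once every leaf is at or below div_limit. A small self-check built on that invariant (check_fake_patch_list is a hypothetical test driver):

#include <cassert>

// Hypothetical test driver: the load summed over the generated leaves must
// equal the load injected at the root, and no leaf may exceed the limit.
void check_fake_patch_list() {
    const u32 total_dtcnt = 1000000;
    const u64 div_limit   = 10000;
    std::vector<shamrock::patch::Patch> plist = make_fake_patch_list(total_dtcnt, div_limit);

    u64 sum = 0;
    for (const shamrock::patch::Patch &p : plist) {
        sum += p.load_value;
        assert(p.load_value <= div_limit);
    }
    assert(sum == total_dtcnt);
}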