17#include "nlohmann/json_fwd.hpp"
28 using namespace shamrock::patch;
30 shamalgs::collective::vector_allgatherv(
31 local, get_patch_mpi_type<3>(),
global, get_patch_mpi_type<3>(), MPI_COMM_WORLD);
37 std::unordered_set<u64> out_ids;
44 out_ids.insert(p.id_patch);
52 std::unordered_set<u64> &patch_id_lst,
53 std::vector<u64> &to_send_idx,
54 std::vector<u64> &to_recv_idx) {
61 bool was_owned = (patch_id_lst.find(p.id_patch) != patch_id_lst.end());
68 to_recv_idx.push_back(i);
69 patch_id_lst.insert(p.id_patch);
73 to_send_idx.push_back(i);
74 patch_id_lst.erase(p.id_patch);
111 using namespace shamrock::patch;
115 std::array<Patch, 8> splts = p0.get_split();
136 global.push_back(splts[1]);
138 u64 idx_p2 = idx_p1 + 1;
139 global.push_back(splts[2]);
141 u64 idx_p3 = idx_p2 + 1;
142 global.push_back(splts[3]);
144 u64 idx_p4 = idx_p3 + 1;
145 global.push_back(splts[4]);
147 u64 idx_p5 = idx_p4 + 1;
148 global.push_back(splts[5]);
150 u64 idx_p6 = idx_p5 + 1;
151 global.push_back(splts[6]);
153 u64 idx_p7 = idx_p6 + 1;
154 global.push_back(splts[7]);
163 using namespace shamrock::patch;
165 Patch p = Patch::merge_patch(
176 global[idx1].set_err_mode();
177 global[idx2].set_err_mode();
178 global[idx3].set_err_mode();
179 global[idx4].set_err_mode();
180 global[idx5].set_err_mode();
181 global[idx6].set_err_mode();
182 global[idx7].set_err_mode();
185namespace shamrock::patch {
196 inline void to_json(nlohmann::json &j,
const Patch &p) {
206 {
"id_patch", p.id_patch},
207 {
"pack_node_index", p.pack_node_index},
208 {
"load_value", p.load_value},
209 {
"coord_min", p.coord_min},
210 {
"coord_max", p.coord_max},
211 {
"node_owner_id", p.node_owner_id},
224 inline void from_json(
const nlohmann::json &j,
Patch &p) {
225 j.at(
"id_patch").get_to(p.id_patch);
226 j.at(
"pack_node_index").get_to(p.pack_node_index);
227 j.at(
"load_value").get_to(p.load_value);
228 j.at(
"coord_min").get_to(p.coord_min);
229 j.at(
"coord_max").get_to(p.coord_max);
230 j.at(
"node_owner_id").get_to(p.node_owner_id);
237 {
"_next_patch_id", p._next_patch_id},
238 {
"global", p.global},
240 {
"is_load_values_up_to_date", p.is_load_values_up_to_date},
245 j.at(
"_next_patch_id").get_to(p._next_patch_id);
246 j.at(
"global").get_to(p.global);
248 j.at(
"is_load_values_up_to_date").get_to(p.is_load_values_up_to_date);
254 using namespace shamrock::patch;
256 std::vector<Patch> plist;
258 std::mt19937 eng(0x1111);
259 std::uniform_real_distribution<f32> split_val(0, 1);
261 using namespace shamrock::scheduler;
277 bool listchanged =
true;
280 while (listchanged) {
283 std::vector<Patch> to_add;
285 for (
Patch &p : plist) {
286 if (p.load_value > div_limit) {
296 u64 min_x = p.coord_min[0];
297 u64 min_y = p.coord_min[1];
298 u64 min_z = p.coord_min[2];
300 u64 split_x = (((p.coord_max[0] - p.coord_min[0]) + 1) / 2) - 1 + min_x;
301 u64 split_y = (((p.coord_max[1] - p.coord_min[1]) + 1) / 2) - 1 + min_y;
302 u64 split_z = (((p.coord_max[2] - p.coord_min[2]) + 1) / 2) - 1 + min_z;
304 u64 max_x = p.coord_max[0];
305 u64 max_y = p.coord_max[1];
306 u64 max_z = p.coord_max[2];
308 u32 qte_m = split_val(eng) * p.load_value;
309 u32 qte_p = p.load_value - qte_m;
311 u32 qte_mm = split_val(eng) * qte_m;
312 u32 qte_mp = qte_m - qte_mm;
314 u32 qte_pm = split_val(eng) * qte_p;
315 u32 qte_pp = qte_p - qte_pm;
317 u32 qte_mmm = split_val(eng) * qte_mm;
318 u32 qte_mmp = qte_mm - qte_mmm;
320 u32 qte_mpm = split_val(eng) * qte_mp;
321 u32 qte_mpp = qte_mp - qte_mpm;
323 u32 qte_pmm = split_val(eng) * qte_pm;
324 u32 qte_pmp = qte_pm - qte_pmm;
326 u32 qte_ppm = split_val(eng) * qte_pp;
327 u32 qte_ppp = qte_pp - qte_ppm;
442 to_add.push_back(child_mmp);
443 to_add.push_back(child_mpm);
444 to_add.push_back(child_mpp);
445 to_add.push_back(child_pmm);
446 to_add.push_back(child_pmp);
447 to_add.push_back(child_ppm);
448 to_add.push_back(child_ppp);
452 if (!to_add.empty()) {
455 plist.insert(plist.end(), to_add.begin(), to_add.end());
Function to run load balancing with the Hilbert curve.
Header file for the patch struct and related functions.
std::uint32_t u32
32 bit unsigned integer
std::uint64_t u64
64 bit unsigned integer
std::int32_t i32
32 bit signed integer
Handle the patch list of the mpi scheduler.
std::vector< shamrock::patch::Patch > local
contain the list of patch owned by the current node
void reset_local_pack_index()
reset Patch's pack index value
std::unordered_map< u64, u64 > id_patch_to_local_idx
id_patch_to_local_idx[patch_id] = index in local patch list
std::vector< shamrock::patch::Patch > global
contain the list of all patches in the simulation
void build_global()
rebuild global from the local lists of every node
void build_local_differantial(std::unordered_set< u64 > &patch_id_lst, std::vector< u64 > &to_send_idx, std::vector< u64 > &to_recv_idx)
Build the local patch list and create a differential of patches to send / recv since last time.
u64 _next_patch_id
The next available patch id.
std::tuple< u64, u64, u64, u64, u64, u64, u64, u64 > split_patch(u64 id_patch)
split the Patch having id_patch as id and return the indices of the 8 subpatches in the global vector
std::unordered_set< u64 > build_local()
select the patches owned by the node to rebuild local
std::unordered_map< u64, u64 > id_patch_to_global_idx
id_patch_to_global_idx[patch_id] = index in global patch list
void merge_patch(u64 idx0, u64 idx1, u64 idx2, u64 idx3, u64 idx4, u64 idx5, u64 idx6, u64 idx7)
merge the 8 patches at the given indices in the global vector
void build_local_idx_map()
recompute id_patch_to_local_idx
void build_global_idx_map()
recompute id_patch_to_global_idx
i32 world_rank()
Gives the rank of the current process in the MPI communicator.
constexpr u64 u64_max
u64 max value
void to_json(nlohmann::json &j, const SchedulerPatchList &p)
Serializes a SchedulerPatchList object to a JSON object.
std::vector< shamrock::patch::Patch > make_fake_patch_list(u32 total_dtcnt, u64 div_limit)
generate a fake patch list corresponding to a tree structure
void from_json(const nlohmann::json &j, SchedulerPatchList &p)
Deserializes a JSON object into a SchedulerPatchList object.
Class to handle the patch list of the mpi scheduler.
This file contains the definition for the stacktrace related functionality.
Patch object that contain generic patch information.
u64 id_patch
unique key that identify the patch