Shamrock 2025.10.0
Astrophysical Code
Loading...
Searching...
No Matches
Model.cpp
Go to the documentation of this file.
1// -------------------------------------------------------//
2//
3// SHAMROCK code for hydrodynamics
4// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
5// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
6// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
7//
8// -------------------------------------------------------//
9
20#include "shambase/memory.hpp"
22#include "shambase/string.hpp"
23#include "shamcomm/logs.hpp"
34#include <functional>
35#include <stdexcept>
36#include <utility>
37#include <vector>
38
39template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the signature line (original line 40) was lost in extraction.
// Per the Doxygen index, this is Model<Tvec, SPHKernel>::init(): initialise
// the model and all the related data structures (patch scheduler in
// particular).
 41
// Guard: a zero split-load value would make patch splitting meaningless.
// NOTE(review): the throw statement line (43) is missing; presumably it calls
// shambase::throw_with_loc with the message below — confirm against the repo.
 42    if (solver.solver_config.scheduler_conf.split_load_value == 0) {
 44            "Scheduler load value should be greater than 0");
 45    }
 46
// Register solver fields, then build the MPI patch scheduler with the
// configured split/merge load thresholds.
 47    solver.init_required_fields();
 48    ctx.init_sched(
 49        solver.solver_config.scheduler_conf.split_load_value,
 50        solver.solver_config.scheduler_conf.merge_load_value);
 51
 52    using namespace shamrock::patch;
 53
// get_check_ref throws std::runtime_error if ctx.sched is null (memory.hpp).
 54    PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
 55
// Create the root patch covering the whole simulation domain.
 56    sched.add_root_patch();
 57
 58    shamlog_debug_ln("Sys", "build local scheduler tables");
// NOTE(review): lines 59-60 are missing from the scrape (likely the local
// table rebuild calls). The load value of each owned patch is its object
// (particle) count.
 61    sched.update_local_load_value([&](shamrock::patch::Patch p) {
 62        return sched.patch_data.owned_data.get(p.id_patch).get_obj_cnt();
 63    });
// Finally set up the ghost-zone layout and the solver task graph.
 64    solver.init_ghost_layout();
 65
 66    solver.init_solver_graph();
 67}
68
69template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the signature line (70) was lost in extraction; from the call
// site below (totmass / get_total_part_count()) this is the global particle
// count getter. Sums the per-rank counts over all MPI ranks via an
// allreduce, so every rank gets the same total.
 71    PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
 72    return shamalgs::collective::allreduce_sum(sched.get_rank_count());
 73}
74
75template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the signature line (76) was lost in extraction. Converts a
// total mass into a per-particle mass by dividing by the global particle
// count (collective call — must be invoked on all ranks). No guard against
// a zero particle count is visible here.
 77    return totmass / get_total_part_count();
 78}
79
80template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the first signature line (81) was lost in extraction; only
// the trailing-return continuation survives below. Snaps the requested
// coordinate box to the closest box that an ideal face-centred-cubic (FCC)
// lattice of spacing dr tiles exactly.
 82    Tscal dr, std::pair<Tvec, Tvec> box) -> std::pair<Tvec, Tvec> {
 83    StackEntry stack_loc{};
// Delegate to the generic FCC generator (pair -> tuple -> pair adaptation).
 84    auto [a, b] = generic::setup::generators::get_ideal_fcc_box<Tscal>(
 85        dr, std::make_tuple(box.first, box.second));
 86    return {a, b};
 87}
88
89template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the first signature line (90) was lost in extraction; only
// the trailing-return continuation survives below. This body is a verbatim
// duplicate of the previous overload — likely the same helper exposed under
// two method names; consider having one forward to the other in the repo.
 91    Tscal dr, std::pair<Tvec, Tvec> box) -> std::pair<Tvec, Tvec> {
 92    StackEntry stack_loc{};
// Delegate to the generic FCC generator (pair -> tuple -> pair adaptation).
 93    auto [a, b] = generic::setup::generators::get_ideal_fcc_box<Tscal>(
 94        dr, std::make_tuple(box.first, box.second));
 95    return {a, b};
 96}
97
98template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the signature line (99) was lost in extraction. Fills the
// requested box with particles on an FCC lattice of spacing dr, inserting
// them into the scheduler slice by slice so the patch tree can split and
// load-balance between insertions.
 100    Tscal dr, std::pair<Tvec, Tvec> _box) {
 101    StackEntry stack_loc{};
 102
// NOTE(review): line 103 is missing from the scrape; it presumably converts
// the `_box` pair into the coordinate-range object named `box` used below
// (box.lower / box.upper / box.contain_pos) — confirm against the repo.
 104
 105    using namespace shamrock::patch;
 106
 107    PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
 108
// Accumulates per-patch insertion reports; flushed once at the end.
 109    std::string log = "";
 110
// Generate all FCC lattice positions inside the box, then chop the list
// into slices so that each scheduler step only has to absorb a bounded
// number of new particles.
 111    auto make_sliced = [&]() {
 112        std::vector<Tvec> vec_lst;
 113        generic::setup::generators::add_particles_fcc(
 114            dr,
 115            std::make_tuple(box.lower, box.upper),
 116            [&](Tvec r) {
 117                return box.contain_pos(r);
 118            },
 119            [&](Tvec r, Tscal h) {
 120                vec_lst.push_back(r);
 121            });
 122
 123        std::vector<std::vector<Tvec>> sliced_buf;
 124
// Slice size: four times the patch-split threshold, so a slice can trigger
// at most a few splits per scheduler step.
 125        u32 sz_buf = sched.crit_patch_split * 4;
 126
 127        std::vector<Tvec> cur_buf;
 128        for (u32 i = 0; i < vec_lst.size(); i++) {
 129            cur_buf.push_back(vec_lst[i]);
 130
// NOTE(review): the flush condition is `> sz_buf`, so slices actually hold
// sz_buf + 1 elements — harmless here, but an off-by-one worth noting.
 131            if (cur_buf.size() > sz_buf) {
 132                sliced_buf.push_back(std::exchange(cur_buf, std::vector<Tvec>{}));
 133            }
 134        }
 135
// Flush the final partial slice, if any.
 136        if (cur_buf.size() > 0) {
 137            sliced_buf.push_back(std::exchange(cur_buf, std::vector<Tvec>{}));
 138        }
 139
 140        return sliced_buf;
 141    };
 142
 143    std::vector<std::vector<Tvec>> sliced_buf = make_sliced();
 144
// For each slice: give every local patch the subset of positions that fall
// inside it, then let the scheduler split/merge and load-balance.
 145    for (std::vector<Tvec> to_ins : sliced_buf) {
 146
 147        sched.for_each_local_patchdata([&](const Patch &p, PatchDataLayer &pdat) {
// NOTE(review): line 148 (the declaration of `ptransf`) is missing from the
// scrape; it is the patch transform obtained from the line below.
 149                = sched.get_sim_box().template get_patch_transform<Tvec>();
 150
// Coordinate range of this patch in object (position) space.
 151            shammath::CoordRange<Tvec> patch_coord = ptransf.to_obj_coord(p);
 152
// Keep only the slice positions that land inside this patch.
 153            std::vector<Tvec> vec_acc;
 154            for (Tvec r : to_ins) {
 155                if (patch_coord.contain_pos(r)) {
 156                    vec_acc.push_back(r);
 157                }
 158            }
 159
// Nothing to insert into this patch for this slice.
 160            if (vec_acc.size() == 0) {
 161                return;
 162            }
 163
// NOTE(review): line 166 (the first format argument, presumably
// shamcomm::world_rank()) is missing from the scrape.
 164            log += shambase::format(
 165                "\n    rank = {}  patch id={}, add N={} particles, coords = {} {}",
 167                p.id_patch,
 168                vec_acc.size(),
 169                patch_coord.lower,
 170                patch_coord.upper);
 171
// Build a temporary patch-data layer with the scheduler's layout, sized to
// the accepted positions, all fields zero-initialised.
 172            PatchDataLayer tmp(sched.get_layout_ptr_old());
 173            tmp.resize(vec_acc.size());
 174            tmp.fields_raz();
 175
// Write the positions into the "xyz" field through a SYCL host buffer.
 176            {
 177                u32 len = vec_acc.size();
 178                PatchDataField<Tvec> &f = tmp.template get_field<Tvec>(
 179                    sched.pdl_old().template get_field_idx<Tvec>("xyz"));
 180                sycl::buffer<Tvec> buf(vec_acc.data(), len);
 181                f.override(buf, len);
 182            }
 183
// Uniform initial smoothing length: kernel h-factor times lattice spacing.
 184            {
 185                PatchDataField<Tscal> &f = tmp.template get_field<Tscal>(
 186                    sched.pdl_old().template get_field_idx<Tscal>("hpart"));
 187                using Kernel = SPHKernel<Tscal>;
 188                f.override(Kernel::hfactd * dr);
 189            }
 190
 191            pdat.insert_elements(tmp);
 192        });
 193
// Sanity-check data locality, then run a scheduler step with both
// split/merge and load balancing enabled.
 194        sched.check_patchdata_locality_correctness();
 195        sched.scheduler_step(true, true);
 196    }
 197
// Rebuild the owned-patch set and recompute load values after all inserts.
// NOTE(review): line 199 is missing from the scrape (likely
// patch_list.build_local_idx_map(), per the Doxygen index).
 198    sched.owned_patch_id = sched.patch_list.build_local();
 200    sched.update_local_load_value([&](Patch p) {
 201        return sched.patch_data.owned_data.get(p.id_patch).get_obj_cnt();
 202    });
 203
 204    shamlog_debug_ln("setup", log);
 205}
206
207template<class Tvec, template<class> class SPHKernel>
// NOTE(review): the signature line (208) was lost in extraction. This body is
// a byte-for-byte duplicate of the previous FCC cube-insertion routine —
// strong candidate for factoring into one private helper in the repo.
// Fills the requested box with particles on an FCC lattice of spacing dr,
// inserting them into the scheduler slice by slice.
 209    Tscal dr, std::pair<Tvec, Tvec> _box) {
 210    StackEntry stack_loc{};
 211
// NOTE(review): line 212 is missing from the scrape; it presumably converts
// `_box` into the coordinate-range object named `box` used below — confirm.
 213
 214    using namespace shamrock::patch;
 215
 216    PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
 217
// Accumulates per-patch insertion reports; flushed once at the end.
 218    std::string log = "";
 219
// Generate all FCC lattice positions inside the box, then chop them into
// bounded slices so each scheduler step absorbs a limited insert.
 220    auto make_sliced = [&]() {
 221        std::vector<Tvec> vec_lst;
 222        generic::setup::generators::add_particles_fcc(
 223            dr,
 224            std::make_tuple(box.lower, box.upper),
 225            [&](Tvec r) {
 226                return box.contain_pos(r);
 227            },
 228            [&](Tvec r, Tscal h) {
 229                vec_lst.push_back(r);
 230            });
 231
 232        std::vector<std::vector<Tvec>> sliced_buf;
 233
// Slice size: four times the patch-split threshold.
 234        u32 sz_buf = sched.crit_patch_split * 4;
 235
 236        std::vector<Tvec> cur_buf;
 237        for (u32 i = 0; i < vec_lst.size(); i++) {
 238            cur_buf.push_back(vec_lst[i]);
 239
// NOTE(review): `> sz_buf` means slices hold sz_buf + 1 elements (off-by-one,
// harmless here).
 240            if (cur_buf.size() > sz_buf) {
 241                sliced_buf.push_back(std::exchange(cur_buf, std::vector<Tvec>{}));
 242            }
 243        }
 244
// Flush the final partial slice, if any.
 245        if (cur_buf.size() > 0) {
 246            sliced_buf.push_back(std::exchange(cur_buf, std::vector<Tvec>{}));
 247        }
 248
 249        return sliced_buf;
 250    };
 251
 252    std::vector<std::vector<Tvec>> sliced_buf = make_sliced();
 253
// For each slice: route positions to the local patches that contain them,
// then let the scheduler split/merge and load-balance.
 254    for (std::vector<Tvec> to_ins : sliced_buf) {
 255
 256        sched.for_each_local_patchdata([&](const Patch &p, PatchDataLayer &pdat) {
// NOTE(review): line 257 (the declaration of `ptransf`) is missing from the
// scrape; it is the patch transform obtained from the line below.
 258                = sched.get_sim_box().template get_patch_transform<Tvec>();
 259
// Coordinate range of this patch in object (position) space.
 260            shammath::CoordRange<Tvec> patch_coord = ptransf.to_obj_coord(p);
 261
// Keep only the slice positions that land inside this patch.
 262            std::vector<Tvec> vec_acc;
 263            for (Tvec r : to_ins) {
 264                if (patch_coord.contain_pos(r)) {
 265                    vec_acc.push_back(r);
 266                }
 267            }
 268
 269            if (vec_acc.size() == 0) {
 270                return;
 271            }
 272
// NOTE(review): line 275 (the first format argument, presumably
// shamcomm::world_rank()) is missing from the scrape.
 273            log += shambase::format(
 274                "\n    rank = {}  patch id={}, add N={} particles, coords = {} {}",
 276                p.id_patch,
 277                vec_acc.size(),
 278                patch_coord.lower,
 279                patch_coord.upper);
 280
// Temporary patch-data layer sized to the accepted positions, zeroed fields.
 281            PatchDataLayer tmp(sched.get_layout_ptr_old());
 282            tmp.resize(vec_acc.size());
 283            tmp.fields_raz();
 284
// Write positions into the "xyz" field through a SYCL host buffer.
 285            {
 286                u32 len = vec_acc.size();
 287                PatchDataField<Tvec> &f = tmp.template get_field<Tvec>(
 288                    sched.pdl_old().template get_field_idx<Tvec>("xyz"));
 289                sycl::buffer<Tvec> buf(vec_acc.data(), len);
 290                f.override(buf, len);
 291            }
 292
// Uniform initial smoothing length: kernel h-factor times lattice spacing.
 293            {
 294                PatchDataField<Tscal> &f = tmp.template get_field<Tscal>(
 295                    sched.pdl_old().template get_field_idx<Tscal>("hpart"));
 296                using Kernel = SPHKernel<Tscal>;
 297                f.override(Kernel::hfactd * dr);
 298            }
 299
 300            pdat.insert_elements(tmp);
 301        });
 302
// Locality sanity check, then a scheduler step with split/merge and load
// balancing both enabled.
 303        sched.check_patchdata_locality_correctness();
 304        sched.scheduler_step(true, true);
 305    }
 306
// Rebuild the owned-patch set and recompute load values after all inserts.
// NOTE(review): line 308 is missing from the scrape (likely
// patch_list.build_local_idx_map(), per the Doxygen index).
 307    sched.owned_patch_id = sched.patch_list.build_local();
 309    sched.update_local_load_value([&](Patch p) {
 310        return sched.patch_data.owned_data.get(p.id_patch).get_obj_cnt();
 311    });
 312
 313    shamlog_debug_ln("setup", log);
 314}
315
316// Explicit template instantiations for all supported kernel types
Header file describing a Node Instance.
MPI scheduler.
double f64
Alias for double.
std::uint32_t u32
32 bit unsigned integer
std::uint64_t u64
64 bit unsigned integer
The MPI scheduler.
SchedulerPatchData patch_data
handle the data of the patches of the scheduler
u64 crit_patch_split
splitting limit (if load value > crit_patch_split => patch split)
void scheduler_step(bool do_split_merge, bool do_load_balancing)
scheduler step
SchedulerPatchList patch_list
handle the list of the patches of the scheduler
std::unordered_set< u64 > owned_patch_id
(owned_patch_id = patch_list.build_local())
void add_root_patch()
add patch to the scheduler
std::unordered_set< u64 > build_local()
select owned patches owned by the node to rebuild local
void build_local_idx_map()
recompute id_patch_to_local_idx
The GSPH Model class.
Definition Model.hpp:63
void init()
Initialise the model and all the related data structures (patch scheduler in particular)
Definition Model.cpp:40
PatchDataLayer container class, the layout is described in patchdata_layout.
shambase::DistributedData< PatchData > owned_data
map container for patchdata owned by the current node (layout : id_patch,data)
This header file contains utility functions related to exception handling in the code.
GSPH Model class - high-level interface for GSPH simulations.
void throw_with_loc(std::string message, SourceLocation loc=SourceLocation{})
Throw an exception and append the source location to it.
T & get_check_ref(const std::unique_ptr< T > &ptr, SourceLocation loc=SourceLocation())
Takes a std::unique_ptr and returns a reference to the object it holds. It throws a std::runtime_error if the pointer does not hold an object.
Definition memory.hpp:110
i32 world_rank()
Gives the rank of the current process in the MPI communicator.
Definition worldInfo.cpp:40
sph kernels
This file contains the definition for the stacktrace related functionality.
Patch object that contain generic patch information.
Definition Patch.hpp:33