Shamrock 2025.10.0
Astrophysical Code
Loading...
Searching...
No Matches
Model.cpp
Go to the documentation of this file.
1// -------------------------------------------------------//
2//
3// SHAMROCK code for hydrodynamics
4// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
5// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
6// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
7//
8// -------------------------------------------------------//
9
17#include "shambase/memory.hpp"
24#include <string>
25
26template<class Tvec, class TgridVec>
// NOTE(review): the function signature line (listing line 27) is missing from
// this doxygen extract. The tooltip at the bottom of the page identifies it as
// `void init()` (Model.cpp:27): "Initialise the model and all the related data
// structures (patch scheduler in particular)" -- confirm against the repository.
28
    // Reject a zero split load value up front: the scheduler could never split
    // patches with it. NOTE(review): the `shambase::throw_with_loc` call line
    // (listing line 30) is missing from this extract; only its message argument
    // survives below.
29    if (solver.solver_config.scheduler_conf.split_load_value == 0) {
31            "Scheduler load value should be greater than 0");
32    }
33
    // Register the solver's required patch fields before the scheduler exists.
34    solver.init_required_fields();
35    // solver.init_ghost_layout();
    // Create the patch scheduler with the configured split/merge thresholds.
36    ctx.init_sched(
37        solver.solver_config.scheduler_conf.split_load_value,
38        solver.solver_config.scheduler_conf.merge_load_value);
39
40    using namespace shamrock::patch;
41
    // get_check_ref throws std::runtime_error if ctx.sched is empty, i.e. if
    // the scheduler creation above failed.
42    PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
43
44    // sched.add_root_patch();
45
46    // std::cout << "build local" << std::endl;
47    // sched.owned_patch_id = sched.patch_list.build_local();
48    // sched.patch_list.build_local_idx_map();
49    // sched.update_local_dtcnt_value();
50    // sched.update_local_load_value();
51
    // Build the solver's internal solver graph now that fields are registered.
52    solver.init_solver_graph();
53}
54
55template<class Tvec, class TgridVec>
// NOTE(review): the first line of the signature (listing line 56) is missing
// from this extract; from the parameter list below this is a make_base_grid-style
// entry point taking the grid origin `bmin`, a per-block extent `cell_size`, and
// a block count `cell_count` -- confirm the exact name against the repository.
57    TgridVec bmin, TgridVec cell_size, u32_3 cell_count) {
58
    // Validate each axis: the block extent must be at least AMRBlock::Nside
    // (the check rejects values strictly below Nside, although the message says
    // "larger than" -- wording mismatch, flagged for follow-up).
    // NOTE(review): the `throw_with_loc(...format(` lines (listing lines 60, 66,
    // 72) are missing from this extract; only the format arguments survive.
59    if (cell_size.x() < Solver::Config::AMRBlock::Nside) {
61            "the x block size must be larger than {}, currently : cell_size = {}",
62            Solver::Config::AMRBlock::Nside,
63            cell_size));
64    }
65    if (cell_size.y() < Solver::Config::AMRBlock::Nside) {
67            "the y block size must be larger than {}, currently : cell_size = {}",
68            Solver::Config::AMRBlock::Nside,
69            cell_size));
70    }
71    if (cell_size.z() < Solver::Config::AMRBlock::Nside) {
73            "the z block size must be larger than {}, currently : cell_size = {}",
74            Solver::Config::AMRBlock::Nside,
75            cell_size));
76    }
77
    // Delegate the actual base-grid construction to the AMR setup module.
78    modules::AMRSetup<Tvec, TgridVec> setup(ctx, solver.solver_config, solver.storage);
79    setup.make_base_grid(bmin, cell_size, {cell_count[0], cell_count[1], cell_count[2]});
    // Early return: everything below is dead code kept as a reference to the
    // previous cell-injection implementation.
80    return;
81
82    /* Old cell injection
83    shamrock::amr::AMRGrid<TgridVec, 3> grid(shambase::get_check_ref(ctx.sched));
84    grid.make_base_grid(bmin, cell_size, {cell_count.x(), cell_count.y(), cell_count.z()});
85
86    PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
87
88    sched.owned_patch_id = sched.patch_list.build_local();
89    sched.patch_list.build_local_idx_map();
90    sched.update_local_load_value([&](shamrock::patch::Patch p) {
91        return sched.patch_data.owned_data.get(p.id_patch).get_obj_cnt();
92    });
93    sched.scheduler_step(true, true);
94    */
95}
96
97template<class Tvec, class TgridVec>
// NOTE(review): the function signature line (listing line 98) is missing from
// this extract. The body writes a legacy-VTK dump of the AMR cell grid and its
// hydro/dust fields to `filename` -- confirm the exact name (likely a dump_vtk
// member) and parameters against the repository.
99
100    StackEntry stack_loc{};
    // Legacy VTK writer; second argument true presumably selects binary mode --
    // TODO confirm against LegacyVtkWriter's constructor.
101    shamrock::LegacyVtkWriter writer(filename, true, shamrock::UnstructuredGrid);
102
103    try {
104
105        PatchScheduler &sched = shambase::get_check_ref(ctx.sched);
106
        // Number of AMR cells packed in one block.
107        u32 block_size = Solver::AMRBlock::block_size;
108
        // Number of blocks owned by this MPI rank.
109        u64 num_obj = sched.get_rank_count();
110
        // Gather per-block min (field 0) and max (field 1) grid coordinates
        // across the rank's patches.
111        std::unique_ptr<sycl::buffer<TgridVec>> pos1 = sched.rankgather_field<TgridVec>(0);
112        std::unique_ptr<sycl::buffer<TgridVec>> pos2 = sched.rankgather_field<TgridVec>(1);
113
        // Per-cell bounding boxes: one entry for every cell of every block.
114        sycl::buffer<Tvec> pos_min_cell(num_obj * block_size);
115        sycl::buffer<Tvec> pos_max_cell(num_obj * block_size);
116
117        if (num_obj > 0) {
118
            // Device kernel: expand each block's [min, max] box into the
            // bounding boxes of its side_size^3 cells.
119            shamsys::instance::get_compute_queue().submit([&, block_size](sycl::handler &cgh) {
120                sycl::accessor acc_p1{shambase::get_check_ref(pos1), cgh, sycl::read_only};
121                sycl::accessor acc_p2{shambase::get_check_ref(pos2), cgh, sycl::read_only};
122                sycl::accessor cell_min{pos_min_cell, cgh, sycl::write_only, sycl::no_init};
123                sycl::accessor cell_max{pos_max_cell, cgh, sycl::write_only, sycl::no_init};
124
125                using Block = typename Solver::AMRBlock;
126
127                shambase::parallel_for(cgh, num_obj, "rescale cells", [=](u64 id_a) {
                    // Convert the integer grid coordinates to the scalar type.
128                    Tvec block_min = acc_p1[id_a].template convert<Tscal>();
129                    Tvec block_max = acc_p2[id_a].template convert<Tscal>();
130
                    // Uniform cell extent inside this block.
131                    Tvec delta_cell = (block_max - block_min) / Block::side_size;
132#pragma unroll
133                    for (u32 ix = 0; ix < Block::side_size; ix++) {
134#pragma unroll
135                        for (u32 iy = 0; iy < Block::side_size; iy++) {
136#pragma unroll
137                            for (u32 iz = 0; iz < Block::side_size; iz++) {
                                // Flat index of cell (ix,iy,iz) inside the block.
138                                u32 i = Block::get_index({ix, iy, iz});
139                                Tvec delta_val = delta_cell * Tvec{ix, iy, iz};
140                                cell_min[id_a * block_size + i] = block_min + delta_val;
141                                cell_max[id_a * block_size + i]
142                                    = block_min + (delta_cell) + delta_val;
143                            }
144                        }
145                    }
146                });
147            });
148        }
149
150        writer.write_voxel_cells(pos_min_cell, pos_max_cell, num_obj * block_size);
151
152        writer.add_cell_data_section();
153
        // Three base fields (rho, rhovel, rhoetot) plus two per dust species.
154        u32 fieldnum = 3;
155        if (solver.solver_config.is_dust_on()) {
156            u32 ndust = solver.solver_config.dust_config.ndust;
157            fieldnum += 2 * ndust;
158        }
159        writer.add_field_data_section(fieldnum);
160
        // NOTE(review): field indices 2/3/4 are assumed to match the layout set
        // up in init_required_fields() -- verify if that layout changes.
161        std::unique_ptr<sycl::buffer<Tscal>> fields_rho = sched.rankgather_field<Tscal>(2);
162        writer.write_field("rho", fields_rho, num_obj * block_size);
163
164        std::unique_ptr<sycl::buffer<Tvec>> fields_vel = sched.rankgather_field<Tvec>(3);
165        writer.write_field("rhovel", fields_vel, num_obj * block_size);
166
167        std::unique_ptr<sycl::buffer<Tscal>> fields_eint = sched.rankgather_field<Tscal>(4);
168        writer.write_field("rhoetot", fields_eint, num_obj * block_size);
169
170        if (solver.solver_config.is_dust_on()) {
171            u32 ndust = solver.solver_config.dust_config.ndust;
172
            // Dust fields are looked up by name rather than hard-coded index.
173            shamrock::patch::PatchDataLayerLayout &pdl = solver.scheduler().pdl_old();
174            const u32 irho_dust = pdl.get_field_idx<Tscal>("rho_dust");
175            const u32 irhovel_dust = pdl.get_field_idx<Tvec>("rhovel_dust");
176
177            std::unique_ptr<sycl::buffer<Tscal>> fields_rho_dust
178                = sched.rankgather_field<Tscal>(irho_dust);
179            // writer.write_field("rho_dust", fields_rho_dust, ndust*num_obj*block_size);
180
181            if (fields_rho_dust) {
182                u32 nobj = fields_rho_dust->size();
183                u32 nsplit = ndust;
184
                // The gathered buffer interleaves the ndust species; extract
                // one species per pass and write it as its own VTK field.
185                for (u32 off = 0; off < nsplit; off++) {
186
187                    sycl::buffer<Tscal> partition(nobj / nsplit);
188
                    // NOTE(review): the line fetching the queue (listing line
                    // 189, presumably shamsys::instance::get_compute_queue())
                    // is missing from this extract.
190                        .submit([&, off, nsplit](sycl::handler &cgh) {
191                            sycl::accessor out{partition, cgh, sycl::write_only, sycl::no_init};
192                            sycl::accessor in{*fields_rho_dust, cgh, sycl::read_only};
193
194                            shambase::parallel_for(
195                                cgh, nobj / nsplit, "split field for dump", [=](u64 i) {
196                                    out[i] = in[i * nsplit + off];
197                                });
198                        })
199                        .wait();
200
201                    writer.write_field(
202                        std::string("rho_dust") + std::to_string(off),
203                        partition,
204                        num_obj * block_size);
205                }
206            }
207
208            std::unique_ptr<sycl::buffer<Tvec>> fields_vel_dust
209                = sched.rankgather_field<Tvec>(irhovel_dust);
210            if (fields_vel_dust) {
211                u32 nobj = fields_vel_dust->size();
212                u32 nsplit = ndust;
213
                // Same per-species de-interleaving as for rho_dust above.
214                for (u32 off = 0; off < nsplit; off++) {
215
216                    sycl::buffer<Tvec> partition(nobj / nsplit);
217
                    // NOTE(review): the queue-fetch line (listing line 218) is
                    // missing from this extract, as above.
219                        .submit([&, off, nsplit](sycl::handler &cgh) {
220                            sycl::accessor out{partition, cgh, sycl::write_only, sycl::no_init};
221                            sycl::accessor in{*fields_vel_dust, cgh, sycl::read_only};
222
223                            shambase::parallel_for(
224                                cgh, nobj / nsplit, "split field for dump", [=](u64 i) {
225                                    out[i] = in[i * nsplit + off];
226                                });
227                        })
228                        .wait();
229
230                    writer.write_field(
231                        std::string("rhovel_dust") + std::to_string(off),
232                        partition,
233                        num_obj * block_size);
234                }
235            }
236        }
237
    // NOTE(review): exceptions are caught by value here (slicing risk for
    // std::exception subclasses) -- best practice is `catch (const T &e)`.
    // The log strings also contain typos ("catched", "unknwon"); both left
    // untouched here since they are runtime behavior, flagged for follow-up.
238    } catch (std::runtime_error e) {
239        logger::err_ln(
240            "Godunov",
241            "std::runtime_error catched while MPI file open -> unrecoverable\n what():\n",
242            e.what());
243    } catch (std::exception e) {
244        logger::err_ln(
245            "Godunov",
246            "exception catched while MPI file open -> unrecoverable\n what():\n",
247            e.what());
248    } catch (...) {
249        logger::err_ln("Godunov", "something unknwon catched while MPI file open -> unrecoverable");
250    }
251}
252
Header file describing a Node Instance.
sycl::queue & get_compute_queue(u32 id=0)
MPI scheduler.
std::uint32_t u32
32 bit unsigned integer
std::uint64_t u64
64 bit unsigned integer
The MPI scheduler.
void init()
Initialise the model and all the related data structures (patch scheduler in particular)
Definition Model.cpp:27
u32 get_field_idx(const std::string &field_name) const
Get the field id if matching name & type.
void throw_with_loc(std::string message, SourceLocation loc=SourceLocation{})
Throw an exception and append the source location to it.
T & get_check_ref(const std::unique_ptr< T > &ptr, SourceLocation loc=SourceLocation())
Takes a std::unique_ptr and returns a reference to the object it holds. It throws a std::runtime_error if the pointer does not hold an object.
Definition memory.hpp:110