26template<
class Tvec,
class Tgr
idVec>
29 if (solver.solver_config.scheduler_conf.split_load_value == 0) {
31 "Scheduler load value should be greater than 0");
34 solver.init_required_fields();
37 solver.solver_config.scheduler_conf.split_load_value,
38 solver.solver_config.scheduler_conf.merge_load_value);
40 using namespace shamrock::patch;
52 solver.init_solver_graph();
55template<
class Tvec,
class Tgr
idVec>
57 TgridVec bmin, TgridVec cell_size, u32_3 cell_count) {
59 if (cell_size.x() < Solver::Config::AMRBlock::Nside) {
61 "the x block size must be larger than {}, currently : cell_size = {}",
62 Solver::Config::AMRBlock::Nside,
65 if (cell_size.y() < Solver::Config::AMRBlock::Nside) {
67 "the y block size must be larger than {}, currently : cell_size = {}",
68 Solver::Config::AMRBlock::Nside,
71 if (cell_size.z() < Solver::Config::AMRBlock::Nside) {
73 "the z block size must be larger than {}, currently : cell_size = {}",
74 Solver::Config::AMRBlock::Nside,
78 modules::AMRSetup<Tvec, TgridVec> setup(ctx, solver.solver_config, solver.storage);
79 setup.make_base_grid(bmin, cell_size, {cell_count[0], cell_count[1], cell_count[2]});
97template<
class Tvec,
class Tgr
idVec>
107 u32 block_size = Solver::AMRBlock::block_size;
109 u64 num_obj = sched.get_rank_count();
111 std::unique_ptr<sycl::buffer<TgridVec>> pos1 = sched.rankgather_field<TgridVec>(0);
112 std::unique_ptr<sycl::buffer<TgridVec>> pos2 = sched.rankgather_field<TgridVec>(1);
114 sycl::buffer<Tvec> pos_min_cell(num_obj * block_size);
115 sycl::buffer<Tvec> pos_max_cell(num_obj * block_size);
122 sycl::accessor cell_min{pos_min_cell, cgh, sycl::write_only, sycl::no_init};
123 sycl::accessor cell_max{pos_max_cell, cgh, sycl::write_only, sycl::no_init};
125 using Block =
typename Solver::AMRBlock;
127 shambase::parallel_for(cgh, num_obj,
"rescale cells", [=](
u64 id_a) {
128 Tvec block_min = acc_p1[id_a].template convert<Tscal>();
129 Tvec block_max = acc_p2[id_a].template convert<Tscal>();
131 Tvec delta_cell = (block_max - block_min) / Block::side_size;
133 for (
u32 ix = 0; ix < Block::side_size; ix++) {
135 for (
u32 iy = 0; iy < Block::side_size; iy++) {
137 for (
u32 iz = 0; iz < Block::side_size; iz++) {
138 u32 i = Block::get_index({ix, iy, iz});
139 Tvec delta_val = delta_cell * Tvec{ix, iy, iz};
140 cell_min[id_a * block_size + i] = block_min + delta_val;
141 cell_max[id_a * block_size + i]
142 = block_min + (delta_cell) + delta_val;
150 writer.write_voxel_cells(pos_min_cell, pos_max_cell, num_obj * block_size);
152 writer.add_cell_data_section();
155 if (solver.solver_config.is_dust_on()) {
156 u32 ndust = solver.solver_config.dust_config.ndust;
157 fieldnum += 2 * ndust;
159 writer.add_field_data_section(fieldnum);
161 std::unique_ptr<sycl::buffer<Tscal>> fields_rho = sched.rankgather_field<Tscal>(2);
162 writer.write_field(
"rho", fields_rho, num_obj * block_size);
164 std::unique_ptr<sycl::buffer<Tvec>> fields_vel = sched.rankgather_field<Tvec>(3);
165 writer.write_field(
"rhovel", fields_vel, num_obj * block_size);
167 std::unique_ptr<sycl::buffer<Tscal>> fields_eint = sched.rankgather_field<Tscal>(4);
168 writer.write_field(
"rhoetot", fields_eint, num_obj * block_size);
170 if (solver.solver_config.is_dust_on()) {
171 u32 ndust = solver.solver_config.dust_config.ndust;
177 std::unique_ptr<sycl::buffer<Tscal>> fields_rho_dust
178 = sched.rankgather_field<Tscal>(irho_dust);
181 if (fields_rho_dust) {
182 u32 nobj = fields_rho_dust->size();
185 for (
u32 off = 0; off < nsplit; off++) {
187 sycl::buffer<Tscal> partition(nobj / nsplit);
190 .submit([&, off, nsplit](sycl::handler &cgh) {
191 sycl::accessor out{partition, cgh, sycl::write_only, sycl::no_init};
192 sycl::accessor in{*fields_rho_dust, cgh, sycl::read_only};
194 shambase::parallel_for(
195 cgh, nobj / nsplit,
"split field for dump", [=](
u64 i) {
196 out[i] = in[i * nsplit + off];
202 std::string(
"rho_dust") + std::to_string(off),
204 num_obj * block_size);
208 std::unique_ptr<sycl::buffer<Tvec>> fields_vel_dust
209 = sched.rankgather_field<Tvec>(irhovel_dust);
210 if (fields_vel_dust) {
211 u32 nobj = fields_vel_dust->size();
214 for (
u32 off = 0; off < nsplit; off++) {
216 sycl::buffer<Tvec> partition(nobj / nsplit);
219 .submit([&, off, nsplit](sycl::handler &cgh) {
220 sycl::accessor out{partition, cgh, sycl::write_only, sycl::no_init};
221 sycl::accessor in{*fields_vel_dust, cgh, sycl::read_only};
223 shambase::parallel_for(
224 cgh, nobj / nsplit,
"split field for dump", [=](
u64 i) {
225 out[i] = in[i * nsplit + off];
231 std::string(
"rhovel_dust") + std::to_string(off),
233 num_obj * block_size);
238 }
catch (std::runtime_error e) {
241 "std::runtime_error catched while MPI file open -> unrecoverable\n what():\n",
243 }
catch (std::exception e) {
246 "exception catched while MPI file open -> unrecoverable\n what():\n",
249 logger::err_ln(
"Godunov",
"something unknwon catched while MPI file open -> unrecoverable");
Header file describing a Node Instance.
sycl::queue & get_compute_queue(u32 id=0)
std::uint32_t u32
32-bit unsigned integer
std::uint64_t u64
64-bit unsigned integer
void init()
Initialise the model and all the related data structures (patch scheduler in particular)
u32 get_field_idx(const std::string &field_name) const
Get the field id if matching name & type.
void throw_with_loc(std::string message, SourceLocation loc=SourceLocation{})
Throw an exception and append the source location to it.
T & get_check_ref(const std::unique_ptr< T > &ptr, SourceLocation loc=SourceLocation())
Takes a std::unique_ptr and returns a reference to the object it holds. It throws a std::runtime_error if the pointer does not hold an object.