// NOTE(review): this listing is a garbled extraction — original source line
// numbers are fused into the text and the "TgridVec" template parameter is
// split across two lines. The function signature between the template header
// and the body below is missing, so the fragment is reproduced byte-identical
// and only annotated. Presumably a Model init routine — TODO confirm against
// the original source.
24template<
class Tvec,
class Tgr
idVec>
// Reject an unusable scheduler configuration up front: a split-load value of
// zero would make the split criterion meaningless.
27 if (solver.solver_config.scheduler_conf.split_load_value == 0) {
29 "Scheduler load value should be greater than 0");
32 solver.init_required_fields();
// Split/merge thresholds are taken from the solver's scheduler configuration.
35 solver.solver_config.scheduler_conf.split_load_value,
36 solver.solver_config.scheduler_conf.merge_load_value);
38 using namespace shamrock::patch;
// NOTE(review): garbled-extraction fragment; the signature line preceding the
// parameter list below is missing. Forwards the base-grid origin, cell size
// and the three per-axis cell counts to the underlying AMR grid.
51template<
class Tvec,
class Tgr
idVec>
53 TgridVec bmin, TgridVec cell_size, u32_3 cell_count) {
55 grid.make_base_grid(bmin, cell_size, {cell_count.x(), cell_count.y(), cell_count.z()});
// NOTE(review): garbled-extraction fragment of a dump/visualisation routine
// (signature and several structural lines missing). Visible behaviour:
//  - gathers block min/max integer positions (scheduler fields 0 and 1),
//  - subdivides each AMR block into side_size^3 cells on-device and computes
//    each cell's min/max corner,
//  - writes the voxel cells plus three data fields ("rho" from field 2,
//    "eint" from field 3, "vel" from field 4) through `writer`
//    — presumably a VTK writer; TODO confirm.
67template<
class Tvec,
class Tgr
idVec>
75 u32 block_size = Solver::AMRBlock::block_size;
// Number of blocks owned by this rank; cell buffers hold block_size cells
// per block.
77 u64 num_obj = sched.get_rank_count();
79 std::unique_ptr<sycl::buffer<TgridVec>> pos1 = sched.rankgather_field<TgridVec>(0);
80 std::unique_ptr<sycl::buffer<TgridVec>> pos2 = sched.rankgather_field<TgridVec>(1);
82 sycl::buffer<Tvec> pos_min_cell(num_obj * block_size);
83 sycl::buffer<Tvec> pos_max_cell(num_obj * block_size);
// Write-only, no_init: the kernel fully overwrites both cell buffers.
88 sycl::accessor cell_min{pos_min_cell, cgh, sycl::write_only, sycl::no_init};
89 sycl::accessor cell_max{pos_max_cell, cgh, sycl::write_only, sycl::no_init};
91 using Block =
typename Solver::AMRBlock;
// One work-item per block: convert the block's integer bounds to Tscal and
// lay out side_size^3 cells of equal extent inside it.
93 shambase::parallel_for(cgh, num_obj,
"rescale cells", [=](
u64 id_a) {
94 Tvec block_min = acc_p1[id_a].template convert<Tscal>();
95 Tvec block_max = acc_p2[id_a].template convert<Tscal>();
97 Tvec delta_cell = (block_max - block_min) / Block::side_size;
99 for (
u32 ix = 0; ix < Block::side_size; ix++) {
101 for (
u32 iy = 0; iy < Block::side_size; iy++) {
103 for (
u32 iz = 0; iz < Block::side_size; iz++) {
// Linear cell index within the block, as defined by the AMRBlock layout.
104 u32 i = Block::get_index({ix, iy, iz});
105 Tvec delta_val = delta_cell * Tvec{ix, iy, iz};
106 cell_min[id_a * block_size + i] = block_min + delta_val;
107 cell_max[id_a * block_size + i] = block_min + (delta_cell) + delta_val;
// Emit the computed cell geometry, then the per-cell field sections.
114 writer.write_voxel_cells(pos_min_cell, pos_max_cell, num_obj * block_size);
116 writer.add_cell_data_section();
117 writer.add_field_data_section(3);
119 std::unique_ptr<sycl::buffer<Tscal>> fields_rho = sched.rankgather_field<Tscal>(2);
120 writer.write_field(
"rho", fields_rho, num_obj * block_size);
122 std::unique_ptr<sycl::buffer<Tscal>> fields_eint = sched.rankgather_field<Tscal>(3);
123 writer.write_field(
"eint", fields_eint, num_obj * block_size);
125 std::unique_ptr<sycl::buffer<Tvec>> fields_vel = sched.rankgather_field<Tvec>(4);
126 writer.write_field(
"vel", fields_vel, num_obj * block_size);
// NOTE(review): garbled-extraction fragment; signature missing. Thin wrapper
// that advances the solver one step from t_current with timestep dt_input and
// returns its result — presumably Model::evolve_once; TODO confirm.
129template<
class Tvec,
class Tgr
idVec>
132 return solver.evolve_once(t_current, dt_input);
Header file describing a Node Instance.
sycl::queue & get_compute_queue(u32 id=0)
std::uint32_t u32
32 bit unsigned integer
std::uint64_t u64
64 bit unsigned integer
SchedulerPatchData patch_data
handle the data of the patches of the scheduler
void scheduler_step(bool do_split_merge, bool do_load_balancing)
scheduler step
SchedulerPatchList patch_list
handle the list of the patches of the scheduler
std::unordered_set< u64 > owned_patch_id
(owned_patch_id = patch_list.build_local())
std::unordered_set< u64 > build_local()
select owned patches owned by the node to rebuild local
void build_local_idx_map()
recompute id_patch_to_local_idx
Zeus scheme implementation in Shamrock (WIP)
void init()
Initialise the model and all the related data structures (patch scheduler in particular)
The AMR grid only sees the grid as an integer map.
shambase::DistributedData< PatchData > owned_data
map container for patchdata owned by the current node (layout : id_patch,data)
void throw_with_loc(std::string message, SourceLocation loc=SourceLocation{})
Throw an exception and append the source location to it.
T & get_check_ref(const std::unique_ptr< T > &ptr, SourceLocation loc=SourceLocation())
Takes a std::unique_ptr and returns a reference to the object it holds. It throws a std::runtime_error if the pointer holds nothing.
Patch object that contain generic patch information.