Shamrock 2025.10.0
Astrophysical Code
Loading...
Searching...
No Matches
TransportStep.cpp
Go to the documentation of this file.
1// -------------------------------------------------------//
2//
3// SHAMROCK code for hydrodynamics
4// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
5// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
6// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
7//
8// -------------------------------------------------------//
9
22
// --- build the cell-centered advected state Q ------------------------------
// Packs, for every cell of every merged (patch + ghost) patchdata, an
// 8-component vector Q = {rho, v.x, v.y, v.z, vxp.x, vyp.y, vzp.z, eint-term},
// mass-weighted or not depending on solver_config.use_consistent_transport.
// NOTE(review): the function signature (doxygen line 24) was elided by the
// extraction — confirm the exact member name against the repository.
23template<class Tvec, class TgridVec>
25 StackEntry stack_loc{};
26
27 using namespace shamrock::patch;
28 using namespace shamrock;
29 using namespace shammath;
30 using MergedPDat = shamrock::MergedPatchData;
31
32 using Block = typename Config::AMRBlock;
33
// Field indices of the merged ghost layout (statement start elided at line 34).
35 = shambase::get_check_ref(storage.ghost_layout.get());
36 u32 irho_interf = ghost_layout.get_field_idx<Tscal>("rho");
37 u32 ieint_interf = ghost_layout.get_field_idx<Tscal>("eint");
38 u32 ivel_interf = ghost_layout.get_field_idx<Tvec>("vel");
39
// Allocate Q sized to the merged (ghost-including) element count of each patch.
40 shamrock::SchedulerUtility utility(scheduler());
41 storage.Q.set(
42 utility.make_compute_field<sycl::vec<Tscal, 8>>("Q", Block::block_size, [&](u64 id) {
43 return storage.merged_patchdata_ghost.get().get(id).total_elements;
44 }));
45
46 ComputeField<sycl::vec<Tscal, 8>> &Q = storage.Q.get();
47
// Face-shifted velocity fields produced earlier in the step.
48 ComputeField<Tvec> &vel_n_xp = storage.vel_n_xp.get();
49 ComputeField<Tvec> &vel_n_yp = storage.vel_n_yp.get();
50 ComputeField<Tvec> &vel_n_zp = storage.vel_n_zp.get();
51
52 scheduler().for_each_patchdata_nonempty([&](Patch p, PatchDataLayer &pdat) {
53 MergedPDat &mpdat = storage.merged_patchdata_ghost.get().get(p.id_patch);
54
55 sham::DeviceBuffer<Tscal> &buf_rho = mpdat.pdat.get_field_buf_ref<Tscal>(irho_interf);
56 sham::DeviceBuffer<Tscal> &buf_eint = mpdat.pdat.get_field_buf_ref<Tscal>(ieint_interf);
57 sham::DeviceBuffer<Tvec> &buf_vel = mpdat.pdat.get_field_buf_ref<Tvec>(ivel_interf);
58
59 sham::DeviceBuffer<Tvec> &buf_vel_xp = vel_n_xp.get_buf_check(p.id_patch);
60 sham::DeviceBuffer<Tvec> &buf_vel_yp = vel_n_yp.get_buf_check(p.id_patch);
61 sham::DeviceBuffer<Tvec> &buf_vel_zp = vel_n_zp.get_buf_check(p.id_patch);
62
63 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q = Q.get_buf_check(p.id_patch);
64
// NOTE(review): named `cons_transp` here but `const_transp` in the flux
// kernels below — same config flag, inconsistent local naming.
65 bool cons_transp = solver_config.use_consistent_transport;
66
67 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
68
69 sham::EventList depends_list;
70
71 auto rho = buf_rho.get_read_access(depends_list);
72 auto vel = buf_vel.get_read_access(depends_list);
73 auto eint = buf_eint.get_read_access(depends_list);
74
75 auto vel_xp = buf_vel_xp.get_read_access(depends_list);
76 auto vel_yp = buf_vel_yp.get_read_access(depends_list);
77 auto vel_zp = buf_vel_zp.get_read_access(depends_list);
78
// Deliberately shadows the outer ComputeField `Q` with its device accessor.
79 auto Q = buf_Q.get_write_access(depends_list);
80
// NOTE(review): kernel label "compite Pis" is a typo (presumably "compute ...").
81 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
82 Block::for_each_cells(
83 cgh, mpdat.total_elements, "compite Pis", [=](u32 /*block_id*/, u32 cell_gid) {
84 Tscal r = rho[cell_gid];
85 Tscal e = eint[cell_gid];
86 Tvec vm = vel[cell_gid];
87 Tvec vxp = vel_xp[cell_gid];
88 Tvec vyp = vel_yp[cell_gid];
89 Tvec vzp = vel_zp[cell_gid];
90
91 // without consistent transport
92
93 if (!cons_transp) {
// Velocities are weighted by rho; eint is stored unchanged.
94 Tvec tmp_m = vm * r;
95 Tscal tmp_x = vxp.x() * r;
96 Tscal tmp_y = vyp.y() * r;
97 Tscal tmp_z = vzp.z() * r;
98
99 Q[cell_gid] = {r, tmp_m.x(), tmp_m.y(), tmp_m.z(), tmp_x, tmp_y, tmp_z, e};
100 } else {
101
102 // with consistent transport
// Velocities stored as-is and eint divided by rho; the density
// weighting is re-applied later in the flux kernels (Qstari.s0()).
103 Tvec tmp_m = vm;
104 Tscal tmp_x = vxp.x();
105 Tscal tmp_y = vyp.y();
106 Tscal tmp_z = vzp.z();
107
108 Q[cell_gid]
109 = {r, tmp_m.x(), tmp_m.y(), tmp_m.z(), tmp_x, tmp_y, tmp_z, e / r};
110 }
111 });
112 });
113
// Register the kernel event on every accessed buffer (async completion).
114 buf_rho.complete_event_state(e);
115 buf_vel.complete_event_state(e);
116 buf_eint.complete_event_state(e);
117
118 buf_vel_xp.complete_event_state(e);
119 buf_vel_yp.complete_event_state(e);
120 buf_vel_zp.complete_event_state(e);
121
122 buf_Q.complete_event_state(e);
123 });
124}
125
// --- van Leer limited slopes of Q ------------------------------------------
// Loads the six axis-shifted copies of Q (Q_{x,y,z}{m,p}) through the ghost
// zones, then computes per-cell limited slopes a_x, a_y, a_z component by
// component with shammath::van_leer_slope, using the physical cell size along
// each axis. Each result field is checked for NaNs afterwards.
// NOTE(review): the function signature (doxygen line 127) was elided by the
// extraction — confirm the exact member name against the repository.
126template<class Tvec, class TgridVec>
128 StackEntry stack_loc{};
129
130 using namespace shamrock::patch;
131 using namespace shamrock;
132 using namespace shammath;
133 using MergedPDat = shamrock::MergedPatchData;
134
135 using Block = typename Config::AMRBlock;
136
137 using Tscal8 = sycl::vec<Tscal, 8>;
138 ComputeField<Tscal8> &Q = storage.Q.get();
139
140 modules::ValueLoader<Tvec, TgridVec, Tscal8> val_load_vec8(context, solver_config, storage);
141
// Minus-side neighbors are kept in storage (reused by later steps)...
142 storage.Q_xm.set(val_load_vec8.load_value_with_gz(Q, {-1, 0, 0}, "Q_xm"));
143 storage.Q_ym.set(val_load_vec8.load_value_with_gz(Q, {0, -1, 0}, "Q_ym"));
144 storage.Q_zm.set(val_load_vec8.load_value_with_gz(Q, {0, 0, -1}, "Q_zm"));
145
146 ComputeField<Tscal8> &Q_xm = storage.Q_xm.get();
147 ComputeField<Tscal8> &Q_ym = storage.Q_ym.get();
148 ComputeField<Tscal8> &Q_zm = storage.Q_zm.get();
149
// ...while plus-side neighbors are only needed locally for the slopes.
150 ComputeField<Tscal8> Q_xp = val_load_vec8.load_value_with_gz(Q, {1, 0, 0}, "Q_xp");
151 ComputeField<Tscal8> Q_yp = val_load_vec8.load_value_with_gz(Q, {0, 1, 0}, "Q_yp");
152 ComputeField<Tscal8> Q_zp = val_load_vec8.load_value_with_gz(Q, {0, 0, 1}, "Q_zp");
153
154 shamrock::SchedulerUtility utility(scheduler());
155
156 storage.a_x.set(utility.make_compute_field<Tscal8>("a_x", Block::block_size, [&](u64 id) {
157 return storage.merged_patchdata_ghost.get().get(id).total_elements;
158 }));
159
160 storage.a_y.set(utility.make_compute_field<Tscal8>("a_y", Block::block_size, [&](u64 id) {
161 return storage.merged_patchdata_ghost.get().get(id).total_elements;
162 }));
163
164 storage.a_z.set(utility.make_compute_field<Tscal8>("a_z", Block::block_size, [&](u64 id) {
165 return storage.merged_patchdata_ghost.get().get(id).total_elements;
166 }));
167
168 ComputeField<Tscal8> &a_x = storage.a_x.get();
169 ComputeField<Tscal8> &a_y = storage.a_y.get();
170 ComputeField<Tscal8> &a_z = storage.a_z.get();
171
172 scheduler().for_each_patchdata_nonempty([&](Patch p, PatchDataLayer &pdat) {
173 MergedPDat &mpdat = storage.merged_patchdata_ghost.get().get(p.id_patch);
// Fields 0/1 hold the integer AMR block bounds (cell_min/cell_max).
174 sham::DeviceBuffer<TgridVec> &buf_cell_min = mpdat.pdat.get_field_buf_ref<TgridVec>(0);
175 sham::DeviceBuffer<TgridVec> &buf_cell_max = mpdat.pdat.get_field_buf_ref<TgridVec>(1);
176
177 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q = Q.get_buf_check(p.id_patch);
178
179 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_xm = Q_xm.get_buf_check(p.id_patch);
180 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_xp = Q_xp.get_buf_check(p.id_patch);
181 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_x = a_x.get_buf_check(p.id_patch);
182
183 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_ym = Q_ym.get_buf_check(p.id_patch);
184 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_yp = Q_yp.get_buf_check(p.id_patch);
185 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_y = a_y.get_buf_check(p.id_patch);
186
187 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_zm = Q_zm.get_buf_check(p.id_patch);
188 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_zp = Q_zp.get_buf_check(p.id_patch);
189 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_z = a_z.get_buf_check(p.id_patch);
190
191 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
// x-direction slope kernel.
192 {
193 sham::EventList depends_list;
194 auto cell_min = buf_cell_min.get_read_access(depends_list);
195 auto cell_max = buf_cell_max.get_read_access(depends_list);
196
197 auto Q = buf_Q.get_read_access(depends_list);
198 auto Q_xm = buf_Q_xm.get_read_access(depends_list);
199 auto Q_xp = buf_Q_xp.get_read_access(depends_list);
200 auto a_x = buf_a_x.get_write_access(depends_list);
201
202 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
// Converts integer grid deltas to physical lengths (per-cell, not per-block).
203 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
204
205 Block::for_each_cells(
206 cgh, mpdat.total_elements, "compite a_x", [=](u32 block_id, u32 cell_gid) {
207 Tscal d_cell = (cell_max[block_id] - cell_min[block_id])
208 .template convert<Tscal>()
209 .x()
210 * coord_conv_fact;
211
212 Tscal8 Qi = Q[cell_gid];
213 Tscal8 Qim = Q_xm[cell_gid];
214 Tscal8 Qip = Q_xp[cell_gid];
215
// One-sided differences toward the minus and plus neighbors.
216 Tscal8 dqm = (Qi - Qim) / d_cell;
217 Tscal8 dqp = (Qip - Qi) / d_cell;
218
// van_leer_slope has no sycl::vec overload here, hence per-component calls.
219 a_x[cell_gid] = Tscal8{
220 shammath::van_leer_slope(dqm.s0(), dqp.s0()),
221 shammath::van_leer_slope(dqm.s1(), dqp.s1()),
222 shammath::van_leer_slope(dqm.s2(), dqp.s2()),
223 shammath::van_leer_slope(dqm.s3(), dqp.s3()),
224 shammath::van_leer_slope(dqm.s4(), dqp.s4()),
225 shammath::van_leer_slope(dqm.s5(), dqp.s5()),
226 shammath::van_leer_slope(dqm.s6(), dqp.s6()),
227 shammath::van_leer_slope(dqm.s7(), dqp.s7())};
228 });
229 });
230 buf_cell_min.complete_event_state(e);
231 buf_cell_max.complete_event_state(e);
232 buf_Q.complete_event_state(e);
233 buf_Q_xm.complete_event_state(e);
234 buf_Q_xp.complete_event_state(e);
235 buf_a_x.complete_event_state(e);
236 }
237
// y-direction slope kernel (same structure as x).
238 {
239 sham::EventList depends_list;
240 auto cell_min = buf_cell_min.get_read_access(depends_list);
241 auto cell_max = buf_cell_max.get_read_access(depends_list);
242
243 auto Q = buf_Q.get_read_access(depends_list);
244 auto Q_ym = buf_Q_ym.get_read_access(depends_list);
245 auto Q_yp = buf_Q_yp.get_read_access(depends_list);
246 auto a_y = buf_a_y.get_write_access(depends_list);
247
248 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
249 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
250
251 Block::for_each_cells(
252 cgh, mpdat.total_elements, "compite a_y", [=](u32 block_id, u32 cell_gid) {
253 Tscal d_cell = (cell_max[block_id] - cell_min[block_id])
254 .template convert<Tscal>()
255 .y()
256 * coord_conv_fact;
257
258 Tscal8 Qi = Q[cell_gid];
259 Tscal8 Qim = Q_ym[cell_gid];
260 Tscal8 Qip = Q_yp[cell_gid];
261
262 Tscal8 dqm = (Qi - Qim) / d_cell;
263 Tscal8 dqp = (Qip - Qi) / d_cell;
264
265 a_y[cell_gid] = Tscal8{
266 shammath::van_leer_slope(dqm.s0(), dqp.s0()),
267 shammath::van_leer_slope(dqm.s1(), dqp.s1()),
268 shammath::van_leer_slope(dqm.s2(), dqp.s2()),
269 shammath::van_leer_slope(dqm.s3(), dqp.s3()),
270 shammath::van_leer_slope(dqm.s4(), dqp.s4()),
271 shammath::van_leer_slope(dqm.s5(), dqp.s5()),
272 shammath::van_leer_slope(dqm.s6(), dqp.s6()),
273 shammath::van_leer_slope(dqm.s7(), dqp.s7())};
274 });
275 });
276 buf_cell_min.complete_event_state(e);
277 buf_cell_max.complete_event_state(e);
278 buf_Q.complete_event_state(e);
279 buf_Q_ym.complete_event_state(e);
280 buf_Q_yp.complete_event_state(e);
281 buf_a_y.complete_event_state(e);
282 }
283
// z-direction slope kernel (same structure as x).
284 {
285 sham::EventList depends_list;
286 auto cell_min = buf_cell_min.get_read_access(depends_list);
287 auto cell_max = buf_cell_max.get_read_access(depends_list);
288
289 auto Q = buf_Q.get_read_access(depends_list);
290 auto Q_zm = buf_Q_zm.get_read_access(depends_list);
291 auto Q_zp = buf_Q_zp.get_read_access(depends_list);
292 auto a_z = buf_a_z.get_write_access(depends_list);
293
294 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
295 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
296
297 Block::for_each_cells(
298 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
299 Tscal d_cell = (cell_max[block_id] - cell_min[block_id])
300 .template convert<Tscal>()
301 .z()
302 * coord_conv_fact;
303
304 Tscal8 Qi = Q[cell_gid];
305 Tscal8 Qim = Q_zm[cell_gid];
306 Tscal8 Qip = Q_zp[cell_gid];
307
308 Tscal8 dqm = (Qi - Qim) / d_cell;
309 Tscal8 dqp = (Qip - Qi) / d_cell;
310
311 a_z[cell_gid] = Tscal8{
312 shammath::van_leer_slope(dqm.s0(), dqp.s0()),
313 shammath::van_leer_slope(dqm.s1(), dqp.s1()),
314 shammath::van_leer_slope(dqm.s2(), dqp.s2()),
315 shammath::van_leer_slope(dqm.s3(), dqp.s3()),
316 shammath::van_leer_slope(dqm.s4(), dqp.s4()),
317 shammath::van_leer_slope(dqm.s5(), dqp.s5()),
318 shammath::van_leer_slope(dqm.s6(), dqp.s6()),
319 shammath::van_leer_slope(dqm.s7(), dqp.s7())};
320 });
321 });
322 buf_cell_min.complete_event_state(e);
323 buf_cell_max.complete_event_state(e);
324 buf_Q.complete_event_state(e);
325 buf_Q_zm.complete_event_state(e);
326 buf_Q_zp.complete_event_state(e);
327 buf_a_z.complete_event_state(e);
328 }
329
// Post-kernel sanity checks. NOTE(review): the statement after each err_ln
// (doxygen lines 332/337/342) was elided by the extraction — presumably an
// exception throw; confirm against the repository.
330 if (a_x.get_field(p.id_patch).has_nan()) {
331 logger::err_ln("[Zeus]", "nan detected in a_x");
333 }
334
335 if (a_y.get_field(p.id_patch).has_nan()) {
336 logger::err_ln("[Zeus]", "nan detected in a_y");
338 }
339
340 if (a_z.get_field(p.id_patch).has_nan()) {
341 logger::err_ln("[Zeus]", "nan detected in a_z");
343 }
344 });
345}
346
// --- upwind face states Qstar ----------------------------------------------
// For each axis, builds the upwinded interface value Qstar from Q, its
// minus-side neighbor, and the limited slopes (a_*, a_*m). With van Leer
// interpolation enabled the donor value is corrected by 0.5*(dx -/+ v*dt)*a
// (second order); otherwise plain donor-cell upwinding is used. Takes the
// step dt_in. NOTE(review): the function signature (doxygen line 348) was
// elided by the extraction — confirm the exact member name in the repository.
347template<class Tvec, class TgridVec>
349 Tscal dt_in) {
350
351 StackEntry stack_loc{};
352
353 using namespace shamrock::patch;
354 using namespace shamrock;
355 using namespace shammath;
356 using MergedPDat = shamrock::MergedPatchData;
357
358 using Block = typename Config::AMRBlock;
359
360 using Tscal8 = sycl::vec<Tscal, 8>;
361
362 modules::ValueLoader<Tvec, TgridVec, Tscal8> val_load_vec8(context, solver_config, storage);
363 ComputeField<Tscal8> &Q = storage.Q.get();
364 ComputeField<Tscal8> &a_x = storage.a_x.get();
365 ComputeField<Tscal8> &a_y = storage.a_y.get();
366 ComputeField<Tscal8> &a_z = storage.a_z.get();
367
// Minus-side shifted slopes, needed for the upwind (v >= 0) branch.
368 ComputeField<Tscal8> a_xm = val_load_vec8.load_value_with_gz(a_x, {-1, 0, 0}, "a_xm");
369 ComputeField<Tscal8> a_ym = val_load_vec8.load_value_with_gz(a_y, {0, -1, 0}, "a_ym");
370 ComputeField<Tscal8> a_zm = val_load_vec8.load_value_with_gz(a_z, {0, 0, -1}, "a_zm");
371
// Minus-side shifted Q, loaded by the slope step and reused here.
372 ComputeField<Tscal8> &Q_xm = storage.Q_xm.get();
373 ComputeField<Tscal8> &Q_ym = storage.Q_ym.get();
374 ComputeField<Tscal8> &Q_zm = storage.Q_zm.get();
375
376 shamrock::SchedulerUtility utility(scheduler());
377 storage.Qstar_x.set(
378 utility.make_compute_field<Tscal8>("Qstar_x", Block::block_size, [&](u64 id) {
379 return storage.merged_patchdata_ghost.get().get(id).total_elements;
380 }));
381
382 storage.Qstar_y.set(
383 utility.make_compute_field<Tscal8>("Qstar_y", Block::block_size, [&](u64 id) {
384 return storage.merged_patchdata_ghost.get().get(id).total_elements;
385 }));
386
387 storage.Qstar_z.set(
388 utility.make_compute_field<Tscal8>("Qstar_z", Block::block_size, [&](u64 id) {
389 return storage.merged_patchdata_ghost.get().get(id).total_elements;
390 }));
391
392 ComputeField<Tscal8> &Qstar_x = storage.Qstar_x.get();
393 ComputeField<Tscal8> &Qstar_y = storage.Qstar_y.get();
394 ComputeField<Tscal8> &Qstar_z = storage.Qstar_z.get();
395
// Ghost-layout field indices (statement start elided at line 396).
397 = shambase::get_check_ref(storage.ghost_layout.get());
398 u32 irho_interf = ghost_layout.get_field_idx<Tscal>("rho");
399 u32 ieint_interf = ghost_layout.get_field_idx<Tscal>("eint");
400 u32 ivel_interf = ghost_layout.get_field_idx<Tvec>("vel");
401
402 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
403
404 scheduler().for_each_patchdata_nonempty([&](Patch p, PatchDataLayer &pdat) {
405 MergedPDat &mpdat = storage.merged_patchdata_ghost.get().get(p.id_patch);
406 sham::DeviceBuffer<TgridVec> &buf_cell_min = mpdat.pdat.get_field_buf_ref<TgridVec>(0);
407 sham::DeviceBuffer<TgridVec> &buf_cell_max = mpdat.pdat.get_field_buf_ref<TgridVec>(1);
408
409 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q = Q.get_buf_check(p.id_patch);
410
411 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_xm = Q_xm.get_buf_check(p.id_patch);
412 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_x = a_x.get_buf_check(p.id_patch);
413 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_xm = a_xm.get_buf_check(p.id_patch);
414
415 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_ym = Q_ym.get_buf_check(p.id_patch);
416 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_y = a_y.get_buf_check(p.id_patch);
417 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_ym = a_ym.get_buf_check(p.id_patch);
418
419 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_zm = Q_zm.get_buf_check(p.id_patch);
420 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_z = a_z.get_buf_check(p.id_patch);
421 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_a_zm = a_zm.get_buf_check(p.id_patch);
422
423 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Qstar_x = Qstar_x.get_buf_check(p.id_patch);
424 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Qstar_y = Qstar_y.get_buf_check(p.id_patch);
425 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Qstar_z = Qstar_z.get_buf_check(p.id_patch);
426
427 sham::DeviceBuffer<Tvec> &buf_vel = mpdat.pdat.get_field_buf_ref<Tvec>(ivel_interf);
428
// x-direction upwind kernel.
429 {
430 sham::EventList depends_list;
431 auto cell_min = buf_cell_min.get_read_access(depends_list);
432 auto cell_max = buf_cell_max.get_read_access(depends_list);
433
434 auto Q = buf_Q.get_read_access(depends_list);
435 auto Q_xm = buf_Q_xm.get_read_access(depends_list);
436 auto a_x = buf_a_x.get_read_access(depends_list);
437 auto a_xm = buf_a_xm.get_read_access(depends_list);
438 auto Qstar_x = buf_Qstar_x.get_write_access(depends_list);
439
440 auto vel = buf_vel.get_read_access(depends_list);
441
442 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
443 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
444 Tscal dt = dt_in;
445 bool enable_vanleer = solver_config.use_van_leer;
// NOTE(review): kernel label "compite a_z" is a copy-paste typo — this is the
// x-direction Qstar kernel (same label reused in the y kernel below).
446 Block::for_each_cells(
447 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
448 Tscal d_cell = (cell_max[block_id] - cell_min[block_id])
449 .template convert<Tscal>()
450 .x()
451 * coord_conv_fact;
452
453 Tscal8 Qi = Q[cell_gid];
454 Tscal8 Qim = Q_xm[cell_gid];
455 Tscal8 ai = a_x[cell_gid];
456 Tscal8 aim = a_xm[cell_gid];
457 Tscal vx = vel[cell_gid].x();
458
459 Tscal8 res;
460
461 if (enable_vanleer) {
// Second-order upwind: donor value plus limited-slope correction.
462 if (vx >= 0) {
463 res = Qim + 0.5 * (d_cell - vx * dt) * aim;
464 } else {
465 res = Qi - 0.5 * (d_cell + vx * dt) * ai;
466 }
467 } else {
// First-order donor cell.
468 if (vx >= 0) {
469 res = Qim;
470 } else {
471 res = Qi;
472 }
473 }
474
475 Qstar_x[cell_gid] = res;
476 });
477 });
478 buf_cell_min.complete_event_state(e);
479 buf_cell_max.complete_event_state(e);
480 buf_Q.complete_event_state(e);
481 buf_Q_xm.complete_event_state(e);
482 buf_a_x.complete_event_state(e);
483 buf_a_xm.complete_event_state(e);
484 buf_Qstar_x.complete_event_state(e);
485 buf_vel.complete_event_state(e);
486 }
487
// y-direction upwind kernel (same structure as x).
488 {
489 sham::EventList depends_list;
490 auto cell_min = buf_cell_min.get_read_access(depends_list);
491 auto cell_max = buf_cell_max.get_read_access(depends_list);
492
493 auto Q = buf_Q.get_read_access(depends_list);
494 auto Q_ym = buf_Q_ym.get_read_access(depends_list);
495 auto a_y = buf_a_y.get_read_access(depends_list);
496 auto a_ym = buf_a_ym.get_read_access(depends_list);
497 auto Qstar_y = buf_Qstar_y.get_write_access(depends_list);
498
499 auto vel = buf_vel.get_read_access(depends_list);
500
501 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
502 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
503 Tscal dt = dt_in;
504 bool enable_vanleer = solver_config.use_van_leer;
505 Block::for_each_cells(
506 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
507 Tscal d_cell = (cell_max[block_id] - cell_min[block_id])
508 .template convert<Tscal>()
509 .y()
510 * coord_conv_fact;
511
512 Tscal8 Qi = Q[cell_gid];
513 Tscal8 Qim = Q_ym[cell_gid];
514 Tscal8 ai = a_y[cell_gid];
515 Tscal8 aim = a_ym[cell_gid];
516 Tscal vy = vel[cell_gid].y();
517
518 Tscal8 res;
519 if (enable_vanleer) {
520 if (vy >= 0) {
521 res = Qim + aim * (d_cell - vy * dt) * 0.5;
522 } else {
523 res = Qi - ai * (d_cell + vy * dt) * 0.5;
524 }
525 } else {
526 if (vy >= 0) {
527 res = Qim;
528 } else {
529 res = Qi;
530 }
531 }
532
533 Qstar_y[cell_gid] = res;
534 });
535 });
536 buf_cell_min.complete_event_state(e);
537 buf_cell_max.complete_event_state(e);
538 buf_Q.complete_event_state(e);
539 buf_Q_ym.complete_event_state(e);
540 buf_a_y.complete_event_state(e);
541 buf_a_ym.complete_event_state(e);
542 buf_Qstar_y.complete_event_state(e);
543 buf_vel.complete_event_state(e);
544 }
545
// z-direction upwind kernel (same structure as x).
546 {
547 sham::EventList depends_list;
548 auto cell_min = buf_cell_min.get_read_access(depends_list);
549 auto cell_max = buf_cell_max.get_read_access(depends_list);
550
551 auto Q = buf_Q.get_read_access(depends_list);
552 auto Q_zm = buf_Q_zm.get_read_access(depends_list);
553 auto a_z = buf_a_z.get_read_access(depends_list);
554 auto a_zm = buf_a_zm.get_read_access(depends_list);
555 auto Qstar_z = buf_Qstar_z.get_write_access(depends_list);
556
557 auto vel = buf_vel.get_read_access(depends_list);
558
559 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
560 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
561 Tscal dt = dt_in;
562 bool enable_vanleer = solver_config.use_van_leer;
563 Block::for_each_cells(
564 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
565 Tscal d_cell = (cell_max[block_id] - cell_min[block_id])
566 .template convert<Tscal>()
567 .z()
568 * coord_conv_fact;
569
570 Tscal8 Qi = Q[cell_gid];
571 Tscal8 Qim = Q_zm[cell_gid];
572 Tscal8 ai = a_z[cell_gid];
573 Tscal8 aim = a_zm[cell_gid];
574 Tscal vz = vel[cell_gid].z();
575
576 Tscal8 res;
577
578 if (enable_vanleer) {
579 if (vz >= 0) {
580 res = Qim + aim * (d_cell - vz * dt) * 0.5;
581 } else {
582 res = Qi - ai * (d_cell + vz * dt) * 0.5;
583 }
584 } else {
585 if (vz >= 0) {
586 res = Qim;
587 } else {
588 res = Qi;
589 }
590 }
591
592 Qstar_z[cell_gid] = res;
593 });
594 });
595
596 buf_cell_min.complete_event_state(e);
597 buf_cell_max.complete_event_state(e);
598 buf_Q.complete_event_state(e);
599 buf_Q_zm.complete_event_state(e);
600 buf_a_z.complete_event_state(e);
601 buf_a_zm.complete_event_state(e);
602 buf_Qstar_z.complete_event_state(e);
603 buf_vel.complete_event_state(e);
604 }
605 });
606}
607
// --- ghost-zone exchange of the Qstar fields -------------------------------
// Exchanges Qstar_x/y/z across patch boundaries via the GhostZones module and
// swaps the exchanged fields back into storage (reset before set, so the old
// fields are released first). NOTE(review): the function signature (doxygen
// line 609) was elided by the extraction — confirm the exact member name.
608template<class Tvec, class TgridVec>
610
611 StackEntry stack_loc{};
612
613 using namespace shamrock;
614 using Tscal8 = sycl::vec<Tscal, 8>;
615
616 ComputeField<Tscal8> &Qstar_x_in = storage.Qstar_x.get();
617 ComputeField<Tscal8> &Qstar_y_in = storage.Qstar_y.get();
618 ComputeField<Tscal8> &Qstar_z_in = storage.Qstar_z.get();
619
620 modules::GhostZones gz(context, solver_config, storage);
621
622 auto Qstar_x_out = gz.exchange_compute_field(Qstar_x_in);
623 auto Qstar_y_out = gz.exchange_compute_field(Qstar_y_in);
624 auto Qstar_z_out = gz.exchange_compute_field(Qstar_z_in);
625
// Release the pre-exchange fields, then store the exchanged ones.
626 storage.Qstar_x.reset();
627 storage.Qstar_y.reset();
628 storage.Qstar_z.reset();
629
630 storage.Qstar_x.set(std::move(Qstar_x_out));
631 storage.Qstar_y.set(std::move(Qstar_y_out));
632 storage.Qstar_z.set(std::move(Qstar_z_out));
633}
634
// --- face fluxes from the upwinded states ----------------------------------
// Computes Flux_x/y/z = Qstar * (v_axis * face_area) per cell. With consistent
// transport, components s1..s7 of Qstar are specific quantities and are
// re-weighted by the upwinded density s0 before forming the flux. Qstar
// storage is released at the end. NOTE(review): the function signature
// (doxygen line 636) was elided by the extraction — confirm the member name.
635template<class Tvec, class TgridVec>
637
638 StackEntry stack_loc{};
639
640 using namespace shamrock::patch;
641 using namespace shamrock;
642 using namespace shammath;
643 using MergedPDat = shamrock::MergedPatchData;
644
645 using Block = typename Config::AMRBlock;
646
647 using Tscal8 = sycl::vec<Tscal, 8>;
648
649 shamrock::SchedulerUtility utility(scheduler());
650 storage.Flux_x.set(utility.make_compute_field<Tscal8>("Flux_x", Block::block_size, [&](u64 id) {
651 return storage.merged_patchdata_ghost.get().get(id).total_elements;
652 }));
653
654 storage.Flux_y.set(utility.make_compute_field<Tscal8>("Flux_y", Block::block_size, [&](u64 id) {
655 return storage.merged_patchdata_ghost.get().get(id).total_elements;
656 }));
657
658 storage.Flux_z.set(utility.make_compute_field<Tscal8>("Flux_z", Block::block_size, [&](u64 id) {
659 return storage.merged_patchdata_ghost.get().get(id).total_elements;
660 }));
661
662 ComputeField<Tscal8> &Qstar_x = storage.Qstar_x.get();
663 ComputeField<Tscal8> &Qstar_y = storage.Qstar_y.get();
664 ComputeField<Tscal8> &Qstar_z = storage.Qstar_z.get();
665
666 ComputeField<Tscal8> &Flux_x = storage.Flux_x.get();
667 ComputeField<Tscal8> &Flux_y = storage.Flux_y.get();
668 ComputeField<Tscal8> &Flux_z = storage.Flux_z.get();
669
// Ghost-layout field indices (statement start elided at line 670).
671 = shambase::get_check_ref(storage.ghost_layout.get());
672 u32 irho_interf = ghost_layout.get_field_idx<Tscal>("rho");
673 u32 ieint_interf = ghost_layout.get_field_idx<Tscal>("eint");
674 u32 ivel_interf = ghost_layout.get_field_idx<Tvec>("vel");
675
676 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
677
678 scheduler().for_each_patchdata_nonempty([&](Patch p, PatchDataLayer &pdat) {
679 MergedPDat &mpdat = storage.merged_patchdata_ghost.get().get(p.id_patch);
680 sham::DeviceBuffer<TgridVec> &buf_cell_min = mpdat.pdat.get_field_buf_ref<TgridVec>(0);
681 sham::DeviceBuffer<TgridVec> &buf_cell_max = mpdat.pdat.get_field_buf_ref<TgridVec>(1);
682
683 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Qstar_x = Qstar_x.get_buf_check(p.id_patch);
684 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Qstar_y = Qstar_y.get_buf_check(p.id_patch);
685 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Qstar_z = Qstar_z.get_buf_check(p.id_patch);
686
687 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_x = Flux_x.get_buf_check(p.id_patch);
688 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_y = Flux_y.get_buf_check(p.id_patch);
689 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_z = Flux_z.get_buf_check(p.id_patch);
690
691 sham::DeviceBuffer<Tvec> &buf_vel = mpdat.pdat.get_field_buf_ref<Tvec>(ivel_interf);
692
// x-direction flux kernel.
693 {
694 sham::EventList depends_list;
695 auto cell_min = buf_cell_min.get_read_access(depends_list);
696 auto cell_max = buf_cell_max.get_read_access(depends_list);
697
698 auto Qstar_x = buf_Qstar_x.get_read_access(depends_list);
699 auto Flux_x = buf_Flux_x.get_write_access(depends_list);
700
701 auto vel = buf_vel.get_read_access(depends_list);
702
703 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
704 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
705
706 bool const_transp = solver_config.use_consistent_transport;
// NOTE(review): kernel label "compite a_z" is a copy-paste typo — these three
// kernels compute fluxes, not slopes.
707 Block::for_each_cells(
708 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
709 Tvec d_cell
710 = (cell_max[block_id] - cell_min[block_id]).template convert<Tscal>()
711 * coord_conv_fact;
712
713 Tscal8 Qstari = Qstar_x[cell_gid];
714 Tscal vx = vel[cell_gid].x();
715
716 // with consistent transport
// Re-apply the density weighting to the specific components s1..s7.
717 if (const_transp) {
718 Qstari.s1() *= Qstari.s0();
719 Qstari.s2() *= Qstari.s0();
720 Qstari.s3() *= Qstari.s0();
721 Qstari.s4() *= Qstari.s0();
722 Qstari.s5() *= Qstari.s0();
723 Qstari.s6() *= Qstari.s0();
724 Qstari.s7() *= Qstari.s0();
725 }
726
// Flux through the x-face: state * (normal velocity * face area dy*dz).
727 Flux_x[cell_gid] = Qstari * (vx * d_cell.y() * d_cell.z());
728 });
729 });
730 buf_cell_min.complete_event_state(e);
731 buf_cell_max.complete_event_state(e);
732 buf_Qstar_x.complete_event_state(e);
733 buf_Flux_x.complete_event_state(e);
734 buf_vel.complete_event_state(e);
735 }
736
// y-direction flux kernel (face area dx*dz).
737 {
738 sham::EventList depends_list;
739 auto cell_min = buf_cell_min.get_read_access(depends_list);
740 auto cell_max = buf_cell_max.get_read_access(depends_list);
741
742 auto Qstar_y = buf_Qstar_y.get_read_access(depends_list);
743 auto Flux_y = buf_Flux_y.get_write_access(depends_list);
744
745 auto vel = buf_vel.get_read_access(depends_list);
746
747 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
748 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
749
750 bool const_transp = solver_config.use_consistent_transport;
751 Block::for_each_cells(
752 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
753 Tvec d_cell
754 = (cell_max[block_id] - cell_min[block_id]).template convert<Tscal>()
755 * coord_conv_fact;
756
757 Tscal8 Qstari = Qstar_y[cell_gid];
758 Tscal vy = vel[cell_gid].y();
759 // with consistent transport
760 if (const_transp) {
761 Qstari.s1() *= Qstari.s0();
762 Qstari.s2() *= Qstari.s0();
763 Qstari.s3() *= Qstari.s0();
764 Qstari.s4() *= Qstari.s0();
765 Qstari.s5() *= Qstari.s0();
766 Qstari.s6() *= Qstari.s0();
767 Qstari.s7() *= Qstari.s0();
768 }
769 Flux_y[cell_gid] = Qstari * (vy * d_cell.x() * d_cell.z());
770 });
771 });
772 buf_cell_min.complete_event_state(e);
773 buf_cell_max.complete_event_state(e);
774 buf_Qstar_y.complete_event_state(e);
775 buf_Flux_y.complete_event_state(e);
776 buf_vel.complete_event_state(e);
777 }
778
// z-direction flux kernel (face area dx*dy).
779 {
780 sham::EventList depends_list;
781 auto cell_min = buf_cell_min.get_read_access(depends_list);
782 auto cell_max = buf_cell_max.get_read_access(depends_list);
783
784 auto Qstar_z = buf_Qstar_z.get_read_access(depends_list);
785 auto Flux_z = buf_Flux_z.get_write_access(depends_list);
786
787 auto vel = buf_vel.get_read_access(depends_list);
788
789 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
790 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
791
792 bool const_transp = solver_config.use_consistent_transport;
793 Block::for_each_cells(
794 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
795 Tvec d_cell
796 = (cell_max[block_id] - cell_min[block_id]).template convert<Tscal>()
797 * coord_conv_fact;
798
799 Tscal8 Qstari = Qstar_z[cell_gid];
800 Tscal vz = vel[cell_gid].z();
801 // with consistent transport
802 if (const_transp) {
803 Qstari.s1() *= Qstari.s0();
804 Qstari.s2() *= Qstari.s0();
805 Qstari.s3() *= Qstari.s0();
806 Qstari.s4() *= Qstari.s0();
807 Qstari.s5() *= Qstari.s0();
808 Qstari.s6() *= Qstari.s0();
809 Qstari.s7() *= Qstari.s0();
810 }
811 Flux_z[cell_gid] = Qstari * (vz * d_cell.x() * d_cell.y());
812 });
813 });
814 buf_cell_min.complete_event_state(e);
815 buf_cell_max.complete_event_state(e);
816 buf_Qstar_z.complete_event_state(e);
817 buf_Flux_z.complete_event_state(e);
818 buf_vel.complete_event_state(e);
819 }
820 });
821
// Qstar fields are no longer needed once the fluxes are formed.
822 storage.Qstar_x.reset();
823 storage.Qstar_y.reset();
824 storage.Qstar_z.reset();
825}
826
// --- load plus-side shifted fluxes -----------------------------------------
// Loads the +1 axis-shifted copies of the face fluxes (Flux_xp/yp/zp) through
// the ghost zones, for use in the flux-divergence update that follows.
// NOTE(review): the function signature (doxygen line 828) was elided by the
// extraction — confirm the exact member name against the repository.
827template<class Tvec, class TgridVec>
829
830 StackEntry stack_loc{};
831
832 using namespace shamrock::patch;
833 using namespace shamrock;
834 using namespace shammath;
835 using MergedPDat = shamrock::MergedPatchData;
836
837 using Block = typename Config::AMRBlock;
838
839 using Tscal8 = sycl::vec<Tscal, 8>;
840
841 ComputeField<Tscal8> &Flux_x = storage.Flux_x.get();
842 ComputeField<Tscal8> &Flux_y = storage.Flux_y.get();
843 ComputeField<Tscal8> &Flux_z = storage.Flux_z.get();
844
845 modules::ValueLoader<Tvec, TgridVec, Tscal8> val_load_vec8(context, solver_config, storage);
846 storage.Flux_xp.set(val_load_vec8.load_value_with_gz(Flux_x, {1, 0, 0}, "Flux_xp"));
847 storage.Flux_yp.set(val_load_vec8.load_value_with_gz(Flux_y, {0, 1, 0}, "Flux_yp"));
848 storage.Flux_zp.set(val_load_vec8.load_value_with_gz(Flux_z, {0, 0, 1}, "Flux_zp"));
849}
850
851template<class Tvec, class TgridVec>
// NOTE(review): the documentation renderer dropped source line 852 here — the
// member-function signature. From the body, this is the transport update step
// of TransportStep<Tvec, TgridVec>: it applies the per-face flux divergence to
// the 8-lane cell state vector Q over a timestep `dt` — confirm the exact name
// and signature against the repository source.
 853
 854 StackEntry stack_loc{};
 855
 856 using namespace shamrock::patch;
 857 using namespace shamrock;
 858 using namespace shammath;
 859 using MergedPDat = shamrock::MergedPatchData;
 860
 861 using Block = typename Config::AMRBlock;
 862
 863 using Tscal8 = sycl::vec<Tscal, 8>;
 864
// Cell state (updated in place) and the six face-flux fields. Flux_{x,y,z}
// appear to be the fluxes on the minus faces and Flux_{x,y,z}p the ghost-zone
// shifted copies on the plus faces (loaded with offsets {1,0,0}/{0,1,0}/{0,0,1}
// just above in this file).
 865 ComputeField<Tscal8> &Q = storage.Q.get();
 866 ComputeField<Tscal8> &Flux_x = storage.Flux_x.get();
 867 ComputeField<Tscal8> &Flux_y = storage.Flux_y.get();
 868 ComputeField<Tscal8> &Flux_z = storage.Flux_z.get();
 869 ComputeField<Tscal8> &Flux_xp = storage.Flux_xp.get();
 870 ComputeField<Tscal8> &Flux_yp = storage.Flux_yp.get();
 871 ComputeField<Tscal8> &Flux_zp = storage.Flux_zp.get();
 872
 873 scheduler().for_each_patchdata_nonempty([&](Patch p, PatchDataLayer &pdat) {
 874 MergedPDat &mpdat = storage.merged_patchdata_ghost.get().get(p.id_patch);
// Fields 0 and 1 of the merged patch data hold the integer AMR block bounds
// (cell min / cell max), used below to recover the physical cell size.
 875 sham::DeviceBuffer<TgridVec> &buf_cell_min = mpdat.pdat.get_field_buf_ref<TgridVec>(0);
 876 sham::DeviceBuffer<TgridVec> &buf_cell_max = mpdat.pdat.get_field_buf_ref<TgridVec>(1);
 877
 878 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q = Q.get_buf_check(p.id_patch);
 879 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_x = Flux_x.get_buf_check(p.id_patch);
 880 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_y = Flux_y.get_buf_check(p.id_patch);
 881 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_z = Flux_z.get_buf_check(p.id_patch);
 882 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_xp = Flux_xp.get_buf_check(p.id_patch);
 883 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_yp = Flux_yp.get_buf_check(p.id_patch);
 884 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Flux_zp = Flux_zp.get_buf_check(p.id_patch);
 885
 886 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
 887
// Register accesses on the event list: Q is read-modify-write, everything
// else is read-only. The raw pointers shadow the ComputeField references
// above inside the kernel lambda.
 888 sham::EventList depends_list;
 889 auto cell_min = buf_cell_min.get_read_access(depends_list);
 890 auto cell_max = buf_cell_max.get_read_access(depends_list);
 891
 892 auto Q = buf_Q.get_write_access(depends_list);
 893 auto Flux_x = buf_Flux_x.get_read_access(depends_list);
 894 auto Flux_y = buf_Flux_y.get_read_access(depends_list);
 895 auto Flux_z = buf_Flux_z.get_read_access(depends_list);
 896 auto Flux_xp = buf_Flux_xp.get_read_access(depends_list);
 897 auto Flux_yp = buf_Flux_yp.get_read_access(depends_list);
 898 auto Flux_zp = buf_Flux_zp.get_read_access(depends_list);
 899
 900 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
 901 Tscal _dt = dt;
 902
// Conversion from integer grid coordinates to physical length, per cell
// (block extent is divided by the number of cells per block side).
 903 Tscal coord_conv_fact = solver_config.grid_coord_to_pos_fact / Block::Nside;
 904
 905 bool const_transp = solver_config.use_consistent_transport;
// NOTE(review): the kernel label "compite a_z" looks like a copy-paste
// typo from another kernel — it neither describes this kernel nor is
// spelled correctly. Left untouched here since it is a runtime string.
 906 Block::for_each_cells(
 907 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
// Physical cell extents, then cell volume V.
 908 Tvec d_cell
 909 = (cell_max[block_id] - cell_min[block_id]).template convert<Tscal>()
 910 * coord_conv_fact;
 911
 912 Tscal V = d_cell.x() * d_cell.y() * d_cell.z();
 913
// Finite-volume flux balance: outgoing (plus-face) fluxes are
// subtracted, incoming (minus-face) fluxes added, then scaled by
// dt / V to give the state increment for this step.
 914 Tscal8 fsum = {};
 915
 916 fsum -= Flux_xp[cell_gid];
 917 fsum -= Flux_yp[cell_gid];
 918 fsum -= Flux_zp[cell_gid];
 919 fsum += Flux_x[cell_gid];
 920 fsum += Flux_y[cell_gid];
 921 fsum += Flux_z[cell_gid];
 922
 923 fsum /= V;
 924 fsum *= _dt;
 925
 926 Tscal8 Qtmp = Q[cell_gid];
 927
// With consistent transport, lanes s1..s7 are multiplied by the
// density lane s0 before the increment is applied — presumably
// converting per-mass quantities to conserved (per-volume) form.
// NOTE(review): no inverse division appears in this kernel; the
// back-conversion is presumably done downstream — confirm.
 928 // with consistent transport
 929 if (const_transp) {
 930 Qtmp.s1() *= Qtmp.s0();
 931 Qtmp.s2() *= Qtmp.s0();
 932 Qtmp.s3() *= Qtmp.s0();
 933 Qtmp.s4() *= Qtmp.s0();
 934 Qtmp.s5() *= Qtmp.s0();
 935 Qtmp.s6() *= Qtmp.s0();
 936 Qtmp.s7() *= Qtmp.s0();
 937 }
 938
 939 Qtmp += fsum;
 940
 941 Q[cell_gid] = Qtmp;
 942 });
 943 });
// Release the buffer access registrations against the kernel's event.
 944 buf_cell_min.complete_event_state(e);
 945 buf_cell_max.complete_event_state(e);
 946 buf_Q.complete_event_state(e);
 947 buf_Flux_x.complete_event_state(e);
 948 buf_Flux_y.complete_event_state(e);
 949 buf_Flux_z.complete_event_state(e);
 950 buf_Flux_xp.complete_event_state(e);
 951 buf_Flux_yp.complete_event_state(e);
 952 buf_Flux_zp.complete_event_state(e);
 953 });
 954
// The flux fields are consumed by this step; free their storage.
 955 storage.Flux_x.reset();
 956 storage.Flux_y.reset();
 957 storage.Flux_z.reset();
 958 storage.Flux_xp.reset();
 959 storage.Flux_yp.reset();
 960 storage.Flux_zp.reset();
 961}
962
963template<class Tvec, class TgridVec>
// NOTE(review): the documentation renderer dropped source line 964 here — the
// member-function signature. From the body, this function writes the updated
// state vector Q back into the primitive fields rho / eint / vel of the ghost
// layout — confirm the exact name and signature against the repository source.
 965
 966 StackEntry stack_loc{};
 967
 968 using namespace shamrock::patch;
 969 using namespace shamrock;
 970 using namespace shammath;
 971 using MergedPDat = shamrock::MergedPatchData;
 972
 973 using Block = typename Config::AMRBlock;
 974
 975 using Tscal8 = sycl::vec<Tscal, 8>;
 976
 977 ComputeField<Tscal8> &Q = storage.Q.get();
 978
// Load copies of Q shifted by one cell in -x, -y, -z (with ghost zones), so
// each cell can see its "i-1 / j-1 / k-1" neighbor in the kernel below.
 979 modules::ValueLoader<Tvec, TgridVec, Tscal8> val_load_vec8(context, solver_config, storage);
 980 storage.Q_xm.set(val_load_vec8.load_value_with_gz(Q, {-1, 0, 0}, "Q_xm"));
 981 storage.Q_ym.set(val_load_vec8.load_value_with_gz(Q, {0, -1, 0}, "Q_ym"));
 982 storage.Q_zm.set(val_load_vec8.load_value_with_gz(Q, {0, 0, -1}, "Q_zm"));
 983
 984 ComputeField<Tscal8> &Q_xm = storage.Q_xm.get();
 985 ComputeField<Tscal8> &Q_ym = storage.Q_ym.get();
 986 ComputeField<Tscal8> &Q_zm = storage.Q_zm.get();
 987
// NOTE(review): the renderer dropped source line 988 — the left-hand side of
// this declaration (presumably the ghost PatchDataLayerLayout reference named
// `ghost_layout`, as in the function at the top of this file).
 989 = shambase::get_check_ref(storage.ghost_layout.get());
 990 u32 irho_interf = ghost_layout.get_field_idx<Tscal>("rho");
 991 u32 ieint_interf = ghost_layout.get_field_idx<Tscal>("eint");
 992 u32 ivel_interf = ghost_layout.get_field_idx<Tvec>("vel");
 993
 994 scheduler().for_each_patchdata_nonempty([&](Patch p, PatchDataLayer &pdat) {
 995 MergedPDat &mpdat = storage.merged_patchdata_ghost.get().get(p.id_patch);
 996
// Output buffers: primitive fields in the merged (ghost-extended) patch data.
 997 sham::DeviceBuffer<Tscal> &buf_rho = mpdat.pdat.get_field_buf_ref<Tscal>(irho_interf);
 998 sham::DeviceBuffer<Tscal> &buf_eint = mpdat.pdat.get_field_buf_ref<Tscal>(ieint_interf);
 999 sham::DeviceBuffer<Tvec> &buf_vel = mpdat.pdat.get_field_buf_ref<Tvec>(ivel_interf);
 1000
 1001 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q = Q.get_buf_check(p.id_patch);
 1002 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_xm = Q_xm.get_buf_check(p.id_patch);
 1003 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_ym = Q_ym.get_buf_check(p.id_patch);
 1004 sham::DeviceBuffer<sycl::vec<Tscal, 8>> &buf_Q_zm = Q_zm.get_buf_check(p.id_patch);
 1005
 1006 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
 1007
 1008 sham::EventList depends_list;
 1009 auto Q = buf_Q.get_read_access(depends_list);
 1010 auto Q_xm = buf_Q_xm.get_read_access(depends_list);
 1011 auto Q_ym = buf_Q_ym.get_read_access(depends_list);
 1012 auto Q_zm = buf_Q_zm.get_read_access(depends_list);
 1013
 1014 auto rrho = buf_rho.get_write_access(depends_list);
 1015 auto reint = buf_eint.get_write_access(depends_list);
 1016 auto rvel = buf_vel.get_write_access(depends_list);
 1017
// NOTE(review): same mislabeled kernel name "compite a_z" as the update
// kernel above — left untouched since it is a runtime string.
 1018 auto e = q.submit(depends_list, [&](sycl::handler &cgh) {
 1019 Block::for_each_cells(
 1020 cgh, mpdat.total_elements, "compite a_z", [=](u32 block_id, u32 cell_gid) {
 1021 Tscal8 Q_i_j_k = Q[cell_gid];
 1022 Tscal8 Q_im1_j_k = Q_xm[cell_gid];
 1023 Tscal8 Q_i_jm1_k = Q_ym[cell_gid];
 1024 Tscal8 Q_i_j_km1 = Q_zm[cell_gid];
 1025
// Lane s0 of Q is the density (it is the divisor in the consistent
// transport conversion in the update step above).
 1026 Tscal rho = Q_i_j_k.s0();
 1027
// Cell-centered velocity: average of the two face values adjacent to
// the cell, mass-weighted by the two densities. The pairing of lanes
// (s1 with the -x neighbor's s4, s2 with s5, s3 with s6) suggests
// lanes 1-3 and 4-6 hold momenta on opposite faces — confirm against
// the Q packing convention defined elsewhere in this solver.
// NOTE(review): divides by (rho_i + rho_neighbor) with no guard; a
// vacuum cell pair would produce NaN/inf — verify this is excluded
// upstream.
 1028 Tscal vx = (Q_i_j_k.s1() + Q_im1_j_k.s4()) / (Q_i_j_k.s0() + Q_im1_j_k.s0());
 1029 Tscal vy = (Q_i_j_k.s2() + Q_i_jm1_k.s5()) / (Q_i_j_k.s0() + Q_i_jm1_k.s0());
 1030 Tscal vz = (Q_i_j_k.s3() + Q_i_j_km1.s6()) / (Q_i_j_k.s0() + Q_i_j_km1.s0());
 1031
// Lane s7 is written to the internal energy field. (This local `e`
// shadows the outer event variable `e` — harmless, but renaming one
// of them would aid readability.)
 1032 Tscal e = Q_i_j_k.s7();
 1033
 1034 rrho[cell_gid] = rho;
 1035 reint[cell_gid] = e;
 1036 rvel[cell_gid] = {vx, vy, vz};
 1037 });
 1038 });
 1039
 1040 buf_Q.complete_event_state(e);
 1041 buf_Q_xm.complete_event_state(e);
 1042 buf_Q_ym.complete_event_state(e);
 1043 buf_Q_zm.complete_event_state(e);
 1044
 1045 buf_rho.complete_event_state(e);
 1046 buf_eint.complete_event_state(e);
 1047 buf_vel.complete_event_state(e);
 1048 });
 1049}
1050
std::uint32_t u32
32 bit unsigned integer
std::uint64_t u64
64 bit unsigned integer
A buffer allocated in USM (Unified Shared Memory)
void complete_event_state(sycl::event e) const
Complete the event state of the buffer.
T * get_write_access(sham::EventList &depends_list, SourceLocation src_loc=SourceLocation{})
Get a read-write pointer to the buffer's data.
const T * get_read_access(sham::EventList &depends_list, SourceLocation src_loc=SourceLocation{}) const
Get a read-only pointer to the buffer's data.
A SYCL queue associated with a device and a context.
sycl::event submit(Fct &&fct)
Submits a kernel to the SYCL queue.
DeviceQueue & get_queue(u32 id=0)
Get a reference to a DeviceQueue.
Class to manage a list of SYCL events.
Definition EventList.hpp:31
void compute_cell_centered_momentas()
Compute the face-centered momenta (eqs. 48 and 49)
u32 get_field_idx(const std::string &field_name) const
Get the field id if matching name & type.
PatchDataLayer container class, the layout is described in patchdata_layout.
void throw_with_loc(std::string message, SourceLocation loc=SourceLocation{})
Throw an exception and append the source location to it.
T & get_check_ref(const std::unique_ptr< T > &ptr, SourceLocation loc=SourceLocation())
Takes a std::unique_ptr and returns a reference to the object it holds. It throws a std::runtime_error if the pointer is null.
Definition memory.hpp:110
namespace for math utility
Definition AABB.hpp:26
namespace for the main framework
Definition __init__.py:1
Math header to compute slope limiters.
utility class to handle AMR blocks
Definition AMRBlock.hpp:35
Patch object that contain generic patch information.
Definition Patch.hpp:33