Shamrock 2025.10.0
Astrophysical Code
Loading...
Searching...
No Matches
CartesianRender.cpp
Go to the documentation of this file.
1// -------------------------------------------------------//
2//
3// SHAMROCK code for hydrodynamics
4// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
5// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
6// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
7//
8// -------------------------------------------------------//
9
20#include "shammath/AABB.hpp"
25
27
// Free helper: computes, on the device, the world-space position of every
// pixel centre of an (nx x ny) image plane spanned by delta_x / delta_y
// around `center`. Pixel (ix, iy) maps to
//     center + delta_x * fx + delta_y * fy,   with fx, fy in (-0.5, 0.5),
// where ix varies fastest (gid = ix + nx * iy).
// Returns a DeviceBuffer<Tvec> holding the nx * ny sample positions.
// NOTE(review): original line 36 (presumably the `sham::kernel_call(` call
// site — cf. the kernel_call signature documented at the bottom of this page)
// was lost in extraction; the argument list below belongs to that call.
28 template<class Tvec>
29 sham::DeviceBuffer<Tvec> pixel_to_positions(
30 Tvec center, Tvec delta_x, Tvec delta_y, u32 nx, u32 ny) {
31
// One output slot per pixel, allocated via the compute scheduler.
32 sham::DeviceBuffer<Tvec> ret{nx * ny, shamsys::instance::get_compute_scheduler_ptr()};
33
34 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
35
// No read buffers; `ret` is the only written buffer; one work item per pixel.
37 q, sham::MultiRef{}, sham::MultiRef{ret}, nx * ny, [=](u32 gid, Tvec *position) {
38 u32 ix = gid % nx;
39 u32 iy = gid / nx;
// +0.5 samples the pixel centre; -0.5 recentres the image plane on `center`.
40 f64 fx = ((f64(ix) + 0.5) / nx) - 0.5;
41 f64 fy = ((f64(iy) + 0.5) / ny) - 0.5;
42 position[gid] = center + delta_x * fx + delta_y * fy;
43 });
44
45 return ret;
46 }
47
// Free helper: builds one orthographic ray per pixel of the (nx x ny) image
// plane. Each ray originates at the pixel-centre position (same mapping as
// pixel_to_positions) and points along e_z = cross(delta_x, delta_y)
// normalized, i.e. perpendicular to the image plane.
// Throws when delta_x and delta_y are colinear or zero, since no plane normal
// exists in that case.
// NOTE(review): extraction lost original lines 54 (declaration of the `ret`
// DeviceBuffer<Ray<Tvec>>), 62 (presumably `shambase::throw_with_loc(
// shambase::format(` per the tooltips below), 80 and 82 (the second
// `sham::kernel_call(` site and its empty input MultiRef).
48 template<class Tvec>
49 sham::DeviceBuffer<shammath::Ray<Tvec>> pixel_to_orthographic_rays(
50 Tvec center, Tvec delta_x, Tvec delta_y, u32 nx, u32 ny) {
51
52 using Tscal = shambase::VecComponent<Tvec>;
53
// Tail of the `ret` buffer construction (one slot per pixel).
55 nx * ny, shamsys::instance::get_compute_scheduler_ptr()};
56
57 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
58
// Plane normal; its length doubles as a degeneracy detector.
59 Tvec e_z = sycl::cross(delta_x, delta_y);
60 Tscal len = sycl::length(e_z);
// `!(len > 0)` also rejects NaN, which `len <= 0` would not.
61 if (!(len > 0)) {
63 "The cross product of delta_x and delta_y is zero\n"
64 " args :"
65 " center = {}\n"
66 " delta_x = {}\n"
67 " delta_y = {}\n"
68 " nx = {}\n"
69 " ny = {}\n"
70 " -> e_z = {}\n",
71 center,
72 delta_x,
73 delta_y,
74 nx,
75 ny,
76 e_z));
77 }
// Normalize the shared ray direction.
78 e_z /= len;
79
81 q,
83 sham::MultiRef{ret},
84 nx * ny,
85 [=](u32 gid, shammath::Ray<Tvec> *ray) {
86 u32 ix = gid % nx;
87 u32 iy = gid / nx;
// Same pixel-centre mapping as pixel_to_positions.
88 f64 fx = ((f64(ix) + 0.5) / nx) - 0.5;
89 f64 fy = ((f64(iy) + 0.5) / ny) - 0.5;
90 Tvec pos_render = center + delta_x * fx + delta_y * fy;
91
92 ray[gid] = shammath::Ray<Tvec>(pos_render, e_z);
93 });
94
95 return ret;
96 }
97
// Renders the named field at the given sample positions.
// Resolves `field_name` through RenderFieldGetter (which may route through the
// optional python `custom_getter`) and forwards the resolved per-patch field
// accessor to the getter-based compute_slice overload. Logs the request and
// its wall time on MPI rank 0 only, to avoid one log line per rank.
// NOTE(review): extraction lost original lines 103 (presumably the
// `-> sham::DeviceBuffer<Tfield> {` trailing return / opening brace) and 114
// (presumably `shambase::Timer t;`, used via t.start()/t.end() below).
98 template<class Tvec, class Tfield, template<class> class SPHKernel>
99 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_slice(
100 std::string field_name,
101 const sham::DeviceBuffer<Tvec> &positions,
102 std::optional<std::function<py::array_t<Tfield>(size_t, pybind11::dict &)>> custom_getter)
104
105 if (shamcomm::world_rank() == 0) {
106 logger::info_ln(
107 "sph::CartesianRender",
108 shambase::format(
109 "compute_slice field_name: {}, positions count: {}",
110 field_name,
111 positions.get_size()));
112 }
113
115 t.start();
116
// The lambda receives the field accessor for the requested field and runs the
// actual SPH interpolation; timing brackets the whole render.
117 auto ret = RenderFieldGetter<Tvec, Tfield, SPHKernel>(context, solver_config, storage)
118 .runner_function(
119 field_name,
120 [&](auto field_getter) -> sham::DeviceBuffer<Tfield> {
121 return compute_slice(field_getter, positions);
122 },
123 custom_getter);
124
125 t.end();
126 if (shamcomm::world_rank() == 0) {
127 logger::info_ln(
128 "sph::CartesianRender",
129 shambase::format("compute_slice took {}", t.get_time_str()));
130 }
131
132 return ret;
133 }
134
// Column-integrates the named field along the given rays.
// Same dispatch pattern as compute_slice(field_name, ...): resolves the field
// through RenderFieldGetter (optionally via the python `custom_getter`) and
// forwards to the getter-based compute_column_integ overload, with rank-0
// logging and timing around the call.
// NOTE(review): extraction lost original lines 138 (presumably the
// `const sham::DeviceBuffer<shammath::Ray<Tvec>> &rays,` parameter used
// below), 140 (trailing return / opening brace) and 151 (`shambase::Timer t;`).
135 template<class Tvec, class Tfield, template<class> class SPHKernel>
136 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_column_integ(
137 std::string field_name,
139 std::optional<std::function<py::array_t<Tfield>(size_t, pybind11::dict &)>> custom_getter)
141
142 if (shamcomm::world_rank() == 0) {
143 logger::info_ln(
144 "sph::CartesianRender",
145 shambase::format(
146 "compute_column_integ field_name: {}, rays count: {}",
147 field_name,
148 rays.get_size()));
149 }
150
152 t.start();
153
154 auto ret = RenderFieldGetter<Tvec, Tfield, SPHKernel>(context, solver_config, storage)
155 .runner_function(
156 field_name,
157 [&](auto field_getter) -> sham::DeviceBuffer<Tfield> {
158 return compute_column_integ(field_getter, rays);
159 },
160 custom_getter);
161
162 t.end();
163 if (shamcomm::world_rank() == 0) {
164 logger::info_ln(
165 "sph::CartesianRender",
166 shambase::format("compute_column_integ took {}", t.get_time_str()));
167 }
168
169 return ret;
170 }
171
// Azimuthally integrates the named field along the given ring rays.
// Same dispatch pattern as the other name-based overloads: resolve the field
// through RenderFieldGetter (optionally via the python `custom_getter`),
// forward to the getter-based compute_azymuthal_integ overload, rank-0
// logging and timing around the call.
// NOTE(review): "azymuthal" (sic) — conventional spelling is "azimuthal";
// renaming would break the public interface, so only flagging it here.
// NOTE(review): extraction lost original lines 175 (presumably the
// `const sham::DeviceBuffer<shammath::RingRay<Tvec>> &ring_rays,` parameter),
// 177 (trailing return / opening brace) and 188 (`shambase::Timer t;`).
172 template<class Tvec, class Tfield, template<class> class SPHKernel>
173 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_azymuthal_integ(
174 std::string field_name,
176 std::optional<std::function<py::array_t<Tfield>(size_t, pybind11::dict &)>> custom_getter)
178
179 if (shamcomm::world_rank() == 0) {
180 logger::info_ln(
181 "sph::CartesianRender",
182 shambase::format(
183 "compute_azymuthal_integ field_name: {}, ring_rays count: {}",
184 field_name,
185 ring_rays.get_size()));
186 }
187
189 t.start();
190
191 auto ret = RenderFieldGetter<Tvec, Tfield, SPHKernel>(context, solver_config, storage)
192 .runner_function(
193 field_name,
194 [&](auto field_getter) -> sham::DeviceBuffer<Tfield> {
195 return compute_azymuthal_integ(field_getter, ring_rays);
196 },
197 custom_getter);
198
199 t.end();
200 if (shamcomm::world_rank() == 0) {
201 logger::info_ln(
202 "sph::CartesianRender",
203 shambase::format("compute_azymuthal_integ took {}", t.get_time_str()));
204 }
205
206 return ret;
207 }
208
// Core SPH slice renderer: evaluates the field at each sample position with
// standard SPH kernel interpolation,
//     A(r) = sum_b  m_b * A_b * W_3d(|r - r_b|, h_b) / rho_b,
// accumulated per patch on this rank, then MPI-summed across ranks so every
// rank returns the complete image.
// NOTE(review): extraction lost original lines 212/214/216 (trailing return
// and the declaration/initialization of the `ret` output buffer — one Tfield
// per position), 221 (declaration of `transf`, the patch coordinate
// transform assigned below), and 225 (presumably the lambda's second
// parameter, the patch data `pdat` used throughout).
209 template<class Tvec, class Tfield, template<class> class SPHKernel>
210 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_slice(
211 std::function<field_getter_t> field_getter, const sham::DeviceBuffer<Tvec> &positions)
213
215 positions.get_size(), shamsys::instance::get_compute_scheduler_ptr()};
217
218 using u_morton = u32;
219 using RTree = RadixTree<u_morton, Tvec>;
220
222 = scheduler().get_sim_box().template get_patch_transform<Tvec>();
223
// Accumulate each non-empty patch's contribution into the shared output.
224 scheduler().for_each_patchdata_nonempty([&](const shamrock::patch::Patch cur_p,
226 shammath::CoordRange<Tvec> box = transf.to_obj_coord(cur_p);
227
// Field 0 is the position field ("xyz" per the layout docs below).
228 PatchDataField<Tvec> &main_field = pdat.get_field<Tvec>(0);
229
230 auto &buf_xyz = pdat.get_field<Tvec>(0).get_buf();
231 auto &buf_hpart
232 = pdat.get_field<Tscal>(pdat.pdl().get_field_idx<Tscal>("hpart")).get_buf();
233
// Per-patch accessor for the field being rendered.
234 auto &buf_field_to_render = field_getter(cur_p, pdat);
235
236 u32 obj_cnt = main_field.get_obj_cnt();
237
// Per-patch radix tree over particle positions for neighbour pruning.
238 RTree tree(
239 shamsys::instance::get_compute_scheduler_ptr(),
240 {box.lower, box.upper},
241 buf_xyz,
242 obj_cnt,
243 solver_config.tree_reduction_level);
244
245 tree.compute_cell_ibounding_box(shamsys::instance::get_compute_queue());
246 tree.convert_bounding_box(shamsys::instance::get_compute_queue());
247
// Per-node max smoothing length, used to grow node boxes to the maximal
// kernel support radius during traversal.
248 RadixTreeField<Tscal> hmax_tree = tree.compute_int_boxes(
249 shamsys::instance::get_compute_queue(),
250 pdat.get_field<Tscal>(pdat.pdl().get_field_idx<Tscal>("hpart")).get_buf(),
251 1);
252
253 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
254
// Register USM accesses so the kernel is ordered after prior users.
255 sham::EventList depends_list;
256 Tfield *render_field = ret.get_write_access(depends_list);
257
258 const Tvec *pixel_positions = positions.get_read_access(depends_list);
259
260 auto xyz = buf_xyz.get_read_access(depends_list);
261 auto hpart = buf_hpart.get_read_access(depends_list);
262 auto torender = buf_field_to_render.get_read_access(depends_list);
263
264 sycl::event e2 = q.submit(depends_list, [&, render_field](sycl::handler &cgh) {
265 shamrock::tree::ObjectIterator particle_looper(tree, cgh);
266
267 sycl::accessor hmax{
268 shambase::get_check_ref(hmax_tree.radix_tree_field_buf), cgh, sycl::read_only};
269
270 constexpr Tscal Rker2 = Kernel::Rkern * Kernel::Rkern;
271
272 Tscal partmass = solver_config.gpart_mass;
273
// One work item per output pixel position.
274 shambase::parallel_for(
275 cgh, positions.get_size(), "compute slice render", [=](u32 gid) {
276 Tvec pos_render = pixel_positions[gid];
277
278 Tfield ret = sham::VectorProperties<Tfield>::get_zero();
279
// Tree traversal: descend only into nodes whose box, expanded by the
// node's maximal kernel radius, contains the sample position.
280 particle_looper.rtree_for(
281 [&](u32 node_id, Tvec bmin, Tvec bmax) -> bool {
282 Tscal rint_cell = hmax[node_id] * Kernel::Rkern;
283
284 auto interbox
285 = shammath::CoordRange<Tvec>{bmin, bmax}.expand_all(rint_cell);
286
287 return interbox.contain_pos(pos_render);
288 },
289 [&](u32 id_b) {
290 Tvec dr = pos_render - xyz[id_b];
291 Tscal rab2 = sycl::dot(dr, dr);
292 Tscal h_b = hpart[id_b];
293
// Skip particles outside their own kernel support (squared test
// avoids the sqrt on the common reject path).
294 if (rab2 > h_b * h_b * Rker2) {
295 return;
296 }
297
298 Tscal rab = sycl::sqrt(rab2);
299
300 Tfield val = torender[id_b];
301
// SPH density from mass/smoothing length: rho_h(m, h, hfact).
302 Tscal rho_b = shamrock::sph::rho_h(partmass, h_b, Kernel::hfactd);
303
// Standard SPH interpolant contribution m * A * W / rho.
304 ret += partmass * val * Kernel::W_3d(rab, h_b) / rho_b;
305 });
306
// `+=` so successive patches accumulate into the same pixel.
307 render_field[gid] += ret;
308 });
309 });
310
// Publish the kernel's event to every buffer it touched.
311 buf_xyz.complete_event_state(e2);
312 buf_hpart.complete_event_state(e2);
313 buf_field_to_render.complete_event_state(e2);
314 ret.complete_event_state(e2);
315 positions.complete_event_state(e2);
316 });
317
// Each rank only rendered its own patches; sum over MPI to get the full image.
318 shamalgs::collective::reduce_buffer_in_place_sum(ret, MPI_COMM_WORLD);
319
320 return ret;
321 }
322
// Core column-integration renderer: for each ray, integrates the field along
// the ray using the column-integrated kernel Y_3d evaluated at the particle's
// perpendicular (impact-parameter) distance to the ray:
//     I = sum_b  m_b * A_b * Y_3d(b_b, h_b, 4) / rho_b.
// Structure mirrors compute_slice(getter, positions); only the per-particle
// distance (perpendicular instead of radial) and the kernel (Y_3d instead of
// W_3d) differ. Result is MPI-summed so every rank returns the full image.
// NOTE(review): extraction lost original lines 326/328/330 (the `rays`
// parameter, trailing return, and the declaration of the `ret` output
// buffer), 335 (declaration of `transf`) and 339 (the lambda's `pdat`
// parameter).
323 template<class Tvec, class Tfield, template<class> class SPHKernel>
324 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_column_integ(
325 std::function<field_getter_t> field_getter,
327
329 rays.get_size(), shamsys::instance::get_compute_scheduler_ptr()};
331
332 using u_morton = u32;
333 using RTree = RadixTree<u_morton, Tvec>;
334
336 = scheduler().get_sim_box().template get_patch_transform<Tvec>();
337
338 scheduler().for_each_patchdata_nonempty([&](const shamrock::patch::Patch cur_p,
340 shammath::CoordRange<Tvec> box = transf.to_obj_coord(cur_p);
341
342 PatchDataField<Tvec> &main_field = pdat.get_field<Tvec>(0);
343
344 auto &buf_xyz = pdat.get_field<Tvec>(0).get_buf();
345 auto &buf_hpart
346 = pdat.get_field<Tscal>(pdat.pdl().get_field_idx<Tscal>("hpart")).get_buf();
347
348 auto &buf_field_to_render = field_getter(cur_p, pdat);
349
350 u32 obj_cnt = main_field.get_obj_cnt();
351
// Same per-patch radix-tree construction as compute_slice.
352 RTree tree(
353 shamsys::instance::get_compute_scheduler_ptr(),
354 {box.lower, box.upper},
355 buf_xyz,
356 obj_cnt,
357 solver_config.tree_reduction_level);
358
359 tree.compute_cell_ibounding_box(shamsys::instance::get_compute_queue());
360 tree.convert_bounding_box(shamsys::instance::get_compute_queue());
361
362 RadixTreeField<Tscal> hmax_tree = tree.compute_int_boxes(
363 shamsys::instance::get_compute_queue(),
364 pdat.get_field<Tscal>(pdat.pdl().get_field_idx<Tscal>("hpart")).get_buf(),
365 1);
366
367 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
368
369 sham::EventList depends_list;
370 Tfield *render_field = ret.get_write_access(depends_list);
371
372 const shammath::Ray<Tvec> *image_rays = rays.get_read_access(depends_list);
373
374 auto xyz = buf_xyz.get_read_access(depends_list);
375 auto hpart = buf_hpart.get_read_access(depends_list);
376 auto torender = buf_field_to_render.get_read_access(depends_list);
377
378 sycl::event e2 = q.submit(depends_list, [&, render_field](sycl::handler &cgh) {
379 shamrock::tree::ObjectIterator particle_looper(tree, cgh);
380
381 sycl::accessor hmax{
382 shambase::get_check_ref(hmax_tree.radix_tree_field_buf), cgh, sycl::read_only};
383
384 constexpr Tscal Rker2 = Kernel::Rkern * Kernel::Rkern;
385
386 Tscal partmass = solver_config.gpart_mass;
387
// NOTE(review): profiling label "compute slice render" looks copy-pasted
// from compute_slice; "compute column integ render" would be clearer.
388 shambase::parallel_for(cgh, rays.get_size(), "compute slice render", [=](u32 gid) {
389 Tfield ret = sham::VectorProperties<Tfield>::get_zero();
390
391 shammath::Ray<Tvec> ray = image_rays[gid];
392
// Descend into nodes whose hmax-expanded AABB intersects the ray.
393 particle_looper.rtree_for(
394 [&](u32 node_id, Tvec bmin, Tvec bmax) -> bool {
395 Tscal rint_cell = hmax[node_id] * Kernel::Rkern;
396
397 auto interbox = shammath::AABB<Tvec>{bmin, bmax}.expand_all(rint_cell);
398
399 return interbox.intersect_ray(ray);
400 },
401 [&](u32 id_b) {
402 Tvec dr = ray.origin - xyz[id_b];
403
// Remove the along-ray component: dr becomes the perpendicular
// offset (impact parameter vector) of particle b from the ray.
404 dr -= ray.direction * sycl::dot(dr, ray.direction);
405
406 Tscal rab2 = sycl::dot(dr, dr);
407 Tscal h_b = hpart[id_b];
408
// Outside kernel support at closest approach -> zero contribution.
409 if (rab2 > h_b * h_b * Rker2) {
410 return;
411 }
412
413 Tscal rab = sycl::sqrt(rab2);
414
415 Tfield val = torender[id_b];
416
417 Tscal rho_b = shamrock::sph::rho_h(partmass, h_b, Kernel::hfactd);
418
// Y_3d is the line-of-sight-integrated kernel (3rd arg = 4:
// semantics defined by the Kernel class — TODO confirm).
419 ret += partmass * val * Kernel::Y_3d(rab, h_b, 4) / rho_b;
420 });
421
422 render_field[gid] += ret;
423 });
424 });
425
426 buf_xyz.complete_event_state(e2);
427 buf_hpart.complete_event_state(e2);
428 buf_field_to_render.complete_event_state(e2);
429 ret.complete_event_state(e2);
430 rays.complete_event_state(e2);
431 });
432
// Combine per-rank partial images into the full result on all ranks.
433 shamalgs::collective::reduce_buffer_in_place_sum(ret, MPI_COMM_WORLD);
434
435 return ret;
436 }
437
// Core azimuthal-integration renderer: each RingRay describes a circle
// (center, radius, in-plane basis e_x/e_y, normal e_z). A particle's distance
// to the ring is measured in the (radial, vertical) plane:
//     delta_r = |projection onto ring plane| - radius,  z = offset along e_z,
// and the column kernel Y_3d is evaluated at sqrt(z^2 + delta_r^2), i.e. the
// ring is locally treated as a straight line (see the curvature TODO below).
// Structure mirrors the other getter-based renderers; MPI-summed at the end.
// NOTE(review): extraction lost original lines 441/442/444 (the `ring_rays`
// parameter, trailing return, and the `ret` output buffer declaration), 451
// (declaration of `transf`) and 455 (the lambda's `pdat` parameter).
438 template<class Tvec, class Tfield, template<class> class SPHKernel>
439 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_azymuthal_integ(
440 std::function<field_getter_t> field_getter,
443
445 ring_rays.get_size(), shamsys::instance::get_compute_scheduler_ptr()};
447
448 using u_morton = u32;
449 using RTree = RadixTree<u_morton, Tvec>;
450
452 = scheduler().get_sim_box().template get_patch_transform<Tvec>();
453
454 scheduler().for_each_patchdata_nonempty([&](const shamrock::patch::Patch cur_p,
456 shammath::CoordRange<Tvec> box = transf.to_obj_coord(cur_p);
457
458 PatchDataField<Tvec> &main_field = pdat.get_field<Tvec>(0);
459
460 auto &buf_xyz = pdat.get_field<Tvec>(0).get_buf();
461 auto &buf_hpart
462 = pdat.get_field<Tscal>(pdat.pdl().get_field_idx<Tscal>("hpart")).get_buf();
463
464 auto &buf_field_to_render = field_getter(cur_p, pdat);
465
466 u32 obj_cnt = main_field.get_obj_cnt();
467
// Same per-patch radix-tree construction as the other renderers.
468 RTree tree(
469 shamsys::instance::get_compute_scheduler_ptr(),
470 {box.lower, box.upper},
471 buf_xyz,
472 obj_cnt,
473 solver_config.tree_reduction_level);
474
475 tree.compute_cell_ibounding_box(shamsys::instance::get_compute_queue());
476 tree.convert_bounding_box(shamsys::instance::get_compute_queue());
477
478 RadixTreeField<Tscal> hmax_tree = tree.compute_int_boxes(
479 shamsys::instance::get_compute_queue(),
480 pdat.get_field<Tscal>(pdat.pdl().get_field_idx<Tscal>("hpart")).get_buf(),
481 1);
482
483 sham::DeviceQueue &q = shamsys::instance::get_compute_scheduler().get_queue();
484
485 sham::EventList depends_list;
486 Tfield *render_field = ret.get_write_access(depends_list);
487
488 const shammath::RingRay<Tvec> *ring_rays_ptr = ring_rays.get_read_access(depends_list);
489
490 auto xyz = buf_xyz.get_read_access(depends_list);
491 auto hpart = buf_hpart.get_read_access(depends_list);
492 auto torender = buf_field_to_render.get_read_access(depends_list);
493
494 sycl::event e2 = q.submit(depends_list, [&, render_field](sycl::handler &cgh) {
495 shamrock::tree::ObjectIterator particle_looper(tree, cgh);
496
497 sycl::accessor hmax{
498 shambase::get_check_ref(hmax_tree.radix_tree_field_buf), cgh, sycl::read_only};
499
500 constexpr Tscal Rker2 = Kernel::Rkern * Kernel::Rkern;
501
502 Tscal partmass = solver_config.gpart_mass;
503
// NOTE(review): profiling label "compute slice render" looks copy-pasted
// from compute_slice; "compute azymuthal integ render" would be clearer.
504 shambase::parallel_for(
505 cgh, ring_rays.get_size(), "compute slice render", [=](u32 gid) {
506 Tfield ret = sham::VectorProperties<Tfield>::get_zero();
507
508 shammath::RingRay<Tvec> ring_ray = ring_rays_ptr[gid];
509 Tvec ez = ring_ray.get_ez();
510
// Approximate node/ring intersection test (see intersect_ring_ray_approx).
511 particle_looper.rtree_for(
512 [&](u32 node_id, Tvec bmin, Tvec bmax) -> bool {
513 Tscal rint_cell = hmax[node_id] * Kernel::Rkern;
514
515 auto interbox
516 = shammath::AABB<Tvec>{bmin, bmax}.expand_all(rint_cell);
517
518 return interbox.intersect_ring_ray_approx(ring_ray);
519 },
520 [&](u32 id_b) {
// Express the particle offset in the ring's local frame.
521 Tvec r_center = ring_ray.center - xyz[id_b];
522
523 Tscal z_val = sycl::dot(r_center, ez);
524 Tscal x_val = sycl::dot(r_center, ring_ray.e_x);
525 Tscal y_val = sycl::dot(r_center, ring_ray.e_y);
526 Tscal r_val = sycl::sqrt(x_val * x_val + y_val * y_val);
527
// Radial offset from the ring circle.
528 Tscal delta_r = r_val - ring_ray.radius;
529
// Squared distance to the ring in the (r, z) plane.
530 Tscal rab2_ring = z_val * z_val + delta_r * delta_r;
531 Tscal h_b = hpart[id_b];
532
533 if (rab2_ring > h_b * h_b * Rker2) {
534 return;
535 }
536
537 Tscal rab = sycl::sqrt(rab2_ring);
538
539 Tfield val = torender[id_b];
540
541 Tscal rho_b = shamrock::sph::rho_h(partmass, h_b, Kernel::hfactd);
542
// Straight-line column kernel along the ring tangent.
543 // TODO: account for curvature
544 ret += partmass * val * Kernel::Y_3d(rab, h_b, 4) / rho_b;
545 });
546
547 render_field[gid] += ret;
548 });
549 });
550
551 buf_xyz.complete_event_state(e2);
552 buf_hpart.complete_event_state(e2);
553 buf_field_to_render.complete_event_state(e2);
554 ret.complete_event_state(e2);
555 ring_rays.complete_event_state(e2);
556 });
557
// Combine per-rank partial results on all ranks.
558 shamalgs::collective::reduce_buffer_in_place_sum(ret, MPI_COMM_WORLD);
559
560 return ret;
561 }
562
// Convenience overload: builds the pixel-centre sample positions for the
// (center, delta_x, delta_y, nx, ny) image plane, then defers to the
// position-buffer overload with the already-resolved field getter.
// NOTE(review): extraction lost original line 570 (presumably
// `u32 ny) -> sham::DeviceBuffer<Tfield> {`, by analogy with the intact
// overload further down).
563 template<class Tvec, class Tfield, template<class> class SPHKernel>
564 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_slice(
565 std::function<field_getter_t> field_getter,
566 Tvec center,
567 Tvec delta_x,
568 Tvec delta_y,
569 u32 nx,
571
572 auto positions = pixel_to_positions(center, delta_x, delta_y, nx, ny);
573
574 return compute_slice(field_getter, positions);
575 }
576
// Convenience overload: builds one orthographic ray per pixel of the
// (center, delta_x, delta_y, nx, ny) image plane, then defers to the
// ray-buffer overload with the already-resolved field getter.
// NOTE(review): extraction lost original line 584 (presumably
// `u32 ny) -> sham::DeviceBuffer<Tfield> {`, by analogy with the intact
// overload further down).
577 template<class Tvec, class Tfield, template<class> class SPHKernel>
578 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_column_integ(
579 std::function<field_getter_t> field_getter,
580 Tvec center,
581 Tvec delta_x,
582 Tvec delta_y,
583 u32 nx,
585
586 auto rays = pixel_to_orthographic_rays(center, delta_x, delta_y, nx, ny);
587
588 return compute_column_integ(field_getter, rays);
589 }
590
591 template<class Tvec, class Tfield, template<class> class SPHKernel>
592 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_slice(
593 std::string field_name,
594 Tvec center,
595 Tvec delta_x,
596 Tvec delta_y,
597 u32 nx,
598 u32 ny,
599 std::optional<std::function<pybind11::array_t<Tfield>(size_t, pybind11::dict &)>>
600 custom_getter) -> sham::DeviceBuffer<Tfield> {
601 auto positions = pixel_to_positions(center, delta_x, delta_y, nx, ny);
602 return compute_slice(field_name, positions, custom_getter);
603 }
604
605 template<class Tvec, class Tfield, template<class> class SPHKernel>
606 auto CartesianRender<Tvec, Tfield, SPHKernel>::compute_column_integ(
607 std::string field_name,
608 Tvec center,
609 Tvec delta_x,
610 Tvec delta_y,
611 u32 nx,
612 u32 ny,
613 std::optional<std::function<pybind11::array_t<Tfield>(size_t, pybind11::dict &)>>
614 custom_getter) -> sham::DeviceBuffer<Tfield> {
615 auto rays = pixel_to_orthographic_rays(center, delta_x, delta_y, nx, ny);
616 return compute_column_integ(field_name, rays, custom_getter);
617 }
618
619} // namespace shammodels::sph::modules
620
621using namespace shammath;
625
629
633
constexpr const char * xyz
Position field (3D coordinates)
constexpr const char * hpart
Smoothing length field.
double f64
Alias for double.
std::uint32_t u32
32 bit unsigned integer
The radix tree.
Definition RadixTree.hpp:50
A buffer allocated in USM (Unified Shared Memory)
void complete_event_state(sycl::event e) const
Complete the event state of the buffer.
size_t get_size() const
Gets the number of elements in the buffer.
const T * get_read_access(sham::EventList &depends_list, SourceLocation src_loc=SourceLocation{}) const
Get a read-only pointer to the buffer's data.
A SYCL queue associated with a device and a context.
sycl::event submit(Fct &&fct)
Submits a kernel to the SYCL queue.
DeviceQueue & get_queue(u32 id=0)
Get a reference to a DeviceQueue.
Class to manage a list of SYCL events.
Definition EventList.hpp:31
Class Timer measures the time elapsed since the timer was started.
Definition time.hpp:96
std::string get_time_str() const
Converts the stored nanosecond time to a string representation.
Definition time.hpp:117
void end()
Stops the timer and stores the elapsed time in nanoseconds.
Definition time.hpp:111
void start()
Starts the timer.
Definition time.hpp:106
u32 get_field_idx(const std::string &field_name) const
Get the field id if matching name & type.
PatchDataLayer container class, the layout is described in patchdata_layout.
This header file contains utility functions related to exception handling in the code.
void kernel_call(sham::DeviceQueue &q, RefIn in, RefOut in_out, u32 n, Functor &&func, SourceLocation &&callsite=SourceLocation{})
Submit a kernel to a SYCL queue.
void throw_with_loc(std::string message, SourceLocation loc=SourceLocation{})
Throw an exception and append the source location to it.
T & get_check_ref(const std::unique_ptr< T > &ptr, SourceLocation loc=SourceLocation())
Takes a std::unique_ptr and returns a reference to the object it holds. It throws a std::runtime_error if the pointer is null.
Definition memory.hpp:110
i32 world_rank()
Gives the rank of the current process in the MPI communicator.
Definition worldInfo.cpp:40
namespace for math utility
Definition AABB.hpp:26
namespace for the sph model modules
A class that references multiple buffers or similar objects.
Ray representation for intersection testing.
Definition AABB.hpp:34
Ring ray representation for intersection testing.
Definition AABB.hpp:67
Patch object that contain generic patch information.
Definition Patch.hpp:33