// symtensors.hpp — symmetric 3d tensors (ranks 1 to 5) for the Shamrock astrophysical code
// -------------------------------------------------------//
//
// SHAMROCK code for hydrodynamics
// Copyright (c) 2021-2026 Timothée David--Cléris <tim.shamrock@proton.me>
// SPDX-License-Identifier: CeCILL Free Software License Agreement v2.1
// Shamrock is licensed under the CeCILL 2.1 License, see LICENSE for more information
//
// -------------------------------------------------------//
10#pragma once
11
19
20namespace shammath {
21
22 template<class T>
24
25 static constexpr u32 compo_cnt = 3;
26
27 T v_0;
28 T v_1;
29 T v_2;
30
31 inline T inner(const SymTensor3d_1 &t) const {
32 return v_0 * t.v_0 + v_1 * t.v_1 + v_2 * t.v_2;
33 }
34
35 inline SymTensor3d_1 inner(const T scal) const {
36 return SymTensor3d_1<T>{v_0 * scal, v_1 * scal, v_2 * scal};
37 }
38
39 inline SymTensor3d_1 &operator*=(const T scal) {
40
41 v_0 *= scal;
42 v_1 *= scal;
43 v_2 *= scal;
44
45 return *this;
46 }
47
48 SymTensor3d_1 operator*(const T &scal) const {
49 return SymTensor3d_1<T>{
50 v_0 * scal,
51 v_1 * scal,
52 v_2 * scal,
53 };
54 }
55
56 inline SymTensor3d_1 &operator+=(const SymTensor3d_1 other) {
57
58 v_0 += other.v_0;
59 v_1 += other.v_1;
60 v_2 += other.v_2;
61
62 return *this;
63 }
64
65 SymTensor3d_1 operator+(const SymTensor3d_1 &t2) const {
66 return SymTensor3d_1<T>{v_0 + t2.v_0, v_1 + t2.v_1, v_2 + t2.v_2};
67 }
68
69 SymTensor3d_1 operator-(const SymTensor3d_1 &t2) const {
70 return SymTensor3d_1<T>{v_0 - t2.v_0, v_1 - t2.v_1, v_2 - t2.v_2};
71 }
72
73 template<class Tacc>
74 inline void store(Tacc &&acc, u32 offset) const {
75 acc[offset + 0] = v_0;
76 acc[offset + 1] = v_1;
77 acc[offset + 2] = v_2;
78 }
79
80 template<class Tacc>
81 inline static SymTensor3d_1 load(Tacc &&acc, u32 offset) {
82 return SymTensor3d_1{
83 acc[offset + 0],
84 acc[offset + 1],
85 acc[offset + 2],
86 };
87 }
88 };
89
90 template<class T>
92
93 static constexpr u32 compo_cnt = 6;
94
95 T v_00;
96 T v_01;
97 T v_02;
98 T v_11;
99 T v_12;
100 T v_22;
101
102 inline T inner(const SymTensor3d_2 &t) const {
103 return v_00 * t.v_00 + 2 * v_01 * t.v_01 + 2 * v_02 * t.v_02 + v_11 * t.v_11
104 + 2 * v_12 * t.v_12 + v_22 * t.v_22;
105 }
106
107 inline SymTensor3d_1<T> inner(const SymTensor3d_1<T> &t) const {
108 return SymTensor3d_1<T>{
109 v_00 * t.v_0 + v_01 * t.v_1 + v_02 * t.v_2,
110 v_01 * t.v_0 + v_11 * t.v_1 + v_12 * t.v_2,
111 v_02 * t.v_0 + v_12 * t.v_1 + v_22 * t.v_2};
112 }
113
114 inline SymTensor3d_2 inner(const T scal) const {
115 return SymTensor3d_2<T>{
116 v_00 * scal, v_01 * scal, v_02 * scal, v_11 * scal, v_12 * scal, v_22 * scal};
117 }
118
119 inline SymTensor3d_2 &operator*=(const T scal) {
120
121 v_00 *= scal;
122 v_01 *= scal;
123 v_02 *= scal;
124 v_11 *= scal;
125 v_12 *= scal;
126 v_22 *= scal;
127
128 return *this;
129 }
130
131 SymTensor3d_2 operator*(const T &scal) const {
132 return SymTensor3d_2<T>{
133 v_00 * scal, v_01 * scal, v_02 * scal, v_11 * scal, v_12 * scal, v_22 * scal};
134 }
135
136 inline SymTensor3d_2 &operator+=(const SymTensor3d_2 other) {
137
138 v_00 += other.v_00;
139 v_01 += other.v_01;
140 v_02 += other.v_02;
141 v_11 += other.v_11;
142 v_12 += other.v_12;
143 v_22 += other.v_22;
144
145 return *this;
146 }
147
148 SymTensor3d_2 operator+(const SymTensor3d_2 &t2) const {
149 return SymTensor3d_2<T>{
150 v_00 + t2.v_00,
151 v_01 + t2.v_01,
152 v_02 + t2.v_02,
153 v_11 + t2.v_11,
154 v_12 + t2.v_12,
155 v_22 + t2.v_22};
156 }
157
158 SymTensor3d_2 operator-(const SymTensor3d_2 &t2) const {
159 return SymTensor3d_2<T>{
160 v_00 - t2.v_00,
161 v_01 - t2.v_01,
162 v_02 - t2.v_02,
163 v_11 - t2.v_11,
164 v_12 - t2.v_12,
165 v_22 - t2.v_22};
166 }
167
168 template<class Tacc>
169 inline void store(Tacc &&acc, u32 offset) const {
170 acc[offset + 0] = v_00;
171 acc[offset + 1] = v_01;
172 acc[offset + 2] = v_02;
173 acc[offset + 3] = v_11;
174 acc[offset + 4] = v_12;
175 acc[offset + 5] = v_22;
176 }
177
178 template<class Tacc>
179 inline static SymTensor3d_2 load(Tacc &&acc, u32 offset) {
180 return SymTensor3d_2{
181 acc[offset + 0],
182 acc[offset + 1],
183 acc[offset + 2],
184 acc[offset + 3],
185 acc[offset + 4],
186 acc[offset + 5],
187 };
188 }
189 };
190
191 template<class T>
193
194 static constexpr u32 compo_cnt = 10;
195
196 T v_000;
197 T v_001;
198 T v_002;
199 T v_011;
200 T v_012;
201 T v_022;
202 T v_111;
203 T v_112;
204 T v_122;
205 T v_222;
206
207 inline T inner(const SymTensor3d_3 &t) const {
208 return v_000 * t.v_000 + 3 * v_001 * t.v_001 + 3 * v_002 * t.v_002 + 3 * v_011 * t.v_011
209 + 6 * v_012 * t.v_012 + 3 * v_022 * t.v_022 + v_111 * t.v_111
210 + 3 * v_112 * t.v_112 + 3 * v_122 * t.v_122 + v_222 * t.v_222;
211 }
212
213 inline SymTensor3d_1<T> inner(const SymTensor3d_2<T> &t) const {
214 return SymTensor3d_1<T>{
215 v_000 * t.v_00 + 2 * v_001 * t.v_01 + 2 * v_002 * t.v_02 + v_011 * t.v_11
216 + 2 * v_012 * t.v_12 + v_022 * t.v_22,
217 v_001 * t.v_00 + 2 * v_011 * t.v_01 + 2 * v_012 * t.v_02 + v_111 * t.v_11
218 + 2 * v_112 * t.v_12 + v_122 * t.v_22,
219 v_002 * t.v_00 + 2 * v_012 * t.v_01 + 2 * v_022 * t.v_02 + v_112 * t.v_11
220 + 2 * v_122 * t.v_12 + v_222 * t.v_22};
221 }
222
223 inline SymTensor3d_2<T> inner(const SymTensor3d_1<T> &t) const {
224
225 return SymTensor3d_2<T>{
226 v_000 * t.v_0 + v_001 * t.v_1 + v_002 * t.v_2,
227 v_001 * t.v_0 + v_011 * t.v_1 + v_012 * t.v_2,
228 v_002 * t.v_0 + v_012 * t.v_1 + v_022 * t.v_2,
229 v_011 * t.v_0 + v_111 * t.v_1 + v_112 * t.v_2,
230 v_012 * t.v_0 + v_112 * t.v_1 + v_122 * t.v_2,
231 v_022 * t.v_0 + v_122 * t.v_1 + v_222 * t.v_2};
232 }
233
234 inline SymTensor3d_3 inner(const T scal) const {
235 return SymTensor3d_3<T>{
236 v_000 * scal,
237 v_001 * scal,
238 v_002 * scal,
239 v_011 * scal,
240 v_012 * scal,
241 v_022 * scal,
242 v_111 * scal,
243 v_112 * scal,
244 v_122 * scal,
245 v_222 * scal};
246 }
247
248 inline SymTensor3d_3 &operator*=(const T scal) {
249
250 v_000 *= scal;
251 v_001 *= scal;
252 v_002 *= scal;
253 v_011 *= scal;
254 v_012 *= scal;
255 v_022 *= scal;
256 v_111 *= scal;
257 v_112 *= scal;
258 v_122 *= scal;
259 v_222 *= scal;
260
261 return *this;
262 }
263
264 SymTensor3d_3 operator*(const T &scal) const {
265 return SymTensor3d_3{
266 v_000 * scal,
267 v_001 * scal,
268 v_002 * scal,
269 v_011 * scal,
270 v_012 * scal,
271 v_022 * scal,
272 v_111 * scal,
273 v_112 * scal,
274 v_122 * scal,
275 v_222 * scal};
276 }
277
278 inline SymTensor3d_3 &operator+=(const SymTensor3d_3 other) {
279
280 v_000 += other.v_000;
281 v_001 += other.v_001;
282 v_002 += other.v_002;
283 v_011 += other.v_011;
284 v_012 += other.v_012;
285 v_022 += other.v_022;
286 v_111 += other.v_111;
287 v_112 += other.v_112;
288 v_122 += other.v_122;
289 v_222 += other.v_222;
290
291 return *this;
292 }
293
294 SymTensor3d_3 operator+(const SymTensor3d_3 &t2) const {
295 return SymTensor3d_3{
296 v_000 + t2.v_000,
297 v_001 + t2.v_001,
298 v_002 + t2.v_002,
299 v_011 + t2.v_011,
300 v_012 + t2.v_012,
301 v_022 + t2.v_022,
302 v_111 + t2.v_111,
303 v_112 + t2.v_112,
304 v_122 + t2.v_122,
305 v_222 + t2.v_222};
306 }
307
308 SymTensor3d_3 operator-(const SymTensor3d_3 &t2) const {
309 return SymTensor3d_3{
310 v_000 - t2.v_000,
311 v_001 - t2.v_001,
312 v_002 - t2.v_002,
313 v_011 - t2.v_011,
314 v_012 - t2.v_012,
315 v_022 - t2.v_022,
316 v_111 - t2.v_111,
317 v_112 - t2.v_112,
318 v_122 - t2.v_122,
319 v_222 - t2.v_222};
320 }
321
322 template<class Tacc>
323 inline void store(Tacc &&acc, u32 offset) const {
324 acc[offset + 0] = v_000;
325 acc[offset + 1] = v_001;
326 acc[offset + 2] = v_002;
327 acc[offset + 3] = v_011;
328 acc[offset + 4] = v_012;
329 acc[offset + 5] = v_022;
330 acc[offset + 6] = v_111;
331 acc[offset + 7] = v_112;
332 acc[offset + 8] = v_122;
333 acc[offset + 9] = v_222;
334 }
335
336 template<class Tacc>
337 inline static SymTensor3d_3 load(Tacc &&acc, u32 offset) {
338 return SymTensor3d_3{
339 acc[offset + 0],
340 acc[offset + 1],
341 acc[offset + 2],
342 acc[offset + 3],
343 acc[offset + 4],
344 acc[offset + 5],
345 acc[offset + 6],
346 acc[offset + 7],
347 acc[offset + 8],
348 acc[offset + 9],
349 };
350 }
351 };
352
353 template<class T>
355
356 static constexpr u32 compo_cnt = 15;
357
358 T v_0000;
359 T v_0001;
360 T v_0002;
361 T v_0011;
362 T v_0012;
363 T v_0022;
364 T v_0111;
365 T v_0112;
366 T v_0122;
367 T v_0222;
368 T v_1111;
369 T v_1112;
370 T v_1122;
371 T v_1222;
372 T v_2222;
373
374 inline T inner(const SymTensor3d_4 &t) const {
375 return v_0000 * t.v_0000 + 4 * v_0001 * t.v_0001 + 4 * v_0002 * t.v_0002
376 + 6 * v_0011 * t.v_0011 + 12 * v_0012 * t.v_0012 + 6 * v_0022 * t.v_0022
377 + 4 * v_0111 * t.v_0111 + 12 * v_0112 * t.v_0112 + 12 * v_0122 * t.v_0122
378 + 4 * v_0222 * t.v_0222 + v_1111 * t.v_1111 + 4 * v_1112 * t.v_1112
379 + 6 * v_1122 * t.v_1122 + 4 * v_1222 * t.v_1222 + v_2222 * t.v_2222;
380 }
381
382 inline SymTensor3d_1<T> inner(const SymTensor3d_3<T> &t) const {
383 return SymTensor3d_1<T>{
384 v_0000 * t.v_000 + 3 * v_0001 * t.v_001 + 3 * v_0002 * t.v_002
385 + 3 * v_0011 * t.v_011 + 6 * v_0012 * t.v_012 + 3 * v_0022 * t.v_022
386 + v_0111 * t.v_111 + 3 * v_0112 * t.v_112 + 3 * v_0122 * t.v_122
387 + v_0222 * t.v_222,
388 v_0001 * t.v_000 + 3 * v_0011 * t.v_001 + 3 * v_0012 * t.v_002
389 + 3 * v_0111 * t.v_011 + 6 * v_0112 * t.v_012 + 3 * v_0122 * t.v_022
390 + v_1111 * t.v_111 + 3 * v_1112 * t.v_112 + 3 * v_1122 * t.v_122
391 + v_1222 * t.v_222,
392 v_0002 * t.v_000 + 3 * v_0012 * t.v_001 + 3 * v_0022 * t.v_002
393 + 3 * v_0112 * t.v_011 + 6 * v_0122 * t.v_012 + 3 * v_0222 * t.v_022
394 + v_1112 * t.v_111 + 3 * v_1122 * t.v_112 + 3 * v_1222 * t.v_122
395 + v_2222 * t.v_222};
396 }
397
398 inline SymTensor3d_2<T> inner(const SymTensor3d_2<T> &t) const {
399
400 return SymTensor3d_2<T>{
401 v_0000 * t.v_00 + 2 * v_0001 * t.v_01 + 2 * v_0002 * t.v_02 + v_0011 * t.v_11
402 + 2 * v_0012 * t.v_12 + v_0022 * t.v_22,
403 v_0001 * t.v_00 + 2 * v_0011 * t.v_01 + 2 * v_0012 * t.v_02 + v_0111 * t.v_11
404 + 2 * v_0112 * t.v_12 + v_0122 * t.v_22,
405 v_0002 * t.v_00 + 2 * v_0012 * t.v_01 + 2 * v_0022 * t.v_02 + v_0112 * t.v_11
406 + 2 * v_0122 * t.v_12 + v_0222 * t.v_22,
407 v_0011 * t.v_00 + 2 * v_0111 * t.v_01 + 2 * v_0112 * t.v_02 + v_1111 * t.v_11
408 + 2 * v_1112 * t.v_12 + v_1122 * t.v_22,
409 v_0012 * t.v_00 + 2 * v_0112 * t.v_01 + 2 * v_0122 * t.v_02 + v_1112 * t.v_11
410 + 2 * v_1122 * t.v_12 + v_1222 * t.v_22,
411 v_0022 * t.v_00 + 2 * v_0122 * t.v_01 + 2 * v_0222 * t.v_02 + v_1122 * t.v_11
412 + 2 * v_1222 * t.v_12 + v_2222 * t.v_22};
413 }
414
415 inline SymTensor3d_3<T> inner(const SymTensor3d_1<T> &t) const {
416 return SymTensor3d_3<T>{
417 v_0000 * t.v_0 + v_0001 * t.v_1 + v_0002 * t.v_2,
418 v_0001 * t.v_0 + v_0011 * t.v_1 + v_0012 * t.v_2,
419 v_0002 * t.v_0 + v_0012 * t.v_1 + v_0022 * t.v_2,
420 v_0011 * t.v_0 + v_0111 * t.v_1 + v_0112 * t.v_2,
421 v_0012 * t.v_0 + v_0112 * t.v_1 + v_0122 * t.v_2,
422 v_0022 * t.v_0 + v_0122 * t.v_1 + v_0222 * t.v_2,
423 v_0111 * t.v_0 + v_1111 * t.v_1 + v_1112 * t.v_2,
424 v_0112 * t.v_0 + v_1112 * t.v_1 + v_1122 * t.v_2,
425 v_0122 * t.v_0 + v_1122 * t.v_1 + v_1222 * t.v_2,
426 v_0222 * t.v_0 + v_1222 * t.v_1 + v_2222 * t.v_2};
427 }
428
429 inline SymTensor3d_4 inner(const T scal) const {
430 return SymTensor3d_4<T>{
431 v_0000 * scal,
432 v_0001 * scal,
433 v_0002 * scal,
434 v_0011 * scal,
435 v_0012 * scal,
436 v_0022 * scal,
437 v_0111 * scal,
438 v_0112 * scal,
439 v_0122 * scal,
440 v_0222 * scal,
441 v_1111 * scal,
442 v_1112 * scal,
443 v_1122 * scal,
444 v_1222 * scal,
445 v_2222 * scal};
446 }
447
448 inline SymTensor3d_4 &operator*=(const T scal) {
449
450 v_0000 *= scal;
451 v_0001 *= scal;
452 v_0002 *= scal;
453 v_0011 *= scal;
454 v_0012 *= scal;
455 v_0022 *= scal;
456 v_0111 *= scal;
457 v_0112 *= scal;
458 v_0122 *= scal;
459 v_0222 *= scal;
460 v_1111 *= scal;
461 v_1112 *= scal;
462 v_1122 *= scal;
463 v_1222 *= scal;
464 v_2222 *= scal;
465
466 return *this;
467 }
468
469 SymTensor3d_4 operator*(const T &scal) const {
470 return SymTensor3d_4{
471 v_0000 * scal,
472 v_0001 * scal,
473 v_0002 * scal,
474 v_0011 * scal,
475 v_0012 * scal,
476 v_0022 * scal,
477 v_0111 * scal,
478 v_0112 * scal,
479 v_0122 * scal,
480 v_0222 * scal,
481 v_1111 * scal,
482 v_1112 * scal,
483 v_1122 * scal,
484 v_1222 * scal,
485 v_2222 * scal};
486 }
487
488 inline SymTensor3d_4 &operator+=(const SymTensor3d_4 other) {
489
490 v_0000 += other.v_0000;
491 v_0001 += other.v_0001;
492 v_0002 += other.v_0002;
493 v_0011 += other.v_0011;
494 v_0012 += other.v_0012;
495 v_0022 += other.v_0022;
496 v_0111 += other.v_0111;
497 v_0112 += other.v_0112;
498 v_0122 += other.v_0122;
499 v_0222 += other.v_0222;
500 v_1111 += other.v_1111;
501 v_1112 += other.v_1112;
502 v_1122 += other.v_1122;
503 v_1222 += other.v_1222;
504 v_2222 += other.v_2222;
505
506 return *this;
507 }
508
509 SymTensor3d_4 operator+(const SymTensor3d_4 &t2) const {
510 return SymTensor3d_4{
511 v_0000 + t2.v_0000,
512 v_0001 + t2.v_0001,
513 v_0002 + t2.v_0002,
514 v_0011 + t2.v_0011,
515 v_0012 + t2.v_0012,
516 v_0022 + t2.v_0022,
517 v_0111 + t2.v_0111,
518 v_0112 + t2.v_0112,
519 v_0122 + t2.v_0122,
520 v_0222 + t2.v_0222,
521 v_1111 + t2.v_1111,
522 v_1112 + t2.v_1112,
523 v_1122 + t2.v_1122,
524 v_1222 + t2.v_1222,
525 v_2222 + t2.v_2222};
526 }
527
528 SymTensor3d_4 operator-(const SymTensor3d_4 &t2) const {
529 return SymTensor3d_4{
530 v_0000 - t2.v_0000,
531 v_0001 - t2.v_0001,
532 v_0002 - t2.v_0002,
533 v_0011 - t2.v_0011,
534 v_0012 - t2.v_0012,
535 v_0022 - t2.v_0022,
536 v_0111 - t2.v_0111,
537 v_0112 - t2.v_0112,
538 v_0122 - t2.v_0122,
539 v_0222 - t2.v_0222,
540 v_1111 - t2.v_1111,
541 v_1112 - t2.v_1112,
542 v_1122 - t2.v_1122,
543 v_1222 - t2.v_1222,
544 v_2222 - t2.v_2222};
545 }
546
547 template<class Tacc>
548 inline void store(Tacc &&acc, u32 offset) const {
549 acc[offset + 0] = v_0000;
550 acc[offset + 1] = v_0001;
551 acc[offset + 2] = v_0002;
552 acc[offset + 3] = v_0011;
553 acc[offset + 4] = v_0012;
554 acc[offset + 5] = v_0022;
555 acc[offset + 6] = v_0111;
556 acc[offset + 7] = v_0112;
557 acc[offset + 8] = v_0122;
558 acc[offset + 9] = v_0222;
559 acc[offset + 10] = v_1111;
560 acc[offset + 11] = v_1112;
561 acc[offset + 12] = v_1122;
562 acc[offset + 13] = v_1222;
563 acc[offset + 14] = v_2222;
564 }
565
566 template<class Tacc>
567 inline static SymTensor3d_4 load(Tacc &&acc, u32 offset) {
568 return SymTensor3d_4<T>{
569 acc[offset + 0],
570 acc[offset + 1],
571 acc[offset + 2],
572 acc[offset + 3],
573 acc[offset + 4],
574 acc[offset + 5],
575 acc[offset + 6],
576 acc[offset + 7],
577 acc[offset + 8],
578 acc[offset + 9],
579 acc[offset + 10],
580 acc[offset + 11],
581 acc[offset + 12],
582 acc[offset + 13],
583 acc[offset + 14],
584 };
585 }
586 };
587
588 template<class T>
590
591 static constexpr u32 compo_cnt = 21;
592
593 T v_00000;
594 T v_00001;
595 T v_00002;
596 T v_00011;
597 T v_00012;
598 T v_00022;
599 T v_00111;
600 T v_00112;
601 T v_00122;
602 T v_00222;
603 T v_01111;
604 T v_01112;
605 T v_01122;
606 T v_01222;
607 T v_02222;
608 T v_11111;
609 T v_11112;
610 T v_11122;
611 T v_11222;
612 T v_12222;
613 T v_22222;
614
615 inline T inner(const SymTensor3d_5 &t) const {
616 return v_00000 * t.v_00000 + 5 * v_00001 * t.v_00001 + 5 * v_00002 * t.v_00002
617 + 10 * v_00011 * t.v_00011 + 20 * v_00012 * t.v_00012 + 10 * v_00022 * t.v_00022
618 + 10 * v_00111 * t.v_00111 + 30 * v_00112 * t.v_00112 + 30 * v_00122 * t.v_00122
619 + 10 * v_00222 * t.v_00222 + 5 * v_01111 * t.v_01111 + 20 * v_01112 * t.v_01112
620 + 30 * v_01122 * t.v_01122 + 20 * v_01222 * t.v_01222 + 5 * v_02222 * t.v_02222
621 + v_11111 * t.v_11111
622 + 5
623 * (v_11112 * t.v_11112 + 2 * v_11122 * t.v_11122 + 2 * v_11222 * t.v_11222
624 + v_12222 * t.v_12222)
625 + v_22222 * t.v_22222;
626 }
627
628 inline SymTensor3d_1<T> inner(const SymTensor3d_4<T> &t) const {
629 return SymTensor3d_1<T>{
630 v_00000 * t.v_0000 + 4 * v_00001 * t.v_0001 + 4 * v_00002 * t.v_0002
631 + 6 * v_00011 * t.v_0011 + 12 * v_00012 * t.v_0012 + 6 * v_00022 * t.v_0022
632 + 4 * v_00111 * t.v_0111 + 12 * v_00112 * t.v_0112 + 12 * v_00122 * t.v_0122
633 + 4 * v_00222 * t.v_0222 + v_01111 * t.v_1111 + 4 * v_01112 * t.v_1112
634 + 6 * v_01122 * t.v_1122 + 4 * v_01222 * t.v_1222 + v_02222 * t.v_2222,
635 v_00001 * t.v_0000 + 4 * v_00011 * t.v_0001 + 4 * v_00012 * t.v_0002
636 + 6 * v_00111 * t.v_0011 + 12 * v_00112 * t.v_0012 + 6 * v_00122 * t.v_0022
637 + 4 * v_01111 * t.v_0111 + 12 * v_01112 * t.v_0112 + 12 * v_01122 * t.v_0122
638 + 4 * v_01222 * t.v_0222 + v_11111 * t.v_1111 + 4 * v_11112 * t.v_1112
639 + 6 * v_11122 * t.v_1122 + 4 * v_11222 * t.v_1222 + v_12222 * t.v_2222,
640 v_00002 * t.v_0000 + 4 * v_00012 * t.v_0001 + 4 * v_00022 * t.v_0002
641 + 6 * v_00112 * t.v_0011 + 12 * v_00122 * t.v_0012 + 6 * v_00222 * t.v_0022
642 + 4 * v_01112 * t.v_0111 + 12 * v_01122 * t.v_0112 + 12 * v_01222 * t.v_0122
643 + 4 * v_02222 * t.v_0222 + v_11112 * t.v_1111 + 4 * v_11122 * t.v_1112
644 + 6 * v_11222 * t.v_1122 + 4 * v_12222 * t.v_1222 + v_22222 * t.v_2222};
645 }
646
647 inline SymTensor3d_2<T> inner(const SymTensor3d_3<T> &t) const {
648 return SymTensor3d_2<T>{
649 v_00000 * t.v_000 + 3 * v_00001 * t.v_001 + 3 * v_00002 * t.v_002
650 + 3 * v_00011 * t.v_011 + 6 * v_00012 * t.v_012 + 3 * v_00022 * t.v_022
651 + v_00111 * t.v_111 + 3 * v_00112 * t.v_112 + 3 * v_00122 * t.v_122
652 + v_00222 * t.v_222,
653 v_00001 * t.v_000 + 3 * v_00011 * t.v_001 + 3 * v_00012 * t.v_002
654 + 3 * v_00111 * t.v_011 + 6 * v_00112 * t.v_012 + 3 * v_00122 * t.v_022
655 + v_01111 * t.v_111 + 3 * v_01112 * t.v_112 + 3 * v_01122 * t.v_122
656 + v_01222 * t.v_222,
657 v_00002 * t.v_000 + 3 * v_00012 * t.v_001 + 3 * v_00022 * t.v_002
658 + 3 * v_00112 * t.v_011 + 6 * v_00122 * t.v_012 + 3 * v_00222 * t.v_022
659 + v_01112 * t.v_111 + 3 * v_01122 * t.v_112 + 3 * v_01222 * t.v_122
660 + v_02222 * t.v_222,
661 v_00011 * t.v_000 + 3 * v_00111 * t.v_001 + 3 * v_00112 * t.v_002
662 + 3 * v_01111 * t.v_011 + 6 * v_01112 * t.v_012 + 3 * v_01122 * t.v_022
663 + v_11111 * t.v_111 + 3 * v_11112 * t.v_112 + 3 * v_11122 * t.v_122
664 + v_11222 * t.v_222,
665 v_00012 * t.v_000 + 3 * v_00112 * t.v_001 + 3 * v_00122 * t.v_002
666 + 3 * v_01112 * t.v_011 + 6 * v_01122 * t.v_012 + 3 * v_01222 * t.v_022
667 + v_11112 * t.v_111 + 3 * v_11122 * t.v_112 + 3 * v_11222 * t.v_122
668 + v_12222 * t.v_222,
669 v_00022 * t.v_000 + 3 * v_00122 * t.v_001 + 3 * v_00222 * t.v_002
670 + 3 * v_01122 * t.v_011 + 6 * v_01222 * t.v_012 + 3 * v_02222 * t.v_022
671 + v_11122 * t.v_111 + 3 * v_11222 * t.v_112 + 3 * v_12222 * t.v_122
672 + v_22222 * t.v_222};
673 }
674
675 inline SymTensor3d_3<T> inner(const SymTensor3d_2<T> &t) const {
676 return SymTensor3d_3<T>{
677 v_00000 * t.v_00 + 2 * v_00001 * t.v_01 + 2 * v_00002 * t.v_02 + v_00011 * t.v_11
678 + 2 * v_00012 * t.v_12 + v_00022 * t.v_22,
679 v_00001 * t.v_00 + 2 * v_00011 * t.v_01 + 2 * v_00012 * t.v_02 + v_00111 * t.v_11
680 + 2 * v_00112 * t.v_12 + v_00122 * t.v_22,
681 v_00002 * t.v_00 + 2 * v_00012 * t.v_01 + 2 * v_00022 * t.v_02 + v_00112 * t.v_11
682 + 2 * v_00122 * t.v_12 + v_00222 * t.v_22,
683 v_00011 * t.v_00 + 2 * v_00111 * t.v_01 + 2 * v_00112 * t.v_02 + v_01111 * t.v_11
684 + 2 * v_01112 * t.v_12 + v_01122 * t.v_22,
685 v_00012 * t.v_00 + 2 * v_00112 * t.v_01 + 2 * v_00122 * t.v_02 + v_01112 * t.v_11
686 + 2 * v_01122 * t.v_12 + v_01222 * t.v_22,
687 v_00022 * t.v_00 + 2 * v_00122 * t.v_01 + 2 * v_00222 * t.v_02 + v_01122 * t.v_11
688 + 2 * v_01222 * t.v_12 + v_02222 * t.v_22,
689 v_00111 * t.v_00 + 2 * v_01111 * t.v_01 + 2 * v_01112 * t.v_02 + v_11111 * t.v_11
690 + 2 * v_11112 * t.v_12 + v_11122 * t.v_22,
691 v_00112 * t.v_00 + 2 * v_01112 * t.v_01 + 2 * v_01122 * t.v_02 + v_11112 * t.v_11
692 + 2 * v_11122 * t.v_12 + v_11222 * t.v_22,
693 v_00122 * t.v_00 + 2 * v_01122 * t.v_01 + 2 * v_01222 * t.v_02 + v_11122 * t.v_11
694 + 2 * v_11222 * t.v_12 + v_12222 * t.v_22,
695 v_00222 * t.v_00 + 2 * v_01222 * t.v_01 + 2 * v_02222 * t.v_02 + v_11222 * t.v_11
696 + 2 * v_12222 * t.v_12 + v_22222 * t.v_22};
697 }
698
699 inline SymTensor3d_4<T> inner(const SymTensor3d_1<T> &t) const {
700
701 return SymTensor3d_4<T>{
702 v_00000 * t.v_0 + v_00001 * t.v_1 + v_00002 * t.v_2,
703 v_00001 * t.v_0 + v_00011 * t.v_1 + v_00012 * t.v_2,
704 v_00002 * t.v_0 + v_00012 * t.v_1 + v_00022 * t.v_2,
705 v_00011 * t.v_0 + v_00111 * t.v_1 + v_00112 * t.v_2,
706 v_00012 * t.v_0 + v_00112 * t.v_1 + v_00122 * t.v_2,
707 v_00022 * t.v_0 + v_00122 * t.v_1 + v_00222 * t.v_2,
708 v_00111 * t.v_0 + v_01111 * t.v_1 + v_01112 * t.v_2,
709 v_00112 * t.v_0 + v_01112 * t.v_1 + v_01122 * t.v_2,
710 v_00122 * t.v_0 + v_01122 * t.v_1 + v_01222 * t.v_2,
711 v_00222 * t.v_0 + v_01222 * t.v_1 + v_02222 * t.v_2,
712 v_01111 * t.v_0 + v_11111 * t.v_1 + v_11112 * t.v_2,
713 v_01112 * t.v_0 + v_11112 * t.v_1 + v_11122 * t.v_2,
714 v_01122 * t.v_0 + v_11122 * t.v_1 + v_11222 * t.v_2,
715 v_01222 * t.v_0 + v_11222 * t.v_1 + v_12222 * t.v_2,
716 v_02222 * t.v_0 + v_12222 * t.v_1 + v_22222 * t.v_2};
717 }
718
719 inline SymTensor3d_5 inner(const T scal) const {
720 return SymTensor3d_5<T>{v_00000 * scal, v_00001 * scal, v_00002 * scal, v_00011 * scal,
721 v_00012 * scal, v_00022 * scal, v_00111 * scal, v_00112 * scal,
722 v_00122 * scal, v_00222 * scal, v_01111 * scal, v_01112 * scal,
723 v_01122 * scal, v_01222 * scal, v_02222 * scal, v_11111 * scal,
724 v_11112 * scal, v_11122 * scal, v_11222 * scal, v_12222 * scal,
725 v_22222 * scal};
726 }
727
728 inline SymTensor3d_5 &operator*=(const T scal) {
729
730 v_00000 *= scal;
731 v_00001 *= scal;
732 v_00002 *= scal;
733 v_00011 *= scal;
734 v_00012 *= scal;
735 v_00022 *= scal;
736 v_00111 *= scal;
737 v_00112 *= scal;
738 v_00122 *= scal;
739 v_00222 *= scal;
740 v_01111 *= scal;
741 v_01112 *= scal;
742 v_01122 *= scal;
743 v_01222 *= scal;
744 v_02222 *= scal;
745 v_11111 *= scal;
746 v_11112 *= scal;
747 v_11122 *= scal;
748 v_11222 *= scal;
749 v_12222 *= scal;
750 v_22222 *= scal;
751
752 return *this;
753 }
754
755 SymTensor3d_5 operator*(const T &scal) const {
756 return SymTensor3d_5<T>{v_00000 * scal, v_00001 * scal, v_00002 * scal, v_00011 * scal,
757 v_00012 * scal, v_00022 * scal, v_00111 * scal, v_00112 * scal,
758 v_00122 * scal, v_00222 * scal, v_01111 * scal, v_01112 * scal,
759 v_01122 * scal, v_01222 * scal, v_02222 * scal, v_11111 * scal,
760 v_11112 * scal, v_11122 * scal, v_11222 * scal, v_12222 * scal,
761 v_22222 * scal};
762 }
763
764 inline SymTensor3d_5 &operator+=(const SymTensor3d_5 other) {
765
766 v_00000 += other.v_00000;
767 v_00001 += other.v_00001;
768 v_00002 += other.v_00002;
769 v_00011 += other.v_00011;
770 v_00012 += other.v_00012;
771 v_00022 += other.v_00022;
772 v_00111 += other.v_00111;
773 v_00112 += other.v_00112;
774 v_00122 += other.v_00122;
775 v_00222 += other.v_00222;
776 v_01111 += other.v_01111;
777 v_01112 += other.v_01112;
778 v_01122 += other.v_01122;
779 v_01222 += other.v_01222;
780 v_02222 += other.v_02222;
781 v_11111 += other.v_11111;
782 v_11112 += other.v_11112;
783 v_11122 += other.v_11122;
784 v_11222 += other.v_11222;
785 v_12222 += other.v_12222;
786 v_22222 += other.v_22222;
787
788 return *this;
789 }
790
791 SymTensor3d_5 operator+(const SymTensor3d_5 &t2) const {
792 return SymTensor3d_5<T>{
793 v_00000 + t2.v_00000, v_00001 + t2.v_00001, v_00002 + t2.v_00002,
794 v_00011 + t2.v_00011, v_00012 + t2.v_00012, v_00022 + t2.v_00022,
795 v_00111 + t2.v_00111, v_00112 + t2.v_00112, v_00122 + t2.v_00122,
796 v_00222 + t2.v_00222, v_01111 + t2.v_01111, v_01112 + t2.v_01112,
797 v_01122 + t2.v_01122, v_01222 + t2.v_01222, v_02222 + t2.v_02222,
798 v_11111 + t2.v_11111, v_11112 + t2.v_11112, v_11122 + t2.v_11122,
799 v_11222 + t2.v_11222, v_12222 + t2.v_12222, v_22222 + t2.v_22222};
800 }
801
802 SymTensor3d_5 operator-(const SymTensor3d_5 &t2) const {
803 return SymTensor3d_5<T>{
804 v_00000 - t2.v_00000, v_00001 - t2.v_00001, v_00002 - t2.v_00002,
805 v_00011 - t2.v_00011, v_00012 - t2.v_00012, v_00022 - t2.v_00022,
806 v_00111 - t2.v_00111, v_00112 - t2.v_00112, v_00122 - t2.v_00122,
807 v_00222 - t2.v_00222, v_01111 - t2.v_01111, v_01112 - t2.v_01112,
808 v_01122 - t2.v_01122, v_01222 - t2.v_01222, v_02222 - t2.v_02222,
809 v_11111 - t2.v_11111, v_11112 - t2.v_11112, v_11122 - t2.v_11122,
810 v_11222 - t2.v_11222, v_12222 - t2.v_12222, v_22222 - t2.v_22222};
811 }
812
813 template<class Tacc>
814 inline void store(Tacc &&acc, u32 offset) const {
815 acc[offset + 0] = v_00000;
816 acc[offset + 1] = v_00001;
817 acc[offset + 2] = v_00002;
818 acc[offset + 3] = v_00011;
819 acc[offset + 4] = v_00012;
820 acc[offset + 5] = v_00022;
821 acc[offset + 6] = v_00111;
822 acc[offset + 7] = v_00112;
823 acc[offset + 8] = v_00122;
824 acc[offset + 9] = v_00222;
825 acc[offset + 10] = v_01111;
826 acc[offset + 11] = v_01112;
827 acc[offset + 12] = v_01122;
828 acc[offset + 13] = v_01222;
829 acc[offset + 14] = v_02222;
830 acc[offset + 15] = v_11111;
831 acc[offset + 16] = v_11112;
832 acc[offset + 17] = v_11122;
833 acc[offset + 18] = v_11222;
834 acc[offset + 19] = v_12222;
835 acc[offset + 20] = v_22222;
836 }
837
838 template<class Tacc>
839 inline static SymTensor3d_5 load(Tacc &&acc, u32 offset) {
840 return SymTensor3d_5<T>{acc[offset + 0], acc[offset + 1], acc[offset + 2],
841 acc[offset + 3], acc[offset + 4], acc[offset + 5],
842 acc[offset + 6], acc[offset + 7], acc[offset + 8],
843 acc[offset + 9], acc[offset + 10], acc[offset + 11],
844 acc[offset + 12], acc[offset + 13], acc[offset + 14],
845 acc[offset + 15], acc[offset + 16], acc[offset + 17],
846 acc[offset + 18], acc[offset + 19], acc[offset + 20]};
847 }
848 };
849
850 // rank 5 ops
851 template<class T>
852 T operator*(const SymTensor3d_5<T> &a, const SymTensor3d_5<T> &b) {
853 return a.inner(b);
854 }
855
856 template<class T>
857 SymTensor3d_1<T> operator*(const SymTensor3d_5<T> &a, const SymTensor3d_4<T> &b) {
858 return a.inner(b);
859 }
860
861 template<class T>
862 SymTensor3d_2<T> operator*(const SymTensor3d_5<T> &a, const SymTensor3d_3<T> &b) {
863 return a.inner(b);
864 }
865
866 template<class T>
867 SymTensor3d_3<T> operator*(const SymTensor3d_5<T> &a, const SymTensor3d_2<T> &b) {
868 return a.inner(b);
869 }
870
871 template<class T>
872 SymTensor3d_4<T> operator*(const SymTensor3d_5<T> &a, const SymTensor3d_1<T> &b) {
873 return a.inner(b);
874 }
875
876 template<class T>
877 SymTensor3d_5<T> operator*(const T &a, const SymTensor3d_5<T> &b) {
878 return b * a;
879 }
880
881 template<class T>
882 inline SymTensor3d_4<T> operator*(const SymTensor3d_1<T> &a, const SymTensor3d_5<T> &b) {
883 return b * a;
884 }
885
886 template<class T>
887 inline SymTensor3d_3<T> operator*(const SymTensor3d_2<T> &a, const SymTensor3d_5<T> &b) {
888 return b * a;
889 }
890
891 template<class T>
892 inline SymTensor3d_2<T> operator*(const SymTensor3d_3<T> &a, const SymTensor3d_5<T> &b) {
893 return b * a;
894 }
895
896 template<class T>
897 inline SymTensor3d_1<T> operator*(const SymTensor3d_4<T> &a, const SymTensor3d_5<T> &b) {
898 return b * a;
899 }
900
901 // rank 4 ops
902 template<class T>
903 T operator*(const SymTensor3d_4<T> &a, const SymTensor3d_4<T> &b) {
904 return a.inner(b);
905 }
906
907 template<class T>
908 SymTensor3d_1<T> operator*(const SymTensor3d_4<T> &a, const SymTensor3d_3<T> &b) {
909 return a.inner(b);
910 }
911
912 template<class T>
913 SymTensor3d_2<T> operator*(const SymTensor3d_4<T> &a, const SymTensor3d_2<T> &b) {
914 return a.inner(b);
915 }
916
917 template<class T>
918 SymTensor3d_3<T> operator*(const SymTensor3d_4<T> &a, const SymTensor3d_1<T> &b) {
919 return a.inner(b);
920 }
921
922 template<class T>
923 SymTensor3d_4<T> operator*(const T &a, const SymTensor3d_4<T> &b) {
924 return b * a;
925 }
926
927 template<class T>
928 inline SymTensor3d_3<T> operator*(const SymTensor3d_1<T> &a, const SymTensor3d_4<T> &b) {
929 return b * a;
930 }
931
932 template<class T>
933 inline SymTensor3d_2<T> operator*(const SymTensor3d_2<T> &a, const SymTensor3d_4<T> &b) {
934 return b * a;
935 }
936
937 template<class T>
938 inline SymTensor3d_1<T> operator*(const SymTensor3d_3<T> &a, const SymTensor3d_4<T> &b) {
939 return b * a;
940 }
941
942 // rank 3 ops
943 template<class T>
944 T operator*(const SymTensor3d_3<T> &a, const SymTensor3d_3<T> &b) {
945 return a.inner(b);
946 }
947 template<class T>
948 SymTensor3d_1<T> operator*(const SymTensor3d_3<T> &a, const SymTensor3d_2<T> &b) {
949 return a.inner(b);
950 }
951 template<class T>
952 SymTensor3d_2<T> operator*(const SymTensor3d_3<T> &a, const SymTensor3d_1<T> &b) {
953 return a.inner(b);
954 }
955
956 template<class T>
957 SymTensor3d_1<T> operator*(const SymTensor3d_2<T> &a, const SymTensor3d_3<T> &b) {
958 return b * a;
959 }
960 template<class T>
961 SymTensor3d_2<T> operator*(const SymTensor3d_1<T> &a, const SymTensor3d_3<T> &b) {
962 return b * a;
963 }
964 template<class T>
965 SymTensor3d_3<T> operator*(const T &a, const SymTensor3d_3<T> &b) {
966 return b * a;
967 }
968
969 // rank 2
970
971 template<class T>
972 T operator*(const SymTensor3d_2<T> &a, const SymTensor3d_2<T> &b) {
973 return a.inner(b);
974 }
975
976 template<class T>
977 SymTensor3d_1<T> operator*(const SymTensor3d_2<T> &a, const SymTensor3d_1<T> &b) {
978 return a.inner(b);
979 }
980
981 template<class T>
982 SymTensor3d_1<T> operator*(const SymTensor3d_1<T> &a, const SymTensor3d_2<T> &b) {
983 return b.inner(a);
984 }
985
986 template<class T>
987 SymTensor3d_2<T> operator*(const T &a, const SymTensor3d_2<T> &b) {
988 return b * a;
989 }
990
991 // rank 1
992
993 template<class T>
994 T operator*(const SymTensor3d_1<T> &a, const SymTensor3d_1<T> &b) {
995 return a.inner(b);
996 }
997
998 template<class T>
999 SymTensor3d_1<T> operator*(const T &a, const SymTensor3d_1<T> &b) {
1000 return b * a;
1001 }
1002
1003} // namespace shammath
// Notes: u32 = std::uint32_t (32 bit unsigned integer); shammath is the namespace
// for math utilities (see its definition in AABB.hpp).