Note: go to the end of this page to download the full example code.
Regression test: SPH Kelvin-Helmholtz instability
This test checks that the SPH Kelvin-Helmholtz instability setup reproduces the same results as the reference files.
import shamrock

# If we use the shamrock executable to run this script instead of the python interpreter,
# we should not initialize the system as the shamrock executable needs to handle specific MPI logic
if not shamrock.sys.is_initialized():
    shamrock.change_loglevel(1)
    # NOTE(review): "0:0" looks like a device/queue selector string — confirm
    # its meaning against the shamrock documentation
    shamrock.sys.init("0:0")
Setup parameters
import numpy as np

kernel = "M6"  # SPH kernel to use
resol = 64  # number of particles in the x & y direction
thick = 6  # number of particles in the z direction

# CFLs
C_cour = 0.3
C_force = 0.25

gamma = 1.4  # adiabatic index of the equation of state

vslip = 1  # slip speed between the two layers

rho_1 = 1  # density of the outer layers

# Middle-layer particle spacing is scaled by `fact`, so its density is
# rho_1 / fact^3 (density contrast of (3/2)^3 between the layers).
fact = 2 / 3
rho_2 = rho_1 / (fact**3)

# Equal pressures on both sides: the interface starts in pressure equilibrium
P_1 = 3.5
P_2 = 3.5

render_gif = True

dump_folder = "_to_trash"  # output directory for the HDF5 dumps
sim_name = "kh_sph"

# Specific internal energies from the ideal-gas EOS: u = P / ((gamma - 1) * rho)
u_1 = P_1 / ((gamma - 1) * rho_1)
u_2 = P_2 / ((gamma - 1) * rho_2)

print("Mach number 1 :", vslip / np.sqrt(gamma * P_1 / rho_1))
print("Mach number 2 :", vslip / np.sqrt(gamma * P_2 / rho_2))


import os

# Create the dump directory if it does not exist
# (only rank 0 touches the filesystem to avoid an MPI race)
if shamrock.sys.world_rank() == 0:
    os.makedirs(dump_folder, exist_ok=True)
Configure the solver
ctx = shamrock.Context()
ctx.pdata_layout_new()

# SPH model with double-precision 3-vectors and the kernel chosen above
model = shamrock.get_Model_SPH(context=ctx, vector_type="f64_3", sph_kernel=kernel)

cfg = model.gen_default_config()
# Time-varying artificial viscosity switch
# NOTE(review): "CD10" presumably refers to the Cullen & Dehnen (2010)
# viscosity switch — confirm against the shamrock documentation
cfg.set_artif_viscosity_VaryingCD10(
    alpha_min=0.0, alpha_max=1, sigma_decay=0.1, alpha_u=1, beta_AV=2
)
cfg.set_boundary_periodic()
cfg.set_eos_adiabatic(gamma)
cfg.print_status()
model.set_solver_config(cfg)

# Set scheduler criteria to effectively disable patch splitting and merging.
crit_split = int(1e9)
crit_merge = 1
model.init_scheduler(crit_split, crit_merge)
Setup the simulation
# Compute box size
# First call with unit spacing gives the lattice extent; dr is then chosen so
# the box spans one unit along x, and the real box size is recomputed with it.
(xs, ys, zs) = model.get_box_dim_fcc_3d(1, resol, resol, thick)
dr = 1 / xs
(xs, ys, zs) = model.get_box_dim_fcc_3d(dr, resol, resol, thick)

# Box centered on the origin
model.resize_simulation_box((-xs / 2, -ys / 2, -zs / 2), (xs / 2, ys / 2, zs / 2))

# rho1 domain
# Two outer slabs (|y| > y_interface) filled at spacing dr -> density rho_1
y_interface = ys / 4
model.add_cube_fcc_3d(dr, (-xs / 2, -ys / 2, -zs / 2), (xs / 2, -y_interface, zs / 2))
model.add_cube_fcc_3d(dr, (-xs / 2, y_interface, -zs / 2), (xs / 2, ys / 2, zs / 2))

# rho 2 domain
# Middle slab filled at the finer spacing dr*fact -> density rho_2 = rho_1 / fact^3
model.add_cube_fcc_3d(dr * fact, (-xs / 2, -y_interface, -zs / 2), (xs / 2, y_interface, zs / 2))

# Internal energy field: u_1 in the two outer slabs, u_2 in the middle slab,
# so both phases start at the same pressure P = (gamma - 1) * rho * u
model.set_value_in_a_box(
    "uint", "f64", u_1, (-xs / 2, -ys / 2, -zs / 2), (xs / 2, -y_interface, zs / 2)
)
model.set_value_in_a_box(
    "uint", "f64", u_1, (-xs / 2, y_interface, -zs / 2), (xs / 2, ys / 2, zs / 2)
)

model.set_value_in_a_box(
    "uint", "f64", u_2, (-xs / 2, -y_interface, -zs / 2), (xs / 2, y_interface, zs / 2)
)
111
# the velocity function to trigger KH
def vel_func(r):
    """Initial velocity at position ``r = (x, y, z)``.

    Combines a piecewise shear flow (outer layers at +vslip/2, middle layer
    at -vslip/2) with a small sinusoidal y-velocity perturbation localised
    around the two interfaces by Gaussian envelopes, which seeds the
    Kelvin-Helmholtz instability there.
    """
    x, y, z = r

    # Sinusoidal seed of mode `n_mode` along x
    amplitude = 0.01
    n_mode = 2
    seed = np.sin(2 * np.pi * n_mode * x / xs)

    # Gaussian envelopes centred on the two interfaces at y = +/- y_interface
    width = 0.05 / (2**0.5)
    envelope = np.exp(-((y - y_interface) ** 2) / (2 * width * width)) + np.exp(
        -((y + y_interface) ** 2) / (2 * width * width)
    )
    vy = amplitude * seed * envelope

    # Alternative smooth shear profile (See T. Tricco paper)
    # interf_sz = ys/32
    # vx = np.arctan(y/interf_sz)/np.pi

    # Piecewise shear between the layers
    vx = vslip / 2 if np.abs(y) > y_interface else -vslip / 2

    return (vx, vy, 0)
136
137
model.set_field_value_lambda_f64_3("vxyz", vel_func)

# Total mass: each phase occupies half of the box volume
vol_b = xs * ys * zs

totmass = (rho_1 * vol_b / 2) + (rho_2 * vol_b / 2)

pmass = model.total_mass_to_part_mass(totmass)
model.set_particle_mass(pmass)

print("Total mass :", totmass)
print("Current part mass :", pmass)

model.set_cfl_cour(C_cour)
model.set_cfl_force(C_force)

# NOTE(review): single step before the first dump — presumably to initialise
# smoothing lengths / derivative fields; confirm against shamrock docs
model.timestep()
Save state function adapted from https://gist.github.com/gilliss/7e1d451b42441e77ae33a8b790eeeb73
def save_collected_data(data_dict, fpath):
    """Write every entry of *data_dict* into an HDF5 file at *fpath*.

    Each key of *data_dict* becomes a dataset name; the associated array is
    stored as the dataset contents.
    """

    print(f"Saving data to {fpath}")

    import h5py

    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error); the unused `dset`
    # binding is also dropped.
    with h5py.File(fpath, "w") as f:
        for dset_name in data_dict:
            f.create_dataset(dset_name, data=data_dict[dset_name])
172
173
def load_collected_data(fpath):
    """Read an HDF5 file written by ``save_collected_data`` back into a dict.

    Returns a dict mapping each dataset name to its full contents as a
    numpy array.

    Raises FileNotFoundError if *fpath* does not exist.
    """

    print(f"Loading data from {fpath}")

    if not os.path.exists(fpath):
        print(f"File {fpath} does not exist")
        raise FileNotFoundError(f"File {fpath} does not exist")

    import h5py

    # Context manager closes the file even if a read fails
    # (the original leaked the handle on error).
    with h5py.File(fpath, "r") as f:
        data_dict = {dset_name: f[dset_name][:] for dset_name in f.keys()}

    return data_dict
194
195
def check_regression(data_dict1, data_dict2, tolerances, verbose=False):
    """Compare two collected-data dicts field by field against tolerances.

    Parameters
    ----------
    data_dict1 : dict of str -> numpy array
        Reference data.
    data_dict2 : dict of str -> numpy array
        Current data; must have the same keys and per-field shapes.
    tolerances : dict of str -> (rtol, atol)
        Per-field tolerances passed to ``np.isclose``.
    verbose : bool, optional
        If True, print every offending entry (this replaces a hard-coded
        ``if False:`` debug switch; the default preserves the old behavior).

    Raises
    ------
    ValueError
        If the key sets differ, tolerance keys do not cover the data keys,
        a field's shapes differ, or any field has out-of-tolerance entries.
    """

    # Compare if keys sets match
    if set(data_dict1.keys()) != set(data_dict2.keys()):
        print("Data keys sets do not match")
        raise ValueError(
            f"Data keys sets do not match: {set(data_dict1.keys())} != {set(data_dict2.keys())}"
        )

    # Compare if tolerances are defined for all keys
    if set(tolerances.keys()) != set(data_dict1.keys()):
        print("Tolerances keys sets do not match")
        raise ValueError(
            f"Tolerances keys sets do not match: {set(tolerances.keys())} != {set(data_dict1.keys())}"
        )

    # Compare if values are equal
    for dset_name in data_dict1:

        # Compare same size
        if data_dict1[dset_name].shape != data_dict2[dset_name].shape:
            print(f"Data {dset_name} has different shape")
            print(f"shape: {data_dict1[dset_name].shape} != {data_dict2[dset_name].shape}")
            raise ValueError(f"Data {dset_name} has different shape")

        # Elementwise closeness mask
        delta = np.isclose(
            data_dict1[dset_name],
            data_dict2[dset_name],
            rtol=tolerances[dset_name][0],
            atol=tolerances[dset_name][1],
        )

        # An entry "offends" when any of its components fails the tolerance.
        # Reducing over all trailing axes handles scalar (1-D) and vector
        # (2-D) fields alike, replacing the original per-index Python loop.
        entry_ok = np.all(delta, axis=tuple(range(1, delta.ndim)))
        offending = np.flatnonzero(~entry_ok)
        offenses = len(offending)

        if verbose:
            for i in offending:
                print(
                    f"Data {dset_name} is not equal at index {i}, rtol={tolerances[dset_name][0]}, atol={tolerances[dset_name][1]}"
                )
                print(f" value 1: {data_dict1[dset_name][i]}")
                print(f" value 2: {data_dict2[dset_name][i]}")
                print(
                    f" absolute diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i])}"
                )
                print(
                    f" relative diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i]) / data_dict1[dset_name][i]}"
                )

        if offenses > 0:
            print(
                f"Data {dset_name} has {offenses} offenses, absolute diff: {np.abs(data_dict1[dset_name] - data_dict2[dset_name]).max()}"
            )
            raise ValueError(f"Data {dset_name} is not equal")

    print(" -> Regression test passed successfully")
254
255
def save_state(iplot):
    """Collect the current simulation data and write dump number *iplot*."""
    fname = f"{sim_name}_data_{iplot:04}.h5"
    save_collected_data(ctx.collect_data(), os.path.join(dump_folder, fname))
Running the simulation
t_sum = 0
t_target = 0.1

# Dump the initial condition as dump 0
save_state(0)

i_dump = 1
dt_dump = 0.05

# Evolve in fixed dt_dump slices, dumping after each one.
# NOTE(review): t_sum accumulates floats; with these exact values the loop
# runs twice (3 dumps total, matching the 3 tolerance entries below), but the
# `<` comparison is sensitive to rounding if dt_dump or t_target change.
while t_sum < t_target:
    model.evolve_until(t_sum + dt_dump)

    save_state(i_dump)

    t_sum += dt_dump
    i_dump += 1
Check regression
reference_folder = "reference-files/regression_sph_kh_22_08_25"

# Per-dump tolerances: one dict per dump (0, 1, 2), each mapping a field name
# to an (rtol, atol) pair for np.isclose. Later dumps get looser tolerances
# since round-off differences grow as the simulation evolves.
tolerances = [
    {
        "xyz": (1e-15, 1e-15),
        "vxyz": (1e-17, 1e-17),
        "hpart": (1e-16, 1e-16),
        "duint": (1e-13, 1e-13),
        "dtdivv": (1e-13, 1e-13),
        "curlv": (1e-12, 1e-12),
        "soundspeed": (1e-15, 1e-15),
        "uint": (1e-20, 1e-20),
        "axyz_ext": (1e-20, 1e-20),
        "alpha_AV": (1e-20, 1e-20),
        "divv": (1e-12, 1e-12),
        "axyz": (1e-11, 1e-11),
    },
    {
        "xyz": (1e-14, 1e-14),
        "vxyz": (1e-12, 1e-12),
        "hpart": (1e-15, 1e-15),
        "duint": (1e-10, 1e-10),
        "dtdivv": (1e-8, 1e-8),
        "curlv": (1e-12, 1e-12),
        "soundspeed": (1e-13, 1e-13),
        "uint": (1e-13, 1e-13),
        "axyz_ext": (1e-20, 1e-20),
        "alpha_AV": (1e-10, 1e-10),
        "divv": (1e-11, 1e-11),
        "axyz": (1e-10, 1e-10),
    },
    {
        "xyz": (1e-14, 1e-14),
        "vxyz": (1e-12, 1e-12),
        "hpart": (1e-15, 1e-15),
        "duint": (1e-10, 1e-10),
        "dtdivv": (1e-8, 1e-8),
        "curlv": (1e-11, 1e-11),
        "soundspeed": (1e-13, 1e-13),
        "uint": (1e-13, 1e-13),
        "axyz_ext": (1e-20, 1e-20),
        "alpha_AV": (1e-9, 1e-9),
        "divv": (1e-10, 1e-10),
        "axyz": (1e-10, 1e-10),
    },
]

# Compare every dump produced above against the matching reference file
for iplot in range(i_dump):

    fpath_cur = os.path.join(dump_folder, f"{sim_name}_data_{iplot:04}.h5")
    fpath_ref = os.path.join(reference_folder, f"{sim_name}_data_{iplot:04}.h5")

    data_dict_cur = load_collected_data(fpath_cur)
    data_dict_ref = load_collected_data(fpath_ref)

    check_regression(data_dict_ref, data_dict_cur, tolerances[iplot])
Estimated memory usage: 0 MB