Note
Go to the end to download the full example code.
Regression test: SPH Kelvin-Helmholtz instability
This test is used to check that the SPH Kelvin-Helmholtz instability setup is able to reproduce the same results as the reference file.
import shamrock

# If we use the shamrock executable to run this script instead of the python interpreter,
# we should not initialize the system as the shamrock executable needs to handle specific MPI logic
if not shamrock.sys.is_initialized():
    shamrock.change_loglevel(1)
    # "0:0" presumably selects the compute device/queue pair -- TODO confirm
    # against the shamrock documentation.
    shamrock.sys.init("0:0")
Setup parameters
import numpy as np

# --- Setup parameters ---

kernel = "M6"  # SPH kernel to use
resol = 64  # number of particles in the x & y direction
thick = 6  # number of particles in the z direction

# CFLs
C_cour = 0.3  # Courant CFL factor
C_force = 0.25  # force CFL factor

gamma = 1.4  # adiabatic index of the ideal-gas EOS

vslip = 1  # slip speed between the two layers

rho_1 = 1  # density of the outer slabs

# The central slab is seeded with particle spacing dr*fact (see below), so its
# density is rho_1 / fact**3 = 3.375 * rho_1 for fact = 2/3.
fact = 2 / 3
rho_2 = rho_1 / (fact**3)

# Equal pressures on both sides: the interface starts in pressure equilibrium
P_1 = 3.5
P_2 = 3.5

# NOTE(review): render_gif is not used anywhere in this excerpt -- confirm
# whether it is consumed elsewhere or is dead configuration.
render_gif = True

dump_folder = "_to_trash"  # output directory for the HDF5 dumps
sim_name = "kh_sph"  # prefix of the dump file names

# Specific internal energies from the ideal-gas relation u = P / ((gamma - 1) * rho)
u_1 = P_1 / ((gamma - 1) * rho_1)
u_2 = P_2 / ((gamma - 1) * rho_2)

print("Mach number 1 :", vslip / np.sqrt(gamma * P_1 / rho_1))
print("Mach number 2 :", vslip / np.sqrt(gamma * P_2 / rho_2))


import os

# Create the dump directory if it does not exist
if shamrock.sys.world_rank() == 0:
    os.makedirs(dump_folder, exist_ok=True)
Configure the solver
ctx = shamrock.Context()
ctx.pdata_layout_new()

# SPH model with 3D double-precision vectors and the kernel chosen above
model = shamrock.get_Model_SPH(context=ctx, vector_type="f64_3", sph_kernel=kernel)

cfg = model.gen_default_config()
# Time-varying artificial viscosity switch ("CD10" presumably refers to the
# Cullen & Dehnen 2010 scheme -- TODO confirm against the shamrock docs)
cfg.set_artif_viscosity_VaryingCD10(
    alpha_min=0.0, alpha_max=1, sigma_decay=0.1, alpha_u=1, beta_AV=2
)
cfg.set_boundary_periodic()
cfg.set_eos_adiabatic(gamma)
cfg.print_status()
model.set_solver_config(cfg)

# Set scheduler criteria to effectively disable patch splitting and merging.
crit_split = int(1e9)
crit_merge = 1
model.init_scheduler(crit_split, crit_merge)
Setup the simulation
# Compute box size
# First query the fcc lattice box dimensions for unit spacing, then rescale the
# spacing so the box is 1 unit long in x, and re-query the actual dimensions.
(xs, ys, zs) = model.get_box_dim_fcc_3d(1, resol, resol, thick)
dr = 1 / xs
(xs, ys, zs) = model.get_box_dim_fcc_3d(dr, resol, resol, thick)

# Box centered on the origin
model.resize_simulation_box((-xs / 2, -ys / 2, -zs / 2), (xs / 2, ys / 2, zs / 2))

# rho1 domain: two outer slabs (|y| > y_interface) at particle spacing dr
y_interface = ys / 4
model.add_cube_fcc_3d(dr, (-xs / 2, -ys / 2, -zs / 2), (xs / 2, -y_interface, zs / 2))
model.add_cube_fcc_3d(dr, (-xs / 2, y_interface, -zs / 2), (xs / 2, ys / 2, zs / 2))

# rho 2 domain: central slab at tighter spacing dr*fact -> higher density
model.add_cube_fcc_3d(dr * fact, (-xs / 2, -y_interface, -zs / 2), (xs / 2, y_interface, zs / 2))

# Set the specific internal energy field ("uint") per region: u_1 in the two
# outer slabs, u_2 in the central slab.
model.set_value_in_a_box(
    "uint", "f64", u_1, (-xs / 2, -ys / 2, -zs / 2), (xs / 2, -y_interface, zs / 2)
)
model.set_value_in_a_box(
    "uint", "f64", u_1, (-xs / 2, y_interface, -zs / 2), (xs / 2, ys / 2, zs / 2)
)

model.set_value_in_a_box(
    "uint", "f64", u_2, (-xs / 2, -y_interface, -zs / 2), (xs / 2, y_interface, zs / 2)
)
111
112 # the velocity function to trigger KH
def vel_func(r):
    """Initial velocity for a particle at position ``r = (x, y, z)``.

    The base flow is a slip layer: vx = +vslip/2 in the outer slabs
    (|y| > y_interface) and -vslip/2 in the central slab.  A small
    sinusoidal vy perturbation, localized around the two interfaces by
    Gaussian envelopes, seeds the Kelvin-Helmholtz instability.
    """
    x, y, z = r

    # Sinusoidal seed along x: n_waves full wavelengths across the box
    seed_amplitude = 0.01
    n_waves = 2
    vy_pert = np.sin(2 * np.pi * n_waves * x / (xs))

    # Gaussian envelopes centered on the two density interfaces
    sigma = 0.05 / (2**0.5)
    envelope_top = np.exp(-((y - y_interface) ** 2) / (2 * sigma * sigma))
    envelope_bottom = np.exp(-((y + y_interface) ** 2) / (2 * sigma * sigma))
    vy_pert *= envelope_top + envelope_bottom

    # Alternative formula (See T. Tricco paper)
    # interf_sz = ys/32
    # vx = np.arctan(y/interf_sz)/np.pi

    # Counter-flowing slabs: sharp velocity jump at |y| = y_interface
    vx = vslip / 2 if np.abs(y) > y_interface else -vslip / 2

    return (vx, seed_amplitude * vy_pert, 0)
136
137
# Apply the initial velocity field defined above to every particle
model.set_field_value_lambda_f64_3("vxyz", vel_func)

vol_b = xs * ys * zs

# Each density region fills half of the box volume
totmass = (rho_1 * vol_b / 2) + (rho_2 * vol_b / 2)

pmass = model.total_mass_to_part_mass(totmass)
model.set_particle_mass(pmass)

print("Total mass :", totmass)
print("Current part mass :", pmass)

model.set_cfl_cour(C_cour)
model.set_cfl_force(C_force)

# Take one step before the first dump (presumably to initialize smoothing
# lengths and derived fields -- TODO confirm against shamrock docs)
model.timestep()
Save state function adapted from https://gist.github.com/gilliss/7e1d451b42441e77ae33a8b790eeeb73
def save_collected_data(data_dict, fpath):
    """Write every array in *data_dict* as a dataset of an HDF5 file.

    Parameters
    ----------
    data_dict : dict
        Maps dataset name -> array-like data to store.
    fpath : str
        Path of the HDF5 file to create (overwritten if it exists).
    """
    print(f"Saving data to {fpath}")

    import h5py

    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on exception).
    with h5py.File(fpath, "w") as f:
        for dset_name, data in data_dict.items():
            f.create_dataset(dset_name, data=data)
171
172
def load_collected_data(fpath):
    """Read back a data dictionary written by ``save_collected_data``.

    Parameters
    ----------
    fpath : str
        Path of the HDF5 file to read.

    Returns
    -------
    dict
        Maps each dataset name to its fully-read array.

    Raises
    ------
    FileNotFoundError
        If *fpath* does not exist.
    """
    print(f"Loading data from {fpath}")

    if not os.path.exists(fpath):
        print(f"File {fpath} does not exist")
        raise FileNotFoundError(f"File {fpath} does not exist")

    import h5py

    # Context manager guarantees the file is closed even if a read fails;
    # [:] forces an eager copy into memory before the file is closed.
    with h5py.File(fpath, "r") as f:
        data_dict = {dset_name: f[dset_name][:] for dset_name in f.keys()}

    return data_dict
192
193
def check_regression(data_dict1, data_dict2, tolerances, *, verbose=False):
    """Compare two collected-data dictionaries field by field.

    Parameters
    ----------
    data_dict1 : dict
        Reference data, mapping dataset name -> numpy array.
    data_dict2 : dict
        Current data; must have the same keys and shapes as *data_dict1*.
    tolerances : dict
        Maps each dataset name to an ``(rtol, atol)`` pair passed to
        ``np.isclose``.
    verbose : bool, keyword-only
        When True, print a detailed report for every offending entry.
        (This replaces an always-disabled ``if False:`` debug branch;
        the default preserves the original behavior.)

    Raises
    ------
    ValueError
        If the key sets differ, the tolerance keys differ, any dataset
        has a different shape, or any dataset exceeds its tolerance.
    """
    # Compare if keys sets match
    if set(data_dict1.keys()) != set(data_dict2.keys()):
        print("Data keys sets do not match")
        raise ValueError(
            f"Data keys sets do not match: {set(data_dict1.keys())} != {set(data_dict2.keys())}"
        )

    # Compare if tolerances are defined for all keys
    if set(tolerances.keys()) != set(data_dict1.keys()):
        print("Tolerances keys sets do not match")
        raise ValueError(
            f"Tolerances keys sets do not match: {set(tolerances.keys())} != {set(data_dict1.keys())}"
        )

    # Compare if values are equal
    for dset_name in data_dict1:
        # Compare same size
        if data_dict1[dset_name].shape != data_dict2[dset_name].shape:
            print(f"Data {dset_name} has different shape")
            print(f"shape: {data_dict1[dset_name].shape} != {data_dict2[dset_name].shape}")
            raise ValueError(f"Data {dset_name} has different shape")

        # Hoist the tolerance pair out of the loop below
        rtol, atol = tolerances[dset_name]

        # Element-wise closeness mask
        delta = np.isclose(data_dict1[dset_name], data_dict2[dset_name], rtol=rtol, atol=atol)

        # Count entries (rows along the first axis) with any value out of tolerance
        offenses = 0
        for i in range(len(data_dict1[dset_name])):
            if not np.all(delta[i]):
                if verbose:
                    print(
                        f"Data {dset_name} is not equal at index {i}, rtol={rtol}, atol={atol}"
                    )
                    print(f" value 1: {data_dict1[dset_name][i]}")
                    print(f" value 2: {data_dict2[dset_name][i]}")
                    print(
                        f" absolute diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i])}"
                    )
                    # NOTE(review): divides by the reference entry -- prints
                    # inf/nan when that entry is zero.
                    print(
                        f" relative diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i]) / data_dict1[dset_name][i]}"
                    )
                offenses += 1

        if offenses > 0:
            print(
                f"Data {dset_name} has {offenses} offenses, absolute diff: {np.abs(data_dict1[dset_name] - data_dict2[dset_name]).max()}"
            )
            raise ValueError(f"Data {dset_name} is not equal")

    print(" -> Regression test passed successfully")
250
251
def save_state(iplot):
    """Collect the current particle data and dump it as dump number *iplot*."""
    fname = f"{sim_name}_data_{iplot:04}.h5"
    save_collected_data(ctx.collect_data(), os.path.join(dump_folder, fname))
Running the simulation
# Time integration: evolve to t_target, dumping the state every dt_dump
t_sum = 0
t_target = 0.1

# Dump the initial state as dump 0
save_state(0)

i_dump = 1
dt_dump = 0.05

# NOTE(review): t_sum accumulates floats (0.05 is not exactly representable);
# with these exact values the loop runs twice (dumps 1 and 2), but other
# dt_dump/t_target combinations may yield an off-by-one number of dumps.
while t_sum < t_target:
    model.evolve_until(t_sum + dt_dump)

    save_state(i_dump)

    t_sum += dt_dump
    i_dump += 1
Check regression
# Folder holding the reference dumps this run is compared against
reference_folder = "reference-files/regression_sph_kh_22_08_25"

# Per-dump tolerances: tolerances[i] maps each collected field name to an
# (rtol, atol) pair for dump i.  Later dumps use looser tolerances --
# presumably because floating-point differences grow as the simulation
# evolves.  The list must have one entry per dump produced above.
tolerances = [
    {
        "xyz": (1e-15, 1e-15),
        "vxyz": (1e-17, 1e-17),
        "hpart": (1e-16, 1e-16),
        "duint": (1e-13, 1e-13),
        "dtdivv": (1e-13, 1e-13),
        "curlv": (1e-12, 1e-12),
        "soundspeed": (1e-15, 1e-15),
        "uint": (1e-20, 1e-20),
        "axyz_ext": (1e-20, 1e-20),
        "alpha_AV": (1e-20, 1e-20),
        "divv": (1e-12, 1e-12),
        "axyz": (1e-11, 1e-11),
    },
    {
        "xyz": (1e-14, 1e-14),
        "vxyz": (1e-12, 1e-12),
        "hpart": (1e-15, 1e-15),
        "duint": (1e-10, 1e-10),
        "dtdivv": (1e-8, 1e-8),
        "curlv": (1e-12, 1e-12),
        "soundspeed": (1e-13, 1e-13),
        "uint": (1e-13, 1e-13),
        "axyz_ext": (1e-20, 1e-20),
        "alpha_AV": (1e-10, 1e-10),
        "divv": (1e-11, 1e-11),
        "axyz": (1e-10, 1e-10),
    },
    {
        "xyz": (1e-14, 1e-14),
        "vxyz": (1e-12, 1e-12),
        "hpart": (1e-15, 1e-15),
        "duint": (1e-10, 1e-10),
        "dtdivv": (1e-8, 1e-8),
        "curlv": (1e-11, 1e-11),
        "soundspeed": (1e-13, 1e-13),
        "uint": (1e-13, 1e-13),
        "axyz_ext": (1e-20, 1e-20),
        "alpha_AV": (1e-9, 1e-9),
        "divv": (1e-10, 1e-10),
        "axyz": (1e-10, 1e-10),
    },
]

# Compare every dump produced above against its reference counterpart;
# check_regression raises on the first field exceeding its tolerance.
for iplot in range(i_dump):
    fpath_cur = os.path.join(dump_folder, f"{sim_name}_data_{iplot:04}.h5")
    fpath_ref = os.path.join(reference_folder, f"{sim_name}_data_{iplot:04}.h5")

    data_dict_cur = load_collected_data(fpath_cur)
    data_dict_ref = load_collected_data(fpath_ref)

    check_regression(data_dict_ref, data_dict_cur, tolerances[iplot])
Estimated memory usage: 0 MB