Regression test: SPH disc
This test checks that the SPH disc setup reproduces the results stored in the reference files, within per-field tolerances.
import shamrock

# If this script is run through the shamrock executable rather than the python
# interpreter, the system must not be initialized here: the executable handles
# the MPI-specific startup logic itself.
if not shamrock.sys.is_initialized():
    shamrock.change_loglevel(1)
    shamrock.sys.init("0:0")
Setup parameters
import numpy as np

kernel = "M4"  # SPH kernel to use
Npart = 100000
disc_mass = 0.01  # disc mass in solar masses
center_mass = 1  # central object mass in solar masses
center_racc = 0.1  # accretion radius of the central sink

rout = 10  # outer disc radius
rin = 1  # inner disc radius

# target Shakura-Sunyaev alpha_ss ~ 1e-3, using alpha_ss ~ alpha_AV * 0.08
alpha_AV = 1e-3 / 0.08
alpha_u = 1
beta_AV = 2

q = 0.5  # sound speed power-law index
p = 3.0 / 2.0  # surface density power-law index
r0 = 1

C_cour = 0.3  # Courant CFL factor
C_force = 0.25  # force CFL factor

H_r_in = 0.05  # aspect ratio H/r at the inner radius

dump_folder = "_to_trash"
sim_name = "disc_sph"


import os

# Create the dump directory if it does not exist
if shamrock.sys.world_rank() == 0:
    os.makedirs(dump_folder, exist_ok=True)
Deduced quantities
si = shamrock.UnitSystem()
sicte = shamrock.Constants(si)
codeu = shamrock.UnitSystem(
    unit_time=3600 * 24 * 365,
    unit_length=sicte.au(),
    unit_mass=sicte.sol_mass(),
)
ucte = shamrock.Constants(codeu)

# Deduced quantities
pmass = disc_mass / Npart
bmin = (-rout * 2, -rout * 2, -rout * 2)
bmax = (rout * 2, rout * 2, rout * 2)
G = ucte.G()


def sigma_profile(r):
    sigma_0 = 1
    return sigma_0 * (r / rin) ** (-p)


def kep_profile(r):
    return (G * center_mass / r) ** 0.5


def omega_k(r):
    return kep_profile(r) / r


def cs_profile(r):
    cs_in = (H_r_in * rin) * omega_k(rin)
    return ((r / rin) ** (-q)) * cs_in


cs0 = cs_profile(rin)


def rot_profile(r):
    # return kep_profile(r)

    # sub-Keplerian correction for pressure-gradient support
    return ((kep_profile(r) ** 2) - (2 * p + q) * cs_profile(r) ** 2) ** 0.5


def H_profile(r):
    H = cs_profile(r) / omega_k(r)
    # fact = (2.**0.5) * 3.
    fact = 1
    return fact * H  # the commented factor is taken from Phantom, to speed up thermalization
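As a quick sanity check, the deduced scales can be printed before building the setup (a minimal sketch using only the functions above; values are in code units):

# Sanity check of the deduced disc scales (code units).
# By construction H/r at the inner edge equals H_r_in, and with q = 0.5
# the aspect ratio H/r = H_r_in * (r/rin)**(0.5 - q) stays constant.
print("cs(rin)     =", cs_profile(rin))
print("H/r at rin  =", H_profile(rin) / rin)  # ~ H_r_in = 0.05
print("H/r at rout =", H_profile(rout) / rout)
# rot_profile applies v_phi^2 = v_K^2 - (2p + q) * cs^2 (slightly sub-Keplerian)
print("v_phi(rin)  =", rot_profile(rin), " v_K(rin) =", kep_profile(rin))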
Configure the solver
ctx = shamrock.Context()
ctx.pdata_layout_new()

model = shamrock.get_Model_SPH(context=ctx, vector_type="f64_3", sph_kernel=kernel)

cfg = model.gen_default_config()
cfg.set_artif_viscosity_ConstantDisc(alpha_u=alpha_u, alpha_AV=alpha_AV, beta_AV=beta_AV)
cfg.set_eos_locally_isothermalLP07(cs0=cs0, q=q, r0=r0)
cfg.print_status()
cfg.set_units(codeu)
model.set_solver_config(cfg)

# Set scheduler criteria to effectively disable patch splitting and merging.
crit_split = int(1e9)
crit_merge = 1
model.init_scheduler(crit_split, crit_merge)
model.resize_simulation_box(bmin, bmax)
Set up the sink particles
sink_list = [
    {"mass": center_mass, "racc": center_racc, "pos": (0, 0, 0), "vel": (0, 0, 0)},
]

model.set_particle_mass(pmass)
for s in sink_list:
    mass = s["mass"]
    x, y, z = s["pos"]
    vx, vy, vz = s["vel"]
    racc = s["racc"]

    print("add sink : mass {} pos {} vel {} racc {}".format(mass, (x, y, z), (vx, vy, vz), racc))

    model.add_sink(mass, (x, y, z), (vx, vy, vz), racc)
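The list layout makes the setup easy to extend; for instance, a binary could be modeled by appending a second sink before the loop (hypothetical values, kept commented out so this regression test is unchanged):

# Hypothetical second sink for a binary setup (illustrative only, not part
# of this test):
# sink_list.append(
#     {"mass": 0.5, "racc": 0.1, "pos": (5.0, 0.0, 0.0), "vel": (0.0, 0.5, 0.0)}
# )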
Set up the simulation
setup = model.get_setup()
gen_disc = setup.make_generator_disc_mc(
    part_mass=pmass,
    disc_mass=disc_mass,
    r_in=rin,
    r_out=rout,
    sigma_profile=sigma_profile,
    H_profile=H_profile,
    rot_profile=rot_profile,
    cs_profile=cs_profile,
    random_seed=666,
)
setup.apply_setup(gen_disc)

model.set_cfl_cour(C_cour)
model.set_cfl_force(C_force)

# Relax the smoothing-length tolerance for the first timestep, so the
# iteration can converge from the initial guess, then tighten it back.
model.change_htolerance(1.3)
model.timestep()
model.change_htolerance(1.1)
Save/load state helpers, adapted from https://gist.github.com/gilliss/7e1d451b42441e77ae33a8b790eeeb73
def save_collected_data(data_dict, fpath):

    print(f"Saving data to {fpath}")

    import h5py

    # Open the HDF5 file and write one dataset per entry of data_dict
    with h5py.File(fpath, "w") as f:
        for dset_name in data_dict:
            f.create_dataset(dset_name, data=data_dict[dset_name])

def load_collected_data(fpath):

    print(f"Loading data from {fpath}")

    if not os.path.exists(fpath):
        raise FileNotFoundError(f"File {fpath} does not exist")

    import h5py

    # Re-open the HDF5 file and read the data_dict structure back
    with h5py.File(fpath, "r") as f:
        data_dict = {dset_name: f[dset_name][:] for dset_name in f.keys()}

    return data_dict

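A minimal round-trip sketch of the two helpers above (hypothetical array, written to the dump folder):

# Round-trip check of the save/load helpers on a small hypothetical array.
_demo = {"xyz": np.zeros((4, 3))}
_demo_path = os.path.join(dump_folder, "h5_roundtrip_demo.h5")
save_collected_data(_demo, _demo_path)
assert np.array_equal(load_collected_data(_demo_path)["xyz"], _demo["xyz"])
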
def check_regression(data_dict1, data_dict2, tolerances, verbose=False):

    # Check that both datasets contain the same keys
    if set(data_dict1.keys()) != set(data_dict2.keys()):
        raise ValueError(
            f"Data keys sets do not match: {set(data_dict1.keys())} != {set(data_dict2.keys())}"
        )

    # Check that a tolerance is defined for every key
    if set(tolerances.keys()) != set(data_dict1.keys()):
        raise ValueError(
            f"Tolerances keys sets do not match: {set(tolerances.keys())} != {set(data_dict1.keys())}"
        )

    # Compare the values of every dataset
    for dset_name in data_dict1:

        # The shapes must match
        if data_dict1[dset_name].shape != data_dict2[dset_name].shape:
            print(f"shape: {data_dict1[dset_name].shape} != {data_dict2[dset_name].shape}")
            raise ValueError(f"Data {dset_name} has different shape")

        # Element-wise comparison within the per-field (rtol, atol) tolerances
        delta = np.isclose(
            data_dict1[dset_name],
            data_dict2[dset_name],
            rtol=tolerances[dset_name][0],
            atol=tolerances[dset_name][1],
        )

        offenses = 0

        for i in range(len(data_dict1[dset_name])):
            if not np.all(delta[i]):
                if verbose:
                    print(
                        f"Data {dset_name} is not equal at index {i}, rtol={tolerances[dset_name][0]}, atol={tolerances[dset_name][1]}"
                    )
                    print(f"  value 1: {data_dict1[dset_name][i]}")
                    print(f"  value 2: {data_dict2[dset_name][i]}")
                    print(
                        f"  absolute diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i])}"
                    )
                    print(
                        f"  relative diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i]) / data_dict1[dset_name][i]}"
                    )
                offenses += 1

        if offenses > 0:
            print(
                f"Data {dset_name} has {offenses} offenses, absolute diff: {np.abs(data_dict1[dset_name] - data_dict2[dset_name]).max()}"
            )
            raise ValueError(f"Data {dset_name} is not equal")

    print(" -> Regression test passed successfully")

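For illustration, a self-contained usage sketch of check_regression on hypothetical arrays (identical data passes; out-of-tolerance data raises ValueError):

# Hypothetical smoke test of check_regression (not part of the regression data).
_ref = {"xyz": np.ones((4, 3))}
_cur = {"xyz": np.ones((4, 3))}
check_regression(_ref, _cur, {"xyz": (1e-15, 1e-15)})  # passes
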
def save_state(iplot):
    # Collect the current particle data and dump it to HDF5
    data_dict = ctx.collect_data()
    save_collected_data(data_dict, os.path.join(dump_folder, f"{sim_name}_data_{iplot:04}.h5"))
Running the simulation
t_sum = 0
t_target = 0.1

save_state(0)

i_dump = 1
dt_dump = 0.05

# Evolve to t_target, saving a state every dt_dump
# (here: the initial state plus two evolution dumps)
while t_sum < t_target:
    model.evolve_until(t_sum + dt_dump)

    save_state(i_dump)

    t_sum += dt_dump
    i_dump += 1
Check regression
reference_folder = "reference-files/regression_sph_disc"

# One (rtol, atol) tolerance pair per field, one dict per saved dump
tolerances = [
    {
        "vxyz": (1e-15, 1e-15),
        "hpart": (1e-15, 1e-15),
        "duint": (1e-14, 1e-14),
        "axyz": (1e-13, 1e-13),
        "xyz": (1e-15, 1e-15),
        "axyz_ext": (1e-14, 1e-14),
        "uint": (1e-20, 1e-20),
    },
    {
        "vxyz": (1e-14, 1e-14),
        "hpart": (1e-15, 1e-15),
        "duint": (1e-14, 1e-14),
        "axyz": (1e-13, 1e-13),
        "xyz": (1e-15, 1e-15),
        "axyz_ext": (1e-14, 1e-14),
        "uint": (1e-15, 1e-15),
    },
    {
        "vxyz": (1e-14, 1e-14),
        "hpart": (1e-14, 1e-14),
        "duint": (1e-13, 1e-13),
        "axyz": (1e-13, 1e-13),
        "xyz": (1e-15, 1e-15),
        "axyz_ext": (1e-14, 1e-14),
        "uint": (1e-15, 1e-15),
    },
]
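Each dump gets its own tolerance dict, so the list length must match the number of saved states (three here: the initial state plus two evolution dumps). A minimal guard (sketch):

# One tolerance dict per saved dump (initial state + two evolved states).
assert len(tolerances) == i_dump, "tolerance list must cover every dump"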
332
for iplot in range(i_dump):

    fpath_cur = os.path.join(dump_folder, f"{sim_name}_data_{iplot:04}.h5")
    fpath_ref = os.path.join(reference_folder, f"{sim_name}_data_{iplot:04}.h5")

    data_dict_cur = load_collected_data(fpath_cur)
    data_dict_ref = load_collected_data(fpath_ref)

    check_regression(data_dict_ref, data_dict_cur, tolerances[iplot])