Regression test: Godunov soundwave 3D

This test checks that the Godunov soundwave setup reproduces the results stored in the reference file.

import os

import matplotlib.pyplot as plt
import numpy as np

import shamrock

# If this script is run through the shamrock executable instead of the Python
# interpreter, the system must not be initialized here: the executable handles
# the MPI-specific initialization itself.
if not shamrock.sys.is_initialized():
    shamrock.change_loglevel(1)
    shamrock.sys.init("0:0")


tmax = 1.0
do_plot = True

multx = 1
multy = 1
multz = 1

sz = 1 << 1
base = 16


dump_folder = "_to_trash"
sim_name = "soundwave_3d_godunov"


# Create the dump directory if it does not exist
if shamrock.sys.world_rank() == 0:
    os.makedirs(dump_folder, exist_ok=True)

Set config

ctx = shamrock.Context()
ctx.pdata_layout_new()

model = shamrock.get_Model_Ramses(context=ctx, vector_type="f64_3", grid_repr="i64_3")

cfg = model.gen_default_config()
scale_fact = 1 / (sz * base * multx)
cfg.set_scale_factor(scale_fact)
cfg.set_eos_gamma(1.4)
model.set_solver_config(cfg)
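
As a quick sanity check on the numbers above: scale_fact = 1 / (2 * 16 * 1) = 1/32. Assuming the usual convention that physical coordinates are the integer grid coordinates multiplied by this factor, the base grid built in the next block (16 cells per axis, each spanning 2 integer units, hence 32 integer units per axis) covers a unit box of side 1.0, with a physical cell size of 2/32 = 0.0625 and 16^3 = 4096 cells in total.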

Setup

model.init_scheduler(int(1e3), 1)
model.make_base_grid((0, 0, 0), (sz, sz, sz), (base * multx, base * multy, base * multz))

kx, ky, kz = 2 * np.pi, 0, 0
delta_rho = 0
delta_v = 1e-5


def rho_map(rmin, rmax):

    x_min, y_min, z_min = rmin
    x_max, y_max, z_max = rmax

    x = (x_min + x_max) / 2
    y = (y_min + y_max) / 2
    z = (z_min + z_max) / 2

    # shift to the box center
    x -= 0.5
    y -= 0.5
    z -= 0.5

    x += 0.1  # small offset so the test is not centered on the origin
    y += 0.2
    z -= 0.07

    if x**2 + y**2 + z**2 < 0.1:
        return 2.0

    return 1.0


def rhoe_map(rmin, rmax):
    rho = rho_map(rmin, rmax)
    return 1.0 * rho


def rhovel_map(rmin, rmax):
    rho = rho_map(rmin, rmax)
    return (0 * rho, 0 * rho, 0 * rho)


model.set_field_value_lambda_f64("rho", rho_map)
model.set_field_value_lambda_f64("rhoetot", rhoe_map)
model.set_field_value_lambda_f64_3("rhovel", rhovel_map)
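
Since the initial condition is set through plain Python callables, they can be probed by hand before the run. A minimal sanity check, assuming the lambdas receive cell bounds in the same scaled [0, 1] coordinates that rho_map clearly expects (the two cells below are hypothetical and chosen to fall inside and outside the over-dense sphere):

# Hypothetical cell bounds, only used to probe the maps defined above.
cell_in = ((0.40, 0.30, 0.56), (0.40 + 1 / 16, 0.30 + 1 / 16, 0.56 + 1 / 16))
cell_out = ((0.90, 0.90, 0.90), (0.90 + 1 / 16, 0.90 + 1 / 16, 0.90 + 1 / 16))

print(rho_map(*cell_in), rho_map(*cell_out))  # expected: 2.0 (inside the sphere), 1.0
print(rhoe_map(*cell_in), rhovel_map(*cell_in))  # expected: 2.0 and (0.0, 0.0, 0.0)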

Save state function adapted from https://gist.github.com/gilliss/7e1d451b42441e77ae33a8b790eeeb73

def save_collected_data(data_dict, fpath):

    print(f"Saving data to {fpath}")

    import h5py

    # Open the HDF5 file and write the data_dict structure into it
    f = h5py.File(fpath, "w")
    for dset_name in data_dict:
        f.create_dataset(dset_name, data=data_dict[dset_name])
    f.close()


def load_collected_data(fpath):

    print(f"Loading data from {fpath}")

    if not os.path.exists(fpath):
        print(f"File {fpath} does not exist")
        raise FileNotFoundError(f"File {fpath} does not exist")

    import h5py

    # Re-open the HDF5 file and read the data_dict structure back
    f = h5py.File(fpath, "r")

    data_dict = {}
    for dset_name in f.keys():
        data_dict[dset_name] = f[dset_name][:]

    f.close()

    return data_dict

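These two helpers simply round-trip a flat dict of NumPy arrays through an HDF5 file. A small illustration with throwaway data (the file name and arrays below are hypothetical, not part of the regression data):

if shamrock.sys.world_rank() == 0:
    demo = {"rho": np.ones(8), "rhoetot": np.full(8, 2.5)}
    demo_path = os.path.join(dump_folder, "io_roundtrip_demo.h5")
    save_collected_data(demo, demo_path)
    back = load_collected_data(demo_path)
    assert np.array_equal(back["rho"], demo["rho"])
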
def check_regression(data_dict1, data_dict2, tolerances):

    # Check that the key sets match
    if set(data_dict1.keys()) != set(data_dict2.keys()):
        print("Data keys sets do not match")
        raise ValueError(
            f"Data keys sets do not match: {set(data_dict1.keys())} != {set(data_dict2.keys())}"
        )

    # Check that tolerances are defined for all keys
    if set(tolerances.keys()) != set(data_dict1.keys()):
        print("Tolerances keys sets do not match")
        raise ValueError(
            f"Tolerances keys sets do not match: {set(tolerances.keys())} != {set(data_dict1.keys())}"
        )

    # Compare the values key by key
    for dset_name in data_dict1:

        # Compare shapes
        if data_dict1[dset_name].shape != data_dict2[dset_name].shape:
            print(f"Data {dset_name} has different shape")
            print(f"shape: {data_dict1[dset_name].shape} != {data_dict2[dset_name].shape}")
            raise ValueError(f"Data {dset_name} has different shape")

        # Compare values within the (rtol, atol) tolerances
        delta = np.isclose(
            data_dict1[dset_name],
            data_dict2[dset_name],
            rtol=tolerances[dset_name][0],
            atol=tolerances[dset_name][1],
        )

        offenses = 0

        for i in range(len(data_dict1[dset_name])):
            if not np.all(delta[i]):
                if False:  # flip to True to print the offending entries when debugging
                    print(
                        f"Data {dset_name} is not equal at index {i}, rtol={tolerances[dset_name][0]}, atol={tolerances[dset_name][1]}"
                    )
                    print(f"    value 1: {data_dict1[dset_name][i]}")
                    print(f"    value 2: {data_dict2[dset_name][i]}")
                    print(
                        f"    absolute diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i])}"
                    )
                    print(
                        f"    relative diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i]) / data_dict1[dset_name][i]}"
                    )
                offenses += 1

        if offenses > 0:
            print(
                f"Data {dset_name} has {offenses} offenses, absolute diff: {np.abs(data_dict1[dset_name] - data_dict2[dset_name]).max()}"
            )
            raise ValueError(f"Data {dset_name} is not equal")

    print(" -> Regression test passed successfully")

def save_state(iplot):
    data_dict = ctx.collect_data()

    if shamrock.sys.world_rank() == 0:
        print(f"Saving data to {os.path.join(dump_folder, f'{sim_name}_data_{iplot:04}.h5')}")
        save_collected_data(data_dict, os.path.join(dump_folder, f"{sim_name}_data_{iplot:04}.h5"))

Running the simulation

save_state(0)

times = [0.01 * i for i in range(100)]
# advance the simulation to each target time and dump a VTK snapshot
for i, t in enumerate(times):
    model.evolve_until(t)
    model.dump_vtk(os.path.join(dump_folder, sim_name + "_" + str(i) + ".vtk"))


save_state(1)
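
The do_plot flag and the matplotlib import are not used in the listing above. A minimal sketch of what an optional visualization could look like, assuming ctx.collect_data() exposes a per-cell "rho" array as the regression tolerances below suggest:

data = ctx.collect_data()  # collective call, mirroring save_state above
if do_plot and shamrock.sys.world_rank() == 0:
    plt.hist(np.ravel(data["rho"]), bins=64)  # distribution of cell densities at the final time
    plt.xlabel("rho")
    plt.ylabel("cell count")
    plt.savefig(os.path.join(dump_folder, f"{sim_name}_rho_hist.png"))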

Check regression

reference_folder = "reference-files/regression_godunov_soundwave_3d"

tolerances = [
    {
        "cell_min": (1e-20, 1e-20),
        "cell_max": (1e-20, 1e-20),
        "rho": (1e-20, 1e-20),
        "rhovel": (1e-20, 1e-20),
        "rhoetot": (1e-20, 1e-20),
    },
    {
        "cell_min": (1e-20, 1e-20),
        "cell_max": (1e-20, 1e-20),
        "rho": (1e-15, 1e-15),
        "rhovel": (1e-15, 1e-15),
        "rhoetot": (1e-15, 1e-15),
    },
]

for istate in [0, 1]:
    if shamrock.sys.world_rank() == 0:
        print(f"Checking regression for state {istate}")

        fpath_cur = os.path.join(dump_folder, f"{sim_name}_data_{istate:04}.h5")
        fpath_ref = os.path.join(reference_folder, f"{sim_name}_data_{istate:04}.h5")

        data_dict_cur = load_collected_data(fpath_cur)
        data_dict_ref = load_collected_data(fpath_ref)

        check_regression(data_dict_ref, data_dict_cur, tolerances[istate])
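
The comparison relies on np.isclose, which flags a pair of values as equal when |a - b| <= atol + rtol * |b|. A tiny illustration of the tolerance semantics with synthetic arrays (hypothetical, not related to the reference data):

ref = {"rho": np.array([1.0, 2.0, 3.0])}
cur = {"rho": np.array([1.0, 2.0, 3.0 + 1e-15])}
check_regression(ref, cur, {"rho": (1e-14, 1e-14)})  # passes: the deviation is below atol + rtol * |b|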
