Regression test: Godunov soundwave 3D

This test checks that the Godunov soundwave setup reproduces the results stored in the reference files, within the tolerances defined at the end of the script.

import os

import matplotlib.pyplot as plt
import numpy as np

import shamrock

# If the shamrock executable is used to run this script instead of the python interpreter,
# the system must not be initialized here, as the executable handles the MPI setup itself
if not shamrock.sys.is_initialized():
    shamrock.change_loglevel(1)
    shamrock.sys.init("0:0")


tmax = 1.0
do_plot = True

multx = 1
multy = 1
multz = 1

sz = 1 << 1
base = 16


dump_folder = "_to_trash"
sim_name = "soundwave_3d_godunov"


# Create the dump directory if it does not exist
if shamrock.sys.world_rank() == 0:
    os.makedirs(dump_folder, exist_ok=True)

Set config

ctx = shamrock.Context()
ctx.pdata_layout_new()

model = shamrock.get_Model_Ramses(context=ctx, vector_type="f64_3", grid_repr="i64_3")

cfg = model.gen_default_config()
scale_fact = 1 / (sz * base * multx)
cfg.set_scale_factor(scale_fact)
cfg.set_eos_gamma(1.4)
model.set_solver_config(cfg)
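For orientation: with sz = 2, base = 16 and multx = 1 the base grid created below spans sz * base * multx = 32 grid units along x, so scale_fact = 1/32 presumably rescales the integer grid onto a unit box, which is consistent with the density map below shifting the cell centers by 0.5 to reach the domain center.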

Setup

model.init_scheduler(int(1e3), 1)
model.make_base_grid((0, 0, 0), (sz, sz, sz), (base * multx, base * multy, base * multz))

# Soundwave perturbation parameters (currently unused by the maps below)
kx, ky, kz = 2 * np.pi, 0, 0
delta_rho = 0
delta_v = 1e-5


def rho_map(rmin, rmax):
    x_min, y_min, z_min = rmin
    x_max, y_max, z_max = rmax

    # mid-point of the cell bounds (cell center)
    x = (x_min + x_max) / 2
    y = (y_min + y_max) / 2
    z = (z_min + z_max) / 2

    # shift to the domain center
    x -= 0.5
    y -= 0.5
    z -= 0.5

    x += 0.1  # offset so the test is not centered on the origin
    y += 0.2
    z -= 0.07

    if x**2 + y**2 + z**2 < 0.1:
        return 2.0

    return 1.0


def rhoe_map(rmin, rmax):
    rho = rho_map(rmin, rmax)
    return 1.0 * rho


def rhovel_map(rmin, rmax):
    rho = rho_map(rmin, rmax)
    return (0 * rho, 0 * rho, 0 * rho)


model.set_field_value_lambda_f64("rho", rho_map)
model.set_field_value_lambda_f64("rhoetot", rhoe_map)
model.set_field_value_lambda_f64_3("rhovel", rhovel_map)

Save state function adapted from https://gist.github.com/gilliss/7e1d451b42441e77ae33a8b790eeeb73

def save_collected_data(data_dict, fpath):
    print(f"Saving data to {fpath}")

    import h5py

    # Open the HDF5 file and write out every dataset of data_dict
    with h5py.File(fpath, "w") as f:
        for dset_name in data_dict:
            f.create_dataset(dset_name, data=data_dict[dset_name])


def load_collected_data(fpath):
    print(f"Loading data from {fpath}")

    if not os.path.exists(fpath):
        print(f"File {fpath} does not exist")
        raise FileNotFoundError(f"File {fpath} does not exist")

    import h5py

    # Re-open the HDF5 file and read the data_dict structure back
    with h5py.File(fpath, "r") as f:
        data_dict = {}
        for dset_name in f.keys():
            data_dict[dset_name] = f[dset_name][:]

    return data_dict


def check_regression(data_dict1, data_dict2, tolerances, verbose=False):
    # Check that the key sets match
    if set(data_dict1.keys()) != set(data_dict2.keys()):
        print("Data keys sets do not match")
        raise ValueError(
            f"Data keys sets do not match: {set(data_dict1.keys())} != {set(data_dict2.keys())}"
        )

    # Check that a tolerance is defined for every key
    if set(tolerances.keys()) != set(data_dict1.keys()):
        print("Tolerances keys sets do not match")
        raise ValueError(
            f"Tolerances keys sets do not match: {set(tolerances.keys())} != {set(data_dict1.keys())}"
        )

    # Compare the values of every dataset
    for dset_name in data_dict1:
        # Shapes must match
        if data_dict1[dset_name].shape != data_dict2[dset_name].shape:
            print(f"Data {dset_name} has different shape")
            print(f"shape: {data_dict1[dset_name].shape} != {data_dict2[dset_name].shape}")
            raise ValueError(f"Data {dset_name} has different shape")

        # Element-wise comparison within the per-dataset (rtol, atol) tolerances
        delta = np.isclose(
            data_dict1[dset_name],
            data_dict2[dset_name],
            rtol=tolerances[dset_name][0],
            atol=tolerances[dset_name][1],
        )

        offenses = 0

        for i in range(len(data_dict1[dset_name])):
            if not np.all(delta[i]):
                if verbose:  # set verbose=True for per-entry diagnostics
                    print(
                        f"Data {dset_name} is not equal at index {i}, rtol={tolerances[dset_name][0]}, atol={tolerances[dset_name][1]}"
                    )
                    print(f"    value 1: {data_dict1[dset_name][i]}")
                    print(f"    value 2: {data_dict2[dset_name][i]}")
                    print(
                        f"    absolute diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i])}"
                    )
                    print(
                        f"    relative diff: {np.abs(data_dict1[dset_name][i] - data_dict2[dset_name][i]) / data_dict1[dset_name][i]}"
                    )
                offenses += 1

        if offenses > 0:
            print(
                f"Data {dset_name} has {offenses} offenses, max absolute diff: {np.abs(data_dict1[dset_name] - data_dict2[dset_name]).max()}"
            )
            raise ValueError(f"Data {dset_name} is not equal")

    print(" -> Regression test passed successfully")


def save_state(iplot):
    data_dict = ctx.collect_data()

    if shamrock.sys.world_rank() == 0:
        fpath = os.path.join(dump_folder, f"{sim_name}_data_{iplot:04}.h5")
        save_collected_data(data_dict, fpath)
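For reference, np.isclose(a, b, rtol=r, atol=t) flags a pair of values as equal when abs(a - b) <= t + r * abs(b); in the comparison below a is the reference data and b the current run, and each dataset gets its own (rtol, atol) pair from the tolerances dictionaries.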

Running the simulation

save_state(0)

times = [0.01 * i for i in range(100)]
for i, t in enumerate(times):
    model.evolve_until(t)
    model.dump_vtk(os.path.join(dump_folder, sim_name + "_" + str(i) + ".vtk"))


save_state(1)
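The do_plot flag and the matplotlib import are otherwise unused in this listing; a minimal quick-look sketch is shown below, assuming the collected "rho" dataset holds one density value per cell (the dataset names are the same as in the tolerances of the next section):

if do_plot and shamrock.sys.world_rank() == 0:
    # Hypothetical quick-look plot: histogram of the final density field
    data_final = load_collected_data(os.path.join(dump_folder, f"{sim_name}_data_0001.h5"))
    plt.hist(np.ravel(data_final["rho"]), bins=64)
    plt.xlabel("rho")
    plt.ylabel("cell count")
    plt.savefig(os.path.join(dump_folder, f"{sim_name}_rho_hist.png"))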

Check regression

reference_folder = "reference-files/regression_godunov_soundwave_3d"

# Per-state, per-dataset (rtol, atol) pairs
tolerances = [
    {
        "cell_min": (1e-20, 1e-20),
        "cell_max": (1e-20, 1e-20),
        "rho": (1e-20, 1e-20),
        "rhovel": (1e-20, 1e-20),
        "rhoetot": (1e-20, 1e-20),
    },
    {
        "cell_min": (1e-20, 1e-20),
        "cell_max": (1e-20, 1e-20),
        "rho": (1e-15, 1e-15),
        "rhovel": (1e-15, 1e-15),
        "rhoetot": (1e-15, 1e-15),
    },
]

for istate in [0, 1]:
    if shamrock.sys.world_rank() == 0:
        print(f"Checking regression for state {istate}")

        fpath_cur = os.path.join(dump_folder, f"{sim_name}_data_{istate:04}.h5")
        fpath_ref = os.path.join(reference_folder, f"{sim_name}_data_{istate:04}.h5")

        data_dict_cur = load_collected_data(fpath_cur)
        data_dict_ref = load_collected_data(fpath_ref)

        check_regression(data_dict_ref, data_dict_cur, tolerances[istate])
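If the solver output changes intentionally, the reference files have to be updated. One possible way to do that is sketched below; the regenerate_references switch is hypothetical and not part of the original test:

import shutil

regenerate_references = False  # hypothetical switch, set to True to overwrite the references
if regenerate_references and shamrock.sys.world_rank() == 0:
    os.makedirs(reference_folder, exist_ok=True)
    for istate in [0, 1]:
        fname = f"{sim_name}_data_{istate:04}.h5"
        shutil.copy(os.path.join(dump_folder, fname), os.path.join(reference_folder, fname))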
