35 u64 remaining_n = nval_max - nval_current;
36 u64 to_skip = std::min(remaining_n, n);
38 eng_global.discard(to_skip);
39 nval_current += to_skip;
40 if (nval_current == nval_max) {
/// Generate up to @p val_count pseudo-random values from the shared engine,
/// advancing the generator state sequentially on this rank.
///
/// @param val_count requested number of values
/// @return vector of generated u64 values; may be shorter than @p val_count
///         when the remaining budget (nval_max - nval_current) is smaller
///
/// NOTE(review): this extract is missing interior lines (e.g. the body of
/// the trailing `if (nval_current == nval_max)` branch); comments below
/// describe only what is visible here.
45 std::vector<u64> next_n_sequential(
u64 val_count) {
// clamp the generation count to the generator's remaining budget
51 u64 to_generate = std::min(val_count, nval_max - nval_current);
53 std::vector<u64> ret(to_generate);
54 for (
u64 i = 0; i < to_generate; i++) {
// draw the next value directly from the global engine
55 ret[i] = eng_global();
// account for the values consumed from the budget
58 nval_current += to_generate;
// presumably flags exhaustion (done = true?) — branch body not visible in this extract
59 if (nval_current == nval_max) {
/// Parallel variant of next_n: each rank requests @p val_count values, and
/// collective metadata from shamalgs::collective::fetch_view determines how
/// many values to skip before and after this rank's slice so that every
/// rank's engine ends in the same state (the "invariant" property).
///
/// @param val_count number of values this rank wants
/// @return this rank's slice of the globally consistent sequence
///
/// NOTE(review): interior lines (original 77-87) are missing from this
/// extract; the call whose arguments appear at original lines 79/85 is not
/// fully visible.
65 std::vector<u64> next_n_parallel(
u64 val_count) {
// collective exchange: where this rank's slice starts in the global sequence
71 auto gen_info = shamalgs::collective::fetch_view(val_count);
74 u64 skip_start = gen_info.head_offset;
75 u64 gen_cnt = val_count;
// NOTE(review): `total_byte_count` is mixed with element counts
// (val_count, head_offset) here — confirm the field actually holds an
// element count, otherwise skip_end is computed in the wrong unit.
76 u64 skip_end = gen_info.total_byte_count - val_count - gen_info.head_offset;
79 "InvariantParallelGenerator",
85 skip_start + gen_cnt + skip_end);
// generate this rank's slice; the surrounding skips keep all ranks in sync
88 std::vector<u64> ret = next_n_sequential(gen_cnt);
95 : eng_global(eng), nval_max(nval_max), nval_current(0), done(
false) {
/// Draw @p val_count values for this rank.
///
/// @param val_count number of values requested by this rank
/// @param sequential when true, every rank generates the allreduced total
///        sequentially (so every rank advances its engine identically);
///        otherwise the skip-based parallel path is used
/// @return the generated values for this rank
///
/// NOTE(review): the branching lines (original 120/123, presumably
/// `if (sequential) { ... } else {`) are missing from this extract; the
/// pairing of the two return paths with `sequential` is inferred — confirm
/// against the full file.
119 std::vector<u64>
next_n(
u64 val_count,
bool sequential =
false) {
// total request across all ranks, so the sequential path advances every
// rank's engine by the same amount
121 u64 sum_ranks = collective::allreduce_sum<u64>(val_count);
122 return next_n_sequential(sum_ranks);
124 return next_n_parallel(val_count);
133 Engine duplicated_eng = eng_global;
134 u64 check_val = duplicated_eng();
136 std::vector<u64> collected_data{};
139 for (
u64 val : collected_data) {
140 if (val != check_val) {
std::vector< int > vector_allgatherv(const std::vector< T > &send_vec, const MPI_Datatype &send_type, std::vector< T > &recv_vec, const MPI_Datatype &recv_type, const MPI_Comm comm)
allgatherv on a vector with size query (size-querying variant of vector_allgatherv_ks) // TODO: add fault tolerance