diff --git a/examples/10_streaming_read.cpp b/examples/10_streaming_read.cpp index a7e503a055..eae79dd28a 100644 --- a/examples/10_streaming_read.cpp +++ b/examples/10_streaming_read.cpp @@ -39,6 +39,9 @@ int main() extents[i] = rc.getExtent(); } + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. iteration.close(); for (size_t i = 0; i < 3; ++i) @@ -55,6 +58,14 @@ int main() } } + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); + return 0; #else std::cout << "The streaming example requires that openPMD has been built " diff --git a/examples/10_streaming_read.py b/examples/10_streaming_read.py index 4cd29c46b2..5d0f688b94 100755 --- a/examples/10_streaming_read.py +++ b/examples/10_streaming_read.py @@ -53,3 +53,10 @@ print("dim: {}".format(dim)) chunk = loadedChunks[i] print(chunk) + + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. 
+ series.close() diff --git a/examples/10_streaming_write.cpp b/examples/10_streaming_write.cpp index 1c12e034f1..57bbcb6287 100644 --- a/examples/10_streaming_write.cpp +++ b/examples/10_streaming_write.cpp @@ -45,6 +45,14 @@ int main() iteration.close(); } + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); + return 0; #else std::cout << "The streaming example requires that openPMD has been built " diff --git a/examples/10_streaming_write.py b/examples/10_streaming_write.py index 514b815202..956b683b05 100755 --- a/examples/10_streaming_write.py +++ b/examples/10_streaming_write.py @@ -80,3 +80,10 @@ # If not closing an iteration explicitly, it will be implicitly closed # upon creating the next iteration. iteration.close() + + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. 
+ series.close() diff --git a/examples/11_particle_dataframe.py b/examples/11_particle_dataframe.py index 9b5e626705..7e0cad065c 100755 --- a/examples/11_particle_dataframe.py +++ b/examples/11_particle_dataframe.py @@ -96,3 +96,5 @@ idx_max * E.grid_spacing + E.grid_global_offset) print("maximum intensity I={} at index={} z={}mu".format( Intensity_max, idx_max, pos_max[2])) + + s.close() diff --git a/examples/12_span_write.cpp b/examples/12_span_write.cpp index 6afcb18fe4..f60746bff9 100644 --- a/examples/12_span_write.cpp +++ b/examples/12_span_write.cpp @@ -84,6 +84,14 @@ void span_write(std::string const &filename) } iteration.close(); } + + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); } int main() diff --git a/examples/12_span_write.py b/examples/12_span_write.py index c776bd04a7..bfe0f69784 100644 --- a/examples/12_span_write.py +++ b/examples/12_span_write.py @@ -27,6 +27,13 @@ def span_write(filename): j += 1 iteration.close() + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. 
+ series.close() + if __name__ == "__main__": for ext in io.file_extensions: diff --git a/examples/13_write_dynamic_configuration.cpp b/examples/13_write_dynamic_configuration.cpp index 06ef1e8e77..a398eccf27 100644 --- a/examples/13_write_dynamic_configuration.cpp +++ b/examples/13_write_dynamic_configuration.cpp @@ -128,5 +128,13 @@ chunks = "auto" iteration.close(); } + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); + return 0; } diff --git a/examples/13_write_dynamic_configuration.py b/examples/13_write_dynamic_configuration.py index ce96456f03..8670961592 100644 --- a/examples/13_write_dynamic_configuration.py +++ b/examples/13_write_dynamic_configuration.py @@ -146,6 +146,13 @@ def main(): # upon creating the next iteration. iteration.close() + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. + series.close() + if __name__ == "__main__": main() diff --git a/examples/1_structure.cpp b/examples/1_structure.cpp index dc5056a6c4..fe4381884f 100644 --- a/examples/1_structure.cpp +++ b/examples/1_structure.cpp @@ -39,7 +39,8 @@ int main() * to the openPMD standard. Creation of new elements happens on access * inside the tree-like structure. Required attributes are initialized to * reasonable defaults for every object. 
*/ - ParticleSpecies electrons = series.iterations[1].particles["electrons"]; + ParticleSpecies electrons = + series.writeIterations()[1].particles["electrons"]; /* Data to be moved from memory to persistent storage is structured into * Records, each holding an unbounded number of RecordComponents. If a @@ -59,9 +60,17 @@ int main() electrons["positionOffset"]["x"].resetDataset(dataset); electrons["positionOffset"]["x"].makeConstant(22.0); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[1].close(); + /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/2_read_serial.cpp b/examples/2_read_serial.cpp index e944ef12bf..8fb3ccb190 100644 --- a/examples/2_read_serial.cpp +++ b/examples/2_read_serial.cpp @@ -91,7 +91,11 @@ int main() } auto all_data = E_x.loadChunk(); - series.flush(); + + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + i.close(); cout << "Full E/x starts with:\n\t{"; for (size_t col = 0; col < extent[1] && col < 5; ++col) cout << all_data.get()[col] << ", "; @@ -103,5 +107,6 @@ int main() * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. 
*/ + series.close(); return 0; } diff --git a/examples/2_read_serial.py b/examples/2_read_serial.py index d24841775a..87b5568306 100644 --- a/examples/2_read_serial.py +++ b/examples/2_read_serial.py @@ -61,7 +61,11 @@ # print("") all_data = E_x.load_chunk() - series.flush() + + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + i.close() print("Full E/x is of shape {0} and starts with:".format(all_data.shape)) print(all_data[0, 0, :5]) diff --git a/examples/2a_read_thetaMode_serial.cpp b/examples/2a_read_thetaMode_serial.cpp index 8085e242b2..a796e66447 100644 --- a/examples/2a_read_thetaMode_serial.cpp +++ b/examples/2a_read_thetaMode_serial.cpp @@ -69,11 +69,17 @@ int main() // toCartesianSliceYZ(E_z_modes).loadChunk(); # (y, z) // series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + i.close(); + /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/2a_read_thetaMode_serial.py b/examples/2a_read_thetaMode_serial.py index 907f6634aa..07021c1f36 100644 --- a/examples/2a_read_thetaMode_serial.py +++ b/examples/2a_read_thetaMode_serial.py @@ -51,6 +51,13 @@ # E_z_yz = toCartesianSliceYZ(E_z_modes)[:, :] # (y, z) # series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. 
+ # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. + i.close() + # The files in 'series' are still open until the series is closed, at which # time it cleanly flushes and closes all open file handles. # One can close the object explicitly to trigger this. diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp index 155425eaaa..a66db6c080 100644 --- a/examples/3_write_serial.cpp +++ b/examples/3_write_serial.cpp @@ -45,7 +45,7 @@ int main(int argc, char *argv[]) cout << "Created an empty " << series.iterationEncoding() << " Series\n"; MeshRecordComponent rho = - series.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR]; + series.writeIterations()[1].meshes["rho"][MeshRecordComponent::SCALAR]; cout << "Created a scalar mesh Record with all required openPMD " "attributes\n"; @@ -67,7 +67,11 @@ int main(int argc, char *argv[]) cout << "Stored the whole Dataset contents as a single chunk, " "ready to write content\n"; - series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[1].close(); + cout << "Dataset content has been fully written\n"; /* The files in 'series' are still open until the object is destroyed, on @@ -76,5 +80,6 @@ int main(int argc, char *argv[]) * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/3_write_serial.py b/examples/3_write_serial.py index 320acd027e..8e136f9512 100644 --- a/examples/3_write_serial.py +++ b/examples/3_write_serial.py @@ -28,7 +28,7 @@ print("Created an empty {0} Series".format(series.iteration_encoding)) print(len(series.iterations)) - rho = series.iterations[1]. \ + rho = series.write_iterations()[1]. 
\ meshes["rho"][io.Mesh_Record_Component.SCALAR] dataset = io.Dataset(data.dtype, data.shape) @@ -47,7 +47,10 @@ print("Stored the whole Dataset contents as a single chunk, " + "ready to write content") - series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + series.write_iterations()[1].close() print("Dataset content has been fully written") # The files in 'series' are still open until the series is closed, at which diff --git a/examples/3a_write_thetaMode_serial.cpp b/examples/3a_write_thetaMode_serial.cpp index 56fd703799..9367e43f70 100644 --- a/examples/3a_write_thetaMode_serial.cpp +++ b/examples/3a_write_thetaMode_serial.cpp @@ -51,7 +51,7 @@ int main() geos << "m=" << num_modes << ";imag=+"; std::string const geometryParameters = geos.str(); - Mesh E = series.iterations[0].meshes["E"]; + Mesh E = series.writeIterations()[0].meshes["E"]; E.setGeometry(Mesh::Geometry::thetaMode); E.setGeometryParameters(geometryParameters); E.setDataOrder(Mesh::DataOrder::C); @@ -84,7 +84,10 @@ int main() E_t.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); E_t.storeChunk(E_t_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); - series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[0].close(); /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. @@ -92,5 +95,6 @@ int main() * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. 
*/ + series.close(); return 0; } diff --git a/examples/3a_write_thetaMode_serial.py b/examples/3a_write_thetaMode_serial.py index e5c4419505..ec81435558 100644 --- a/examples/3a_write_thetaMode_serial.py +++ b/examples/3a_write_thetaMode_serial.py @@ -30,7 +30,7 @@ geometry_parameters = "m={0};imag=+".format(num_modes) - E = series.iterations[0].meshes["E"] + E = series.write_iterations()[0].meshes["E"] E.geometry = io.Geometry.thetaMode E.geometry_parameters = geometry_parameters E.grid_spacing = [1.0, 1.0] @@ -62,7 +62,10 @@ E_t.reset_dataset(io.Dataset(E_t_data.dtype, E_t_data.shape)) E_t.store_chunk(E_t_data) - series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + series.write_iterations()[0].close() # The files in 'series' are still open until the series is closed, at which # time it cleanly flushes and closes all open file handles. diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp index 7cd424ee2a..d4be87a0fc 100644 --- a/examples/3b_write_resizable_particles.cpp +++ b/examples/3b_write_resizable_particles.cpp @@ -32,7 +32,8 @@ int main() Series series = Series("../samples/3b_write_resizable_particles.h5", Access::CREATE); - ParticleSpecies electrons = series.iterations[0].particles["electrons"]; + ParticleSpecies electrons = + series.writeIterations()[0].particles["electrons"]; // our initial data to write std::vector x{0., 1., 2., 3., 4.}; @@ -78,8 +79,14 @@ int main() rc_xo.resetDataset(dataset); rc_yo.resetDataset(dataset); - // after this call, the provided data buffers can be used again or deleted - series.flush(); + // Attributable::seriesFlush() can be used alternatively if the Series + // handle is not currently in scope + rc_yo.seriesFlush(); + + // The iteration can be closed in order to help free up resources. 
+ // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[0].close(); // rinse and repeat as needed :) @@ -89,5 +96,6 @@ int main() * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/3b_write_resizable_particles.py b/examples/3b_write_resizable_particles.py index 227ce06977..440fac7de6 100644 --- a/examples/3b_write_resizable_particles.py +++ b/examples/3b_write_resizable_particles.py @@ -60,8 +60,11 @@ rc_xo.reset_dataset(dataset) rc_yo.reset_dataset(dataset) + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. # after this call, the provided data buffers can be used again or deleted - series.flush() + series.write_iterations()[0].close() # rinse and repeat as needed :) diff --git a/examples/4_read_parallel.cpp b/examples/4_read_parallel.cpp index 75f19f4be1..477177cec6 100644 --- a/examples/4_read_parallel.cpp +++ b/examples/4_read_parallel.cpp @@ -55,7 +55,11 @@ int main(int argc, char *argv[]) cout << "Queued the loading of a single chunk per MPI rank from " "disk, " "ready to execute\n"; - series.flush(); + + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.iterations[100].close(); if (0 == mpi_rank) cout << "Chunks have been read from disk\n"; @@ -78,6 +82,12 @@ int main(int argc, char *argv[]) // this barrier is not necessary but structures the example output MPI_Barrier(MPI_COMM_WORLD); } + // The files in 'series' are still open until the series is closed, at which + // time it cleanly flushes and closes all open file handles. 
+ // One can close the object explicitly to trigger this. + // Alternatively, this will automatically happen once the garbage collector + // claims (every copy of) the series object. + // In any case, this must happen before MPI_Finalize() is called series.close(); // openPMD::Series MUST be destructed or closed at this point diff --git a/examples/4_read_parallel.py b/examples/4_read_parallel.py index f30d6ffa2d..b36625798f 100644 --- a/examples/4_read_parallel.py +++ b/examples/4_read_parallel.py @@ -36,7 +36,11 @@ if 0 == comm.rank: print("Queued the loading of a single chunk per MPI rank from disk, " "ready to execute") - series.flush() + + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + series.iterations[100].close() if 0 == comm.rank: print("Chunks have been read from disk") diff --git a/examples/5_write_parallel.cpp b/examples/5_write_parallel.cpp index 666de4a3cd..bfe737d9be 100644 --- a/examples/5_write_parallel.cpp +++ b/examples/5_write_parallel.cpp @@ -54,6 +54,10 @@ int main(int argc, char *argv[]) cout << "Created an empty series in parallel with " << mpi_size << " MPI ranks\n"; + // In parallel contexts, it's important to explicitly open iterations. + // This is done automatically when using `Series::writeIterations()`, + // or in read mode `Series::readIterations()`. + series.iterations[1].open(); MeshRecordComponent mymesh = series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; @@ -80,10 +84,20 @@ int main(int argc, char *argv[]) "contribution, " "ready to write content to disk\n"; - series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. 
+ series.iterations[1].close();
+ if (0 == mpi_rank) cout << "Dataset content has been fully written to disk\n"; + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ series.close(); // openPMD::Series MUST be destructed or closed at this point diff --git a/examples/5_write_parallel.py b/examples/5_write_parallel.py index d925251834..c956b6eed1 100644 --- a/examples/5_write_parallel.py +++ b/examples/5_write_parallel.py @@ -37,6 +37,10 @@ print("Created an empty series in parallel with {} MPI ranks".format( comm.size)) + # In parallel contexts, it's important to explicitly open iterations. + # This is done automatically when using `Series.write_iterations()`, + # or in read mode `Series.read_iterations()`. + series.iterations[1].open() mymesh = series.iterations[1]. \ meshes["mymesh"][io.Mesh_Record_Component.SCALAR] @@ -59,7 +63,11 @@ print("Registered a single chunk per MPI rank containing its " "contribution, ready to write content to disk") - series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. 
+ series.iterations[1].close() + if 0 == comm.rank: print("Dataset content has been fully written to disk") diff --git a/examples/6_dump_filebased_series.cpp b/examples/6_dump_filebased_series.cpp index 1b2964a5d4..7e67233f15 100644 --- a/examples/6_dump_filebased_series.cpp +++ b/examples/6_dump_filebased_series.cpp @@ -32,43 +32,56 @@ int main() std::cout << '\n'; std::cout << "Read iterations in basePath:\n"; + /* + * A classical loop over the C++-style container + * Direct access to o.iterations allows random-access into all data. + */ for (auto const &i : o.iterations) std::cout << '\t' << i.first << '\n'; std::cout << '\n'; - for (auto const &i : o.iterations) + /* + * A loop that uses o.readIterations(). + * This loop is MPI collective and will open and close iterations + * automatically (closing manually is still recommended before long compute + * operations in order to release data as soon as possible). + * An iteration once closed can not (yet) be re-opened. + */ + for (auto i : o.readIterations()) { - std::cout << "Read attributes in iteration " << i.first << ":\n"; - for (auto const &val : i.second.attributes()) + std::cout << "Read attributes in iteration " << i.iterationIndex + << ":\n"; + for (auto const &val : i.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::cout << i.first << ".time - " << i.second.time() << '\n' - << i.first << ".dt - " << i.second.dt() << '\n' - << i.first << ".timeUnitSI - " << i.second.timeUnitSI() + std::cout << i.iterationIndex << ".time - " << i.time() << '\n' + << i.iterationIndex << ".dt - " << i.dt() << '\n' + << i.iterationIndex << ".timeUnitSI - " << i.timeUnitSI() << '\n' << '\n'; - std::cout << "Read attributes in meshesPath in iteration " << i.first - << ":\n"; - for (auto const &a : i.second.meshes.attributes()) + std::cout << "Read attributes in meshesPath in iteration " + << i.iterationIndex << ":\n"; + for (auto const &a : i.meshes.attributes()) std::cout << '\t' << a << '\n'; 
std::cout << '\n'; - std::cout << "Read meshes in iteration " << i.first << ":\n"; - for (auto const &m : i.second.meshes) + std::cout << "Read meshes in iteration " << i.iterationIndex << ":\n"; + for (auto const &m : i.meshes) std::cout << '\t' << m.first << '\n'; std::cout << '\n'; - for (auto const &m : i.second.meshes) + for (auto const &m : i.meshes) { std::cout << "Read attributes for mesh " << m.first - << " in iteration " << i.first << ":\n"; + << " in iteration " << i.iterationIndex << ":\n"; for (auto const &val : m.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::string meshPrefix = std::to_string(i.first) + '.' + m.first; + std::string meshPrefix = + std::to_string(i.iterationIndex) + '.' + m.first; std::string axisLabels = ""; for (auto const &val : m.second.axisLabels()) axisLabels += val + ", "; @@ -110,8 +123,8 @@ int main() std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::string componentPrefix = - std::to_string(i.first) + '.' + m.first + '.' + rc.first; + std::string componentPrefix = std::to_string(i.iterationIndex) + + '.' + m.first + '.' 
+ rc.first; std::string position = ""; for (auto const &val : rc.second.position()) position += std::to_string(val) + ", "; @@ -123,27 +136,29 @@ int main() } } - std::cout << "Read attributes in particlesPath in iteration " << i.first - << ":\n"; - for (auto const &a : i.second.particles.attributes()) + std::cout << "Read attributes in particlesPath in iteration " + << i.iterationIndex << ":\n"; + for (auto const &a : i.particles.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; - std::cout << "Read particleSpecies in iteration " << i.first << ":\n"; - for (auto const &val : i.second.particles) + std::cout << "Read particleSpecies in iteration " << i.iterationIndex + << ":\n"; + for (auto const &val : i.particles) std::cout << '\t' << val.first << '\n'; std::cout << '\n'; - for (auto const &p : i.second.particles) + for (auto const &p : i.particles) { std::cout << "Read attributes for particle species " << p.first - << " in iteration " << i.first << ":\n"; + << " in iteration " << i.iterationIndex << ":\n"; for (auto const &val : p.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; std::cout << "Read particle records for particle species " - << p.first << " in iteration " << i.first << ":\n"; + << p.first << " in iteration " << i.iterationIndex + << ":\n"; for (auto const &r : p.second) std::cout << '\t' << r.first << '\n'; std::cout << '\n'; @@ -167,6 +182,13 @@ int main() } } } + + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + // Since we're using `Series::readIterations()`, this would also happen + // automatically upon the next iteration. 
+ i.close(); } /* The files in 'o' are still open until the object is destroyed, on diff --git a/examples/7_extended_write_serial.cpp b/examples/7_extended_write_serial.cpp index da866eac65..bfb64e1fff 100644 --- a/examples/7_extended_write_serial.cpp +++ b/examples/7_extended_write_serial.cpp @@ -226,6 +226,11 @@ int main() // constant records mesh["y"].makeConstant(constant_value); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + cur_it.close(); + /* The files in 'f' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is @@ -233,6 +238,7 @@ int main() * effect as calling the destructor, including the release of file * handles. */ + f.close(); } // namespace ; return 0; diff --git a/examples/7_extended_write_serial.py b/examples/7_extended_write_serial.py index 884311f92d..84ca5002db 100755 --- a/examples/7_extended_write_serial.py +++ b/examples/7_extended_write_serial.py @@ -206,6 +206,11 @@ # constant records mesh["y"].make_constant(constant_value) + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + cur_it.close() + # The files in 'f' are still open until the series is closed, at which # time it cleanly flushes and closes all open file handles. # One can close the object explicitly to trigger this. diff --git a/examples/9_particle_write_serial.py b/examples/9_particle_write_serial.py index 5dc842918e..aebd266528 100644 --- a/examples/9_particle_write_serial.py +++ b/examples/9_particle_write_serial.py @@ -68,5 +68,10 @@ # files) f.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. 
+ # An iteration once closed cannot (yet) be reopened. + cur_it.close() + # now the file is closed f.close() diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 134abe0519..3099af7025 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -44,9 +44,16 @@ class Series; * not possible once it has been closed. * */ + +namespace internal +{ + class SeriesData; +} + class WriteIterations { friend class Series; + friend class internal::SeriesData; private: using IterationsContainer_t = @@ -62,6 +69,7 @@ class WriteIterations struct SharedResources { IterationsContainer_t iterations; + //! Index of the last opened iteration std::optional currentlyOpen; SharedResources(IterationsContainer_t); @@ -70,8 +78,11 @@ class WriteIterations WriteIterations(IterationsContainer_t); explicit WriteIterations() = default; - //! Index of the last opened iteration - std::shared_ptr shared; + // std::optional so that a single instance is able to close this without + // needing to wait for all instances to deallocate + std::shared_ptr> shared; + + void close(); public: mapped_type &operator[](key_type const &key); diff --git a/src/Series.cpp b/src/Series.cpp index 6fe3f53d02..de667d41fe 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -2235,7 +2235,10 @@ namespace internal void SeriesData::close() { // WriteIterations gets the first shot at flushing - this->m_writeIterations = std::optional(); + if (this->m_writeIterations.has_value()) + { + this->m_writeIterations.value().close(); + } /* * Scenario: A user calls `Series::flush()` but does not check for * thrown exceptions. 
The exception will propagate further up, @@ -2251,10 +2254,6 @@ namespace internal impl.flush(); impl.flushStep(/* doFlush = */ true); } - if (m_writeIterations.has_value()) - { - m_writeIterations = std::optional(); - } // Not strictly necessary, but clear the map of iterations // This releases the openPMD hierarchy iterations.container().clear(); diff --git a/src/WriteIterations.cpp b/src/WriteIterations.cpp index 872342dfbe..2bc34f0416 100644 --- a/src/WriteIterations.cpp +++ b/src/WriteIterations.cpp @@ -20,6 +20,7 @@ */ #include "openPMD/WriteIterations.hpp" +#include "openPMD/Error.hpp" #include "openPMD/Series.hpp" @@ -45,9 +46,15 @@ WriteIterations::SharedResources::~SharedResources() } WriteIterations::WriteIterations(IterationsContainer_t iterations) - : shared{std::make_shared(std::move(iterations))} + : shared{std::make_shared>( + std::move(iterations))} {} +void WriteIterations::close() +{ + *shared = std::nullopt; +} + WriteIterations::mapped_type &WriteIterations::operator[](key_type const &key) { // make a copy @@ -56,17 +63,23 @@ WriteIterations::mapped_type &WriteIterations::operator[](key_type const &key) } WriteIterations::mapped_type &WriteIterations::operator[](key_type &&key) { - if (shared->currentlyOpen.has_value()) + if (!shared || !shared->has_value()) + { + throw error::WrongAPIUsage( + "[WriteIterations] Trying to access after closing Series."); + } + auto &s = shared->value(); + if (s.currentlyOpen.has_value()) { - auto lastIterationIndex = shared->currentlyOpen.value(); - auto &lastIteration = shared->iterations.at(lastIterationIndex); + auto lastIterationIndex = s.currentlyOpen.value(); + auto &lastIteration = s.iterations.at(lastIterationIndex); if (lastIterationIndex != key && !lastIteration.closed()) { lastIteration.close(); } } - shared->currentlyOpen = key; - auto &res = shared->iterations[std::move(key)]; + s.currentlyOpen = key; + auto &res = s.iterations[std::move(key)]; if (res.getStepStatus() == StepStatus::NoStep) { 
res.beginStep(/* reread = */ false); diff --git a/src/binding/python/Attributable.cpp b/src/binding/python/Attributable.cpp index b60bd8cb48..61f1376b94 100644 --- a/src/binding/python/Attributable.cpp +++ b/src/binding/python/Attributable.cpp @@ -380,7 +380,7 @@ void init_Attributable(py::module &m) "attributes", [](Attributable &attr) { return attr.attributes(); }, // ref + keepalive - py::return_value_policy::reference_internal) + py::return_value_policy::move) // C++ pass-through API: Setter // note that the order of overloads is important! diff --git a/src/binding/python/Container.cpp b/src/binding/python/Container.cpp index 137260d9f1..28bda651ff 100644 --- a/src/binding/python/Container.cpp +++ b/src/binding/python/Container.cpp @@ -110,8 +110,10 @@ bind_container(py::handle scope, std::string const &name, Args &&...args) cl.def( "__getitem__", [](Map &m, KeyType const &k) -> MappedType & { return m[k]; }, - // ref + keepalive - py::return_value_policy::reference_internal); + // copy + keepalive + // All objects in the openPMD object model are handles, so using a copy + // is safer and still performant. 
+ py::return_value_policy::copy); // Assignment provided only if the type is copyable py::detail::map_assignment(cl); diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index 98f0f7c87f..0ac290f7ff 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -74,13 +74,13 @@ void init_Iteration(py::module &m) .def_readwrite( "meshes", &Iteration::meshes, - py::return_value_policy::reference, + py::return_value_policy::copy, // garbage collection: return value must be freed before Iteration py::keep_alive<1, 0>()) .def_readwrite( "particles", &Iteration::particles, - py::return_value_policy::reference, + py::return_value_policy::copy, // garbage collection: return value must be freed before Iteration py::keep_alive<1, 0>()); } diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index 3dbaaa034c..cdff83fd43 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -61,8 +61,8 @@ void init_Series(py::module &m) [](WriteIterations writeIterations, Series::IterationIndex_t key) { return writeIterations[key]; }, - // keep container alive while iterator exists - py::keep_alive<0, 1>()); + // copy + keepalive + py::return_value_policy::copy); py::class_(m, "IndexedIteration") .def_readonly("iteration_index", &IndexedIteration::iterationIndex); py::class_(m, "ReadIterations") @@ -224,6 +224,11 @@ this method. .def_readwrite( "iterations", &Series::iterations, + /* + * Need to keep reference return policy here for now to further + * support legacy `del series` workflows that works despite children + * still being alive. + */ py::return_value_policy::reference, // garbage collection: return value must be freed before Series py::keep_alive<1, 0>())