Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 39 additions & 0 deletions examples/csv_output.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# %% [markdown]
# # Output to csv
#
# Read a PROCESS MFILE and write a user-selected set of its values to a csv.
#
# Input files:
# - MFILE.DAT as output from PROCESS
# - .json variable list as defined by user (defaults to local `mfile_to_csv_vars.json`)
#
# Instructions:
# - from command line: `python mfile_to_csv.py -f </path/to/mfile.dat> -v </path/to/varfile.json>`
# - from this Jupyter notebook: run the cell below
#
# Output file:
# - .csv will be saved to the directory of the input file

# %%
from pathlib import Path
from process.io import mfile_to_csv

# Project root, used only to build the two default paths below; it is
# irrelevant if you substitute your own absolute paths.
project_root = Path.cwd().parent

# Target MFILE: point this at your own path/to/MFILE.DAT if required:
mfile_path = project_root / "examples/csv_output_large_tokamak_MFILE.DAT"

# Variable list: either point this at your own path/to/file.json target,
# or add the variables you need to the default file referenced here:
varfile_path = project_root / "process/io/mfile_to_csv_vars.json"

# For each listed variable found in the MFILE, the variable name, description
# and value are written to the output csv; any listed variable missing from
# the MFILE is skipped.
mfile_to_csv.main(args=["-f", str(mfile_path), "-v", str(varfile_path)])


# %%
169 changes: 121 additions & 48 deletions examples/examples.ipynb

Large diffs are not rendered by default.

184 changes: 184 additions & 0 deletions examples/examples.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,184 @@
# %% [markdown]
# # Process examples
# A Jupyter notebook to demonstrate usage of the `process` package. This notebook has also been exported as a Python script to `examples.py`.
#
# ## Motivation
# Process is moving away from being a runnable package with a single command-line entry-point to an importable package which can be scripted. This notebook is a good way of demonstrating the functionality of the package, and could provide a better way of working for modellers, who may wish to create their own scripts or notebooks for different tasks.
#
# ## Setup
# Currently the various classes and "utilities" scripts in Process have different interfaces and read and write files in differing manners and in locations that can't be easily controlled. To partially avoid the headaches associated with this, the code cell below defines a function to allow each example to be run in a temporary directory, much like a test. Input files are copied to this temporary directory and outputs contained there before the directory is removed.
#
# This temporary directory function is only required for running the examples below and removing any modifications afterwards, not in regular use of Process where the outputs will want to be preserved. Further development work will unify these disparate ways of running Process into a common Pythonic form.

# %%
from pathlib import Path
from tempfile import TemporaryDirectory
from shutil import copy
from process.main import SingleRun
from process.io import plot_proc
from pdf2image import convert_from_path
from process.main import VaryRun
import os
from process.io import plot_scans
from PIL import Image
from IPython.display import display

# Define project root dir; when running a notebook, the cwd is the dir the notebook is in
PROJ_DIR = Path.cwd().parent


def copy_to_temp_dir(input_rel):
    """Copy an input file to a new temp dir and return its new path.

    The new TemporaryDirectory object is returned to avoid destruction of the
    object, which results in deletion of the directory prematurely. This way
    the cleanup() method can be used to delete the directory when required.
    :param input_rel: file path relative to project root dir
    :type input_rel: str
    :raises FileNotFoundError: if the input file doesn't exist
    :return: temporary dir and absolute path to file in temp dir
    :rtype: (TemporaryDirectory, pathlib.Path)
    """
    # Create temporary dir to contain the run's outputs
    temp_dir = TemporaryDirectory()
    temp_dir_path = Path(temp_dir.name)

    # Define absolute path for input file
    input_rel_path = Path(input_rel)
    input_abs_path = PROJ_DIR / input_rel_path

    # Explicit check rather than assert: asserts are stripped when Python
    # runs with -O, which would silently skip this validation. Include the
    # offending path in the message to ease debugging.
    if not input_abs_path.exists():
        raise FileNotFoundError(f"Input file doesn't exist: {input_abs_path}")

    # Copy input file to temp dir
    copy(input_abs_path, temp_dir_path)
    temp_input_path = temp_dir_path / input_abs_path.name

    return temp_dir, temp_input_path


# %% [markdown]
#
# ## Basic run of Process
# Run Process on an input file using the `SingleRun` class. This outputs an `MFILE.DAT` and an `OUT.DAT`.

# %%


# Define input file name relative to project dir, then copy to temp dir
input_rel = "tests/integration/data/large_tokamak_IN.DAT"
temp_dir, temp_input_path = copy_to_temp_dir(input_rel)

# Run process on an input file in a temporary directory
# NOTE: single_run and temp_dir are reused by the following cells; temp_dir
# must stay in scope until its cleanup() is called, or the directory (and the
# run's outputs) would be deleted prematurely
single_run = SingleRun(str(temp_input_path))
single_run.run()

# %% [markdown]
# ## Plot summary
# Create a summary PDF of the generated `MFILE.DAT` using `plot_proc`.

# %%


# Create a summary PDF
plot_proc.main(args=["-f", str(single_run.mfile_path)])

# Convert PDF to PNG in order to display in notebook
# NOTE(review): "SUMMARY.pdf" is appended directly to the MFILE path (no
# separator), giving e.g. "...MFILE.DATSUMMARY.pdf" — presumably this matches
# plot_proc's own output naming; confirm against plot_proc
summary_pdf = str(single_run.mfile_path) + "SUMMARY.pdf"
pages = convert_from_path(summary_pdf)
for page_no, page_image in enumerate(pages):
    # Save each PDF page as examples/plot_proc_<n>.png (1-based numbering)
    png_path = PROJ_DIR / f"examples/plot_proc_{page_no + 1}.png"
    page_image.save(png_path, "PNG")


# %% [markdown]
# `plot_proc`'s PDF output.

# %%
# Relative paths here assume the cwd is the examples/ dir (i.e. the dir the
# notebook is run from), where the PNGs above were saved
img1 = Image.open("plot_proc_1.png")
display(img1)
img2 = Image.open("plot_proc_2.png")
display(img2)

# %%
# Delete temp dir
temp_dir.cleanup()


# %% [markdown]
# ## View key output variables
# Run the large tokamak scenario using `SingleRun` to set some values on the `CostModel` instance and then print them.

# %%
# Define input file name relative to project dir
input_rel = "tests/integration/data/large_tokamak_IN.DAT"
temp_dir, temp_input_path = copy_to_temp_dir(input_rel)

# Run process on an input file
single_run = SingleRun(str(temp_input_path))
single_run.run()

# %%
# Print some values on the CostModel instance, set during the run above
# (c226 and c24 are cost account values, printed here in M$)
print(f"Heat transport system: {single_run.models.costs.c226:.3e} M$")
print(f"Electrical plant equipment: {single_run.models.costs.c24:.3e} M$")


# %%
# Clean up
temp_dir.cleanup()


# %% [markdown]
# ## VaryRun
# Vary iteration parameters until a feasible solution is found, using the `VaryRun` class.

# %%


input_rel = "tests/integration/data/run_process.conf"
temp_dir, temp_input_path = copy_to_temp_dir(input_rel)

# .conf file relies on a separate input file too; copy this as well
# TODO This double input file requirement needs to be removed
input_rel_2 = "tests/integration/data/large_tokamak_IN.DAT"
copy(PROJ_DIR / input_rel_2, temp_dir.name)

# VaryRun uses process_config.py, which changes the current working directory
# via os.chdir() to the temporary dir. Apart from being bad practice, once the
# temp dir is removed, this causes Path.cwd() (as used in plot_scans.py) to
# throw an exception when trying to return the (now deleted) CWD. Hence it
# needs to be set back after VaryRun()
# TODO Remove the os.chdir() from VaryRun
cwd = Path.cwd()

# Run VaryRun on the copied .conf file; its outputs land in the temp dir
vary_run = VaryRun(str(temp_input_path))
# Restore the working directory that VaryRun changed (see note above)
os.chdir(cwd)

temp_dir.cleanup()


# %% [markdown]
# ## Plot scan
# Plot a scanned MFILE.

# %%


# Copy the pre-computed scan result into a temporary working directory
scan_temp_dir, scan_mfile_path = copy_to_temp_dir(
    "tests/integration/data/scan_MFILE.DAT"
)

# Plot thwcndut against the scan variable; write the figure alongside the
# copied MFILE in the temporary directory
scan_args = [
    "-f",
    str(scan_mfile_path),
    "-yv",
    "thwcndut",
    "--outputdir",
    str(scan_mfile_path.parent),
]
plot_scans.main(args=scan_args)

scan_temp_dir.cleanup()
142 changes: 142 additions & 0 deletions examples/plot_solutions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
# %% [markdown]
# # `plot_solutions` Solution Comparison Tool
#
# This tool plots the solution vectors (i.e. final values of optimisation parameters) for different runs of PROCESS. This allows visual comparisons of different solution points.
#
# It can use different intra-solution optimisation parameter normalisations (e.g. initial value, parameter range) and inter-solution normalisations (e.g. normalise to a certain solution).
#
# ### Known Limitations
#
# - The solution vectors (optimisation parameter values at the solution) currently plotted are normalised to the initial point (from the `IN.DAT`) of each solution: each element of the vector is the $x_{final}/x_{initial}$, the `xcmxxx` values in the `MFILE.DAT`. This allows all optimisation parameters to be plotted on the same axis, showing the relative changes from their initial values across multiple solutions.
# - Solutions being plotted together must also have the same optimisation parameters.
# - The solutions plotted in this example are fictitious.

# %%
from process.io.plot_solutions import RunMetadata, plot_mfile_solutions
from pathlib import Path

# %% [markdown]
# ## Plot single solution
#
# Plot a single solution, showing optimisation parameters normalised to their initial values.

# %%
# Directory containing the example MFILEs, relative to the cwd (assumed to be
# the dir this notebook/script is run from); data_dir is reused by all cells
data_dir = Path("data")
# Each RunMetadata pairs an MFILE path with a tag used to label that run
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
]

# Figure and dataframe returned for optional further modification
fig1, df1 = plot_mfile_solutions(
    runs_metadata=runs_metadata,
    plot_title="Large tokamak solution 1",
)
# Display the solution dataframe as this cell's notebook output
df1

# %% [markdown]
# ## Plot two solutions
#
# Plot two MFILEs together, showing normalised values of the optimisation parameters at the solution points, as well as the objective function values.

# %%
# NOTE: this runs_metadata list is also reused by the next cell
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
]

fig2, df2 = plot_mfile_solutions(
    runs_metadata=runs_metadata,
    plot_title="2 large tokamak solutions",
)
df2

# %% [markdown]
# ## Plot one solution normalised to another
#
# Normalised differences, relative to a given solution, can also be plotted:

# %%
# Reuses runs_metadata from the previous cell; normalising_tag selects which
# run the differences are taken relative to
fig3, df3 = plot_mfile_solutions(
    runs_metadata=runs_metadata,
    plot_title="Large tokamak 2 solution, relative to large tokamak 1",
    normalising_tag="large tokamak 1",
)
df3

# %% [markdown]
# ## Plot multiple solutions normalised by one
#
# Plot two MFILEs, normalised by a third MFILE.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
]

# Solutions 2 and 3 are plotted normalised by solution 1 (the normalising_tag run)
fig4, df4 = plot_mfile_solutions(
    runs_metadata,
    "2 large tokamak solutions, relative to large tokamak 1",
    normalising_tag="large tokamak 1",
)
df4

# %% [markdown]
# ## RMS Errors
#
# Plot RMS errors of multiple solutions relative to a reference solution.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
    RunMetadata(data_dir / "large_tokamak_4_MFILE.DAT", "large tokamak 4"),
]

# rmse=True additionally plots RMS errors of each solution relative to the
# normalising run (large tokamak 1)
fig5, df5 = plot_mfile_solutions(
    runs_metadata,
    "3 large tokamak solutions with RMS errors normalised to large tokamak 1",
    normalising_tag="large tokamak 1",
    rmse=True,
)
df5

# %% [markdown]
# ## Solutions normalised by range
#
# Use `nitvar` values instead; the solution optimisation parameters are normalised to the range of their upper and lower bounds.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
    RunMetadata(data_dir / "large_tokamak_4_MFILE.DAT", "large tokamak 4"),
]

# normalisation_type="range" normalises each parameter to its bounds (the
# MFILE's nitvar values) rather than to its initial value
fig6, df6 = plot_mfile_solutions(
    runs_metadata,
    "4 large tokamak solutions normalised to the range of the optimisation parameters",
    normalisation_type="range",
)
df6

# %% [markdown]
# ## Actual values
#
# Plot the actual (un-normalised) optimisation parameter values at each solution by passing `normalisation_type=None`.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
    RunMetadata(data_dir / "large_tokamak_4_MFILE.DAT", "large tokamak 4"),
]

# normalisation_type=None plots raw solution values, so the plot title must
# not claim range normalisation (the previous title was copied unchanged from
# the range-normalised example above)
fig7, df7 = plot_mfile_solutions(
    runs_metadata,
    "4 large tokamak solutions, actual values",
    normalisation_type=None,
)
df7
Loading