Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1,801 changes: 1,801 additions & 0 deletions examples/data/large_tokamak_1_MFILE.DAT

Large diffs are not rendered by default.

1,801 changes: 1,801 additions & 0 deletions examples/data/large_tokamak_2_MFILE.DAT

Large diffs are not rendered by default.

1,801 changes: 1,801 additions & 0 deletions examples/data/large_tokamak_3_MFILE.DAT

Large diffs are not rendered by default.

1,801 changes: 1,801 additions & 0 deletions examples/data/large_tokamak_4_MFILE.DAT

Large diffs are not rendered by default.

1,356 changes: 1,356 additions & 0 deletions examples/plot_solutions.ipynb

Large diffs are not rendered by default.

142 changes: 142 additions & 0 deletions examples/plot_solutions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
# %% [markdown]
# # `plot_solutions` Solution Comparison Tool
#
# This tool plots the solution vectors (i.e. final values of optimisation parameters) for different runs of PROCESS. This allows visual comparisons of different solution points.
#
# It can use different intra-solution optimisation parameter normalisations (e.g. initial value, parameter range) and inter-solution normalisations (e.g. normalise to a certain solution).
#
# ### Known Limitations
#
# - The solution vectors (optimisation parameter values at the solution) currently plotted are normalised to the initial point (from the `IN.DAT`) of each solution: each element of the vector is $x_{final}/x_{initial}$, the `xcmxxx` values in the `MFILE.DAT`. This allows all optimisation parameters to be plotted on the same axis, showing the relative changes from their initial values across multiple solutions.
# - Solutions being plotted together must also have the same optimisation parameters.
# - The solutions plotted in this example are fictitious.

# %%
from pathlib import Path

from process.io.plot_solutions import RunMetadata, plot_mfile_solutions

# %% [markdown]
# ## Plot single solution
#
# Plot a single solution, showing optimisation parameters normalised to their initial values.

# %%
# Directory containing the example MFILEs, relative to this script
data_dir = Path("data")
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
]

# Figure and dataframe returned for optional further modification
fig1, df1 = plot_mfile_solutions(
    runs_metadata=runs_metadata,
    plot_title="Large tokamak solution 1",
)
df1

# %% [markdown]
# ## Plot two solutions
#
# Plot two MFILEs together, showing normalised values of the optimisation parameters at the solution points, as well as the objective function values.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
]

fig2, df2 = plot_mfile_solutions(
    runs_metadata=runs_metadata,
    plot_title="2 large tokamak solutions",
)
df2

# %% [markdown]
# ## Plot one solution normalised to another
#
# Normalised differences, relative to a given solution, can also be plotted:

# %%
fig3, df3 = plot_mfile_solutions(
    runs_metadata=runs_metadata,
    plot_title="Large tokamak 2 solution, relative to large tokamak 1",
    normalising_tag="large tokamak 1",
)
df3

# %% [markdown]
# ## Plot multiple solutions normalised by one
#
# Plot two MFILEs, normalised by a third MFILE.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
]

fig4, df4 = plot_mfile_solutions(
    runs_metadata,
    "2 large tokamak solutions, relative to large tokamak 1",
    normalising_tag="large tokamak 1",
)
df4

# %% [markdown]
# ## RMS Errors
#
# Plot RMS errors of multiple solutions relative to a reference solution.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
    RunMetadata(data_dir / "large_tokamak_4_MFILE.DAT", "large tokamak 4"),
]

fig5, df5 = plot_mfile_solutions(
    runs_metadata,
    "3 large tokamak solutions with RMS errors normalised to large tokamak 1",
    normalising_tag="large tokamak 1",
    rmse=True,
)
df5

# %% [markdown]
# ## Solutions normalised by range
#
# Use `nitvar` values instead; the solution optimisation parameters are normalised to the range of their upper and lower bounds.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
    RunMetadata(data_dir / "large_tokamak_4_MFILE.DAT", "large tokamak 4"),
]

fig6, df6 = plot_mfile_solutions(
    runs_metadata,
    "4 large tokamak solutions normalised to the range of the optimisation parameters",
    normalisation_type="range",
)
df6

# %% [markdown]
# ## Actual values
#
# Plot the raw (unnormalised) optimisation parameter values at the solutions.

# %%
runs_metadata = [
    RunMetadata(data_dir / "large_tokamak_1_MFILE.DAT", "large tokamak 1"),
    RunMetadata(data_dir / "large_tokamak_2_MFILE.DAT", "large tokamak 2"),
    RunMetadata(data_dir / "large_tokamak_3_MFILE.DAT", "large tokamak 3"),
    RunMetadata(data_dir / "large_tokamak_4_MFILE.DAT", "large tokamak 4"),
]

# normalisation_type=None plots actual values: the previous cell's title was
# mistakenly reused here, so it is corrected to describe this plot
fig7, df7 = plot_mfile_solutions(
    runs_metadata,
    "4 large tokamak solutions, actual values",
    normalisation_type=None,
)
df7
46 changes: 27 additions & 19 deletions process/io/mfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,10 @@

from collections import OrderedDict
import logging
from sys import stderr
import json
from typing import List, Union

LOG = logging.getLogger("mfile")
logger = logging.getLogger(__name__)


class MFileVariable(dict):
Expand Down Expand Up @@ -60,7 +60,7 @@ def __init__(
self.var_flag = var_flag
self.latest_scan = 0
super().__init__(*args, **kwargs)
LOG.debug(
logger.debug(
"Initialising variable '{}': {}".format(self.var_name, self.var_description)
)

Expand All @@ -85,7 +85,7 @@ def set_scan(self, scan_number, scan_value):
self["scan{:02}".format(scan_number)] = scan_value
if scan_number > self.latest_scan:
self.latest_scan = scan_number
LOG.debug(
logger.debug(
"Scan {} for variable '{}' == {}".format(
scan_number, self.var_name, scan_value
)
Expand Down Expand Up @@ -146,7 +146,7 @@ def __init__(self, item):
self.get_number_of_scans = self.get_error

def get_error(self, *args, **kwargs):
LOG.error("Key '{}' not in MFILE. KeyError! Check MFILE".format(self.item))
logger.error("Key '{}' not in MFILE. KeyError! Check MFILE".format(self.item))

if self.item == "error_status":
# Missing error_status key means Process exited prematurely, usually
Expand Down Expand Up @@ -230,7 +230,7 @@ def __repr__(self):
class MFile(object):
def __init__(self, filename="MFILE.DAT"):
"""Class object to store the MFile Objects"""
LOG.info("Creating MFile class for file '{}'".format(filename))
logger.info("Creating MFile class for file '{}'".format(filename))
self.filename = filename
# self.data = MFileDataDictionary()
# self.data = OrderedDict()
Expand All @@ -241,9 +241,9 @@ def __init__(self, filename="MFILE.DAT"):
self.mfile_modules["Misc"] = list()
self.current_module = "Misc"
if filename is not None:
LOG.info("Opening file '{}'".format(self.filename))
logger.info("Opening file '{}'".format(self.filename))
self.open_mfile()
LOG.info("Parsing file '{}'".format(self.filename))
logger.info("Parsing file '{}'".format(self.filename))
self.parse_mfile()

def open_mfile(self):
Expand Down Expand Up @@ -296,7 +296,8 @@ class or create a new class if it is the first instance of it.
if "runtitle" in var_name:
var_value = " ".join(line[2:])
else:
var_value = sort_value(line[2])
# Pass all value "words"
var_value = sort_value(line[2:])
var_unit = get_unit(var_des)
if len(line) >= 4:
var_flag = line[3]
Expand Down Expand Up @@ -374,18 +375,25 @@ def write_to_json(self, keys_to_write={}, scan=-1, verbose=False):
json.dump(dict_to_write, fp, indent=4)


def sort_value(value_words: List[str]) -> Union[str, float]:
    """Parse the value section of a line in an MFILE.

    The value of a variable in an MFILE may span several whitespace-separated
    "words": either a quoted string (possibly containing spaces) or a numeric
    value (where only the first word is significant).

    :param value_words: value of a variable in the MFILE, as a list of words
    :type value_words: List[str]
    :return: de-quoted string joined from all words, or float of the first word
    :rtype: Union[str, float]
    :raises ValueError: if the first word is unquoted but not a valid float
    """
    if '"' in value_words[0]:
        # First word begins a quoted string: rejoin all words into a single
        # string, then strip surrounding whitespace and quote characters
        return " ".join(value_words).strip().strip('"').strip()

    try:
        # Unquoted value: attempt float conversion of the first word only
        return float(value_words[0])
    except ValueError:
        # Log with traceback and re-raise so the caller can handle the
        # malformed MFILE line explicitly (previously this called exit())
        logger.exception(f"Can't parse value in MFILE: {value_words}")
        raise


def sort_brackets(var):
Expand Down
Loading