diff --git a/.gitignore b/.gitignore index 463236ba..d22cb1ca 100644 --- a/.gitignore +++ b/.gitignore @@ -171,3 +171,5 @@ logs/* .ruff_cache/ engibench_studies/* +workshops/dcc26/artifacts/* +workshops/dcc26/optional_artifacts/* diff --git a/README.md b/README.md index 6b828b5a..9ee93ab0 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,21 @@ We have some colab notebooks that show how to use some of the EngiBench/EngiOpt * [Example easy model (GAN)](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/main/example_easy_model.ipynb) * [Example hard model (Diffusion)](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/main/example_hard_model.ipynb) +## Workshop notebooks + +For the DCC'26 workshop notebook landing page and Colab links, see: + +- `workshops/dcc26/README.md` + +The main live-workshop flow is in: + +- `workshops/dcc26/simple/` + +Extra fill-in exercises and facilitator solutions are in: + +- `workshops/dcc26/participant/` +- `workshops/dcc26/solutions/` + ## Citing diff --git a/engiopt/workshops/__init__.py b/engiopt/workshops/__init__.py new file mode 100644 index 00000000..e0e716fa --- /dev/null +++ b/engiopt/workshops/__init__.py @@ -0,0 +1 @@ +"""Workshop helpers and assets shipped with EngiOpt.""" diff --git a/engiopt/workshops/dcc26/__init__.py b/engiopt/workshops/dcc26/__init__.py new file mode 100644 index 00000000..16345743 --- /dev/null +++ b/engiopt/workshops/dcc26/__init__.py @@ -0,0 +1,3 @@ +"""DCC26 workshop utilities packaged with EngiOpt.""" + +from .notebook_helpers import * # noqa: F403 diff --git a/engiopt/workshops/dcc26/notebook_helpers.py b/engiopt/workshops/dcc26/notebook_helpers.py new file mode 100644 index 00000000..8eb37bf0 --- /dev/null +++ b/engiopt/workshops/dcc26/notebook_helpers.py @@ -0,0 +1,1162 @@ +"""Helper utilities for DCC26 workshop notebooks. 
"""Helper utilities for DCC26 workshop notebooks.

All visualization and boilerplate code lives here so that notebook cells
stay short and import a stable packaged module::

    from engiopt.workshops.dcc26.notebook_helpers import *
"""

from __future__ import annotations

from dataclasses import dataclass
import json
import os
import random
from typing import Any

from IPython.display import display as ipy_display
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import cdist
import torch as th
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset

# ipywidgets is optional: interactive helpers fall back to static galleries
# when it is missing (e.g. plain `jupyter nbconvert` runs).
try:
    import ipywidgets as widgets
except ImportError:
    widgets = None

# Designs with at least this many dims are treated as 2-D images (H, W).
MIN_SHAPE_DIMS = 2
# Pairwise diversity metrics need at least two designs to be meaningful.
MIN_PAIRWISE_COUNT = 2

# ---------------------------------------------------------------------------
# Reproducibility
# ---------------------------------------------------------------------------


def set_global_seed(seed: int) -> None:
    """Set seeds for reproducibility across numpy, python, and torch."""
    random.seed(seed)
    th.manual_seed(seed)
    if th.cuda.is_available():
        th.cuda.manual_seed_all(seed)
    # Trade speed for determinism on CUDA (disables autotuned kernels).
    th.backends.cudnn.deterministic = True
    th.backends.cudnn.benchmark = False


def pick_device() -> th.device:
    """Pick the best available torch device (preference: MPS > CUDA > CPU)."""
    if th.backends.mps.is_available():
        return th.device("mps")
    if th.cuda.is_available():
        return th.device("cuda")
    return th.device("cpu")


# ---------------------------------------------------------------------------
# File I/O
# ---------------------------------------------------------------------------


def ensure_dir(path: str) -> str:
    """Create a directory if needed and return the path."""
    os.makedirs(path, exist_ok=True)
    return path


def save_json(data: Any, path: str) -> None:
    """Serialize JSON-compatible data to disk."""
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2)


def load_json(path: str) -> Any:
    """Load JSON data from disk."""
    with open(path, encoding="utf-8") as f:
        return json.load(f)


# ---------------------------------------------------------------------------
# Condition introspection
# ---------------------------------------------------------------------------


def _is_array_condition(dataset_train, key: str, n_check: int = 3) -> bool:
    """Heuristic: is this condition key an array (image) rather than a scalar?"""
    # Only the first few entries are inspected; assumes the column is
    # homogeneously typed across the dataset.
    for i in range(min(n_check, len(dataset_train[key]))):
        val = dataset_train[key][i]
        arr = np.asarray(val)
        if arr.ndim >= 1 and arr.size > 1:
            return True
    return False


def _split_condition_keys(dataset_train, problem) -> tuple[list[str], list[str]]:
    """Split conditions into (scalar_keys, array_keys)."""
    scalar, array = [], []
    for k in problem.conditions_keys:
        if _is_array_condition(dataset_train, k):
            array.append(k)
        else:
            scalar.append(k)
    return scalar, array


# ---------------------------------------------------------------------------
# NB00 visualizations — uses problem.render() for everything
# ---------------------------------------------------------------------------


def show_design_gallery(
    dataset,
    problem,
    n: int = 8,
    seed: int = 7,
) -> None:
    """Show a grid of random training designs.

    Renders designs directly with imshow in a single figure to avoid
    duplicate-display issues with Jupyter's inline backend.
    """
    rng = np.random.default_rng(seed)
    train = dataset["train"]
    n_total = len(train["optimal_design"])
    n = min(n, n_total)
    ids = rng.choice(n_total, size=n, replace=False)
    scalar_keys, _ = _split_condition_keys(train, problem)

    ncols = min(4, n)
    nrows = (n + ncols - 1) // ncols
    design_shape = problem.design_space.shape
    # Widen columns for wide design grids so cells keep the design's aspect.
    aspect = design_shape[1] / design_shape[0] if len(design_shape) >= MIN_SHAPE_DIMS else 1.0
    fig, axes = plt.subplots(nrows, ncols, figsize=(3.5 * ncols * aspect, 3.5 * nrows))
    axes = np.atleast_2d(axes)

    for i, idx in enumerate(ids):
        ax = axes[i // ncols, i % ncols]
        d = np.array(train["optimal_design"][int(idx)])
        ax.imshow(d, cmap="gray_r", vmin=0, vmax=1)
        ax.axis("off")
        if scalar_keys:
            cond_str = "\n".join(f"{k}={float(train[k][int(idx)]):.2f}" for k in scalar_keys)
            ax.set_title(cond_str, fontsize=8)

    # Hide any unused trailing cells in the grid.
    for i in range(n, nrows * ncols):
        axes[i // ncols, i % ncols].axis("off")

    fig.suptitle(f"Random training designs ({n} samples)", fontsize=13, y=1.01)
    fig.tight_layout()
    plt.show()
    plt.close(fig)


def show_condition_distributions(dataset, problem) -> None:
    """Histogram of each scalar condition key across the training set."""
    train = dataset["train"]
    scalar_keys, _ = _split_condition_keys(train, problem)

    if not scalar_keys:
        print("This problem has no scalar conditions to plot.")
        return

    fig, axes = plt.subplots(1, len(scalar_keys), figsize=(5 * len(scalar_keys), 3.5))
    if len(scalar_keys) == 1:
        # plt.subplots returns a bare Axes for a single panel; normalize.
        axes = [axes]

    for ax, key in zip(axes, scalar_keys):
        values = np.array([float(v) for v in train[key]])
        ax.hist(values, bins=30, edgecolor="white", color="steelblue")
        ax.set_xlabel(key, fontsize=11)
        ax.set_ylabel("count")
        ax.set_title(f"Distribution of '{key}'")

    plt.tight_layout()
    plt.show()
    plt.close(fig)


def show_valid_vs_violated(
    design,
    violations,
    valid_violations,
    *,
    problem=None,
    cmap: str = "gray_r",
) -> None:
    """Side-by-side rendering: valid config vs violated config.

    Uses ``problem.render()`` if provided, otherwise falls back to imshow.

    NOTE(review): both panels render the *same* design — the difference is
    the violation lists computed under different conditions (see the
    fallback suptitle "Same design, different conditions"). Presumably
    ``problem.render`` reflects the problem's currently-set conditions;
    confirm with the caller.
    """
    if problem is not None:
        # Render via problem.render() for universal support
        result1 = problem.render(design)
        result2 = problem.render(design)

        # render() may return (fig, ...) tuples or a bare figure.
        fig1 = result1[0] if isinstance(result1, tuple) else result1
        fig2 = result2[0] if isinstance(result2, tuple) else result2

        if hasattr(fig1, "savefig"):
            n_valid = len(valid_violations)
            fig1.suptitle(
                f"Valid config — {n_valid} violation(s)",
                color="green" if n_valid == 0 else "red",
                fontsize=12,
            )
            plt.show()
            plt.close(fig1)

        if hasattr(fig2, "savefig"):
            n_bad = len(violations)
            fig2.suptitle(
                f"Bad config — {n_bad} violation(s)",
                color="red" if n_bad > 0 else "green",
                fontsize=12,
            )
            plt.show()
            plt.close(fig2)
    else:
        # Fallback: simple imshow for 2D
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
        d = np.array(design)
        ax1.imshow(d, cmap=cmap, vmin=0, vmax=1)
        ax1.set_title(
            f"Valid — {len(valid_violations)} violation(s)", color="green" if len(valid_violations) == 0 else "red"
        )
        ax1.axis("off")
        ax2.imshow(d, cmap=cmap, vmin=0, vmax=1)
        ax2.set_title(f"Bad — {len(violations)} violation(s)", color="red" if len(violations) > 0 else "green")
        ax2.axis("off")
        fig.suptitle("Same design, different conditions", fontsize=13)
        plt.tight_layout()
        plt.show()
        plt.close(fig)


def _build_scalar_condition_sliders(
    scalar_keys: list[str],
    scalar_conds: dict[str, np.ndarray],
) -> dict[str, Any]:
    """Create one range slider per scalar condition."""
    if widgets is None:
        return {}

    sliders: dict[str, Any] = {}
    for key in scalar_keys:
        vals = scalar_conds[key]
        lo, hi = float(np.min(vals)), float(np.max(vals))
        # Guard against a degenerate (constant) condition column: step > 0.
        step = max((hi - lo) / 100.0, 1e-6)
        sliders[key] = widgets.FloatRangeSlider(
            value=[lo, hi],
            min=lo,
            max=hi,
            step=step,
            description=key,
            continuous_update=False,
            layout=widgets.Layout(width="500px"),
            style={"description_width": "130px"},
        )
    return sliders


@dataclass(slots=True)
class FilteredGalleryState:
    """Bundle gallery state so the rendering callback stays compact."""

    all_designs: np.ndarray
    scalar_keys: list[str]
    scalar_conds: dict[str, np.ndarray]
    aspect: float
    n_total: int


def _render_filtered_gallery(
    output,
    state: FilteredGalleryState,
    slider_values: dict[str, tuple[float, float]],
) -> None:
    """Render the gallery subset that matches the current slider values."""
    # AND all per-key range filters into one boolean mask.
    mask = np.ones(state.n_total, dtype=bool)
    for key in state.scalar_keys:
        lo, hi = slider_values[key]
        mask &= (state.scalar_conds[key] >= lo) & (state.scalar_conds[key] <= hi)

    matching_ids = np.where(mask)[0]
    with output:
        output.clear_output(wait=True)
        if len(matching_ids) == 0:
            print("No designs match these conditions. Widen the sliders.")
            return

        n_show = min(8, len(matching_ids))
        # Fixed seed so the shown subset is stable while dragging sliders.
        rng = np.random.default_rng(42)
        show_ids = rng.choice(matching_ids, size=n_show, replace=False)
        ncols = min(4, n_show)
        nrows = (n_show + ncols - 1) // ncols
        fig, axes = plt.subplots(
            nrows,
            ncols,
            figsize=(3.5 * ncols * state.aspect, 3.5 * nrows),
        )
        axes = np.atleast_2d(axes)

        for i, idx in enumerate(show_ids):
            ax = axes[i // ncols, i % ncols]
            ax.imshow(state.all_designs[idx], cmap="gray_r", vmin=0, vmax=1)
            ax.axis("off")
            cond_str = "\n".join(f"{key}={state.scalar_conds[key][idx]:.2f}" for key in state.scalar_keys)
            ax.set_title(cond_str, fontsize=8)

        for i in range(n_show, nrows * ncols):
            axes[i // ncols, i % ncols].axis("off")

        fig.suptitle(
            f"Matching designs: {len(matching_ids)}/{state.n_total}",
            fontsize=12,
        )
        fig.tight_layout()
        plt.show()
        plt.close(fig)


def interactive_condition_explorer(dataset, problem) -> None:
    """Interactive slider widget to explore designs by scalar condition values.

    Falls back to a static gallery if ipywidgets is unavailable or if
    the problem has no scalar conditions.
    """
    train = dataset["train"]
    scalar_keys, _ = _split_condition_keys(train, problem)
    all_designs = np.array(train["optimal_design"])
    n_total = len(all_designs)

    if widgets is None:
        print("ipywidgets not available — showing static gallery instead.")
        show_design_gallery(dataset, problem)
        return

    # Build scalar condition arrays
    scalar_conds = {k: np.array([float(v) for v in train[k]]) for k in scalar_keys}
    sliders = _build_scalar_condition_sliders(scalar_keys, scalar_conds)
    output = widgets.Output()

    design_shape = problem.design_space.shape
    aspect = design_shape[1] / design_shape[0] if len(design_shape) >= MIN_SHAPE_DIMS else 1.0
    state = FilteredGalleryState(
        all_designs=all_designs,
        scalar_keys=scalar_keys,
        scalar_conds=scalar_conds,
        aspect=aspect,
        n_total=n_total,
    )

    if scalar_keys:
        # NOTE(review): the HTML markup inside this label was lost in
        # extraction; the tags below are a reconstruction — confirm upstream.
        title = widgets.HTML("<b>Explore the dataset — drag sliders to filter by condition</b>")
        slider_box = widgets.VBox(list(sliders.values()))

        def _current_slider_values() -> dict[str, tuple[float, float]]:
            return {key: slider.value for key, slider in sliders.items()}

        def _on_slider_change(_change) -> None:
            _render_filtered_gallery(output, state, _current_slider_values())

        for s in sliders.values():
            s.observe(_on_slider_change, names="value")

        ipy_display(title, slider_box, output)
        # Initial render with sliders at their full extents.
        _render_filtered_gallery(output, state, _current_slider_values())
    else:
        # No scalar conditions (e.g., PowerElectronics)
        show_design_gallery(dataset, problem)


# ---------------------------------------------------------------------------
# NB01 training helpers
# ---------------------------------------------------------------------------


class WorkshopGenerator(th.nn.Module):
    """Thin wrapper around the EngiOpt CNN generator for the workshop.

    The CNN generator expects 4-D inputs ``(B, C, 1, 1)`` and returns
    ``(B, 1, H, W)``. This wrapper lets callers pass flat 2-D tensors
    ``(B, C)`` and returns ``(B, H, W)`` — matching the supervised-training
    helpers that operate on numpy arrays of shape ``(N, H, W)``.
    """

    def __init__(self, cnn_generator: th.nn.Module):
        super().__init__()
        self.gen = cnn_generator

    def forward(self, z: th.Tensor, conds: th.Tensor) -> th.Tensor:
        z_4d = z.unsqueeze(-1).unsqueeze(-1)  # (B, z_dim) -> (B, z_dim, 1, 1)
        c_4d = conds.unsqueeze(-1).unsqueeze(-1)  # (B, n_c) -> (B, n_c, 1, 1)
        out = self.gen(z_4d, c_4d)  # (B, 1, H, W)
        return out.squeeze(1)  # (B, H, W)
@dataclass(slots=True)
class TrainingConfig:
    """Training hyperparameters for the workshop generator."""

    # Dimensionality of the noise vector fed to the generator.
    latent_dim: int
    epochs: int = 8
    batch_size: int = 64
    lr: float = 2e-4
    # None -> CPU; a string (e.g. "cuda") is converted to th.device.
    device: th.device | str | None = None
    # Epochs at which to capture generated-design snapshots (1-based).
    snapshot_at_epochs: list[int] | None = None
    verbose: bool = True


def train_supervised_generator(
    model,
    train_conditions: np.ndarray,
    train_targets: np.ndarray,
    config: TrainingConfig,
    snapshot_conditions: np.ndarray | None = None,
) -> dict:
    """Train a conditional generator with supervised MSE loss.

    The generator learns to map (noise, conditions) to designs by minimising
    the MSE between its outputs and real optimal designs from the dataset.

    Args:
        model: Generator network. Forward signature: model(noise, conditions).
        train_conditions: (N, n_conds) float32 array.
        train_targets: (N, *design_shape) float32 array, scaled to [-1, 1].
        config: Training hyperparameters.
        snapshot_conditions: If provided, generate designs from these conditions
            at epochs listed in ``config.snapshot_at_epochs``.

    Returns:
        Dict with keys ``losses`` (list[float]) and ``snapshots``
        (list of (epoch, np.ndarray) pairs).
    """
    device = config.device
    if device is None:
        device = th.device("cpu")
    device = th.device(device) if isinstance(device, str) else device
    model = model.to(device)
    model.train()

    optimizer = th.optim.Adam(model.parameters(), lr=config.lr)
    criterion = th.nn.MSELoss()

    # Full dataset is moved to the target device up front (workshop-sized data).
    conds_t = th.tensor(train_conditions, dtype=th.float32, device=device)
    targets_t = th.tensor(train_targets, dtype=th.float32, device=device)
    dl = DataLoader(
        TensorDataset(conds_t, targets_t),
        batch_size=config.batch_size,
        shuffle=True,
    )

    snap_conds_t = None
    if snapshot_conditions is not None:
        snap_conds_t = th.tensor(snapshot_conditions, dtype=th.float32, device=device)

    losses: list[float] = []
    snapshots: list[tuple[int, np.ndarray]] = []

    snapshot_epochs = config.snapshot_at_epochs or []

    for epoch in range(1, config.epochs + 1):
        model.train()
        epoch_loss = 0.0
        for batch_conds, batch_targets in dl:
            # Fresh noise each batch; conditions come from the real data.
            z = th.randn(batch_conds.shape[0], config.latent_dim, device=device)
            fake = model(z, batch_conds)
            loss = criterion(fake.flatten(1), batch_targets.flatten(1))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        avg = epoch_loss / len(dl)
        losses.append(avg)
        if config.verbose:
            print(f"  Epoch {epoch:3d}/{config.epochs} | Loss: {avg:.6f}")

        if epoch in snapshot_epochs and snap_conds_t is not None:
            model.eval()
            with th.no_grad():
                z = th.randn(len(snap_conds_t), config.latent_dim, device=device)
                snap = model(z, snap_conds_t)
                # Map generator output from [-1, 1] back to [0, 1] for display.
                snap_np = ((snap.cpu().numpy() + 1.0) / 2.0).clip(0, 1)
                snapshots.append((epoch, snap_np))

    return {"losses": losses, "snapshots": snapshots}


def generate_designs(
    model,
    conditions: np.ndarray,
    *,
    latent_dim: int,
    device=None,
) -> np.ndarray:
    """Generate designs from conditions using a trained generator.

    Args:
        model: Trained generator model.
        conditions: (N, n_conds) float32 array.
        latent_dim: Dimensionality of the noise vector.
        device: Torch device.

    Returns:
        (N, *design_shape) float32 array in [0, 1].
    """
    if device is None:
        device = th.device("cpu")
    device = th.device(device) if isinstance(device, str) else device
    model.eval()
    conds_t = th.tensor(conditions, dtype=th.float32, device=device)
    with th.no_grad():
        z = th.randn(len(conditions), latent_dim, device=device)
        raw = model(z, conds_t)
    # Generator outputs live in [-1, 1]; rescale to [0, 1] for display/eval.
    return ((raw.cpu().numpy() + 1.0) / 2.0).clip(0, 1)


def show_training_progression(
    snapshots: list[tuple[int, np.ndarray]],
    baseline_designs: np.ndarray | None = None,
    n_show: int = 4,
) -> None:
    """Visualize how generated designs evolve across training epochs.

    Args:
        snapshots: List of (epoch, designs_array) tuples from training.
        baseline_designs: If provided, show ground-truth in the last row.
        n_show: Number of designs to display per row.
    """
    if not snapshots:
        print("No snapshots to display.")
        return

    n_show = min(n_show, snapshots[0][1].shape[0])
    n_rows = len(snapshots) + (1 if baseline_designs is not None else 0)

    fig, axes = plt.subplots(n_rows, n_show, figsize=(2.8 * n_show, 2.5 * n_rows))
    # Normalize axes to a 2-D grid regardless of single-row/column collapse.
    if n_rows == 1:
        axes = axes[np.newaxis, :]
    if n_show == 1:
        axes = axes[:, np.newaxis]

    for row, (epoch, designs) in enumerate(snapshots):
        for col in range(n_show):
            axes[row, col].imshow(designs[col], cmap="gray_r", vmin=0, vmax=1)
            axes[row, col].axis("off")
        # NOTE(review): set_ylabel after axis("off") may not render — the off
        # state hides axis labels in matplotlib; confirm the row labels appear.
        axes[row, 0].set_ylabel(
            f"Epoch {epoch}",
            fontsize=11,
            rotation=0,
            labelpad=55,
            va="center",
        )

    if baseline_designs is not None:
        row = len(snapshots)
        for col in range(n_show):
            axes[row, col].imshow(baseline_designs[col], cmap="gray_r", vmin=0, vmax=1)
            axes[row, col].axis("off")
        axes[row, 0].set_ylabel(
            "Ground\ntruth",
            fontsize=11,
            rotation=0,
            labelpad=55,
            va="center",
        )

    fig.suptitle("How the generator learns over training", fontsize=14, y=1.02)
    fig.tight_layout()
    plt.show()
    plt.close(fig)
--------------------------------------------------------------------------- +# NB01 visualizations +# --------------------------------------------------------------------------- + + +def show_training_curve(train_losses: list[float], save_path: str | None = None) -> None: + """Plot training loss over epochs.""" + fig, ax = plt.subplots(figsize=(7, 3.5)) + ax.plot(range(1, len(train_losses) + 1), train_losses, marker="o", linewidth=2, color="#2563eb") + ax.set_xlabel("Epoch", fontsize=12) + ax.set_ylabel("MSE Loss", fontsize=12) + ax.set_title("Generator Training Loss", fontsize=14) + ax.grid(visible=True, alpha=0.3) + fig.tight_layout() + if save_path: + fig.savefig(save_path, dpi=120) + plt.show() + print(f"Final loss: {train_losses[-1]:.6f}") + + +def show_gen_vs_baseline( + gen_designs: np.ndarray, + baseline_designs: np.ndarray, + conditions_records: list[dict], + n_show: int = 8, + problem=None, +) -> None: + """Show generated vs baseline designs using ``problem.render()`` if available.""" + n_show = min(n_show, len(gen_designs)) + + if problem is not None: + for i in range(n_show): + # Scalar conditions only for title + scalars = { + k: v + for k, v in conditions_records[i].items() + if not isinstance(v, (list, np.ndarray)) or np.asarray(v).size == 1 + } + cond_str = " | ".join(f"{k}: {float(v):.3f}" for k, v in scalars.items()) + + result_g = problem.render(gen_designs[i]) + fig_g = result_g[0] if isinstance(result_g, tuple) else result_g + if hasattr(fig_g, "savefig"): + fig_g.suptitle(f"Generated {i} — {cond_str}", fontsize=10, y=1.02) + plt.show() + plt.close(fig_g) + + result_b = problem.render(baseline_designs[i]) + fig_b = result_b[0] if isinstance(result_b, tuple) else result_b + if hasattr(fig_b, "savefig"): + fig_b.suptitle(f"Baseline {i}", fontsize=10, y=1.02) + plt.show() + plt.close(fig_b) + else: + # Fallback: side-by-side imshow grid + fig, axes = plt.subplots(2, n_show, figsize=(2.2 * n_show, 5.5)) + if n_show == 1: + axes = axes.reshape(2, 1) 
+ for i in range(n_show): + axes[0, i].imshow(gen_designs[i], cmap="gray", vmin=0, vmax=1) + axes[0, i].axis("off") + scalars = { + key: value + for key, value in conditions_records[i].items() + if not isinstance(value, (list, np.ndarray)) or np.asarray(value).size == 1 + } + cond_str = "\n".join(f"{key}: {float(value):.3f}" for key, value in scalars.items()) + axes[0, i].set_title(cond_str, fontsize=8) + axes[1, i].imshow(baseline_designs[i], cmap="gray", vmin=0, vmax=1) + axes[1, i].axis("off") + axes[0, 0].set_ylabel("Generated", fontsize=12, rotation=90, labelpad=10) + axes[1, 0].set_ylabel("Baseline", fontsize=12, rotation=90, labelpad=10) + fig.suptitle("Generated (top) vs Baseline (bottom)", fontsize=14, y=1.01) + fig.tight_layout() + plt.show() + plt.close(fig) + + +# --------------------------------------------------------------------------- +# NB02 visualizations +# --------------------------------------------------------------------------- + + +def show_objective_comparison(results) -> None: + """Histogram + scatter of generated vs baseline objectives.""" + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) + + ax1.hist(results["gen_obj"], bins=10, alpha=0.7, label="Generated", color="#4C72B0") + ax1.hist(results["base_obj"], bins=10, alpha=0.7, label="Baseline", color="#DD8452") + ax1.set_xlabel("Objective (lower is better)") + ax1.set_ylabel("Count") + ax1.set_title("Objective distribution") + ax1.legend() + + colors = results["gen_feasible"].map({True: "#55A868", False: "#C44E52"}) + ax2.scatter(results["base_obj"], results["gen_obj"], alpha=0.8, c=colors, edgecolors="black", linewidths=0.5, s=60) + lo = min(results["base_obj"].min(), results["gen_obj"].min()) * 0.9 + hi = max(results["base_obj"].max(), results["gen_obj"].max()) * 1.1 + ax2.plot([lo, hi], [lo, hi], "--", color="gray", linewidth=1, label="y = x") + ax2.set_xlabel("Baseline objective") + ax2.set_ylabel("Generated objective") + ax2.set_title("Per-sample (green=feasible, 
red=infeasible)") + ax2.legend() + + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_feasibility_bars(results) -> None: + """Bar chart comparing feasibility rates.""" + gen_rate = results["gen_feasible"].mean() + base_rate = results["base_feasible"].mean() + + fig, ax = plt.subplots(figsize=(5, 4)) + bars = ax.bar(["Generated", "Baseline"], [gen_rate, base_rate], color=["#4C72B0", "#DD8452"], edgecolor="black") + ax.set_ylim(0, 1.15) + ax.set_ylabel("Feasible fraction") + ax.set_title("Feasibility rate") + for bar, val in zip(bars, [gen_rate, base_rate]): + ax.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.03, f"{val:.0%}", ha="center", fontweight="bold") + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_design_comparison_grid(gen_designs, baseline_designs, results, n_show: int = 6, problem=None) -> None: + """Show generated vs baseline with feasibility annotations. + + Uses ``problem.render()`` if available, otherwise falls back to imshow. + """ + n_show = min(n_show, len(gen_designs)) + + if problem is not None: + for i in range(n_show): + feas = "FEASIBLE" if results.iloc[i]["gen_feasible"] else "INFEASIBLE" + gap = results.iloc[i]["gen_minus_base"] + + result_g = problem.render(gen_designs[i]) + fig_g = result_g[0] if isinstance(result_g, tuple) else result_g + if hasattr(fig_g, "savefig"): + color = "green" if results.iloc[i]["gen_feasible"] else "red" + fig_g.suptitle(f"Generated {i} — {feas} (gap={gap:.1f})", fontsize=11, color=color, y=1.02) + plt.show() + plt.close(fig_g) + else: + # Fallback: imshow grid + fig, axes = plt.subplots(2, n_show, figsize=(2.5 * n_show, 5)) + if n_show == 1: + axes = axes[:, None] + for i in range(n_show): + axes[0, i].imshow(gen_designs[i], cmap="gray", vmin=0, vmax=1, aspect="auto") + feas = "FEASIBLE" if results.iloc[i]["gen_feasible"] else "INFEASIBLE" + color = "green" if results.iloc[i]["gen_feasible"] else "red" + axes[0, i].set_title(f"Gen {i}\n{feas}", fontsize=9, 
color=color, fontweight="bold") + axes[0, i].axis("off") + axes[1, i].imshow(baseline_designs[i], cmap="gray", vmin=0, vmax=1, aspect="auto") + axes[1, i].set_title(f"Baseline {i}", fontsize=9) + axes[1, i].axis("off") + fig.suptitle("Generated vs Baseline", fontsize=13, y=1.02) + fig.tight_layout() + plt.show() + plt.close(fig) + + +# --------------------------------------------------------------------------- +# Metric helpers (NB02) +# --------------------------------------------------------------------------- + + +def mean_pairwise_l2(designs: np.ndarray) -> float: + """Average L2 distance between all pairs. Measures intra-set diversity.""" + flat = designs.reshape(designs.shape[0], -1) + n = flat.shape[0] + if n < MIN_PAIRWISE_COUNT: + return 0.0 + pairwise = cdist(flat, flat, metric="euclidean") + upper_triangle = pairwise[np.triu_indices(n, k=1)] + return float(np.mean(upper_triangle)) + + +def mean_nn_distance_to_reference(designs: np.ndarray, reference: np.ndarray) -> float: + """Average nearest-neighbor distance to a reference set. Measures novelty.""" + q = designs.reshape(designs.shape[0], -1) + r = reference.reshape(reference.shape[0], -1) + nn_dists = [] + for i in range(q.shape[0]): + d = np.linalg.norm(r - q[i][None, :], axis=1) + nn_dists.append(float(np.min(d))) + return float(np.mean(nn_dists)) + + +# --------------------------------------------------------------------------- +# NB02 enhanced visualizations — pedagogical metric exploration +# --------------------------------------------------------------------------- + + +def show_residual_heatmaps( + gen_designs: np.ndarray, + baseline_designs: np.ndarray, + n_show: int = 6, +) -> None: + """Pixel-wise absolute difference between generated and baseline designs. + + Three rows: generated, baseline, |residual|. The residual row uses a + ``Reds`` colourmap (white = no error, dark red = large error). 
def show_objective_residuals(results) -> None:
    """Per-sample bar chart of objective gap (generated - baseline).

    Bars above zero mean the generated design is *worse* (higher compliance).
    """
    gaps = results["gen_minus_base"]
    # Red bars: generated worse than baseline; green: better or equal.
    colors = ["#C44E52" if g > 0 else "#55A868" for g in gaps]

    fig, ax = plt.subplots(figsize=(max(8, len(gaps) * 0.45), 4))
    ax.bar(range(len(gaps)), gaps, color=colors, edgecolor="black", linewidth=0.5)
    ax.axhline(0, color="gray", linewidth=1, linestyle="--")
    ax.set_xlabel("Sample index")
    ax.set_ylabel("Objective gap (gen - baseline)")
    ax.set_title(
        "Per-sample objective residuals (green = generated is better, red = worse)",
        fontsize=11,
    )
    ax.set_xticks(range(len(gaps)))
    fig.tight_layout()
    plt.show()
    plt.close(fig)


def show_volfrac_analysis(results, volfrac_tol: float = 0.05) -> None:
    """Volume-fraction target vs actual scatter + error distribution.

    Args:
        results: DataFrame-like with ``target_volfrac``, ``gen_volfrac``,
            and boolean ``gen_feasible`` columns.
        volfrac_tol: Half-width of the tolerance band drawn around y = x.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5))

    colors = results["gen_feasible"].map({True: "#55A868", False: "#C44E52"})
    ax1.scatter(
        results["target_volfrac"],
        results["gen_volfrac"],
        c=colors,
        edgecolors="black",
        linewidths=0.5,
        s=60,
        alpha=0.8,
    )
    lo = min(results["target_volfrac"].min(), results["gen_volfrac"].min()) - 0.05
    hi = max(results["target_volfrac"].max(), results["gen_volfrac"].max()) + 0.05
    xs = np.linspace(lo, hi, 100)
    ax1.plot(xs, xs, "--", color="gray", linewidth=1, label="Perfect match")
    # Shade the acceptable band: |actual - target| <= volfrac_tol.
    ax1.fill_between(
        xs,
        xs - volfrac_tol,
        xs + volfrac_tol,
        alpha=0.12,
        color="green",
        label=f"Tolerance (\u00b1{volfrac_tol})",
    )
    ax1.set_xlabel("Target volume fraction")
    ax1.set_ylabel("Generated volume fraction")
    ax1.set_title("Constraint satisfaction: target vs actual volfrac")
    ax1.legend(fontsize=9)

    errors = results["gen_volfrac"] - results["target_volfrac"]
    ax2.hist(errors, bins=15, edgecolor="white", color="#4C72B0", alpha=0.8)
    ax2.axvline(0, color="gray", linestyle="--", linewidth=1)
    ax2.axvline(-volfrac_tol, color="red", linestyle=":", linewidth=1.5, label=f"\u00b1{volfrac_tol}")
    ax2.axvline(volfrac_tol, color="red", linestyle=":", linewidth=1.5)
    ax2.set_xlabel("Volume fraction error (actual \u2212 target)")
    ax2.set_ylabel("Count")
    ax2.set_title("Distribution of constraint errors")
    ax2.legend(fontsize=9)

    fig.tight_layout()
    plt.show()
    plt.close(fig)


def show_spatial_distribution_comparison(
    gen_designs: np.ndarray,
    baseline_designs: np.ndarray,
    train_reference: np.ndarray | None = None,
) -> None:
    """Compare *where* material is placed on average across design sets.

    For binary/near-binary topology designs, pixel-intensity histograms are
    uninformative (just two spikes at 0 and 1). Instead we show:

    - **Mean design images**: the average design across each set, revealing
      where material tends to be placed. Differences highlight spatial
      biases in the generator.
    - **Per-design volume fraction distributions**: how much total material
      each design uses, compared across sets.
    """
    has_train = train_reference is not None
    n_img = 3 if has_train else 2

    fig, axes = plt.subplots(
        1,
        n_img + 1,
        figsize=(4.2 * (n_img + 1), 4),
        gridspec_kw={"width_ratios": [1] * n_img + [1.3]},
        constrained_layout=True,
    )

    # ── Mean design images ─────────────────────────────────────────────
    sets: list[tuple[str, np.ndarray, str]] = [
        ("Generated", gen_designs, "#4C72B0"),
        ("Baseline", baseline_designs, "#DD8452"),
    ]
    if has_train:
        # Type-narrowing assert for the checker; has_train guarantees this.
        assert train_reference is not None
        sets.append(("Training", train_reference, "#55A868"))

    vmin, vmax = 0, 1
    for ax, (label, designs, _color) in zip(axes[:n_img], sets):
        mean_img = designs.mean(axis=0)
        im = ax.imshow(mean_img, cmap="gray_r", vmin=vmin, vmax=vmax)
        ax.set_title(f"Mean {label}\n(n={len(designs)})", fontsize=11)
        ax.axis("off")
    # One shared colorbar across all mean-image panels.
    fig.colorbar(im, ax=axes[:n_img].tolist(), shrink=0.75, label="Avg. material density", pad=0.04)

    # ── Per-design volume fraction distributions ───────────────────────
    ax_vf = axes[n_img]
    for label, designs, color in sets:
        vfracs = designs.reshape(designs.shape[0], -1).mean(axis=1)
        ax_vf.hist(vfracs, bins=25, alpha=0.5, density=True, label=label, color=color, edgecolor="white", linewidth=0.3)
    ax_vf.set_xlabel("Volume fraction (per design)")
    ax_vf.set_ylabel("Density")
    ax_vf.set_title("Material-usage\ndistributions", fontsize=11)
    ax_vf.legend(fontsize=9)

    fig.suptitle(
        "Spatial distribution comparison \u2014 where does each set place material?",
        fontsize=13,
        y=1.03,
    )
    plt.show()
    plt.close(fig)


def show_mmd_comparison_bar(
    mmd_gen_base: float,
    mmd_train_base: float,
    mmd_random_base: float,
) -> None:
    """Bar chart placing the generator's MMD in context.

    Shows three reference points so the raw MMD number becomes interpretable:
    - Generated vs baseline (our metric -- same conditions)
    - Train sample vs baseline (retrieval baseline -- no conditioning)
    - Random vs baseline (upper bound / worst case)
    """
    labels = [
        "Generated\nvs Baseline",
        "Train sample\nvs Baseline\n(no conditioning)",
        "Random\nvs Baseline\n(worst case)",
    ]
    values = [mmd_gen_base, mmd_train_base, mmd_random_base]
    colors = ["#4C72B0", "#DD8452", "#C44E52"]

    fig, ax = plt.subplots(figsize=(7, 4.5))
    bars = ax.bar(labels, values, color=colors, edgecolor="black", linewidth=0.5, width=0.55)
    for bar, v in zip(bars, values):
        # Offset value labels by 2% of the largest magnitude so they clear the bar.
        ax.text(
            bar.get_x() + bar.get_width() / 2,
            bar.get_height() + max(abs(v) for v in values) * 0.02,
            f"{v:.4f}",
            ha="center",
            fontsize=11,
            fontweight="bold",
        )
    ax.set_ylabel("MMD (lower = more similar)")
    ax.set_title("MMD in context \u2014 where does the generator sit?", fontsize=13)
    ax.set_ylim(0, max(values) * 1.25)
    fig.tight_layout()
    plt.show()
    plt.close(fig)


def show_pairwise_distance_heatmap(
    designs: np.ndarray,
    title: str = "Pairwise L2 distance among generated designs",
) -> None:
    """Heatmap of pairwise L2 distances — visual proxy for diversity.

    A uniform warm colour off-diagonal means all designs differ roughly
    equally (good diversity). Cool/dark blocks reveal clusters of
    near-identical designs (partial mode collapse).
    """
    flat = designs.reshape(designs.shape[0], -1)
    dists = cdist(flat, flat, "euclidean")

    fig, (ax, ax_hist) = plt.subplots(
        1,
        2,
        figsize=(11, 5),
        gridspec_kw={"width_ratios": [1.2, 1]},
    )

    im = ax.imshow(dists, cmap="viridis")
    ax.set_xlabel("Design index")
    ax.set_ylabel("Design index")
    ax.set_title(title, fontsize=11)
    fig.colorbar(im, ax=ax, label="L2 distance", fraction=0.046, pad=0.04)

    # Histogram of off-diagonal distances
    triu_idx = np.triu_indices(len(designs), k=1)
    off_diag = dists[triu_idx]
    ax_hist.hist(off_diag, bins=25, edgecolor="white", color="#4C72B0", alpha=0.8)
    ax_hist.axvline(off_diag.mean(), color="#C44E52", linewidth=2, linestyle="--", label=f"Mean = {off_diag.mean():.1f}")
    ax_hist.set_xlabel("Pairwise L2 distance")
    ax_hist.set_ylabel("Count")
    ax_hist.set_title("Distribution of pairwise distances", fontsize=11)
    ax_hist.legend(fontsize=9)

    fig.tight_layout()
    plt.show()
    plt.close(fig)


def show_embedding_scatter(
    gen_designs: np.ndarray,
    baseline_designs: np.ndarray,
    train_reference: np.ndarray,
) -> None:
    """PCA 2-D projection of generated, baseline, and training designs.

    Uses numpy SVD so there is no sklearn dependency.
    """
    g = gen_designs.reshape(gen_designs.shape[0], -1).astype(np.float64)
    b = baseline_designs.reshape(baseline_designs.shape[0], -1).astype(np.float64)
    t = train_reference.reshape(train_reference.shape[0], -1).astype(np.float64)

    # PCA = SVD of the mean-centered stack; rows of vt are principal axes.
    combined = np.vstack([g, b, t])
    mean = combined.mean(axis=0)
    centered = combined - mean
    _, _, vt = np.linalg.svd(centered, full_matrices=False)
    proj = centered @ vt[:2].T

    # Split the joint projection back into the three sets.
    ng, nb = len(g), len(b)
    pg, pb, pt = proj[:ng], proj[ng : ng + nb], proj[ng + nb :]

    fig, ax = plt.subplots(figsize=(8, 7))
    # Training cloud first (light grey background layer), then overlays.
    ax.scatter(pt[:, 0], pt[:, 1], alpha=0.15, s=15, c="#AAAAAA", label=f"Training ({len(t)})")
    ax.scatter(
        pb[:, 0],
        pb[:, 1],
        alpha=0.7,
        s=50,
        c="#DD8452",
        edgecolors="black",
        linewidths=0.5,
        label=f"Baseline ({nb})",
        marker="s",
    )
    ax.scatter(
        pg[:, 0],
        pg[:, 1],
        alpha=0.8,
        s=60,
        c="#4C72B0",
        edgecolors="black",
        linewidths=0.5,
        label=f"Generated ({ng})",
        marker="o",
    )
    ax.set_xlabel("PC 1")
    ax.set_ylabel("PC 2")
    ax.set_title("PCA projection \u2014 where do generated designs live in design space?", fontsize=12)
    ax.legend(fontsize=10)
    fig.tight_layout()
    plt.show()
    plt.close(fig)
+ """ + n = len(opt_data) + fig, axes = plt.subplots(1, n, figsize=(5.5 * n, 4.5), squeeze=False) + + for i, d in enumerate(opt_data): + ax = axes[0, i] + objs = d["obj_trajectory"] + steps = list(range(len(objs))) + base = d["base_obj"] + + ax.plot(steps, objs, "o-", color="#4C72B0", linewidth=2, markersize=4, label="Optimizer") + ax.axhline(base, color="#DD8452", linestyle="--", linewidth=1.5, label=f"Baseline = {base:.1f}") + ax.fill_between(steps, objs, base, alpha=0.12, color="#4C72B0") + + iog = objs[0] - base + fog = objs[-1] - base + cog = sum(o - base for o in objs) + + ax.set_title( + f"Sample {d['sample_idx']}\nIOG={iog:.1f} FOG={fog:.1f} COG={cog:.1f}", + fontsize=10, + ) + ax.set_xlabel("Optimization step") + ax.set_ylabel("Objective (compliance)") + ax.legend(fontsize=8, loc="upper right") + + fig.suptitle( + "Optimization from generated warmstarts \u2014 does the model give the optimizer a head start?", + fontsize=13, + y=1.05, + ) + fig.tight_layout() + plt.show() + plt.close(fig) + + +def show_metric_summary_dashboard(summary_dict: dict) -> None: + """Multi-panel grouped bar chart summarizing all metric categories.""" + categories = { + "Simulation\nPerformance": [ + ("Obj gap (gen\u2212base)", summary_dict.get("objective_gap_mean", 0)), + ("Improvement rate", summary_dict.get("improvement_rate", 0)), + ], + "Constraint\nSatisfaction": [ + ("Gen feasible %", summary_dict.get("gen_feasible_rate", 0)), + ("Base feasible %", summary_dict.get("base_feasible_rate", 0)), + ], + "Distributional\nSimilarity": [ + ("MMD", summary_dict.get("mmd", 0)), + ], + "Diversity &\nNovelty": [ + ("Diversity (L2)", summary_dict.get("gen_diversity_l2", 0)), + ("Novelty (NN)", summary_dict.get("gen_novelty_to_train_l2", 0)), + ], + } + + fig, axes = plt.subplots(1, len(categories), figsize=(4 * len(categories), 4.5)) + palette = ["#4C72B0", "#DD8452", "#55A868", "#C44E52"] + + for ax, (cat_name, metrics), color in zip(axes, categories.items(), palette): + names = [m[0] 
for m in metrics] + vals = [m[1] for m in metrics] + bars = ax.barh(names, vals, color=color, edgecolor="black", linewidth=0.5) + for bar, v in zip(bars, vals): + ax.text( + bar.get_width() + max(abs(v) for v in vals) * 0.03, + bar.get_y() + bar.get_height() / 2, + f"{v:.4f}" if abs(v) < 1 else f"{v:.1f}", + va="center", + fontsize=9, + ) + ax.set_title(cat_name, fontsize=11, fontweight="bold") + ax.set_xlim(left=min(0, min(vals) * 1.2)) + + fig.suptitle("Evaluation dashboard \u2014 how does the generator perform?", fontsize=14, y=1.03) + fig.tight_layout() + plt.show() + plt.close(fig) diff --git a/pyproject.toml b/pyproject.toml index 36881c4a..32a809b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -113,6 +113,10 @@ target-version = "py39" ######################################## LINTING ######################################## [tool.ruff.lint] select = ["ALL"] +exclude = [ + "workshops/dcc26/**/*.ipynb", + "workshops/dcc26/utils/**/*.py", +] ignore = [ "ANN", # flake8-annotations (mypy's job) "COM812", # missing-trailing-comma (conflicts with formatter) @@ -190,6 +194,7 @@ order-by-type = false ###################################### FORMAT ######################################## [tool.ruff.format] +exclude = ["workshops/dcc26/**/*.ipynb"] # Like Black, use double quotes for strings. quote-style = "double" @@ -275,6 +280,9 @@ module = [ "seaborn", "matplotlib", "matplotlib.*", + "IPython", + "IPython.*", + "ipywidgets", "pandas", "cvxopt", "cvxopt.*", diff --git a/workshops/dcc26/NOTEBOOK_PEDAGOGY_BLUEPRINT.md b/workshops/dcc26/NOTEBOOK_PEDAGOGY_BLUEPRINT.md new file mode 100644 index 00000000..4d56ed74 --- /dev/null +++ b/workshops/dcc26/NOTEBOOK_PEDAGOGY_BLUEPRINT.md @@ -0,0 +1,153 @@ +# DCC26 Notebook Pedagogy Blueprint (Pre-write Source) + +This document is the canonical pre-write for workshop notebooks. Notebooks should be generated from this structure, not authored directly as raw `.ipynb` first. + +## Teaching Design Principles + +1. 
Every technical step is paired with a markdown teaching cell. +2. Every code section has local context: why, inputs, outputs, checks, failure modes. +3. Benchmark science is explicit: objective, feasibility, diversity, novelty, reproducibility. +4. Discussion prompts are embedded and mapped to workshop breakout questions. +5. Participant and solution tracks share the same pedagogical arc; only implementation detail differs. + +## Common Cell Pattern + +For each section: + +- Purpose: why this step matters for benchmark credibility +- Inputs: what artifacts/variables are required +- Action: code operation performed +- Success check: what output indicates correctness +- Failure modes: common pitfalls and fixes +- Discussion bridge: one reflection question + +--- + +## Notebook 00: Setup + API Warmup + +### Learning objective +Understand EngiBench benchmark contract components and reproducibility controls. + +### Section plan +1. Read-me-first + copy mode + runtime expectation +2. Concept cell: EngiBench vs model libraries +3. Environment bootstrap +4. Reproducibility cell (seed, versions) +5. Problem instantiation (`Beams2D`) + inspection +6. Dataset inspection and shape sanity +7. Render one sample and explain representation +8. Explicit constraint violation check with interpretation +9. Reflection prompts tied to comparability across papers + +### Discussion trigger +Which benchmark settings must be fixed for fair method comparison? + +--- + +## Notebook 01: Train + Generate + +### Learning objective +Implement an EngiOpt model against EngiBench data while preserving evaluation-ready artifacts. + +### Section plan +1. Read-me-first + copy mode + expected runtime +2. Concept cell: inverse design framing, conditional generation assumptions +3. Bootstrap deps and imports +4. Configuration and artifact contract +5. Data subset construction and rationale (runtime vs fidelity) +6. Model definition and optimizer +7. 
Training loop with diagnostics + expected loss behavior +8. Generation from test conditions +9. Quick feasibility precheck (not final evaluation) +10. Artifact export contract (npy/json/checkpoint/history/curve) +11. Optional W&B logging: train curve, scalar logs, artifact bundle +12. Visual sanity grid +13. Discussion prompt: training loss vs engineering validity mismatch + +### Discussion trigger +Can lower train reconstruction loss worsen simulator objective or feasibility? + +--- + +## Notebook 02: Evaluate + Metrics + +### Learning objective +Run robust benchmark evaluation and interpret trade-offs beyond objective score. + +### Section plan +1. Read-me-first + copy mode + expected runtime +2. Concept cell: why objective-only reporting is incomplete +3. Bootstrap deps and imports +4. Artifact loading strategy (local -> optional W&B -> local auto-build) +5. Per-sample evaluation loop (constraint + simulate) +6. Metric layer: + - objective means and gap + - improvement rate + - feasibility/violation rates + - diversity proxy + - novelty-to-train proxy +7. Export layer: CSV + histogram + scatter + grid +8. Optional W&B evaluation logging (table + images + summary) +9. Interpretation rubric with examples +10. Breakout prompts mapped to workshop proposal + +### Discussion trigger +Which missing metric would change conclusions for your domain? + +--- + +## Notebook 03: Add New Problem Scaffold + +### Learning objective +Understand minimal interface required for a reusable EngiBench-style benchmark problem. + +### Section plan +1. Read-me-first + copy mode +2. Concept cell: benchmark-ready problem checklist +3. Scaffold imports and abstract contract explanation +4. Minimal `Problem` implementation skeleton +5. Toy simulator and constraints +6. Registration/discovery and deterministic behavior +7. Contribution checklist for real domains +8. 
Reflection prompts on leakage, units, and reproducibility metadata
+
+### Discussion trigger
+What metadata is minimally required so another lab can reproduce your new benchmark?
+
+---
+
+## Notebook 04: Heat Exchanger Design Problem
+
+### Learning objective
+Wrap a lightweight external engineering calculation as an EngiBench-style benchmark problem.
+
+### Section plan
+1. Read-me-first + copy mode
+2. Concept cell: same thermal domain, different physics than heat-conduction topology optimization
+3. Bootstrap optional `ht` and `fluids` dependencies
+4. Define design variables, operating conditions, objectives, and constraints
+5. Implement a small simulator using effectiveness-NTU and pressure-drop calculations
+6. Package the simulator behind `simulate`, `check_constraints`, `random_design`, `optimize`, and `render`
+7. Compare candidate designs and constraint violations
+8. Run a transparent random-search baseline
+9. Vary operating conditions to show conditional design behavior
+10. Map the notebook wrapper to a production EngiBench contribution checklist
+
+### Discussion trigger
+When should a performance quantity be an objective, a constraint, or both?
+
+---
+
+## Participant vs Solution Policy
+
+- Participant notebooks: keep code TODOs, but each TODO has explicit completion checks and expected outputs.
+- Solution notebooks: complete implementations plus concise inline comments for non-obvious logic only.
+- Both tracks: keep identical markdown structure for pedagogical alignment.
+
+## Quality Gate Before Publishing
+
+1. All code cells compile.
+2. Solution Notebook 01+02 execute end-to-end in workshop env.
+3. Artifact contract is consistent between Notebook 01 and 02.
+4. Copy-safe links use `?copy=true` (matching the README's Colab links).
+5. Standalone readability check: each notebook understandable without live lecture.
diff --git a/workshops/dcc26/README.md b/workshops/dcc26/README.md new file mode 100644 index 00000000..433c284d --- /dev/null +++ b/workshops/dcc26/README.md @@ -0,0 +1,83 @@ +# DCC 2026 Workshop Notebook Suite + +This folder contains the DCC'26 workshop notebooks for benchmarking AI methods in engineering design with EngiBench and EngiOpt. + +## Start Here: Workshop Notebooks + +Use these notebooks during the live workshop. They are guided, narrative notebooks: run cells top to bottom, read the short explanations, and discuss the prompts. + +Open notebooks with the `?copy=true` Colab links below. Colab will prompt you to create your own copy before editing, so your changes do not write back to the EngiOpt repository. + +| Step | Notebook | Colab | +|---|---|---| +| 00 | Frame an engineering design problem as a benchmark | [Open Simple 00](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/simple/00_framing_your_design_problem.ipynb?copy=true) | +| 01 | Train a lightweight conditional generator | [Open Simple 01](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/simple/01_training_a_generative_model.ipynb?copy=true) | +| 02 | Evaluate generated designs with benchmark methods | [Open Simple 02](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/simple/02_evaluating_your_generated_designs.ipynb?copy=true) | +| 03 | Write a minimal new EngiBench-style problem | [Open Simple 03](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/simple/03_writing_your_own_problem.ipynb?copy=true) | + +Recommended live flow: + +1. Run Simple 00 to understand the benchmark contract: design variables, conditions, objectives, constraints, rendering, simulation, and baseline optimization. +2. 
Run Simple 01 to train a small EngiOpt generator and export generated designs. +3. Run Simple 02 to evaluate visual quality, feasibility, simulation performance, diversity, and warmstarting. If artifacts from Simple 01 are missing, Simple 02 rebuilds them automatically. +4. Run Simple 03 as the capstone for adding a new benchmark problem. + +## Optional Extra Exercises + +The `participant/` notebooks are more hands-on. They contain `PUBLIC FILL-IN` cells, checkpoints, and deeper metric or implementation exercises. Use these as homework, breakout exercises, or follow-up material after the live workshop. + +| Step | Exercise notebook | Colab | Solution | +|---|---|---|---| +| 00 | API warmup with fill-ins | [Participant 00](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/00_setup_api_warmup.ipynb?copy=true) | [Solution 00](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/00_setup_api_warmup.ipynb?copy=true) | +| 01 | Train and generate with fill-ins | [Participant 01](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/01_train_generate.ipynb?copy=true) | [Solution 01](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/01_train_generate.ipynb?copy=true) | +| 02 | Full evaluation metrics exercise | [Participant 02](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/02_evaluate_metrics.ipynb?copy=true) | [Solution 02](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/02_evaluate_metrics.ipynb?copy=true) | +| 03 | Ambitious PyBullet co-design scaffold | [Participant 
03](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb?copy=true) | [Solution 03](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb?copy=true) | +| 04 | Heat-exchanger physics-wrapper exercise | [Participant 04](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/participant/04_heat_exchanger_design_problem.ipynb?copy=true) | [Solution 04](https://colab.research.google.com/github/IDEALLab/EngiOpt/blob/codex/dcc26-workshop-notebooks/workshops/dcc26/solutions/04_heat_exchanger_design_problem.ipynb?copy=true) | + +## What Each Track Is For + +- `simple/`: main live-workshop notebooks. Best for participants and first-time readers. +- `participant/`: extra exercises with fill-in cells. Best for homework, small-group work, or deeper practice. +- `solutions/`: completed versions of the extra exercises. Best for facilitators or participants who get stuck. + +## Runtime Assumptions + +- Primary live problem: `Beams2D` +- No container-dependent problems are required during the main workshop notebooks. +- W&B integration is optional and disabled by default in the exercise track. +- Notebook bootstrap cells install dependencies automatically on Colab. +- On local environments, install cells skip by default unless `FORCE_INSTALL = True`. + +## Artifact Flow + +Simple 01 writes generated artifacts to: + +- Google Colab runtime: `/content/dcc26_artifacts/` +- Local/Jupyter: `workshops/dcc26/artifacts/` + +Simple 02 reads those artifacts. If they are missing, it rebuilds the same lightweight Simple 01 train/generate/export path automatically, then continues evaluation. 
+ +The key artifacts are: + +- `generated_designs.npy` +- `baseline_designs.npy` +- `conditions.json` + +The deeper participant/solution evaluation notebooks may additionally write: + +- `engiopt_cgan2d_generator_supervised.pt` +- `training_history.csv` +- `training_curve.png` +- `metrics_summary.csv` +- `objective_histogram.png` +- `objective_scatter.png` +- `design_grid.png` + +## Suggested Pre-Workshop Checks + +1. Open each Simple Colab link in a fresh browser/session. +2. Confirm the install cell succeeds. +3. Run Simple 00 once end-to-end. +4. Run Simple 01 through the artifact export cell. +5. Open Simple 02 in a fresh runtime and confirm the artifact rebuild path works. +6. Run Simple 03 at least through the `Problem` class smoke test. diff --git a/workshops/dcc26/SLIDE_STORYBOARD.md b/workshops/dcc26/SLIDE_STORYBOARD.md new file mode 100644 index 00000000..e6e3264e --- /dev/null +++ b/workshops/dcc26/SLIDE_STORYBOARD.md @@ -0,0 +1,73 @@ +# DCC 2026 EngiBench / EngiOpt Slide Storyboard + +Final deck: `workshops/dcc26/slides/benchmarking-ai-for-engineering-design-dcc26.pptx` + +Contact sheet: `workshops/dcc26/slides/contact-sheet.png` + +Presenters: Matthew Keeler, Soheyl Massoudi, Mark Fuge. + +Core message: EngiBench and EngiOpt help engineering design ML become cumulative by turning design problems into executable benchmark contracts. Participants should leave able to ask what was actually benchmarked, whether the result is reusable, and how to express their own design problem in the same way. + +Audience: DCC 2026 workshop participants: design computing researchers, engineering design researchers, and AI-for-design practitioners. Assume mixed ML depth and keep the simple notebooks as the main path. + +## Red Line + +1. Engineering design ML comparison is fragile when papers quietly change the task. +2. A benchmark is not just a dataset or leaderboard; it is an executable contract. +3. 
The contract makes hidden choices explicit: design space, conditions, objectives, constraints, dataset, renderer, simulator, and optimizer. +4. The simple notebooks let participants use that contract to frame a problem, train a generator, evaluate designs, and sketch their own domain problem. +5. The closing takeaway is transfer: participants should know how this infrastructure can make their own research more reproducible, comparable, and reusable. + +## Design System + +- 16:9 widescreen PowerPoint. +- Off-white background, ETH-like blue section accents, near-black text. +- Red/orange accents for constraint and validity failure. +- One claim per slide. +- Large proof objects: rendered designs, training curves, metric summaries, warm-start curves. +- Calm footer with section and workshop label. + +## Final Slide Sequence + +1. **Title**: Benchmarking AI for Engineering Design. Includes presenters and ETH logo. +2. **Why this workshop exists**: Engineering design ML cannot become cumulative if every paper quietly changes the problem. +3. **What you should leave believing**: A benchmark is not just a leaderboard; it makes design claims reproducible. +4. **What you will do today**: Four simple notebooks turn that motivation into practice. +5. **Why benchmark before modeling?**: Same-looking tasks, designs, and scores can hide different engineering problems. +6. **Benchmark mental model**: A benchmark records the design task, not only the dataset. +7. **Notebook 00 bridge**: Frame your design problem before touching a model. +8. **Notebook 00 anchor**: Beams2D as the first concrete engineering task. +9. **API slide**: EngiBench turns design-problem questions into Python calls. +10. **Notebook 00 noticing slide**: Render, check validity, simulate, optimize. +11. **Notebook 01 bridge**: Train a generative model against optimizer answers. +12. **Training slide**: Training loss is a learning signal, not an engineering verdict. +13. 
**Generated vs baseline slide**: Look at designs, but do not stop at the eye test. +14. **Notebook 02 bridge**: Evaluate generated designs as engineering candidates. +15. **Evidence dashboard**: Feasibility, baseline feasibility, and generator wins. +16. **Failure mode**: Visual plausibility did not imply feasibility or performance. +17. **Warm-starting**: A generator may still be useful if it helps optimization. +18. **Discussion prompt**: Which metric would change the conclusion in your domain? +19. **Notebook 03 bridge**: Write your own design problem behind the same interface. +20. **Notebook 03 anchor**: Tiny cantilever problem with real engineering checks. +21. **Implementation checklist**: Explicit promises needed for a reusable problem. +22. **Domain translation worksheet**: Six prompts for participant research domains. +23. **Workshop logistics**: Use the simple notebooks; keep solution notebooks as facilitator references. +24. **Runtime fallback**: What to do if setup, training, optimization, or widgets are slow. +25. **Resources**: Simple notebooks, EngiBench docs, code, and paper. +26. **Closing**: Leave with a way to make your design problem executable, comparable, and reusable. 
+ +## Main Local Sources + +- `EngiBench/docs/_static/img/engibench_problems.png` +- `EngiBench/docs/_static/img/problems/beams2d.png` +- `EngiBench/docs/_static/img/problems/airfoil.png` +- `EngiBench/docs/_static/img/problems/heatconduction2d.png` +- `EngiOpt/workshops/dcc26/simple/00_framing_your_design_problem.ipynb` +- `EngiOpt/workshops/dcc26/simple/01_training_a_generative_model.ipynb` +- `EngiOpt/workshops/dcc26/simple/02_evaluating_your_generated_designs.ipynb` +- `EngiOpt/workshops/dcc26/simple/03_writing_your_own_problem.ipynb` +- `EngiOpt/workshops/dcc26/artifacts/design_grid.png` +- `EngiOpt/workshops/dcc26/artifacts/objective_scatter.png` +- `EngiOpt/workshops/dcc26/artifacts/objective_histogram.png` +- `EngiOpt/workshops/dcc26/artifacts/training_curve.png` +- `EngiOpt/workshops/dcc26/artifacts/metrics_summary.csv` diff --git a/workshops/dcc26/assets/engibench_logo.png b/workshops/dcc26/assets/engibench_logo.png new file mode 100644 index 00000000..6c1f17c3 Binary files /dev/null and b/workshops/dcc26/assets/engibench_logo.png differ diff --git a/workshops/dcc26/assets/engibench_problems.png b/workshops/dcc26/assets/engibench_problems.png new file mode 100644 index 00000000..44173818 Binary files /dev/null and b/workshops/dcc26/assets/engibench_problems.png differ diff --git a/workshops/dcc26/participant/00_setup_api_warmup.ipynb b/workshops/dcc26/participant/00_setup_api_warmup.ipynb new file mode 100644 index 00000000..9034904b --- /dev/null +++ b/workshops/dcc26/participant/00_setup_api_warmup.ipynb @@ -0,0 +1,483 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Welcome to the DCC'26 EngiBench Workshop!\n", + "\n", + "In the next 20 minutes you will **load an engineering-design benchmark, explore its data, and break its constraints on purpose**. 
No ML required yet — just Python and curiosity.\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` installed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"seaborn\", \"ipywidgets\"])\n", + " _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " _pip([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is EngiBench?\n", + "\n", + "\n", + "\n", + "EngiBench is an **open benchmark suite for engineering design** with ML. 
Three things it gives you:\n", + "\n", + "- **Standardised problems** — beams, heat sinks, photonic crystals, and more, each with the same Python API\n", + "- **Ready-made datasets** — thousands of optimal designs with their operating conditions, hosted on HuggingFace\n", + "- **Built-in evaluation** — constraint checking, simulation, and metrics so results are comparable across papers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exercise legend\n", + "\n", + "| Marker | Meaning |\n", + "|---|---|\n", + "| `PUBLIC FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n", + "| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1 — Choose a problem and set up\n", + "\n", + "EngiBench has many problems, all with the **same API**. Pick one by name from the list below.\n", + "\n", + "**Your task:** set `PROBLEM_ID` to one of the available problem strings (we recommend `\"beams2d\"` for this workshop)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-A\n", + "import importlib\n", + "import random, sys, os\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Import workshop helpers from the installed EngiOpt package\n", + "import engiopt.workshops.dcc26.notebook_helpers as notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers)\n", + "from engiopt.workshops.dcc26.notebook_helpers import * # noqa: F401,F403\n", + "\n", + "import engibench\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "print(\"Available problems:\", list(BUILTIN_PROBLEMS.keys()))\n", + "\n", + "SEED = 7\n", + "set_global_seed(SEED)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "PROBLEM_ID = None # Pick a problem! 
Example: \"beams2d\"\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if PROBLEM_ID is None:\n", + " raise RuntimeError('Set PROBLEM_ID to a problem name, e.g. PROBLEM_ID = \"beams2d\"')\n", + "\n", + "# CHECKPOINT\n", + "assert PROBLEM_ID in BUILTIN_PROBLEMS, f'\"{PROBLEM_ID}\" not found. Choose from: {list(BUILTIN_PROBLEMS.keys())}'\n", + "print(f\"\\u2705 Checkpoint passed — using problem: {PROBLEM_ID}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 2 — Instantiate the problem\n", + "\n", + "One line. Every EngiBench problem uses the same constructor — just pass a seed for reproducibility." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\n", + "print(\"Problem class:\", type(problem).__name__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 3 — Explore the API contract\n", + "\n", + "Every EngiBench problem exposes the **same fields**. This is what makes the benchmark fair — algorithms can only change the *method*, not the *problem definition*." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Problem class: \", type(problem).__name__)\n", + "print(\"Design space: \", problem.design_space)\n", + "print(\"Design shape: \", problem.design_space.shape)\n", + "print(\"Objectives: \", problem.objectives)\n", + "print(\"Condition keys: \", problem.conditions_keys)\n", + "print(\"Dataset ID: \", problem.dataset_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Key takeaway:** `design_space`, `objectives`, and `conditions_keys` are the **contract**. Any method you build must respect them." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 4 — Load and inspect a dataset sample\n", + "\n", + "The dataset lives on HuggingFace and downloads automatically. Your job:\n", + "1. Grab one training sample's **design** (a 2D numpy array)\n", + "2. Build a **config** dict mapping each condition key to the sample's value\n", + "\n", + "We give you the dataset loading — you extract the fields." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-B\n", + "# Goal: extract a design array and build a config dict from one training sample.\n", + "\n", + "dataset = problem.dataset # <-- this is provided for you\n", + "print(dataset) # inspect the splits and columns\n", + "\n", + "sample_idx = 0\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# The training split is: dataset[\"train\"]\n", + "# Each column can be indexed like: dataset[\"train\"][\"optimal_design\"][sample_idx]\n", + "#\n", + "# 1) Extract the design as a numpy array:\n", + "# design = np.array(dataset[\"train\"][\"optimal_design\"][sample_idx])\n", + "#\n", + "# 2) Build a config dict with one entry per condition key (use np.asarray to\n", + "# ensure array conditions are numpy arrays, not Python lists):\n", + "# config = {k: np.asarray(dataset[\"train\"][k][sample_idx]) for k in problem.conditions_keys}\n", + "\n", + "design = None\n", + "config = None\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if design is None or config is None:\n", + " raise RuntimeError(\"Uncomment / fill in `design` and `config` above.\")\n", + "\n", + "print(\"design shape:\", np.array(design).shape)\n", + "print(\"config: \", config)\n", + "\n", + "# CHECKPOINT\n", + "assert tuple(np.array(design).shape) == tuple(problem.design_space.shape), (\n", + " f\"design shape mismatch: expected 
{problem.design_space.shape}, got {np.array(design).shape}\"\n", + ")\n", + "missing = [k for k in problem.conditions_keys if k not in config]\n", + "assert not missing, f\"config missing condition keys: {missing}\"\n", + "print(\"\\u2705 Checkpoint passed — dataset sample loaded correctly.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 5 — Design gallery\n", + "\n", + "Eight random training designs with their conditions. Notice how different conditions produce very different structures." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_design_gallery(dataset, problem, n=8, seed=SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 6 — Explore the dataset interactively\n", + "\n", + "**Drag the sliders** to filter designs by condition range. This is the dataset your generative model will learn from." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "interactive_condition_explorer(dataset, problem)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 7 — Render a single design\n", + "\n", + "EngiBench problems have a built-in `render()` method that draws the design with physics-aware styling." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig = problem.render(design)\n", + "fig.suptitle(f\"Design (sample {sample_idx})\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 8 — Break constraints deliberately\n", + "\n", + "### The EngiBench constraint API\n", + "\n", + "Every EngiBench problem declares **design constraints** — rules a design must satisfy\n", + "to be physically valid. 
The `@constraint` decorator wraps a function that `assert`s\n", + "what must be true; `check_constraints()` catches failures and returns a `Violations`\n", + "object.\n", + "\n", + "Each constraint is tagged with a **category** that tells you *why* it exists:\n", + "\n", + "| Category | Import | Meaning |\n", + "|---|---|---|\n", + "| **`THEORY`** | `from engibench.constraint import THEORY` | The constraint comes from **physics**. Values outside the domain are unphysical (e.g. negative volume fraction) but may not crash the solver. |\n", + "| **`IMPL`** | `from engibench.constraint import IMPL` | The constraint guards the **implementation**. Violating it causes runtime errors or undefined behavior in the solver (e.g. mesh resolution too small). |\n", + "\n", + "Constraints also have a **criticality** level:\n", + "- `Criticality.Error` — hard violation, design is infeasible\n", + "- `Criticality.Warning` — soft violation, solver may still run but results are suspect\n", + "\n", + "The `Violations` object returned by `check_constraints()` supports filtering:\n", + "```python\n", + "violations.by_category(THEORY) # only physics violations\n", + "violations.by_category(IMPL) # only implementation violations\n", + "violations.by_criticality(Criticality.Warning) # only warnings\n", + "```\n", + "\n", + "For example, in `beams2d` the volume fraction has a `THEORY` constraint (physically,\n", + "volfrac must be in [0, 1]) **and** a stricter `IMPL` warning (the solver works best\n", + "with volfrac in [0.1, 0.9]).\n", + "\n", + "---\n", + "\n", + "### Exercise: force a constraint violation\n", + "\n", + "A design is only valid if it **satisfies all constraints for its operating conditions**. 
Let's see what happens when we lie about the conditions.\n", + "\n", + "**Your task:** copy the valid `config`, change one **scalar** condition to an extreme value, and call `problem.check_constraints(design=design, config=bad_config)`.\n", + "\n", + "The function returns a `Violations` object — `len(violations) == 0` means no violations." + ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# PUBLIC FILL-IN CELL 00-C\n", + "# Goal: force a constraint violation and inspect the result.\n", + "\n", + "# Find a scalar condition to perturb\n", + "scalar_keys = [k for k in problem.conditions_keys if np.asarray(config[k]).ndim == 0]\n", + "perturb_key = scalar_keys[0]\n", + "original_val = float(config[perturb_key])\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# 1) Make a copy of the valid config and change perturb_key to something extreme:\n", + "# bad_config = dict(config)\n", + "# bad_config[perturb_key] = original_val * 10\n", + "#\n", + "# 2) Check constraints with the mismatched config:\n", + "# violations = problem.check_constraints(design=design, config=bad_config)\n", + "\n", + "bad_config = None\n", + "violations = None\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if violations is None:\n", + " raise RuntimeError(\"Uncomment / fill in `bad_config` and `violations` above.\")\n", + "\n", + "print(f\"Perturbing '{perturb_key}': {original_val} \\u2192 {bad_config[perturb_key]}\")\n", + "violations = problem.check_constraints(design=design, config=bad_config)\n", + "\n", + "print(f\"\\n--- All constraint checks for {PROBLEM_ID} ({violations.n_constraints} total) ---\")\n", + "print(f\"Violations triggered: {len(violations)}\\n\")\n", + "if violations:\n", + " print(violations)\n", + "else:\n", + " print(\"No violations. 
Try a more extreme value.\")\n", + "\n", + "# CHECKPOINT\n", + "assert hasattr(violations, \"__len__\"), \"violations should be a Violations object\"\n", + "print(\"\\n\\u2705 Checkpoint passed — constraint checking explored.\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 9 — Simulate and optimise\n", + "\n", + "Beyond constraint checking, EngiBench problems expose **`simulate()`** and **`optimize()`** methods — the same solvers used to generate the dataset.\n", + "\n", + "| Method | What it does | Returns |\n", + "|---|---|---|\n", + "| `problem.simulate(design, config)` | Evaluate objective(s) for a given design | `np.ndarray` of objective values |\n", + "| `problem.optimize(starting_point, config)` | Run the full optimiser from a starting design | `(optimised_design, optimisation_history)` |\n", + "\n", + "> **Colab note:** some problems (e.g. `heatconduction2d`, `heatconduction3d`, `airfoil`) require a **Docker container** for their solver and will not run on Colab. Problems like `beams2d` and `thermoelastic2d` use pure-Python solvers and work everywhere." 
+ ] + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Simulate: evaluate the objective for an existing design\n", + "obj_values = problem.simulate(design, config)\n", + "print(f\"Objective values for sample {sample_idx}: {obj_values}\")\n", + "print(f\"Objectives defined: {problem.objectives}\")" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "metadata": {}, + "source": [ + "# Optimize: run the solver from a uniform starting point\n", + "starting_point = np.full(problem.design_space.shape, float(config[\"volfrac\"]))\n", + "optimised_design, history = problem.optimize(starting_point, config)\n", + "\n", + "print(f\"Optimisation ran for {len(history)} steps\")\n", + "print(f\"Final objective: {history[-1].obj_values}\")\n", + "\n", + "# Compare: generated design vs. dataset design\n", + "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n", + "axes[0].imshow(design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[0].set_title(f\"Dataset design (obj={obj_values[0]:.4f})\")\n", + "axes[0].axis(\"off\")\n", + "axes[1].imshow(optimised_design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[1].set_title(f\"Re-optimised (obj={history[-1].obj_values[0]:.4f})\")\n", + "axes[1].axis(\"off\")\n", + "plt.tight_layout()\n", + "plt.show()" + ], + "outputs": [], + "execution_count": null + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection\n", + "\n", + "Before moving on, think about:\n", + "\n", + "1. **The API contract** — what is *fixed* by the benchmark (design space, conditions, objectives) vs. what is *yours* to choose (model, hyperparameters, training strategy)?\n", + "2. **Constraints as a test** — why is it important that `check_constraints` exists as a separate function, rather than just training on feasible data?\n", + "3. **Simulate vs. optimise** — `simulate` is cheap (one forward pass), `optimize` is expensive (iterative solver). 
How might you use each when evaluating a generative model?\n", + "4. **What surprised you?** — anything about the design shapes, condition ranges, or dataset size that you did not expect?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Next\n", + "\n", + "Proceed to **Notebook 01** where you will train a generative model on this exact benchmark and produce new designs. The API you just learned carries over unchanged." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/participant/01_train_generate.ipynb b/workshops/dcc26/participant/01_train_generate.ipynb new file mode 100644 index 00000000..edbac951 --- /dev/null +++ b/workshops/dcc26/participant/01_train_generate.ipynb @@ -0,0 +1,1480 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 01: Train a Generative Model for Inverse Design\n\n**Can we learn to skip the optimizer?**\n\nIn Notebook 00 you saw that EngiBench bundles an optimizer with every problem.\nRunning that optimizer produces an optimal design — but it takes time. 
For\nBeams2D it runs in seconds, but for complex 3D problems it can take minutes or\nhours *per design*.\n\nGenerative AI offers a different approach: **train a neural network once on a\ndataset of optimal designs, then generate new designs instantly.** The trade-off\nis quality for speed — and the central question of this workshop is *how do we\nmeasure that trade-off rigorously?*\n\n### What you will do\n\n| Step | What happens | Key concept |\n|------|-------------|-------------|\n| **Prepare data** | Extract conditions and designs from EngiBench | The standardised data API |\n| **Train a model** | Fit a neural network to map conditions → designs | Supervised learning on design data |\n| **Generate designs** | Produce new designs for unseen conditions | Instant inference vs. slow optimization |\n| **Inspect results** | Compare generated vs. ground-truth designs visually | Setting up evaluation (Notebook 02) |\n\n> **Heads up:** We deliberately train a simple model with limited data and few\n> epochs. The results will be imperfect — **that is the point.** Understanding\n> *why* they are imperfect motivates the rigorous benchmarking we explore in\n> Notebook 02 and the discussion session.\n\n---\n\n### Exercise legend\n| Marker | Meaning |\n|---|---|\n| `FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ], + "id": "cell-0" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Colab users:** click **File > Save a copy in Drive** before editing so your changes persist." + ], + "id": "cell-1" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. 
Install dependencies" + ], + "id": "cell-2" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab / local dependency bootstrap\nimport subprocess, sys\n\nIN_COLAB = \"google.colab\" in sys.modules\nFORCE_INSTALL = False # Set True to force install outside Colab\n\nif IN_COLAB or FORCE_INSTALL:\n def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n _pip([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n try:\n import torch\n except Exception:\n _pip([\"torch\", \"torchvision\"])\n print(\"Install complete.\")\nelse:\n print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ], + "id": "cell-3" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## The inverse design problem\n\nTraditional topology optimization works like this:\n\n```\nConditions (volfrac, loads, …) ──► [ Optimizer (iterative) ] ──► Optimal design\n ⏱ seconds to hours\n```\n\nA **learned generator** replaces the optimizer with a neural network:\n\n```\nConditions ─┐\n ├──► [ Neural network ] ──► Approximate design\nRandom noise ─┘ ⏱ milliseconds\n```\n\nThe noise input lets the model produce **diverse** designs for the same\nconditions — useful for exploring the design space. But the designs are only\n*approximate*: the network has to generalise from training examples rather than\nsolving the physics directly.\n\n**Key question:** How close can a learned generator get to the optimizer? That\nis what benchmarking measures." + ], + "id": "cell-4" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Imports" + ], + "id": "cell-5" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "j", + "s", + "o", + "n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "r", + "a", + "n", + "d", + "o", + "m", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "y", + "s", + ",", + " ", + "o", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "p", + "a", + "t", + "h", + "l", + "i", + "b", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "P", + "a", + "t", + "h", + "\n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "m", + "a", + "t", + "p", + "l", + "o", + "t", + "l", + "i", + "b", + ".", + "p", + "y", + "p", + "l", + "o", + "t", + " ", + "a", + "s", + " ", + "p", + "l", + "t", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "u", + "m", + "p", + "y", + " ", + "a", + "s", + " ", + "n", + "p", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "t", + "o", + "r", + "c", + "h", + " ", + "a", + "s", + " ", + "t", + "h", + "\n", + "\n", + "#", + " ", + "W", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + " ", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "(", + "v", + "i", + "s", + "u", + "a", + "l", + "i", + "z", + "a", + "t", + "i", + "o", + "n", + " ", + "+", + " ", + "t", + "r", + "a", + "i", + "n", + "i", + "n", + "g", + " ", + "u", + "t", + "i", + "l", + "i", + "t", + "i", + "e", + "s", + ")", + "\n", + "i", + "f", + " ", + "\"", + "g", + "o", + "o", + "g", + "l", + "e", + ".", + "c", + "o", + "l", + "a", + "b", + "\"", + " ", + "i", + "n", + " ", + "s", + "y", + "s", + ".", + "m", + "o", + "d", + "u", + "l", + "e", + "s", + ":", + "\n", + " ", + " ", + " ", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + 
"e", + "s", + "s", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "\"", + "/", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "_", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "o", + "s", + ".", + "m", + "a", + "k", + "e", + "d", + "i", + "r", + "s", + "(", + "_", + "u", + "t", + "i", + "l", + "s", + ",", + " ", + "e", + "x", + "i", + "s", + "t", + "_", + "o", + "k", + "=", + "T", + "r", + "u", + "e", + ")", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + " ", + "=", + " ", + "\"", + "c", + "o", + "d", + "e", + "x", + "/", + "d", + "c", + "c", + "2", + "6", + "-", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "-", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "a", + "s", + "e", + " ", + "=", + " ", + "f", + "\"", + "h", + "t", + "t", + "p", + "s", + ":", + "/", + "/", + "r", + "a", + "w", + ".", + "g", + "i", + "t", + "h", + "u", + "b", + "u", + "s", + "e", + "r", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + ".", + "c", + "o", + "m", + "/", + "I", + "D", + "E", + "A", + "L", + "L", + "a", + "b", + "/", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "/", + "{", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + "}", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "f", + "o", + "r", + " ", + "_", + "f", + " ", + "i", + "n", + " ", + "(", + "\"", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ".", + "p", + "y", + "\"", + ",", + " ", + "\"", + "_", + "_", + "i", + "n", + "i", + "t", + "_", + "_", + ".", + "p", + "y", + "\"", + ")", + ":", + "\n", + " ", + " ", + " 
", + " ", + " ", + " ", + " ", + " ", + "i", + "f", + " ", + "n", + "o", + "t", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "e", + "x", + "i", + "s", + "t", + "s", + "(", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ")", + ":", + "\n", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + "e", + "s", + "s", + ".", + "c", + "h", + "e", + "c", + "k", + "_", + "c", + "a", + "l", + "l", + "(", + "[", + "\"", + "w", + "g", + "e", + "t", + "\"", + ",", + " ", + "\"", + "-", + "q", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "b", + "a", + "s", + "e", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ",", + " ", + "\"", + "-", + "O", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + "]", + ")", + "\n", + "e", + "l", + "s", + "e", + ":", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "a", + "b", + "s", + "p", + "a", + "t", + "h", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "i", + "f", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "s", + "d", + "i", + "r", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "e", + "l", + "s", + "e", + " ", + "\"", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + "s", + "y", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "n", + "s", + "e", + "r", + "t", + "(", + "0", + ",", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + ")", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "o", + "t", + "e", + "b", + "o", 
+ "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "E", + "4", + "0", + "2", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + ".", + "r", + "e", + "l", + "o", + "a", + "d", + "(", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ")", + " ", + " ", + "#", + " ", + "a", + "l", + "w", + "a", + "y", + "s", + " ", + "p", + "i", + "c", + "k", + " ", + "u", + "p", + " ", + "l", + "a", + "t", + "e", + "s", + "t", + " ", + "e", + "d", + "i", + "t", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "*", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "F", + "4", + "0", + "1", + ",", + "F", + "4", + "0", + "3", + "\n", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "b", + "e", + "n", + "c", + "h", + ".", + "u", + "t", + "i", + "l", + "s", + ".", + "a", + "l", + "l", + "_", + "p", + "r", + "o", + "b", + "l", + "e", + "m", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "B", + "U", + "I", + "L", + "T", + "I", + "N", + "_", + "P", + "R", + "O", + "B", + "L", + "E", + "M", + "S", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "o", + "p", + "t", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r", + " ", + "a", + "s", + " ", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "C", + "N", + "N", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r" + ], + "id": "cell-6" + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "## 2. Configuration\n\nAll tuneable knobs in one place. **Experiment with these** — especially\n`EPOCHS` and `N_TRAIN` — to see how they affect the generated designs." + ], + "id": "cell-7" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ---------- Reproducibility ----------\n", + "SEED = 7\n", + "\n", + "# ---------- Problem ----------\n", + "PROBLEM_ID = \"beams2d\" # Change to try a different EngiBench problem\n", + "\n", + "# ---------- Training ----------\n", + "EPOCHS = 15 # Short for workshop; try 50+ for better results\n", + "BATCH_SIZE = 64\n", + "LR = 2e-4 # Adam learning rate\n", + "LATENT_DIM = 32 # Size of random noise vector fed to generator\n", + "# ---------- Generation ----------\n", + "N_SAMPLES = 24 # Designs to generate for Notebook 02\n", + "\n", + "# ---------- Device ----------\n", + "if th.cuda.is_available():\n", + " DEVICE = th.device(\"cuda\")\n", + "elif th.backends.mps.is_available():\n", + " DEVICE = th.device(\"mps\")\n", + "else:\n", + " DEVICE = th.device(\"cpu\")\n", + "print(\"Device:\", DEVICE)\n", + "\n", + "if \"google.colab\" in sys.modules and not th.cuda.is_available():\n", + " print(\"\\n⚠️ WARNING: No GPU detected! 
Training will be very slow (~1 min/epoch).\")\n", + " print(\" Go to: Runtime → Change runtime type → T4 GPU → Save\")\n", + " print(\" Then re-run from the top.\\n\")\n", + "\n", + "# ---------- Artifact paths ----------\n", + "ARTIFACT_DIR = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + "ARTIFACT_DIR.mkdir(parents=True, exist_ok=True)\n", + "\n", + "CKPT_PATH = ARTIFACT_DIR / \"engiopt_cgan2d_generator_supervised.pt\"\n", + "HISTORY_PATH = ARTIFACT_DIR / \"training_history.csv\"\n", + "TRAIN_CURVE_PATH = ARTIFACT_DIR / \"training_curve.png\"\n", + "\n", + "# ---------- Seed everything ----------\n", + "random.seed(SEED)\n", + "np.random.seed(SEED)\n", + "th.manual_seed(SEED)\n", + "if th.cuda.is_available():\n", + " th.cuda.manual_seed_all(SEED)\n", + "\n", + "print(\"Problem: \", PROBLEM_ID)\n", + "print(\"Artifact dir:\", ARTIFACT_DIR)" + ], + "id": "cell-8" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 3. Load the EngiBench problem\n\nSame API you used in Notebook 00 — every problem exposes `.dataset`,\n`.conditions_keys`, and `.design_space`." 
+ ], + "id": "cell-9" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\ntrain_ds = problem.dataset[\"train\"]\ntest_ds = problem.dataset[\"test\"]\n\ncondition_keys = problem.conditions_keys\ndesign_shape = problem.design_space.shape\nn_conds = len(condition_keys)\n\nprint(f\"Problem : {type(problem).__name__}\")\nprint(f\"Design shape : {design_shape}\")\nprint(f\"Condition keys : {condition_keys}\")\nprint(f\"Train examples : {len(train_ds)}\")\nprint(f\"Test examples : {len(test_ds)}\")" + ], + "id": "cell-10" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Quick look at a few training designs\nshow_design_gallery(problem.dataset, problem, n=4, seed=SEED)" + ], + "id": "cell-11" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 4. FILL-IN 01-A: Prepare training data\n\nThe EngiBench dataset stores conditions and designs as separate columns.\nTo train a neural network we need to extract them into numeric arrays:\n\n1. **Conditions**: a `(N, n_conds)` array of floats — one row per sample, one column per condition key\n2. **Designs**: a `(N, H, W)` array of pixel values\n\nWe use the **full training set** so the model sees as many examples as possible.\nWe also rescale designs from `[0, 1]` to `[-1, 1]` because the generator uses a\n`tanh` output layer (which naturally outputs that range)." + ], + "id": "cell-12" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-A\n# Goal: extract conditions and designs from the full EngiBench training set.\n\nrng = np.random.default_rng(SEED)\n\n# START FILL ---------------------------------------------------------------\n\n# 1. 
Stack all condition columns into one (N, n_conds) array\n# Hint: use np.stack with a list comprehension over condition_keys\n# Example: np.stack([np.array(train_ds[k]).astype(np.float32)\n# for k in condition_keys], axis=1)\nconds_np = None\n\n# 2. Extract the optimal designs\n# Hint: np.array(train_ds[\"optimal_design\"]).astype(np.float32)\ndesigns_np = None\n\n# 3. Rescale designs from [0, 1] to [-1, 1]\n# Hint: targets = designs * 2.0 - 1.0\ntargets_np = None\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nn_train = len(train_ds)\nassert conds_np is not None and designs_np is not None and targets_np is not None, (\n \"Fill in conds_np, designs_np, and targets_np above.\"\n)\nassert conds_np.shape == (n_train, n_conds), (\n f\"Expected conditions shape ({n_train}, {n_conds}), got {conds_np.shape}\"\n)\nassert targets_np.shape == (n_train, *design_shape), (\n f\"Expected targets shape ({n_train}, {', '.join(map(str, design_shape))}), got {targets_np.shape}\"\n)\nassert targets_np.min() >= -1.0 and targets_np.max() <= 1.0, (\n f\"Targets should be in [-1, 1], got [{targets_np.min():.2f}, {targets_np.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: {n_train} samples, conditions {conds_np.shape}, targets {targets_np.shape}\")" + ], + "id": "cell-13" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 5. The Generator model\n\nWe use a **convolutional conditional generator** (cDCGAN) from EngiOpt. 
Unlike a\nsimple fully-connected network that treats the design as a flat vector of pixels,\nthis model uses **transposed convolutions** that upsample a small feature map\ninto a full-resolution design image — preserving spatial structure at every step.\n\n```\nnoise (32, 1, 1) ──► ConvT ──┐\n ├─► concat (256, 7, 7)\nconditions (4, 1, 1) ► ConvT ┘ │\n ▼\n ConvT 7×7 → 13×13\n ConvT 13×13 → 25×25\n ConvT 25×25 → 50×50\n ConvT 50×50 → 100×100 → resize → design\n```\n\nThis **convolutional inductive bias** is why CNN generators produce much sharper\ndesigns than MLP generators: each layer reasons about local spatial\nneighbourhoods rather than treating every pixel independently." + ], + "id": "cell-14" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Wrap the CNN generator so it accepts flat (B, dim) inputs\nfrom engiopt.workshops.dcc26.notebook_helpers import WorkshopGenerator\n\ncnn_gen = EngiOptCNNGenerator(\n latent_dim=LATENT_DIM,\n n_conds=n_conds,\n design_shape=design_shape,\n)\nmodel = WorkshopGenerator(cnn_gen).to(DEVICE)\n\nn_params = sum(p.numel() for p in model.parameters())\nprint(f\"Generator created: {n_params:,} parameters\")\nprint(f\"Input: noise ({LATENT_DIM}) + conditions ({n_conds}) = {LATENT_DIM + n_conds}\")\nprint(f\"Output: {' x '.join(map(str, design_shape))} design image\")" + ], + "id": "cell-15" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 6. FILL-IN 01-B: Train the model\n\nTraining is **supervised**: for each sample, the model sees random noise +\nconditions and tries to reproduce the optimal design. The loss measures\npixel-by-pixel error (MSE).\n\nWe provide a `train_supervised_generator()` helper that handles the training\nloop. 
Your job: **call it with the right arguments and experiment with\nsettings.**\n\n> **Try it:** After training with the default 8 epochs, change `EPOCHS` to 20\n> or 50 in the config cell above, re-run from there, and see how the loss and\n> designs change." + ], + "id": "cell-16" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-B\n", + "# Goal: train the generator. Experiment with EPOCHS and N_TRAIN.\n", + "\n", + "# Pick a few test conditions for snapshot visualization during training\n", + "snap_idx = rng.choice(len(test_ds), size=4, replace=False)\n", + "snap_conds = np.stack(\n", + " [np.array(test_ds[k])[snap_idx].astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + ")\n", + "snap_baselines = np.array(test_ds[\"optimal_design\"])[snap_idx].astype(np.float32)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "\n", + "# Call train_supervised_generator() with appropriate arguments.\n", + "# It returns a dict with keys \"losses\" and \"snapshots\".\n", + "#\n", + "# Signature:\n", + "# train_supervised_generator(\n", + "# model, conditions_array, targets_array,\n", + "# TrainingConfig(...), snapshot_conditions=...,\n", + "# )\n", + "#\n", + "# Use the variables: model, conds_np, targets_np, LATENT_DIM, EPOCHS,\n", + "# BATCH_SIZE, LR, DEVICE, snap_conds\n", + "# Wrap the hyperparameters in TrainingConfig(...).\n", + "\n", + "train_result = None # Replace with the function call\n", + "\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if train_result is None:\n", + " raise RuntimeError(\"Call train_supervised_generator() above and assign to train_result.\")\n", + "\n", + "train_losses = train_result[\"losses\"]\n", + "snapshots = train_result[\"snapshots\"]\n", + "\n", + "# Save checkpoint\n", + "th.save(model.state_dict(), CKPT_PATH)\n", + "\n", + "# CHECKPOINT\n", + "assert 
len(train_losses) == EPOCHS, f\"Expected {EPOCHS} loss values, got {len(train_losses)}\"\n", + "assert train_losses[-1] < train_losses[0], (\n", + " \"Loss did not decrease — check your training arguments.\"\n", + ")\n", + "print(f\"\\nCHECKPOINT passed: trained for {EPOCHS} epochs, final loss {train_losses[-1]:.6f}\")\n" + ], + "id": "cell-17" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training loss curve\n\nThe loss should decrease over epochs. A flat or increasing loss means something\nwent wrong. Note that even a decreasing loss does not guarantee good designs —\nMSE rewards blurry averages." + ], + "id": "cell-18" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save training history\nimport pandas as pd\npd.DataFrame({\"epoch\": range(1, len(train_losses) + 1), \"loss\": train_losses}).to_csv(\n HISTORY_PATH, index=False,\n)\n\nshow_training_curve(train_losses, save_path=str(TRAIN_CURVE_PATH))" + ], + "id": "cell-19" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### How the generator learns\n\nBelow you can see what the generator produces at different points during\ntraining. Early outputs are random noise; later outputs start to resemble beam\nstructures. The ground-truth row shows what the model is trying to match." + ], + "id": "cell-20" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_training_progression(snapshots, baseline_designs=snap_baselines, n_show=4)" + ], + "id": "cell-21" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 7. FILL-IN 01-C: Generate designs from test conditions\n\nNow for the payoff: use your trained model to produce designs for **conditions\nit has never seen** (from the held-out test set).\n\nIf the model generalises, it should produce reasonable designs for new\nconditions without running the optimizer. 
The `generate_designs()` helper\nhandles the inference — you just need to:\n\n1. Pick test conditions from the EngiBench dataset\n2. Call the generator\n3. Also extract the ground-truth baselines for comparison" + ], + "id": "cell-22" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-C\n# Goal: generate N_SAMPLES designs conditioned on test-set conditions.\n\n# START FILL ---------------------------------------------------------------\n\n# 1. Sample N_SAMPLES indices from the test set\n# Example: test_idx = rng.choice(len(test_ds), size=N_SAMPLES, replace=False)\ntest_idx = None\n\n# 2. Extract test conditions as (N_SAMPLES, n_conds) array and baseline designs\n# Example:\n# test_conds_np = np.stack(\n# [np.array(test_ds[k])[test_idx].astype(np.float32) for k in condition_keys],\n# axis=1,\n# )\n# baseline_designs = np.array(test_ds[\"optimal_design\"])[test_idx].astype(np.float32)\ntest_conds_np = None\nbaseline_designs = None\n\n# 3. Generate designs using generate_designs()\n# Example: gen_designs = generate_designs(model, test_conds_np, latent_dim=LATENT_DIM, device=DEVICE)\ngen_designs = None\n\n# 4. 
Build condition records (list of dicts) for JSON export\n# Example:\n# conditions_records = [\n# {k: float(test_conds_np[i, j]) for j, k in enumerate(condition_keys)}\n# for i in range(N_SAMPLES)\n# ]\nconditions_records = None\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nfor name, val in [(\"test_idx\", test_idx), (\"test_conds_np\", test_conds_np),\n (\"baseline_designs\", baseline_designs), (\"gen_designs\", gen_designs),\n (\"conditions_records\", conditions_records)]:\n assert val is not None, f\"Fill in {name} above.\"\nassert gen_designs.shape == baseline_designs.shape, (\n f\"Shape mismatch: generated {gen_designs.shape} vs baseline {baseline_designs.shape}\"\n)\nassert len(conditions_records) == N_SAMPLES\nassert 0.0 <= gen_designs.min() and gen_designs.max() <= 1.0, (\n f\"Generated designs should be in [0, 1], got [{gen_designs.min():.2f}, {gen_designs.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: generated {N_SAMPLES} designs, shape {gen_designs.shape}\")" + ], + "id": "cell-23" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 8. Visual comparison: Generated vs Ground Truth\n\nEach column shows the same conditions. Top row = your model's output; bottom row\n= the optimizer's solution from the dataset.\n\n**What to look for:**\n- **Blurriness:** generated designs are often blurry because MSE loss averages\n over possible solutions\n- **Structure:** do the generated designs have recognisable beam topology (load\n paths, supports)?\n- **Condition sensitivity:** do different conditions produce visibly different\n designs, or does the model output the same thing regardless?" 
+ ], + "id": "cell-24" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_gen_vs_baseline(gen_designs, baseline_designs, conditions_records)" + ], + "id": "cell-25" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 9. Export artifacts for Notebook 02\n\nNotebook 02 needs three files to run its evaluation pipeline:\n- `generated_designs.npy` — your model's output\n- `baseline_designs.npy` — ground-truth designs from the dataset\n- `conditions.json` — the conditions used for generation" + ], + "id": "cell-26" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.save(ARTIFACT_DIR / \"generated_designs.npy\", gen_designs)\nnp.save(ARTIFACT_DIR / \"baseline_designs.npy\", baseline_designs)\nwith open(ARTIFACT_DIR / \"conditions.json\", \"w\") as f:\n json.dump(conditions_records, f, indent=2)\n\n# Verify\nrequired = [\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\"]\nmissing = [f for f in required if not (ARTIFACT_DIR / f).exists()]\nassert not missing, f\"Missing: {missing}\"\nprint(f\"Exported to {ARTIFACT_DIR}:\")\nfor f in required:\n print(f\" {f}\")" + ], + "id": "cell-27" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Discussion\n\n### What you have seen\n\nYou trained a neural network on a few hundred examples for a few epochs and used\nit to produce beam designs in milliseconds. The results are imperfect — and that\nis exactly the point.\n\n### Questions to think about\n\n1. **Why are the designs blurry?** MSE loss penalises pixel-wise error, which\n rewards the *average* of all plausible designs rather than any single sharp\n one. What alternative losses or model architectures might produce crisper\n output? (Think: adversarial loss, diffusion models, VAEs.)\n\n2. 
**Does the model respond to conditions?** Compare designs generated for very\n different volume fractions or load distributions. If they all look the same,\n the model may have learned the dataset mean rather than the\n condition → design relationship. What might help? (More training data? More\n epochs? A different architecture?)\n\n3. **From pixels to physics.** A design can *look* reasonable but fail under\n simulation — disconnected material, wrong volume fraction, stress\n concentrations. Notebook 02 will run the physics solver on your generated\n designs and quantify these failures.\n\n4. **The benchmarking motivation.** We do not know how bad these designs are\n until we *measure*. That is the role of a benchmark: providing standardised\n evaluation so we can compare methods, track progress, and avoid fooling\n ourselves with visual inspection alone.\n\n5. **What would you change?** If you had an hour instead of 30 minutes, what\n would you try? More data, more epochs, a different model, a different loss\n function? How would you decide whether it *actually* improved?" + ], + "id": "cell-28" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Next\n\nProceed to **Notebook 02** to evaluate your generated designs with physics-based\nsimulation and compute benchmark metrics. Your exported artifacts are the input." 
+ ], + "id": "cell-29" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/workshops/dcc26/participant/02_evaluate_metrics.ipynb b/workshops/dcc26/participant/02_evaluate_metrics.ipynb new file mode 100644 index 00000000..59b26c82 --- /dev/null +++ b/workshops/dcc26/participant/02_evaluate_metrics.ipynb @@ -0,0 +1,1259 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 02 (Participant): Evaluate Your Generated Designs\n", + "\n", + "In Notebook 01 you trained a generative model and produced candidate beam designs.\n", + "Now comes the critical question: **are those designs actually any good?**\n", + "\n", + "In generative modeling for engineering, \"good\" is **not** a single number.\n", + "A design can look plausible yet fail simulation. It can perform well on one\n", + "objective yet violate a critical constraint. It can be high-quality but\n", + "identical to a training example -- memorised, not generalised.\n", + "\n", + "This notebook walks you through a **structured evaluation pipeline** that\n", + "diagnoses generative model quality from multiple complementary angles, each\n", + "revealing something the others miss." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A taxonomy of generative-model metrics\n", + "\n", + "```\n", + " How do we evaluate a generative model?\n", + " |\n", + " ┌───────────────┬───────────┼───────────────┬──────────────────┐\n", + " | | | | |\n", + " Visual Simulation Constraint Distributional Diversity &\n", + " Inspection Performance Satisfaction Similarity Coverage\n", + " | | | | |\n", + " \"Does it \"Does it \"Is it \"Does it \"Did we\n", + " look right?\" work?\" legal?\" match explore?\"\n", + " reality?\"\n", + " | | | | |\n", + " Residual Compliance Volfrac error MMD Pairwise L2\n", + " heatmaps histogram distribution (Gaussian DPP diversity\n", + " + scatter + feasibility kernel) NN novelty\n", + " + per-sample rate bars PCA embedding\n", + " gap bars\n", + "```\n", + "\n", + "No single metric tells the whole story. A model can ace one category and\n", + "fail another -- and *which failure matters most* depends on your application.\n", + "\n", + "| Category | Question | Beams2D metric | Why it matters |\n", + "|----------|----------|---------------|----------------|\n", + "| **Visual inspection** | Does it look like a real beam? | Residual heatmaps | Quick sanity check; catches gross failures |\n", + "| **Simulation performance** | Does the physics solver confirm it works? | Compliance gap vs baseline | The ground truth -- simulation is our oracle |\n", + "| **Constraint satisfaction** | Does it obey the engineering spec? | Volume fraction error | A stiff beam using too much material is invalid |\n", + "| **Distributional similarity** | Does the generator match the real data distribution? | MMD (Maximum Mean Discrepancy) | Detects mode collapse, unrealistic densities |\n", + "| **Diversity & coverage** | Did the model explore, or did it memorise? | Pairwise L2, DPP, NN novelty | A model outputting one beam 24 times is useless |\n", + "| **Optimization warmstarting** | Does it give the optimizer a head start? 
| IOG, COG, FOG | The ultimate downstream utility test |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### The evaluation pipeline at a glance\n", + "\n", + "```\n", + "Generated Designs Baseline Designs Training Designs\n", + " | | |\n", + " v v v\n", + " [ Visual Inspection ] [ Reference set ]\n", + " | | |\n", + " v v |\n", + " [ Simulate ] [ Simulate ] |\n", + " | | |\n", + " v v |\n", + " Objectives Objectives |\n", + " \\ / |\n", + " \\ / |\n", + " v v |\n", + " Simulation Metrics |\n", + " (gap, improvement rate) |\n", + " | |\n", + " v v\n", + " Constraint Metrics Distributional Metrics\n", + " (volfrac error, feasibility) (MMD, pixel distributions)\n", + " | |\n", + " v v\n", + " Diversity Metrics <──────────────── PCA Embedding\n", + " (pairwise L2, DPP, NN novelty)\n", + " |\n", + " v\n", + " Optimization Warmstarting\n", + " (IOG, COG, FOG trajectories)\n", + " |\n", + " v\n", + " Summary Dashboard\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Notebook map\n", + "\n", + "| Part | What you do | Key output |\n", + "|------|-------------|------------|\n", + "| Setup | Install deps, load artifacts | `gen_designs`, `baseline_designs`, `conditions` |\n", + "| Part 1 | Visual inspection (the eye test) | Residual heatmaps |\n", + "| Part 2 (Fill-in 02-A) | Per-sample simulation | `results` DataFrame |\n", + "| Part 3 | Constraint satisfaction analysis | Volfrac scatter + error distribution |\n", + "| Part 4 (Fill-in 02-B) | Distributional similarity (MMD) | `mmd_value` |\n", + "| Part 5 | Diversity & coverage | Pairwise heatmap + PCA embedding |\n", + "| Part 6 | Optimization warmstarting (demo) | Trajectory plots with IOG/COG/FOG |\n", + "| Part 7 (Fill-in 02-C) | Comprehensive summary dashboard | `summary_df` |\n", + "\n", + "### Legend\n", + "- `PUBLIC FILL-IN CELL` -- you write code here.\n", + "- `CHECKPOINT` -- run this assertion block to verify before moving on.\n", + "- `# START FILL` / `# END FILL` -- your edits go between these markers." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 0: Install dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def pip_install(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " pip_install([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n", + " pip_install([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1: Load artifacts from Notebook 01\n", + "\n", + "We need three files that Notebook 01 exported:\n", + "- `generated_designs.npy` -- the designs your model produced\n", + "- `baseline_designs.npy` -- optimised reference designs from the dataset\n", + "- `conditions.json` -- the boundary-condition configs for each sample\n", + "\n", + "The next cell contains a recovery function that **automatically rebuilds** these\n", + "artifacts if they are missing (e.g., if you jumped straight to NB02). You do not\n", + "need to read or understand that function -- just run it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ── Artifact recovery (runs only if NB01 artifacts are missing) ──────────\n", + "# This cell auto-builds NB01 artifacts so NB02 works standalone.\n", + "# You do NOT need to read this code -- just run the cell.\n", + "\n", + "import importlib\n", + "import json, random, sys, os\n", + "from pathlib import Path\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch as th\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader, TensorDataset\n", + "\n", + "# Workshop helpers from the installed EngiOpt package\n", + "import engiopt.workshops.dcc26.notebook_helpers as notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers)\n", + "from engiopt.workshops.dcc26.notebook_helpers import * # noqa: F401,F403\n", + "\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "PROBLEM_ID = \"beams2d\"\n", + "\n", + "try:\n", + " from engiopt.cgan_2d.cgan_2d import Generator as EngiOptCGAN2DGenerator\n", + "except ModuleNotFoundError as exc:\n", + " raise ModuleNotFoundError(\n", + " \"Could not import engiopt. 
Run the install cell first; on Colab, restart runtime after install.\"\n", + " ) from exc\n", + "\n", + "\n", + "def _resolve_artifact_dir(create=False):\n", + " p = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + " if create:\n", + " p.mkdir(parents=True, exist_ok=True)\n", + " return p\n", + "\n", + "\n", + "def _build_artifacts_locally(artifact_dir, seed=7, n_train=512, n_samples=24, epochs=8, batch_size=64, latent_dim=32):\n", + " \"\"\"Replicate the NB01 train+generate pipeline to produce evaluation artifacts.\"\"\"\n", + " print(\"Auto-building NB01 artifacts (this takes ~1 min)...\")\n", + " random.seed(seed); np.random.seed(seed); th.manual_seed(seed)\n", + " if th.cuda.is_available(): th.cuda.manual_seed_all(seed)\n", + " device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n", + " problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=seed)\n", + " train_ds, test_ds = problem.dataset[\"train\"], problem.dataset[\"test\"]\n", + " ckeys = problem.conditions_keys\n", + " rng = np.random.default_rng(seed)\n", + " idx = rng.choice(len(train_ds), size=min(n_train, len(train_ds)), replace=False)\n", + " conds = np.stack([np.array(train_ds[k])[idx].astype(np.float32) for k in ckeys], axis=1)\n", + " designs = np.array(train_ds[\"optimal_design\"])[idx].astype(np.float32)\n", + " targets = designs * 2.0 - 1.0\n", + " model = EngiOptCGAN2DGenerator(latent_dim=latent_dim, n_conds=conds.shape[1], design_shape=problem.design_space.shape).to(device)\n", + " opt = th.optim.Adam(model.parameters(), lr=1e-3)\n", + " crit = nn.MSELoss()\n", + " dl = DataLoader(TensorDataset(th.tensor(conds), th.tensor(targets)), batch_size=batch_size, shuffle=True)\n", + " losses = []\n", + " for ep in range(epochs):\n", + " model.train(); ep_loss = 0.0\n", + " for cb, tb in dl:\n", + " cb, tb = cb.to(device), tb.to(device)\n", + " pred = model(th.randn(cb.shape[0], latent_dim, device=device), cb)\n", + " loss = 
crit(pred, tb); opt.zero_grad(); loss.backward(); opt.step()\n", + " ep_loss += loss.item()\n", + " avg = ep_loss / len(dl); losses.append(avg)\n", + " print(f\" epoch {ep+1:02d}/{epochs} loss={avg:.4f}\")\n", + " sc = min(n_samples, len(test_ds))\n", + " sel = rng.choice(len(test_ds), size=sc, replace=False)\n", + " tc = np.stack([np.array(test_ds[k])[sel].astype(np.float32) for k in ckeys], axis=1)\n", + " bl = np.array(test_ds[\"optimal_design\"])[sel].astype(np.float32)\n", + " model.eval()\n", + " with th.no_grad():\n", + " out = model(th.randn(sc, latent_dim, device=device), th.tensor(tc, device=device))\n", + " gd = ((out.clamp(-1, 1) + 1) / 2).clamp(0, 1).cpu().numpy().astype(np.float32)\n", + " cond_recs = []\n", + " for i in range(sc):\n", + " rec = {}\n", + " for j, k in enumerate(ckeys):\n", + " rec[k] = bool(tc[i, j]) if k == \"overhang_constraint\" else float(tc[i, j])\n", + " cond_recs.append(rec)\n", + " artifact_dir.mkdir(parents=True, exist_ok=True)\n", + " np.save(artifact_dir / \"generated_designs.npy\", gd)\n", + " np.save(artifact_dir / \"baseline_designs.npy\", bl)\n", + " with open(artifact_dir / \"conditions.json\", \"w\") as f: json.dump(cond_recs, f, indent=2)\n", + " pd.DataFrame({\"epoch\": range(1, len(losses)+1), \"train_loss\": losses}).to_csv(artifact_dir / \"training_history.csv\", index=False)\n", + " th.save({\"model\": model.state_dict(), \"condition_keys\": ckeys, \"latent_dim\": latent_dim}, artifact_dir / \"engiopt_cgan2d_generator_supervised.pt\")\n", + " print(\"Artifacts ready at\", artifact_dir)\n", + "\n", + "\n", + "ARTIFACT_DIR = _resolve_artifact_dir(create=True)\n", + "_required = [ARTIFACT_DIR / f for f in (\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\")]\n", + "if not all(p.exists() for p in _required):\n", + " _build_artifacts_locally(ARTIFACT_DIR)\n", + "\n", + "print(\"Artifact directory:\", ARTIFACT_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, 
+ "outputs": [],
+ "source": [
+ "# ── Load artifacts ───────────────────────────────────────────────────────\n",
+ "import json\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "from scipy.spatial.distance import cdist\n",
+ "\n",
+ "gen_designs = np.load(ARTIFACT_DIR / \"generated_designs.npy\")\n",
+ "baseline_designs = np.load(ARTIFACT_DIR / \"baseline_designs.npy\")\n",
+ "with open(ARTIFACT_DIR / \"conditions.json\") as f:\n",
+ "    conditions = json.load(f)\n",
+ "\n",
+ "print(f\"Generated designs : {gen_designs.shape} (values in [{gen_designs.min():.2f}, {gen_designs.max():.2f}])\")\n",
+ "print(f\"Baseline designs : {baseline_designs.shape}\")\n",
+ "print(f\"Condition records : {len(conditions)}\")\n",
+ "print(f\"Condition keys : {list(conditions[0].keys())}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load a reference subset of training designs for distributional + novelty metrics\n",
+ "problem_ref = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n",
+ "train_designs_full = np.array(problem_ref.dataset[\"train\"][\"optimal_design\"]).astype(np.float32)\n",
+ "ref_idx = np.random.default_rng(7).choice(\n",
+ "    len(train_designs_full), size=min(1024, len(train_designs_full)), replace=False\n",
+ ")\n",
+ "train_reference = train_designs_full[ref_idx]\n",
+ "print(f\"Training reference set: {train_reference.shape[0]} designs\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "## Part 1: Visual Inspection -- The Eye Test\n",
+ "\n",
+ "Before computing any metric, **look at the designs**. Visual inspection catches\n",
+ "gross failures immediately: is the model producing solid blocks? random noise?\n",
+ "something that looks vaguely beam-like?\n",
+ "\n",
+ "We show two views:\n",
+ "1. **Side-by-side gallery** -- generated vs optimised baseline\n",
+ "2. 
**Pixel residual heatmaps** -- where exactly do the designs differ?\n", + "\n", + "Visual inspection is *necessary* but **not sufficient**. A design can look\n", + "plausible yet perform terribly in simulation, or violate constraints that\n", + "are invisible to the eye. The rest of this notebook quantifies what your\n", + "eyes cannot." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_residual_heatmaps(gen_designs, baseline_designs, n_show=6)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Takeaway:** The residual heatmaps reveal where the generator struggles most.\n", + "Bright regions = large pixel error. Notice how errors tend to cluster at\n", + "structural boundaries and fine features -- exactly the details that matter\n", + "most for physical performance.\n", + "\n", + "But pixels alone don't tell us about *compliance*, *constraint violations*, or\n", + "*diversity*. We need simulation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 2: Simulation Performance -- \"Does it work?\"\n", + "\n", + "The **physics simulator** is our oracle. For Beams2D, it computes the\n", + "*compliance* of each design under the given boundary conditions:\n", + "- **Lower compliance = stiffer beam = better design**\n", + "\n", + "We simulate both the generated design and its corresponding baseline\n", + "(the optimised design from the dataset) under **identical conditions**.\n", + "The difference tells us how far the generator is from optimal.\n", + "\n", + "> **Analogy:** Imagine you asked an architecture student to sketch a bridge.\n", + "> Visual inspection tells you the sketch looks bridge-like. But only a\n", + "> structural engineer (our simulator) can tell you whether it would actually\n", + "> stand up." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "\n", + "# Feasibility tolerance: how close must volfrac be to the target?\n", + "VOLFRAC_TOL = 0.05\n", + "\n", + "# PUBLIC FILL-IN CELL 02-A\n", + "# Goal: build a list of dicts, one per sample, with objective + feasibility info.\n", + "#\n", + "# For each sample i, you have:\n", + "# g = gen_designs[i] -- generated design (2D numpy array)\n", + "# b = baseline_designs[i] -- baseline design (2D numpy array)\n", + "# cfg = conditions[i] -- dict with keys like 'volfrac', 'rmin', etc.\n", + "\n", + "rows = []\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "for i in range(len(gen_designs)):\n", + " g = gen_designs[i]\n", + " b = baseline_designs[i]\n", + " cfg = dict(conditions[i])\n", + "\n", + " # 1) Compute volume fractions (mean pixel value of each design)\n", + " g_vf = None # TODO: compute mean of g\n", + " b_vf = None # TODO: compute mean of b\n", + " target_vf = cfg[\"volfrac\"]\n", + "\n", + " # 2) Check feasibility: is |actual_vf - target_vf| <= VOLFRAC_TOL?\n", + " g_feasible = None # TODO: True/False\n", + " b_feasible = None # TODO: True/False\n", + "\n", + " # 3) Simulate both designs under identical conditions\n", + " # Hint: call problem.reset(seed=...) 
before each simulate for reproducibility\n", + " # Hint: problem.simulate(design, config=cfg) returns an array; take element [0]\n", + " problem.reset(seed=7 + i)\n", + " g_obj = None # TODO: simulate the generated design\n", + " problem.reset(seed=7 + i)\n", + " b_obj = None # TODO: simulate the baseline design\n", + "\n", + " # 4) Record everything\n", + " rows.append({\n", + " \"sample\": i,\n", + " \"gen_obj\": g_obj,\n", + " \"base_obj\": b_obj,\n", + " \"gen_minus_base\": g_obj - b_obj,\n", + " \"gen_volfrac\": g_vf,\n", + " \"target_volfrac\": target_vf,\n", + " \"gen_feasible\": g_feasible,\n", + " \"base_feasible\": b_feasible,\n", + " })\n", + "\n", + "raise NotImplementedError(\"Fill in the TODOs above, then delete this line.\")\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "results = pd.DataFrame(rows)\n", + "results.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-A\n", + "expected_cols = {\"sample\", \"gen_obj\", \"base_obj\", \"gen_minus_base\", \"gen_volfrac\",\n", + " \"target_volfrac\", \"gen_feasible\", \"base_feasible\"}\n", + "missing_cols = expected_cols - set(results.columns)\n", + "assert not missing_cols, f\"Missing columns: {missing_cols}\"\n", + "assert len(results) == len(gen_designs), f\"Expected {len(gen_designs)} rows, got {len(results)}\"\n", + "assert results[\"gen_obj\"].notna().all(), \"gen_obj contains NaN -- did you forget to simulate?\"\n", + "assert results[\"gen_feasible\"].dtype == bool, \"gen_feasible should be boolean\"\n", + "print(f\"Checkpoint 02-A passed: {len(results)} samples evaluated.\")\n", + "print(f\" Feasible generated: {results['gen_feasible'].sum()}/{len(results)}\")\n", + "print(f\" Feasible baseline: {results['base_feasible'].sum()}/{len(results)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualising simulation results\n", + 
"\n", + "Three complementary views:\n", + "1. **Histogram** -- overall distribution of objectives (generated vs baseline)\n", + "2. **Scatter plot** -- per-sample pairing (points below diagonal = generated is better)\n", + "3. **Residual bar chart** -- per-sample gap, signed (green = generated outperforms)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_comparison(results)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_residuals(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the simulation results\n", + "\n", + "- **Histogram overlap**: If the blue (generated) and orange (baseline) distributions\n", + " overlap heavily, the generator is competitive. If blue is shifted right (higher\n", + " compliance), the generator produces weaker designs.\n", + "\n", + "- **Scatter diagonal**: Points *below* the diagonal line mean the generated design\n", + " outperformed the optimised baseline for that sample -- a strong result.\n", + "\n", + "- **Residual bars**: The bar chart makes the per-sample gap immediately visible.\n", + " Consistent green bars = the model is competitive. Large red bars = specific\n", + " failure modes worth investigating (check the design images for those samples)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 3: Constraint Satisfaction -- \"Is it legal?\"\n", + "\n", + "A design that performs well but **violates constraints** is useless in practice.\n", + "For Beams2D, the key constraint is **volume fraction**: the design must use\n", + "a specific amount of material (neither too much nor too little).\n", + "\n", + "> **Analogy:** An architect who designs a beautiful building that exceeds the\n", + "> budget by 50% has not solved the problem -- they have created a new one.\n", + "\n", + "We already computed `gen_volfrac` and `target_volfrac` in the simulation loop.\n", + "Now let's visualise how well the generator satisfies this constraint." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_volfrac_analysis(results, volfrac_tol=VOLFRAC_TOL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_feasibility_bars(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the constraint results\n", + "\n", + "- **Left scatter**: Points near the diagonal are feasible; points far from it\n", + " are violating the volume fraction constraint. The green band shows the\n", + " tolerance window.\n", + "\n", + "- **Error histogram**: A narrow distribution centered at zero means the generator\n", + " has learned to control material usage. A wide or biased distribution suggests\n", + " the model ignores the volume fraction condition.\n", + "\n", + "- **Feasibility rate**: The bar chart gives the bottom line. If the baseline\n", + " achieves ~100% feasibility but the generator is at 50%, there is a clear\n", + " conditioning failure.\n", + "\n", + "**Why this matters beyond beams:** In real engineering, constraints can be\n", + "stress limits, manufacturing tolerances, thermal budgets, or regulatory\n", + "requirements. 
A generative model that ignores constraints generates\n", + "*interesting but unusable* designs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 4: Distributional Similarity -- \"Does it match reality?\"\n", + "\n", + "The previous metrics evaluated designs **individually** (per-sample objective,\n", + "per-sample feasibility). But we also need to ask: does the *distribution*\n", + "of generated designs match the distribution of the ground-truth optimal designs\n", + "**for the same conditions**?\n", + "\n", + "### What is MMD?\n", + "\n", + "**Maximum Mean Discrepancy (MMD)** is a kernel-based distance between two\n", + "distributions. Intuitively:\n", + "\n", + "1. Map each design into a high-dimensional feature space via a Gaussian kernel\n", + "2. Compare the *mean embeddings* of the two sets\n", + "3. If the means match, the distributions are similar; if they diverge, they are different\n", + "\n", + "$$\\text{MMD}^2 = \\underbrace{\\mathbb{E}[k(x, x')]}_{\\text{gen-gen similarity}} + \\underbrace{\\mathbb{E}[k(y, y')]}_{\\text{base-base similarity}} - 2\\,\\underbrace{\\mathbb{E}[k(x, y)]}_{\\text{cross similarity}}$$\n", + "\n", + "- **MMD = 0**: generated and baseline distributions are identical\n", + "- **MMD > 0**: they differ (larger = more different)\n", + "- The kernel bandwidth $\\sigma$ controls the scale of comparison\n", + "\n", + "### Why compare generated vs baseline?\n", + "\n", + "Our generator is **conditional** -- it takes test conditions and produces\n", + "designs. The baseline contains the ground-truth optima for those *same*\n", + "test conditions. Comparing generated vs baseline directly measures whether\n", + "the generator has learned to produce the right designs for the right conditions.\n", + "\n", + "### Choosing sigma without test-data leakage\n", + "\n", + "The Gaussian kernel bandwidth $\\sigma$ determines what scale of difference\n", + "the kernel is sensitive to. 
We set it using the **median heuristic** on the\n", + "*training data only* -- the median pairwise distance among training designs.\n", + "This avoids leaking test information into the metric while ensuring the\n", + "kernel operates in a meaningful range.\n", + "\n", + "### Why MMD and not just \"average quality\"?\n", + "\n", + "A model could produce 24 copies of the single best design. Per-sample metrics\n", + "would look great! But the *distribution* would be nothing like the diverse\n", + "baseline set. MMD catches this." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Visual intuition: where does each set place material?\n", + "show_spatial_distribution_comparison(gen_designs, baseline_designs, train_reference)\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-B\n", + "# Goal: compute MMD between generated designs and baseline designs (same conditions).\n", + "#\n", + "# MMD uses a Gaussian (RBF) kernel: k(x,y) = exp(-||x-y||^2 / (2*sigma^2))\n", + "#\n", + "# We set sigma from the TRAINING data (median heuristic) to avoid test-data leakage,\n", + "# then apply that fixed sigma to the gen-vs-baseline comparison.\n", + "#\n", + "# You have:\n", + "# gen_designs -- (N, H, W) numpy array of generated designs\n", + "# baseline_designs -- (N, H, W) numpy array of optimized designs (same conditions)\n", + "# train_reference -- (M, H, W) numpy array of training designs\n", + "# cdist -- from scipy.spatial.distance (already imported)\n", + "#\n", + "# Steps:\n", + "# 1. Flatten all design sets to 2D\n", + "# 2. Compute sigma from training pairwise distances (median heuristic)\n", + "# 3. Compute pairwise squared distances between generated and baseline\n", + "# 4. Apply Gaussian kernel: K = exp(-D / (2 * sigma^2))\n", + "# 5. 
MMD = mean(K_gg) + mean(K_bb) - 2 * mean(K_gb)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# 1. Flatten\n", + "gen_flat = None # TODO: reshape gen_designs to (N, H*W)\n", + "base_flat = None # TODO: reshape baseline_designs to (N, H*W)\n", + "ref_flat = None # TODO: reshape train_reference to (M, H*W)\n", + "\n", + "# 2. Sigma from training data only (no test leakage)\n", + "# Hint: compute pairwise sqeuclidean distances within ref_flat,\n", + "# then sigma = sqrt(median of those distances)\n", + "D_ref = None # TODO: cdist(ref_flat, ref_flat, \"sqeuclidean\")\n", + "sigma = None # TODO: float(np.sqrt(np.median(D_ref)))\n", + "\n", + "# 3. Pairwise squared distances for gen vs baseline\n", + "D_gg = None # TODO: cdist(gen_flat, gen_flat, \"sqeuclidean\")\n", + "D_bb = None # TODO: cdist(base_flat, base_flat, \"sqeuclidean\")\n", + "D_gb = None # TODO: cdist(gen_flat, base_flat, \"sqeuclidean\")\n", + "\n", + "# 4. Gaussian kernel\n", + "K_gg = None # TODO: np.exp(-D_gg / (2 * sigma**2))\n", + "K_bb = None # TODO: same for D_bb\n", + "K_gb = None # TODO: same for D_gb\n", + "\n", + "# 5. 
MMD\n", + "mmd_value = None # TODO: float(K_gg.mean() + K_bb.mean() - 2 * K_gb.mean())\n", + "\n", + "raise NotImplementedError(\"Fill in the TODOs above, then delete this line.\")\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "print(f\"Sigma (median heuristic on training data): {sigma:.2f}\")\n", + "print(f\"MMD(generated, baseline) = {mmd_value:.6f}\")\n", + "print(f\" K_gg mean (gen-gen similarity): {K_gg.mean():.6f}\")\n", + "print(f\" K_bb mean (base-base similarity): {K_bb.mean():.6f}\")\n", + "print(f\" K_gb mean (cross similarity): {K_gb.mean():.6f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-B\n", + "assert mmd_value is not None, \"mmd_value is None -- did you compute it?\"\n", + "assert isinstance(mmd_value, float), \"mmd_value should be a float\"\n", + "assert mmd_value >= 0, f\"MMD should be non-negative, got {mmd_value}\"\n", + "assert sigma is not None and sigma > 1, f\"sigma should be > 1 for 10k-dim data (got {sigma}); did you use the median heuristic?\"\n", + "assert K_gg is not None and K_gb is not None, \"Kernel matrices not computed\"\n", + "print(f\"Checkpoint 02-B passed: MMD = {mmd_value:.6f} (sigma = {sigma:.2f})\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Put MMD in context: compare against reference points (same training-derived sigma)\n", + "#\n", + "# 1. Training sample vs baseline: what you'd get by grabbing random training\n", + "# designs instead of conditioning on the test conditions.\n", + "# 2. 
Random noise vs baseline: worst case (meaningless generator).\n", + "\n", + "rng_mmd = np.random.default_rng(42)\n", + "\n", + "# Training sample vs baseline (no conditioning)\n", + "train_sample_idx = rng_mmd.choice(len(train_reference), size=len(baseline_designs), replace=False)\n", + "train_sample_flat = train_reference[train_sample_idx].reshape(len(baseline_designs), -1)\n", + "D_tt = cdist(train_sample_flat, train_sample_flat, \"sqeuclidean\")\n", + "D_tb = cdist(train_sample_flat, base_flat, \"sqeuclidean\")\n", + "mmd_train_base = float(\n", + " np.exp(-D_tt / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_tb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "# Random noise vs baseline\n", + "random_designs = rng_mmd.random(gen_designs.shape).astype(np.float32)\n", + "rand_flat = random_designs.reshape(random_designs.shape[0], -1)\n", + "D_rr = cdist(rand_flat, rand_flat, \"sqeuclidean\")\n", + "D_rb = cdist(rand_flat, base_flat, \"sqeuclidean\")\n", + "mmd_random_base = float(\n", + " np.exp(-D_rr / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_rb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "print(f\"MMD reference points (sigma={sigma:.2f}, from training data):\")\n", + "print(f\" Generated vs Baseline: {mmd_value:.6f} (our model)\")\n", + "print(f\" Train sample vs Baseline: {mmd_train_base:.6f} (no conditioning)\")\n", + "print(f\" Random noise vs Baseline: {mmd_random_base:.6f} (worst case)\")\n", + "\n", + "show_mmd_comparison_bar(mmd_value, mmd_train_base, mmd_random_base)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PCA embedding: where do generated designs live relative to training data?\n", + "show_embedding_scatter(gen_designs, baseline_designs, train_reference)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the distributional similarity results\n", + "\n", + "- **Mean design images**: If the 
generated mean image looks similar to the\n", + " baseline mean image, the model has learned where material should go on\n", + " average for these conditions. Differences reveal spatial biases.\n", + "\n", + "- **Volume fraction distributions**: If the generated distribution is narrower\n", + " or shifted relative to baseline, the model isn't capturing the full range\n", + " of volume fractions needed for these test conditions.\n", + "\n", + "- **MMD in context**: The comparison bar chart places the generator's MMD on\n", + " a meaningful scale:\n", + " - **Train sample vs Baseline** (retrieval baseline): What you'd get by\n", + " grabbing random training designs instead of conditioning. If the\n", + " generator beats this, it has genuinely learned to condition.\n", + " - **Random vs Baseline** (worst case): Uniform noise -- the floor for\n", + " a non-functional generator.\n", + " A generator close to zero has matched the baseline distribution. A\n", + " generator near the train-sample bar is no better than memorising\n", + " training data without using the conditions.\n", + "\n", + "- **PCA embedding**: If generated designs (blue) cluster tightly in one\n", + " corner while training data (grey) spans a wide region, the model has\n", + " **mode collapse**. Ideally, blue points should overlap with the orange\n", + " baseline points (same conditions) while spanning a similar spread." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 5: Diversity & Coverage -- \"Did we explore?\"\n", + "\n", + "A generative model should produce **varied** designs, not 24 copies of the same\n", + "beam. We measure two complementary aspects:\n", + "\n", + "### Diversity (intra-set variation)\n", + "How different are the generated designs from *each other*?\n", + "- **Pairwise L2 distance**: Average Euclidean distance between all pairs of\n", + " generated designs. 
Higher = more diverse.\n", + "- **DPP diversity**: Determinantal Point Process log-determinant of the\n", + " similarity matrix. Captures both volume and spread of the set.\n", + "\n", + "### Novelty (distance to training data)\n", + "How different are the generated designs from the *training set*?\n", + "- **Nearest-neighbour distance**: For each generated design, find the closest\n", + " training example. If NN distance is near zero, the model may be memorising.\n", + " Higher = more novel.\n", + "\n", + "> **The diversity-quality trade-off:** A model that generates random noise\n", + "> would score very high on diversity but terribly on quality. We want designs\n", + "> that are diverse *and* feasible *and* performant. This is the fundamental\n", + "> tension in generative model evaluation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_pairwise_distance_heatmap(gen_designs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Compute diversity and novelty metrics\n", + "diversity_l2 = mean_pairwise_l2(gen_designs)\n", + "novelty_nn = mean_nn_distance_to_reference(gen_designs, train_reference)\n", + "\n", + "# Also compute for baseline as a reference point\n", + "baseline_diversity = mean_pairwise_l2(baseline_designs)\n", + "baseline_novelty = mean_nn_distance_to_reference(baseline_designs, train_reference)\n", + "\n", + "print(\"Diversity (mean pairwise L2):\")\n", + "print(f\" Generated: {diversity_l2:.2f}\")\n", + "print(f\" Baseline: {baseline_diversity:.2f}\")\n", + "print()\n", + "print(\"Novelty (mean NN distance to training):\")\n", + "print(f\" Generated: {novelty_nn:.2f}\")\n", + "print(f\" Baseline: {baseline_novelty:.2f}\")\n", + "print()\n", + "if diversity_l2 < baseline_diversity * 0.5:\n", + " print(\"Warning: Generated diversity is much lower than baseline -- possible mode collapse.\")\n", + "elif diversity_l2 
> baseline_diversity * 1.5:\n", + " print(\"Note: Generated diversity exceeds baseline -- check if the extra variation is meaningful.\")\n", + "else:\n", + " print(\"Generated diversity is comparable to baseline diversity.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the diversity results\n", + "\n", + "- **Pairwise heatmap**: Uniform warm colours = good diversity (all designs differ\n", + " from each other). A block of cool/dark colours = a cluster of near-identical\n", + " designs (partial mode collapse).\n", + "\n", + "- **Diversity vs baseline**: The baseline designs come from an optimiser run on\n", + " diverse conditions, so they naturally vary. If the generator's diversity is\n", + " much lower, it is producing less variety than the problem demands.\n", + "\n", + "- **Novelty**: Very low NN distance means the generator is reproducing training\n", + " examples almost exactly. Some proximity is expected (it learned from them),\n", + " but near-zero distance suggests memorisation rather than generalisation." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 6: Optimization Warmstarting -- \"Does it speed up search?\"\n", + "\n", + "The **ultimate downstream test** for a generative model in engineering design:\n", + "if we use its output as a *starting point* for topology optimisation, does the\n", + "optimiser converge faster or find better solutions than starting from scratch?\n", + "\n", + "### The optimality gap metrics\n", + "\n", + "Starting from a generated design, we run the problem's optimiser and track the\n", + "objective at each step:\n", + "\n", + "- **IOG (Initial Optimality Gap)** = objective at step 0 minus baseline optimum.\n", + " *How good is the starting point?*\n", + "\n", + "- **FOG (Final Optimality Gap)** = objective at final step minus baseline optimum.\n", + " *How good is the final result?*\n", + "\n", + "- **COG (Cumulative Optimality Gap)** = sum of all per-step gaps.\n", + " *How much total \"wasted effort\" occurred across the trajectory?*\n", + " The shaded area in the trajectory plot.\n", + "\n", + "```\n", + "Objective\n", + " ^\n", + " | * IOG = obj[0] - baseline\n", + " | \\ *\n", + " | \\ * * Shaded area = COG\n", + " | \\ * * *\n", + " | ─ ─ ─ ─ ─ ─ ─ ─ FOG = obj[-1] - baseline\n", + " | - - - - - - - - - - - ← baseline (optimised reference)\n", + " └────────────────────────> Step\n", + "```\n", + "\n", + "- IOG < 0 is ideal: the generated design is *already better* than the baseline\n", + "- FOG ≈ 0: the optimiser recovers to baseline quality regardless of start\n", + "- Small COG: the optimiser converges quickly from this warmstart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# DEMO: Optimization warmstarting on a small subset (3 samples)\n", + "# This runs the EngiBench optimiser from each generated design and tracks the trajectory.\n", + "# We use only 3 samples because optimization is slower than simulation.\n", + 
"\n", + "problem_opt = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "n_opt_demo = min(3, len(gen_designs))\n", + "opt_data = []\n", + "\n", + "for i in range(n_opt_demo):\n", + " cfg = dict(conditions[i])\n", + "\n", + " # Run optimiser from generated design\n", + " problem_opt.reset(seed=7 + i)\n", + " _, opt_history = problem_opt.optimize(gen_designs[i], config=cfg)\n", + "\n", + " # Get baseline objective for reference\n", + " problem_opt.reset(seed=7 + i)\n", + " base_obj = float(problem_opt.simulate(baseline_designs[i], config=cfg)[0])\n", + "\n", + " # Extract objective trajectory\n", + " obj_trajectory = [float(step.obj_values) for step in opt_history]\n", + "\n", + " opt_data.append({\n", + " \"sample_idx\": i,\n", + " \"obj_trajectory\": obj_trajectory,\n", + " \"base_obj\": base_obj,\n", + " })\n", + " iog = obj_trajectory[0] - base_obj\n", + " fog = obj_trajectory[-1] - base_obj\n", + " cog = sum(o - base_obj for o in obj_trajectory)\n", + " print(f\"Sample {i}: IOG={iog:.1f} FOG={fog:.1f} COG={cog:.1f} ({len(opt_history)} steps)\")\n", + "\n", + "print(f\"\\nOptimization complete for {n_opt_demo} samples.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_optimization_trajectories(opt_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the optimization trajectories\n", + "\n", + "- **Steep drop at step 0→1**: The generated design was far from optimal, but the\n", + " optimiser quickly improved it. This still counts as a useful warmstart if\n", + " the total trajectory (COG) is shorter than starting from scratch.\n", + "\n", + "- **Flat trajectory near baseline**: The generated design was already near-optimal\n", + " and the optimiser had little work to do. 
Best-case scenario.\n", + "\n", + "- **Trajectory above baseline throughout**: The generated design was so far from\n", + " optimal that even after optimisation it never reached baseline quality. This\n", + " suggests the model is producing designs in the wrong region of design space.\n", + "\n", + "**In practice**, you would run this on many more samples and average the IOG/COG/FOG\n", + "to get statistically robust estimates. For the workshop, 3 samples illustrate\n", + "the concept." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 7: Putting It All Together\n", + "\n", + "Now we aggregate all the metrics from Parts 2-6 into a single summary table.\n", + "This is the kind of table you would report in a paper or use to compare\n", + "different generative models." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-C\n", + "# Goal: build a comprehensive summary dict and wrap it in a DataFrame.\n", + "#\n", + "# You have:\n", + "# results -- per-sample DataFrame from Part 2\n", + "# mmd_value -- MMD from Part 4\n", + "# diversity_l2 -- from Part 5\n", + "# novelty_nn -- from Part 5\n", + "# opt_data -- optimization results from Part 6\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Compute average IOG/FOG/COG from the optimization demo\n", + "avg_iog = None # TODO: mean of (first obj - base_obj) across opt_data\n", + "avg_fog = None # TODO: mean of (last obj - base_obj) across opt_data\n", + "avg_cog = None # TODO: mean of (sum of gaps) across opt_data\n", + "\n", + "summary = {\n", + " # Simulation performance\n", + " \"n_samples\": len(results),\n", + " \"gen_obj_mean\": None, # TODO: mean of gen_obj column\n", + " \"base_obj_mean\": None, # TODO: mean of base_obj column\n", + " \"objective_gap_mean\": None, # TODO: mean of gen_minus_base column\n", + " \"improvement_rate\": None, 
# TODO: fraction where gen_obj < base_obj\n", + " # Constraint satisfaction\n", + " \"gen_feasible_rate\": None, # TODO: fraction of feasible generated designs\n", + " \"base_feasible_rate\": None, # TODO: fraction of feasible baseline designs\n", + " \"gen_violation_ratio\": None, # TODO: 1 - gen_feasible_rate\n", + " \"base_violation_ratio\": None, # TODO: 1 - base_feasible_rate\n", + " # Distributional similarity\n", + " \"mmd\": mmd_value,\n", + " # Diversity & novelty\n", + " \"gen_diversity_l2\": diversity_l2,\n", + " \"gen_novelty_to_train_l2\": novelty_nn,\n", + " # Optimization warmstarting (from demo subset)\n", + " \"avg_iog\": avg_iog,\n", + " \"avg_fog\": avg_fog,\n", + " \"avg_cog\": avg_cog,\n", + "}\n", + "\n", + "summary_df = None # TODO: pd.DataFrame([summary])\n", + "\n", + "raise NotImplementedError(\"Fill in the TODOs above, then delete this line.\")\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "# Show the summary transposed for readability (one metric per row)\n", + "display(summary_df.T.rename(columns={0: \"value\"}))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-C\n", + "assert \"summary_df\" in dir() and summary_df is not None, \"Define summary_df\"\n", + "assert len(summary_df) == 1, \"summary_df should have exactly one row\"\n", + "required_keys = {\n", + " \"n_samples\", \"gen_obj_mean\", \"base_obj_mean\", \"objective_gap_mean\",\n", + " \"improvement_rate\", \"gen_feasible_rate\", \"base_feasible_rate\",\n", + " \"gen_violation_ratio\", \"base_violation_ratio\",\n", + " \"mmd\", \"gen_diversity_l2\", \"gen_novelty_to_train_l2\",\n", + " \"avg_iog\", \"avg_fog\", \"avg_cog\",\n", + "}\n", + "missing = required_keys - set(summary_df.columns)\n", + "assert not missing, f\"Missing summary columns: {missing}\"\n", + "assert summary_df[\"gen_obj_mean\"].notna().all(), \"gen_obj_mean is NaN\"\n", + 
"print(\"Checkpoint 02-C passed: comprehensive summary table is complete.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_metric_summary_dashboard(summary)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Export artifacts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_path = ARTIFACT_DIR / \"per_sample_metrics.csv\"\n", + "summary_path = ARTIFACT_DIR / \"metrics_summary.csv\"\n", + "\n", + "results.to_csv(results_path, index=False)\n", + "summary_df.to_csv(summary_path, index=False)\n", + "\n", + "# Save objective histogram\n", + "hist_path = ARTIFACT_DIR / \"objective_histogram.png\"\n", + "fig, ax = plt.subplots(figsize=(7, 4))\n", + "ax.hist(results[\"gen_obj\"], bins=10, alpha=0.7, label=\"Generated\", color=\"#4C72B0\")\n", + "ax.hist(results[\"base_obj\"], bins=10, alpha=0.7, label=\"Baseline\", color=\"#DD8452\")\n", + "ax.set_xlabel(\"Compliance (lower is better)\")\n", + "ax.set_ylabel(\"Count\")\n", + "ax.set_title(\"Generated vs baseline objective distribution\")\n", + "ax.legend()\n", + "fig.tight_layout()\n", + "fig.savefig(hist_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save scatter plot\n", + "scatter_path = ARTIFACT_DIR / \"objective_scatter.png\"\n", + "fig, ax = plt.subplots(figsize=(5, 5))\n", + "ax.scatter(results[\"base_obj\"], results[\"gen_obj\"], alpha=0.8)\n", + "lo = min(results[\"base_obj\"].min(), results[\"gen_obj\"].min()) * 0.9\n", + "hi = max(results[\"base_obj\"].max(), results[\"gen_obj\"].max()) * 1.1\n", + "ax.plot([lo, hi], [lo, hi], \"--\", color=\"gray\", linewidth=1)\n", + "ax.set_xlabel(\"Baseline compliance\")\n", + "ax.set_ylabel(\"Generated compliance\")\n", + "ax.set_title(\"Per-sample objective comparison\")\n", + "fig.tight_layout()\n", + "fig.savefig(scatter_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save 
design grid\n", + "grid_path = ARTIFACT_DIR / \"design_grid.png\"\n", + "fig, axes_grid = plt.subplots(2, min(6, len(gen_designs)), figsize=(14, 5))\n", + "for i in range(min(6, len(gen_designs))):\n", + " axes_grid[0, i].imshow(gen_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[0, i].set_title(f\"gen {i}\", fontsize=9)\n", + " axes_grid[0, i].axis(\"off\")\n", + " axes_grid[1, i].imshow(baseline_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[1, i].set_title(f\"base {i}\", fontsize=9)\n", + " axes_grid[1, i].axis(\"off\")\n", + "fig.tight_layout()\n", + "fig.savefig(grid_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "print(\"Exported:\")\n", + "for p in [results_path, summary_path, hist_path, scatter_path, grid_path]:\n", + " print(f\" {p}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Discussion prompts\n", + "\n", + "Use these questions to prepare for the workshop breakout discussion. There are no\n", + "\"right\" answers -- the goal is to develop your own informed perspective.\n", + "\n", + "1. **Which metric category matters most for your domain?** In safety-critical\n", + " applications (aerospace, medical devices), constraint satisfaction is a hard\n", + " requirement. In early-stage concept exploration, diversity might matter more.\n", + " What about your own research area?\n", + "\n", + "2. **When do metrics disagree?** A model might score well on MMD (distributional\n", + " match) but poorly on per-sample objective (simulation performance). What does\n", + " that disagreement tell you? Which metric would you trust more?\n", + "\n", + "3. **Is diversity always good?** A model that produces wildly different designs\n", + " scores high on diversity -- but some of those designs might be nonsensical.\n", + " When does high diversity indicate a problem rather than a strength?\n", + "\n", + "4. 
**The warmstarting test.** If a model's IOG is poor (bad starting points) but\n", + " FOG is near zero (optimiser recovers), is the model useful? What if IOG is\n", + " great but the optimiser diverges (FOG increases)?\n", + "\n", + "5. **When would you trust these results for a paper?** We evaluated 24 samples\n", + " with a model trained for 8 epochs on 512 examples. What would need to change\n", + " to make these numbers publication-ready? (Think: sample size, training budget,\n", + " statistical significance, multiple seeds.)\n", + "\n", + "6. **Objective vs feasibility trade-off.** If your model produces designs with\n", + " great compliance but poor volume-fraction adherence, is that progress or a\n", + " failure? How would you communicate this nuance in a benchmark table?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection: what did you learn in NB02?\n", + "\n", + "Before closing, write down your answers to these prompts:\n", + "\n", + "1. **What do the metrics tell you about your model?** Look at your summary table.\n", + " Where does the generator excel, and where does it fall short? Which metric\n", + " surprised you most?\n", + "\n", + "2. **Which visualisation was most informative?** Was it the residual heatmaps,\n", + " the PCA embedding, the optimization trajectories, or something else? Why?\n", + "\n", + "3. **What would a full benchmark study add?** A complete EngiBench evaluation\n", + " would test across multiple problems, multiple seeds, larger sample sizes, and\n", + " the full metric suite (MMD, DPP, IOG/COG/FOG, violation ratio). How would\n", + " that change your confidence in the conclusions?\n", + "\n", + "4. **How would you improve the generator?** Based on the diagnostic pattern you\n", + " see (which categories are strong vs weak), what would you change about the\n", + " model architecture, training procedure, or data pipeline?" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Troubleshooting\n", + "\n", + "If a section fails, do not continue downstream. Fix the failing cell first, then\n", + "rerun it and its checkpoint before moving on. The notebook is staged so that\n", + "failures are localised." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb b/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb new file mode 100644 index 00000000..8801e5cd --- /dev/null +++ b/workshops/dcc26/participant/03_add_new_problem_scaffold.ipynb @@ -0,0 +1,785 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 03 (Participant): Add a New Problem to EngiBench\n", + "\n", + "**Time budget: ~25 minutes** | 3 fill-in exercises | Mostly guided walkthrough\n", + "\n", + "In this notebook you will see how to wrap a **new simulator** as an EngiBench `Problem`,\n", + "so that every model in EngiOpt can immediately train on it with zero code changes.\n", + "\n", + "We will build a **planar 2-link robot manipulator co-design problem**: choose link\n", + "lengths, motor strength, and control gains so the arm reaches a target with minimal\n", + "tracking error and energy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Notebook map\n", + "\n", + "This notebook is a **guided walkthrough** with 3 small fill-in exercises.\n", + "Most code is pre-written -- your job is to **read, run, and understand** the\n", + "EngiBench Problem contract, then fill in 3 targeted methods.\n", + "\n", + "### Public exercise legend\n", + "- `PUBLIC FILL-IN CELL`: implement this method (skeleton + hints provided).\n", + "- `CHECKPOINT`: run and verify before continuing.\n", + "- Pre-written cells: read and run -- these are fully working code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The problem: Planar manipulator co-design\n", + "\n", + "Imagine a simple robot arm bolted to a table. It has **two rigid links**\n", + "connected by revolute joints, and it needs to reach a target point in 2D space.\n", + "\n", + "```\n", + " target\n", + " X (target_x, target_y)\n", + " /\n", + " / link 2 (length l2)\n", + " /\n", + " joint 2\n", + " /\n", + " / link 1 (length l1)\n", + " /\n", + " joint 1\n", + " *------------ table / base\n", + "```\n", + "\n", + "**What we design** (the design vector, 6 variables):\n", + "\n", + "| Index | Variable | Range | Meaning |\n", + "|-------|----------|-------|---------|\n", + "| 0 | `link1_m` | 0.25 -- 1.00 | Length of link 1 (meters) |\n", + "| 1 | `link2_m` | 0.20 -- 0.95 | Length of link 2 (meters) |\n", + "| 2 | `motor_strength` | 2.0 -- 30.0 | Motor torque multiplier |\n", + "| 3 | `kp` | 5.0 -- 120.0 | Proportional control gain |\n", + "| 4 | `kd` | 0.2 -- 18.0 | Derivative control gain |\n", + "| 5 | `damping` | 0.0 -- 1.5 | Joint damping coefficient |\n", + "\n", + "**Conditions** (set by the environment, not the designer):\n", + "- `target_x`, `target_y`: where the arm must reach\n", + "- `payload_kg`: mass at the end-effector\n", + "- `disturbance_scale`: random torque noise during simulation\n", + "\n", + "**Objectives** (both minimized):\n", + "1. 
`final_tracking_error_m`: how far the end-effector is from the target at the end\n", + "2. `actuation_energy_j`: total energy spent by the motors\n", + "\n", + "**Why this is a co-design problem**: we are simultaneously choosing the *hardware*\n", + "(link lengths, motor) and the *controller* (gains, damping). This is exactly the\n", + "kind of coupled design problem where generative models can help explore the space." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The EngiBench Problem contract\n", + "\n", + "Every problem in EngiBench implements the same interface. This is what makes it\n", + "possible to train **any** EngiOpt model on **any** problem with zero model code changes.\n", + "\n", + "The key pieces:\n", + "\n", + "| Attribute / Method | Purpose |\n", + "|---|---|\n", + "| `design_space` | A `gymnasium.spaces.Box` defining valid designs |\n", + "| `objectives` | Tuple of `(name, direction)` pairs |\n", + "| `conditions` | Dataclass of environmental conditions |\n", + "| `design_constraints` | List of constraint functions |\n", + "| `check_constraints(design, config)` | Returns list of violations (empty = feasible) |\n", + "| `simulate(design, config)` | Runs the simulator, returns objective values |\n", + "| `optimize(start, config)` | Simple optimizer, returns `(best_design, history)` |\n", + "| `render(design)` | Visualization for human inspection |\n", + "| `random_design()` | Sample a random valid design |\n", + "\n", + "In this notebook, most of these are **pre-written**. You will fill in 3 methods\n", + "that test your understanding of the contract." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "\n", + "def pip_install(packages: list[str]):\n", + " cmd = [sys.executable, \"-m\", \"pip\", \"install\", *packages]\n", + " print(\"Running:\", \" \".join(cmd))\n", + " subprocess.check_call(cmd)\n", + "\n", + "\n", + "BASE_PACKAGES = [\"engibench[all]\", \"matplotlib\", \"gymnasium\", \"pybullet\"]\n", + "ENGIOPT_GIT = \"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " print(\"Installing dependencies...\")\n", + " pip_install(BASE_PACKAGES)\n", + " pip_install([ENGIOPT_GIT])\n", + "\n", + " try:\n", + " import torch # noqa: F401\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + "\n", + " print(\"Dependency install complete.\")\n", + "else:\n", + " print(\"Skipping install (using current environment). Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1 -- Imports\n", + "\n", + "These are the EngiBench building blocks we need to define a Problem." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Annotated\n", + "\n", + "import numpy as np\n", + "from gymnasium import spaces\n", + "\n", + "from engibench.constraint import bounded\n", + "from engibench.constraint import constraint\n", + "from engibench.core import ObjectiveDirection\n", + "from engibench.core import OptiStep\n", + "from engibench.core import Problem\n", + "\n", + "import pybullet as p" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2 -- Build the Problem class (guided walkthrough + 3 fill-ins)\n", + "\n", + "The cell below contains the **complete** `PlanarManipulatorCoDesignProblem` class.\n", + "Most methods are pre-written and working. **Three methods** are left for you to fill in.\n", + "\n", + "Read through the pre-written code to understand the structure, then complete:\n", + "\n", + "1. **Fill-in 03-A** (`simulate`): Merge config, clip design to bounds, call the rollout. A short wrapper method.\n", + "2. **Fill-in 03-B** (`random_design`): Sample a design from the design space. Essentially a one-liner.\n", + "3. **Fill-in 03-C** (`optimize`): Wire up a simple random-perturbation search loop using the hints provided.\n", + "\n", + "The pre-written methods handle all the PyBullet complexity -- you do NOT need to\n", + "understand robotics or physics simulation to complete the exercises." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pre-written methods tour (read before filling in)\n", + "\n", + "Here is a quick guide to the pre-written methods you will see in the class:\n", + "\n", + "- **`__init__`**: Sets up the design space (6-dim Box), conditions, and constraints.\n", + "- **`_build_robot`**: Creates a 2-link arm in PyBullet with configurable link lengths and damping.\n", + "- **`_inverse_kinematics_2link`**: Given a target (x, y), computes the joint angles using the law of cosines. Standard closed-form 2-link IK.\n", + "- **`_forward_kinematics_2link`**: Given joint angles, computes end-effector (x, y). Simple trig.\n", + "- **`_rollout`**: Runs the full PyBullet simulation -- sets up PD control to track the target, applies disturbances, records tracking error and energy at each step.\n", + "- **`optimize`**: Random search over the design space -- tries perturbations, keeps the best.\n", + "- **`render`**: 4-panel matplotlib figure showing design variables, end-effector path, tracking error, and joint torques." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class PlanarManipulatorCoDesignProblem(Problem[np.ndarray]):\n", + " \"\"\"Robotics co-design problem: choose arm geometry + controller to reach a target.\n", + "\n", + " This wraps a PyBullet physics simulation as an EngiBench Problem so that\n", + " any EngiOpt generative model can train on it.\n", + " \"\"\"\n", + "\n", + " version = 0\n", + " objectives = (\n", + " (\"final_tracking_error_m\", ObjectiveDirection.MINIMIZE),\n", + " (\"actuation_energy_j\", ObjectiveDirection.MINIMIZE),\n", + " )\n", + "\n", + " @dataclass\n", + " class Conditions:\n", + " target_x: Annotated[float, bounded(lower=0.20, upper=1.35)] = 0.85\n", + " target_y: Annotated[float, bounded(lower=0.05, upper=1.20)] = 0.45\n", + " payload_kg: Annotated[float, bounded(lower=0.0, upper=2.0)] = 0.8\n", + " disturbance_scale: Annotated[float, bounded(lower=0.0, upper=0.30)] = 0.05\n", + "\n", + " @dataclass\n", + " class Config(Conditions):\n", + " sim_steps: Annotated[int, bounded(lower=60, upper=1200)] = 240\n", + " dt: Annotated[float, bounded(lower=1e-4, upper=0.05)] = 1.0 / 120.0\n", + " torque_limit: Annotated[float, bounded(lower=1.0, upper=50.0)] = 12.0\n", + " max_iter: Annotated[int, bounded(lower=1, upper=300)] = 60\n", + "\n", + " dataset_id = \"IDEALLab/planar_manipulator_codesign_v0\" # placeholder\n", + " container_id = None\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # __init__ (pre-written)\n", + " # ------------------------------------------------------------------ #\n", + " def __init__(self, seed: int = 0, **kwargs):\n", + " super().__init__(seed=seed)\n", + " self.config = self.Config(**kwargs)\n", + " self.conditions = self.Conditions(\n", + " target_x=self.config.target_x,\n", + " target_y=self.config.target_y,\n", + " payload_kg=self.config.payload_kg,\n", + " 
disturbance_scale=self.config.disturbance_scale,\n", + " )\n", + "\n", + " # Design vector = [link1_m, link2_m, motor_strength, kp, kd, damping]\n", + " self.design_space = spaces.Box(\n", + " low=np.array([0.25, 0.20, 2.0, 5.0, 0.2, 0.0], dtype=np.float32),\n", + " high=np.array([1.00, 0.95, 30.0, 120.0, 18.0, 1.5], dtype=np.float32),\n", + " dtype=np.float32,\n", + " )\n", + "\n", + " # --- Constraints ------------------------------------------------\n", + " # These use the @constraint decorator from EngiBench.\n", + " # A constraint function receives (design, **config_kwargs).\n", + " # It should ASSERT what must be true. If the assert fails,\n", + " # check_constraints() catches it and reports a violation.\n", + "\n", + " @constraint\n", + " def reachable_workspace(design: np.ndarray, target_x: float, target_y: float, **_) -> None:\n", + " l1, l2 = float(design[0]), float(design[1])\n", + " r = float(np.sqrt(target_x**2 + target_y**2))\n", + " assert l1 + l2 >= r + 0.03, f\"target radius {r:.3f} exceeds reach {l1 + l2:.3f}\"\n", + "\n", + " @constraint\n", + " def gain_consistency(design: np.ndarray, **_) -> None:\n", + " kp, kd = float(design[3]), float(design[4])\n", + " assert kd <= 2.2 * np.sqrt(max(kp, 1e-6)), f\"kd={kd:.3f} too high for kp={kp:.3f}\"\n", + "\n", + " self.design_constraints = [reachable_workspace, gain_consistency]\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _build_robot (pre-written -- PyBullet internals)\n", + " # ------------------------------------------------------------------ #\n", + " def _build_robot(self, l1: float, l2: float, payload_kg: float, damping: float) -> tuple[int, int]:\n", + " \"\"\"Create a 2-link planar arm in PyBullet. 
Returns (robot_id, ee_link_index).\"\"\"\n", + " p.resetSimulation()\n", + " p.setGravity(0, 0, -9.81)\n", + "\n", + " link_masses = [0.5 + 0.2 * payload_kg, 0.35 + 0.25 * payload_kg]\n", + " link_collision = [-1, -1]\n", + " link_visual = [\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.025, length=l1, rgbaColor=[0.2, 0.5, 0.9, 1.0]),\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.020, length=l2, rgbaColor=[0.9, 0.4, 0.2, 1.0]),\n", + " ]\n", + " qx = p.getQuaternionFromEuler([0.0, np.pi / 2.0, 0.0])\n", + "\n", + " robot = p.createMultiBody(\n", + " baseMass=0.0,\n", + " baseCollisionShapeIndex=-1,\n", + " baseVisualShapeIndex=-1,\n", + " basePosition=[0, 0, 0],\n", + " linkMasses=link_masses,\n", + " linkCollisionShapeIndices=link_collision,\n", + " linkVisualShapeIndices=link_visual,\n", + " linkPositions=[[0, 0, 0], [l1, 0, 0]],\n", + " linkOrientations=[qx, qx],\n", + " linkInertialFramePositions=[[l1 / 2.0, 0, 0], [l2 / 2.0, 0, 0]],\n", + " linkInertialFrameOrientations=[[0, 0, 0, 1], [0, 0, 0, 1]],\n", + " linkParentIndices=[0, 1],\n", + " linkJointTypes=[p.JOINT_REVOLUTE, p.JOINT_REVOLUTE],\n", + " linkJointAxis=[[0, 0, 1], [0, 0, 1]],\n", + " )\n", + "\n", + " for j in [0, 1]:\n", + " p.changeDynamics(robot, j, linearDamping=0.0, angularDamping=float(damping))\n", + "\n", + " return robot, 1\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _inverse_kinematics_2link (pre-written -- standard 2-link IK)\n", + " # ------------------------------------------------------------------ #\n", + " def _inverse_kinematics_2link(self, x: float, y: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Closed-form IK for a 2-link planar arm using the law of cosines.\"\"\"\n", + " r2 = x * x + y * y\n", + " c2 = (r2 - l1 * l1 - l2 * l2) / (2.0 * l1 * l2)\n", + " c2 = float(np.clip(c2, -1.0, 1.0))\n", + " s2 = float(np.sqrt(max(0.0, 1.0 - c2 * c2)))\n", + " q2 = float(np.arctan2(s2, c2))\n", + " q1 = 
float(np.arctan2(y, x) - np.arctan2(l2 * s2, l1 + l2 * c2))\n", + " return q1, q2\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _forward_kinematics_2link (pre-written -- simple trig)\n", + " # ------------------------------------------------------------------ #\n", + " def _forward_kinematics_2link(self, q1: float, q2: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Compute end-effector (x, y) from joint angles and link lengths.\"\"\"\n", + " x = l1 * np.cos(q1) + l2 * np.cos(q1 + q2)\n", + " y = l1 * np.sin(q1) + l2 * np.sin(q1 + q2)\n", + " return float(x), float(y)\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _rollout (pre-written -- runs the full PyBullet simulation)\n", + " # ------------------------------------------------------------------ #\n", + " def _rollout(self, design: np.ndarray, cfg: dict, return_trace: bool = False):\n", + " \"\"\"Run PyBullet simulation with PD control. 
Returns objective vector.\"\"\"\n", + " l1, l2, motor_strength, kp, kd, damping = [float(v) for v in design]\n", + "\n", + " cid = p.connect(p.DIRECT)\n", + " try:\n", + " robot, _ = self._build_robot(l1, l2, cfg[\"payload_kg\"], damping)\n", + " q1_t, q2_t = self._inverse_kinematics_2link(cfg[\"target_x\"], cfg[\"target_y\"], l1, l2)\n", + "\n", + " err_trace = []\n", + " tau_trace = []\n", + " ee_trace = []\n", + " energy = 0.0\n", + "\n", + " for _step in range(int(cfg[\"sim_steps\"])):\n", + " for j, q_t in enumerate([q1_t, q2_t]):\n", + " p.setJointMotorControl2(\n", + " bodyUniqueId=robot,\n", + " jointIndex=j,\n", + " controlMode=p.POSITION_CONTROL,\n", + " targetPosition=q_t,\n", + " positionGain=float(kp) / 120.0,\n", + " velocityGain=float(kd) / 50.0,\n", + " force=float(cfg[\"torque_limit\"]) * float(motor_strength),\n", + " )\n", + "\n", + " if cfg[\"disturbance_scale\"] > 0:\n", + " disturb = self.np_random.normal(0.0, cfg[\"disturbance_scale\"], size=2)\n", + " p.applyExternalTorque(robot, 0, [0, 0, float(disturb[0])], p.LINK_FRAME)\n", + " p.applyExternalTorque(robot, 1, [0, 0, float(disturb[1])], p.LINK_FRAME)\n", + "\n", + " p.stepSimulation()\n", + "\n", + " js0 = p.getJointState(robot, 0)\n", + " js1 = p.getJointState(robot, 1)\n", + " q1, q2 = float(js0[0]), float(js1[0])\n", + " dq1, dq2 = float(js0[1]), float(js1[1])\n", + " tau1, tau2 = float(js0[3]), float(js1[3])\n", + "\n", + " ee_x, ee_y = self._forward_kinematics_2link(q1, q2, l1, l2)\n", + " err = float(np.sqrt((ee_x - cfg[\"target_x\"]) ** 2 + (ee_y - cfg[\"target_y\"]) ** 2))\n", + "\n", + " err_trace.append(err)\n", + " tau_trace.append((tau1, tau2))\n", + " ee_trace.append((ee_x, ee_y))\n", + " energy += (abs(tau1 * dq1) + abs(tau2 * dq2)) * float(cfg[\"dt\"])\n", + "\n", + " final_error = float(err_trace[-1])\n", + " obj = np.array([final_error, float(energy)], dtype=np.float32)\n", + "\n", + " if return_trace:\n", + " trace = {\n", + " \"ee_trace\": np.array(ee_trace, 
dtype=np.float32),\n", + " \"err_trace\": np.array(err_trace, dtype=np.float32),\n", + " \"tau_trace\": np.array(tau_trace, dtype=np.float32),\n", + " \"target\": np.array([cfg[\"target_x\"], cfg[\"target_y\"]], dtype=np.float32),\n", + " \"design\": np.array(design, dtype=np.float32),\n", + " \"objectives\": obj,\n", + " }\n", + " return obj, trace\n", + "\n", + " return obj\n", + " finally:\n", + " p.disconnect(cid)\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-A: simulate\n", + " # ================================================================== #\n", + " def simulate(self, design: np.ndarray, config: dict | None = None) -> np.ndarray:\n", + " \"\"\"Run the simulator and return objective values.\n", + "\n", + " This is the main entry point that EngiOpt models call.\n", + " It should:\n", + " 1. Merge self.config defaults with any overrides from `config`\n", + " 2. Clip the design to the valid bounds\n", + " 3. Call self._rollout() and return the result\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " # Hint: self.config.__dict__ gives you the default config as a dict.\n", + " # Use {**defaults, **(config or {})} to merge.\n", + " # np.clip(design, self.design_space.low, self.design_space.high)\n", + " # Return self._rollout(clipped_design, merged_cfg, return_trace=False)\n", + " raise NotImplementedError(\"Fill in simulate()\")\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-B: random_design\n", + " # ================================================================== #\n", + " # Note: check_constraints() is inherited from Problem. 
It calls each\n", + " # function in self.design_constraints and collects assertion failures.\n", + " # The constraints are defined in __init__ above -- look at them!\n", + " #\n", + " # This fill-in is about random_design(), which is used by the optimizer\n", + " # and by dataset generation to sample starting points.\n", + "\n", + " def random_design(self):\n", + " \"\"\"Return (design, reward) where design is sampled uniformly from bounds.\n", + "\n", + " Convention: reward = -1 (dummy value, since we have not simulated yet).\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " # Hint: self.np_random.uniform(low, high) samples from the design space.\n", + " # self.design_space.low and .high give the bounds.\n", + " # Return (design_array, -1)\n", + " raise NotImplementedError(\"Fill in random_design()\")\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-C: optimize\n", + " # ================================================================== #\n", + " def optimize(self, starting_point: np.ndarray, config: dict | None = None):\n", + " \"\"\"Simple random-perturbation optimizer.\n", + "\n", + " Returns (best_design, history) where history is a list of OptiStep.\n", + " Each OptiStep records the best objective values seen so far at that step.\n", + "\n", + " Algorithm:\n", + " 1. Start from starting_point, evaluate it\n", + " 2. For each iteration: perturb the best design with Gaussian noise,\n", + " clip to bounds, check constraints, simulate, keep if better\n", + " 3. 
\"Better\" = lower score, where score = error + 0.02 * energy\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " # Hint: Follow the docstring algorithm above.\n", + " # cfg = {**self.config.__dict__, **(config or {})}\n", + " # x = np.clip(starting_point, self.design_space.low, self.design_space.high)\n", + " # best, best_obj = x.copy(), self.simulate(x, cfg)\n", + " # best_score = float(best_obj[0] + 0.02 * best_obj[1])\n", + " # history = [OptiStep(obj_values=best_obj, step=0)]\n", + " # step_scale = np.array([0.05, 0.05, 2.5, 8.0, 1.2, 0.08], dtype=np.float32)\n", + " #\n", + " # Loop max_iter times:\n", + " # candidate = best + self.np_random.normal(...) * step_scale\n", + " # clip, check constraints, simulate, compare scores\n", + " # Append OptiStep to history\n", + " #\n", + " # return best, history\n", + " raise NotImplementedError(\"Fill in optimize()\")\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # render (pre-written -- 4-panel visualization)\n", + " # ------------------------------------------------------------------ #\n", + " def render(self, design: np.ndarray, *, open_window: bool = False):\n", + " \"\"\"Create a 4-panel diagnostic figure for a given design.\"\"\"\n", + " import matplotlib.pyplot as plt\n", + "\n", + " cfg = self.config.__dict__\n", + " x = np.clip(design.astype(np.float32), self.design_space.low, self.design_space.high)\n", + " obj, trace = self._rollout(x, cfg, return_trace=True)\n", + "\n", + " ee = trace[\"ee_trace\"]\n", + " err = trace[\"err_trace\"]\n", + " target = trace[\"target\"]\n", + " tau = trace[\"tau_trace\"]\n", + "\n", + " fig, axes = plt.subplots(1, 4, figsize=(17, 4.2))\n", + "\n", + " labels = [\"link1\", \"link2\", \"motor\", \"kp\", \"kd\", \"damping\"]\n", + " axes[0].bar(labels, x, color=[\"#4c78a8\", \"#4c78a8\", \"#f58518\", 
\"#54a24b\", \"#e45756\", \"#72b7b2\"])\n", + " axes[0].set_title(\"Design variables\")\n", + " axes[0].tick_params(axis=\"x\", rotation=35)\n", + "\n", + " axes[1].plot(ee[:, 0], ee[:, 1], lw=2, label=\"end-effector path\")\n", + " axes[1].scatter([target[0]], [target[1]], c=\"red\", marker=\"x\", s=70, label=\"target\")\n", + " r = x[0] + x[1]\n", + " circle = plt.Circle((0, 0), r, color=\"gray\", fill=False, linestyle=\"--\", alpha=0.5)\n", + " axes[1].add_patch(circle)\n", + " axes[1].set_aspect(\"equal\", \"box\")\n", + " axes[1].set_title(\"Task-space trajectory\")\n", + " axes[1].set_xlabel(\"x [m]\")\n", + " axes[1].set_ylabel(\"y [m]\")\n", + " axes[1].legend(fontsize=8)\n", + "\n", + " axes[2].plot(err, color=\"#e45756\")\n", + " axes[2].set_title(\"Tracking error over time\")\n", + " axes[2].set_xlabel(\"step\")\n", + " axes[2].set_ylabel(\"error [m]\")\n", + " axes[2].grid(alpha=0.3)\n", + "\n", + " axes[3].plot(np.abs(tau[:, 0]), label=\"|tau1|\")\n", + " axes[3].plot(np.abs(tau[:, 1]), label=\"|tau2|\")\n", + " axes[3].set_title(\"Actuation effort\")\n", + " axes[3].set_xlabel(\"step\")\n", + " axes[3].set_ylabel(\"torque [Nm]\")\n", + " axes[3].legend(fontsize=8)\n", + " axes[3].grid(alpha=0.3)\n", + "\n", + " fig.suptitle(\n", + " f\"Objectives: final_error={obj[0]:.4f} m, energy={obj[1]:.3f} J\",\n", + " y=1.03,\n", + " )\n", + " fig.tight_layout()\n", + "\n", + " if open_window:\n", + " plt.show()\n", + " return fig, axes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CHECKPOINT: Quick sanity check before the smoke test\n", + "\n", + "Run this cell to verify the class can be instantiated and the pre-written\n", + "parts work. This does NOT require your fill-ins yet." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT -- class instantiation (does not call your fill-ins)\n", + "prob_test = PlanarManipulatorCoDesignProblem(seed=0)\n", + "print(\"design_space:\", prob_test.design_space)\n", + "print(\"objectives:\", prob_test.objectives)\n", + "print(\"num constraints:\", len(prob_test.design_constraints))\n", + "print(\"conditions:\", prob_test.conditions)\n", + "print()\n", + "print(\"Class instantiation OK.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3 -- Smoke test\n", + "\n", + "Run this after completing **all 3 fill-ins** above.\n", + "\n", + "What success looks like:\n", + "- Non-empty optimization history\n", + "- Finite objective values (no NaN or Inf)\n", + "- A 4-panel figure renders without error\n", + "\n", + "**How to read the 4-panel figure**: Inspect the panels for (1) design parameter\n", + "values, (2) the end-effector path in task space with the target marked,\n", + "(3) tracking error decreasing over simulation steps, and (4) joint torque\n", + "profiles showing actuation effort." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Smoke test (run after implementing all 3 PUBLIC FILL-IN blocks)\n", + "problem = PlanarManipulatorCoDesignProblem(\n", + " seed=42,\n", + " target_x=0.9,\n", + " target_y=0.45,\n", + " payload_kg=0.8,\n", + " disturbance_scale=0.04,\n", + " sim_steps=220,\n", + " max_iter=40,\n", + ")\n", + "start, _ = problem.random_design()\n", + "\n", + "cfg = {\n", + " \"target_x\": 0.9,\n", + " \"target_y\": 0.45,\n", + " \"payload_kg\": 0.8,\n", + " \"disturbance_scale\": 0.04,\n", + " \"sim_steps\": 220,\n", + " \"dt\": 1.0 / 120.0,\n", + " \"torque_limit\": 12.0,\n", + " \"max_iter\": 40,\n", + "}\n", + "\n", + "print(\"design space:\", problem.design_space)\n", + "print(\"objectives:\", problem.objectives)\n", + "print(\"conditions:\", problem.conditions)\n", + "\n", + "viol = problem.check_constraints(start, config=cfg)\n", + "print(\"constraint violations:\", len(viol))\n", + "\n", + "obj0 = problem.simulate(start, config=cfg)\n", + "opt_design, history = problem.optimize(start, config=cfg)\n", + "objf = problem.simulate(opt_design, config=cfg)\n", + "\n", + "print(\"initial objectives [tracking_error_m, energy_J]:\", obj0.tolist())\n", + "print(\"final objectives [tracking_error_m, energy_J]:\", objf.tolist())\n", + "print(\"optimization steps:\", len(history))\n", + "\n", + "# CHECKPOINT\n", + "assert len(history) > 0, \"Optimization history should not be empty\"\n", + "assert np.all(np.isfinite(obj0)), \"Initial objective contains non-finite values\"\n", + "assert np.all(np.isfinite(objf)), \"Final objective contains non-finite values\"\n", + "print(\"All assertions passed.\")\n", + "\n", + "problem.render(opt_design)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The power of a standardized interface\n", + "\n", + "Notice what just happened: we wrapped a completely new simulator (PyBullet robotics)\n", + "as an EngiBench 
`Problem`, and it exposes the same interface as `beams2d`,\n", + "`heatconduction2d`, or any other problem in the benchmark.\n", + "\n", + "This means that **every generative model in EngiOpt** -- the CGAN you trained in\n", + "Notebook 01, the diffusion models, the VAEs -- could be trained on this manipulator\n", + "problem **with zero model code changes**. You would only need to point the training\n", + "script at the new problem ID.\n", + "\n", + "That is the core value proposition of EngiBench: **decouple the problem from the\n", + "method** so researchers can focus on one or the other without rewriting glue code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Contributing to EngiBench: what you need\n", + "\n", + "If you have an engineering problem from your own domain that you would like to\n", + "contribute to the benchmark, here is the checklist:\n", + "\n", + "1. **Design space**: Define a `gymnasium.spaces.Box` (or `Dict`) for the design variables, with physically meaningful bounds.\n", + "\n", + "2. **Simulator**: Implement `simulate(design, config) -> objective_values`. This is the core -- it maps a design to measurable performance. Must be deterministic for a given seed.\n", + "\n", + "3. **Constraints**: Define constraint functions using the `@constraint` decorator. Each should `assert` what must be true for a design to be feasible.\n", + "\n", + "4. **Dataset**: Generate a dataset of (design, conditions, objectives) tuples and host it on HuggingFace. This is what generative models train on.\n", + "\n", + "5. **Render method**: A visualization that helps humans interpret designs. Not strictly required for training, but essential for debugging and papers.\n", + "\n", + "6. 
**Metadata**: Version number, objective names and directions, condition ranges, and a docstring explaining the problem physics.\n", + "\n", + "See the [EngiBench contribution guide](https://github.com/IDEALLab/EngiBench) for the full template and review process." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Takeaways\n", + "\n", + "Before closing, reflect on these questions:\n", + "\n", + "1. **What are the minimum requirements** for adding a new problem to EngiBench? Which methods and attributes are essential vs. nice-to-have?\n", + "\n", + "2. **Which part of the Problem interface** was most intuitive? Which was least intuitive? (For example: design_space, constraints, simulate, render, optimize...)\n", + "\n", + "3. **What engineering problem from YOUR domain** could you contribute as a benchmark? What would the design vector look like? What would you simulate?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optional extension -- Train an EngiOpt model on this problem\n", + "\n", + "The solutions notebook contains a full optional extension that:\n", + "\n", + "1. Generates a feasible dataset from simulator rollouts\n", + "2. Trains `engiopt.cgan_1d` (the same model architecture from Notebook 01) on the manipulator problem\n", + "3. Compares generated designs vs. a random baseline\n", + "\n", + "This demonstrates the key point: because our manipulator problem uses the standard\n", + "EngiBench interface, we can reuse EngiOpt model code directly.\n", + "\n", + "To try it yourself, see the **solutions notebook**:\n", + "`workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb`\n", + "\n", + "The essential idea in ~10 lines of pseudocode:\n", + "\n", + "```python\n", + "# 1. 
Generate dataset\n", + "for _ in range(N_SAMPLES):\n", + " design, _ = problem.random_design()\n", + " if problem.check_constraints(design, cfg) == []:\n", + " obj = problem.simulate(design, cfg)\n", + " dataset.append((design, conditions, obj))\n", + "\n", + "# 2. Train CGAN on top-performing designs\n", + "generator = cgan1d.Generator(latent_dim=8, n_conds=4, design_shape=(6,), ...)\n", + "# ... standard GAN training loop ...\n", + "\n", + "# 3. Generate + evaluate\n", + "new_design = generator(z, conditions)\n", + "obj = problem.simulate(new_design, cfg) # same interface!\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Troubleshooting\n", + "\n", + "- **`NotImplementedError`**: You have not yet filled in one of the 3 exercises. Check `simulate()`, `random_design()`, and `optimize()`.\n", + "- **`AssertionError` in smoke test**: Your fill-in runs but produces incorrect values. Re-read the hints in the `# START FILL` block.\n", + "- **PyBullet connection error**: Make sure `pybullet` is installed. On Colab, the bootstrap cell handles this.\n", + "- **If a section fails, do not continue downstream.** Fix locally first, then rerun." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/participant/04_heat_exchanger_design_problem.ipynb b/workshops/dcc26/participant/04_heat_exchanger_design_problem.ipynb new file mode 100644 index 00000000..b569bfae --- /dev/null +++ b/workshops/dcc26/participant/04_heat_exchanger_design_problem.ipynb @@ -0,0 +1,893 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "29dd7a28", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Notebook 04 Participant - Wrapping a Heat Exchanger Design Problem\n", + "\n", + "**Capstone idea:** take a small, recognizable engineering simulator and wrap it in the same interface you used in the earlier notebooks.\n", + "\n", + "In Notebooks 00-02, `beams2d` was already packaged for you. Here we build a new benchmark-shaped problem from scratch: a compact counterflow heat exchanger.\n", + "\n", + "> Colab users: click **File -> Save a copy in Drive** before editing so your changes persist.\n" + ] + }, + { + "cell_type": "markdown", + "id": "f2dd9fcf", + "metadata": {}, + "source": [ + "## Where we are in the workshop\n", + "\n", + "The earlier notebooks used an existing EngiBench problem. This notebook flips the direction: instead of asking \"how do I train and evaluate a model on a benchmark?\", we ask:\n", + "\n", + "**What does a simulator need before it can become a reusable benchmark?**\n", + "\n", + "A heat exchanger is a good capstone because it is not another topology-optimization image. 
The design is a small vector of geometric choices, the physics is thermal-fluid system performance, and the constraints are things engineers actually care about: heat duty, pressure drop, size, and manufacturability.\n" + ] + }, + { + "cell_type": "markdown", + "id": "dfbaa22a", + "metadata": {}, + "source": [ + "## The problem: compact heat exchanger sizing\n", + "\n", + "Imagine a colleague says:\n", + "\n", + "> I have a hot stream and a cold stream. I need at least 5 kW of heat transfer, but I cannot allow more than 35 kPa pressure drop on the cold side. Can an optimizer or generative model propose useful exchanger geometries?\n", + "\n", + "We will use a deliberately small design vector:\n", + "\n", + "| Design variable | Meaning | Typical effect |\n", + "|---|---|---|\n", + "| `tube_diameter_m` | inner tube diameter | larger diameter lowers pressure drop but can lower velocity and heat transfer coefficient |\n", + "| `tube_length_m` | length of each tube | more area, more pressure drop |\n", + "| `n_tubes` | number of parallel tubes | more area and lower velocity, but larger/costlier exchanger |\n", + "\n", + "The simulator estimates heat transfer with the effectiveness-NTU method and pressure drop with a Darcy friction-factor model. When `ht` and `fluids` are installed, we use their implementations for those two standard engineering calculations. 
If not, the notebook falls back to the same textbook formulas so the story still runs.\n", + "\n", + "\n", + "### Exercise legend\n", + "\n", + "| Marker | Meaning |\n", + "|---|---|\n", + "| `PUBLIC FILL-IN CELL` | Your turn: edit the code between `START FILL` and `END FILL` |\n", + "| `CHECKPOINT` | Automated check: if it fails, fix the fill-in before continuing |\n" + ] + }, + { + "cell_type": "markdown", + "id": "6dd4af6d", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "The notebook is self-contained, but Colab should install `ht` and `fluids` so we demonstrate wrapping real Python engineering libraries.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a1199faf", + "metadata": {}, + "outputs": [], + "source": [ + "import subprocess\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # set True to force install in a local notebook runtime\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " _pip([\"numpy\", \"pandas\", \"matplotlib\", \"scipy\", \"ht\", \"fluids\"])\n", + " print(\"Install complete. If imports fail, restart the runtime and rerun from the top.\")\n", + "else:\n", + " print(\"Using current environment. 
Set FORCE_INSTALL=True to install optional libraries here.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81e3012b", + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from types import SimpleNamespace\n", + "import math\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "try:\n", + " from ht.hx import NTU_from_UA, effectiveness_from_NTU\n", + " HT_AVAILABLE = True\n", + "except Exception:\n", + " HT_AVAILABLE = False\n", + "\n", + "try:\n", + " from fluids.friction import friction_factor\n", + " FLUIDS_AVAILABLE = True\n", + "except Exception:\n", + " FLUIDS_AVAILABLE = False\n", + "\n", + "print(\"ht available: \", HT_AVAILABLE)\n", + "print(\"fluids available: \", FLUIDS_AVAILABLE)\n" + ] + }, + { + "cell_type": "markdown", + "id": "3780e38c", + "metadata": {}, + "source": [ + "---\n", + "## 1 - What is fixed by the benchmark?\n", + "\n", + "A reusable design benchmark has to pin down a contract:\n", + "\n", + "1. **Design space:** what the algorithm is allowed to output.\n", + "2. **Conditions:** what scenario the design must work under.\n", + "3. **Simulator:** how a candidate design is scored.\n", + "4. **Constraints:** when a candidate is invalid or suspect.\n", + "5. **Baseline optimizer:** a simple reference method to compare against.\n", + "6. 
**Renderer:** a canonical way to inspect the design.\n", + "\n", + "The class below is intentionally small, but it has the same moving parts as an EngiBench `Problem`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7390f22e", + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class Box:\n", + " \"\"\"Tiny stand-in for a Gymnasium Box design space.\"\"\"\n", + "\n", + " low: np.ndarray\n", + " high: np.ndarray\n", + " labels: tuple[str, ...]\n", + "\n", + " @property\n", + " def shape(self):\n", + " return self.low.shape\n", + "\n", + " def sample(self, rng):\n", + " return rng.uniform(self.low, self.high).astype(float)\n", + "\n", + " def clip(self, x):\n", + " return np.clip(np.asarray(x, dtype=float), self.low, self.high)\n", + "\n", + " def contains(self, x):\n", + " x = np.asarray(x, dtype=float)\n", + " return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n", + "\n", + "\n", + "@dataclass\n", + "class OptiStep:\n", + " obj_values: np.ndarray\n", + " step: int\n", + " design: np.ndarray\n" + ] + }, + { + "cell_type": "markdown", + "id": "0b55f9f0", + "metadata": {}, + "source": [ + "---\n", + "## 2 - The thermal-fluid model\n", + "\n", + "This is the physics core. We use:\n", + "\n", + "- heat capacity rates: `C = mdot * cp`\n", + "- heat-transfer area: `A = pi * D * L * n_tubes`\n", + "- overall conductance: `UA = U * A`\n", + "- effectiveness-NTU method for counterflow heat exchange\n", + "- Darcy-Weisbach pressure drop on the cold side\n", + "\n", + "The design lesson is not that this is the world's most detailed exchanger model. 
The lesson is that a benchmark should make all assumptions explicit and executable.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91b5882c", + "metadata": {}, + "outputs": [], + "source": [ + "def _fallback_effectiveness_from_ntu(ntu: float, cr: float) -> float:\n", + " \"\"\"Counterflow heat-exchanger effectiveness from NTU and capacity ratio.\"\"\"\n", + " ntu = max(float(ntu), 0.0)\n", + " cr = min(max(float(cr), 1e-9), 0.999999)\n", + " if abs(1.0 - cr) < 1e-6:\n", + " return ntu / (1.0 + ntu)\n", + " numerator = 1.0 - math.exp(-ntu * (1.0 - cr))\n", + " denominator = 1.0 - cr * math.exp(-ntu * (1.0 - cr))\n", + " return numerator / denominator\n", + "\n", + "\n", + "def _fallback_friction_factor(re: float, relative_roughness: float) -> float:\n", + " \"\"\"Darcy friction factor: laminar exact, turbulent Haaland approximation.\"\"\"\n", + " re = max(float(re), 1e-9)\n", + " if re < 2300:\n", + " return 64.0 / re\n", + " term = (relative_roughness / 3.7) ** 1.11 + 6.9 / re\n", + " return 1.0 / (-1.8 * math.log10(term)) ** 2\n", + "\n", + "\n", + "def _ntu_from_ua(ua: float, c_min: float) -> float:\n", + " if HT_AVAILABLE:\n", + " return float(NTU_from_UA(UA=ua, Cmin=c_min))\n", + " return float(ua / max(c_min, 1e-12))\n", + "\n", + "\n", + "def _effectiveness(ntu: float, cr: float) -> float:\n", + " if HT_AVAILABLE:\n", + " return float(effectiveness_from_NTU(NTU=ntu, Cr=cr, subtype=\"counterflow\"))\n", + " return _fallback_effectiveness_from_ntu(ntu, cr)\n", + "\n", + "\n", + "def _friction_factor(re: float, eD: float) -> float:\n", + " if FLUIDS_AVAILABLE:\n", + " return float(friction_factor(Re=re, eD=eD, Darcy=True))\n", + " return _fallback_friction_factor(re, eD)\n" + ] + }, + { + "cell_type": "markdown", + "id": "d6d31f9b", + "metadata": {}, + "source": [ + "---\n", + "## 3 - Wrap the simulator as a problem\n", + "\n", + "Read this class the way you would read a real benchmark implementation. 
The important question is not \"do I like these exact constants?\" It is:\n", + "\n", + "**Can another lab run the same design under the same conditions and get the same metrics?**\n", + "\n", + "That is what a benchmark wrapper buys us.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "529cba00", + "metadata": {}, + "outputs": [], + "source": [ + "class HeatExchangerDesignProblem:\n", + " \"\"\"Small EngiBench-style heat-exchanger design problem.\"\"\"\n", + "\n", + " objectives = (\n", + " (\"heat_shortfall_W\", \"MINIMIZE\"),\n", + " (\"pumping_power_W\", \"MINIMIZE\"),\n", + " (\"area_m2\", \"MINIMIZE\"),\n", + " )\n", + "\n", + " design_space = Box(\n", + " low=np.array([0.006, 0.50, 2.0]),\n", + " high=np.array([0.030, 6.00, 40.0]),\n", + " labels=(\"tube_diameter_m\", \"tube_length_m\", \"n_tubes\"),\n", + " )\n", + "\n", + " default_conditions = {\n", + " \"hot_in_C\": 80.0,\n", + " \"cold_in_C\": 20.0,\n", + " \"hot_mdot_kg_s\": 0.32,\n", + " \"cold_mdot_kg_s\": 0.24,\n", + " \"required_heat_W\": 5000.0,\n", + " \"max_cold_dp_kPa\": 35.0,\n", + " \"hot_side_h_W_m2K\": 180.0,\n", + " }\n", + "\n", + " def __init__(self, seed: int = 7, **condition_overrides):\n", + " self.seed = seed\n", + " self.rng = np.random.default_rng(seed)\n", + " self.conditions = {**self.default_conditions, **condition_overrides}\n", + "\n", + " # Constant properties keep the notebook focused. 
A production problem\n", + " # could call CoolProp here for temperature-dependent properties.\n", + " self.cold = SimpleNamespace(rho=997.0, cp=4180.0, mu=1.0e-3, k=0.60, pr=7.0)\n", + " self.hot = SimpleNamespace(rho=850.0, cp=2200.0, mu=3.0e-3, k=0.13, pr=50.0)\n", + " self.wall_k_W_mK = 16.0\n", + " self.wall_thickness_m = 0.001\n", + " self.roughness_m = 1.5e-5\n", + " self.pump_efficiency = 0.65\n", + "\n", + " def reset(self, seed: int | None = None):\n", + " if seed is not None:\n", + " self.seed = seed\n", + " self.rng = np.random.default_rng(self.seed)\n", + "\n", + " def unpack_design(self, design):\n", + " d, length, n_tubes = self.design_space.clip(design)\n", + " return float(d), float(length), int(round(float(n_tubes)))\n", + "\n", + " def _inside_heat_transfer_coefficient(self, diameter_m, n_tubes, cold_mdot):\n", + " area_per_tube = math.pi * diameter_m**2 / 4.0\n", + " velocity = cold_mdot / (self.cold.rho * area_per_tube * max(n_tubes, 1))\n", + " reynolds = self.cold.rho * velocity * diameter_m / self.cold.mu\n", + " if reynolds < 2300:\n", + " nusselt = 3.66\n", + " else:\n", + " nusselt = 0.023 * reynolds**0.8 * self.cold.pr**0.4\n", + " h_inside = nusselt * self.cold.k / diameter_m\n", + " return h_inside, velocity, reynolds\n", + "\n", + " def simulate(self, design, config: dict | None = None) -> np.ndarray:\n", + " cfg = {**self.conditions, **(config or {})}\n", + " diameter_m, length_m, n_tubes = self.unpack_design(design)\n", + "\n", + " h_inside, velocity, reynolds = self._inside_heat_transfer_coefficient(\n", + " diameter_m, n_tubes, cfg[\"cold_mdot_kg_s\"]\n", + " )\n", + " h_outside = float(cfg[\"hot_side_h_W_m2K\"])\n", + " u_overall = 1.0 / (\n", + " 1.0 / h_inside\n", + " + self.wall_thickness_m / self.wall_k_W_mK\n", + " + 1.0 / h_outside\n", + " )\n", + "\n", + " area_m2 = math.pi * diameter_m * length_m * n_tubes\n", + " ua = u_overall * area_m2\n", + "\n", + " c_hot = cfg[\"hot_mdot_kg_s\"] * self.hot.cp\n", + " c_cold = 
cfg[\"cold_mdot_kg_s\"] * self.cold.cp\n", + " c_min = min(c_hot, c_cold)\n", + " c_max = max(c_hot, c_cold)\n", + " cr = c_min / c_max\n", + " ntu = _ntu_from_ua(ua, c_min)\n", + " eps = _effectiveness(ntu, cr)\n", + "\n", + " q_max = c_min * (cfg[\"hot_in_C\"] - cfg[\"cold_in_C\"])\n", + " q_W = eps * q_max\n", + " heat_shortfall_W = max(cfg[\"required_heat_W\"] - q_W, 0.0)\n", + "\n", + " eD = self.roughness_m / diameter_m\n", + " f_darcy = _friction_factor(reynolds, eD)\n", + " minor_loss_K = 1.5\n", + " cold_dp_Pa = (f_darcy * length_m / diameter_m + minor_loss_K) * 0.5 * self.cold.rho * velocity**2\n", + " pumping_power_W = cold_dp_Pa * (cfg[\"cold_mdot_kg_s\"] / self.cold.rho) / self.pump_efficiency\n", + "\n", + " cold_out_C = cfg[\"cold_in_C\"] + q_W / c_cold\n", + " hot_out_C = cfg[\"hot_in_C\"] - q_W / c_hot\n", + "\n", + " self.last_details = {\n", + " \"diameter_m\": diameter_m,\n", + " \"length_m\": length_m,\n", + " \"n_tubes\": n_tubes,\n", + " \"area_m2\": area_m2,\n", + " \"U_W_m2K\": u_overall,\n", + " \"UA_W_K\": ua,\n", + " \"NTU\": ntu,\n", + " \"effectiveness\": eps,\n", + " \"heat_transfer_W\": q_W,\n", + " \"heat_shortfall_W\": heat_shortfall_W,\n", + " \"cold_dp_kPa\": cold_dp_Pa / 1000.0,\n", + " \"pumping_power_W\": pumping_power_W,\n", + " \"cold_velocity_m_s\": velocity,\n", + " \"cold_reynolds\": reynolds,\n", + " \"cold_out_C\": cold_out_C,\n", + " \"hot_out_C\": hot_out_C,\n", + " }\n", + "\n", + " return np.array([heat_shortfall_W, pumping_power_W, area_m2], dtype=float)\n", + "\n", + " def check_constraints(self, design, config: dict | None = None) -> list[str]:\n", + " cfg = {**self.conditions, **(config or {})}\n", + " violations = []\n", + " x = np.asarray(design, dtype=float)\n", + " if not self.design_space.contains(x):\n", + " violations.append(\"design is outside geometry bounds\")\n", + "\n", + " self.simulate(x, cfg)\n", + " d = self.last_details\n", + " if d[\"heat_shortfall_W\"] > 1e-6:\n", + " 
violations.append(\"required heat duty is not met\")\n", + " if d[\"cold_dp_kPa\"] > cfg[\"max_cold_dp_kPa\"]:\n", + " violations.append(\"cold-side pressure drop exceeds limit\")\n", + " if d[\"cold_velocity_m_s\"] < 0.20:\n", + " violations.append(\"cold-side velocity is very low; fouling risk\")\n", + " if d[\"cold_velocity_m_s\"] > 3.00:\n", + " violations.append(\"cold-side velocity is high; erosion/noise risk\")\n", + " min_approach_C = 2.0\n", + " if cfg[\"hot_in_C\"] - d[\"cold_out_C\"] < min_approach_C:\n", + " violations.append(\"hot-in to cold-out terminal approach is too small\")\n", + " if d[\"hot_out_C\"] - cfg[\"cold_in_C\"] < min_approach_C:\n", + " violations.append(\"hot-out to cold-in terminal approach is too small\")\n", + " return violations\n", + "\n", + " def random_design(self):\n", + " return self.design_space.sample(self.rng), -1.0\n", + "\n", + " def score(self, design, config: dict | None = None) -> float:\n", + " cfg = {**self.conditions, **(config or {})}\n", + " obj = self.simulate(design, cfg)\n", + " d = self.last_details\n", + " pressure_penalty = max(d[\"cold_dp_kPa\"] - cfg[\"max_cold_dp_kPa\"], 0.0) / cfg[\"max_cold_dp_kPa\"]\n", + " velocity_penalty = max(0.20 - d[\"cold_velocity_m_s\"], 0.0) + max(d[\"cold_velocity_m_s\"] - 3.00, 0.0)\n", + " return (\n", + " obj[0] / cfg[\"required_heat_W\"]\n", + " + 0.02 * obj[1]\n", + " + 0.08 * obj[2]\n", + " + 10.0 * pressure_penalty\n", + " + 2.0 * velocity_penalty\n", + " )\n", + "\n", + " def optimize(self, starting_point=None, config: dict | None = None, n_candidates: int = 600):\n", + " if starting_point is None:\n", + " starting_point, _ = self.random_design()\n", + " best = self.design_space.clip(starting_point)\n", + " best_score = self.score(best, config)\n", + " best_obj = self.simulate(best, config)\n", + " history = [OptiStep(obj_values=best_obj, step=0, design=best.copy())]\n", + "\n", + " for step in range(1, n_candidates + 1):\n", + " candidate = 
self.design_space.sample(self.rng)\n", + " candidate_score = self.score(candidate, config)\n", + " if candidate_score < best_score:\n", + " best = candidate.copy()\n", + " best_score = candidate_score\n", + " best_obj = self.simulate(best, config)\n", + " if step % 20 == 0:\n", + " history.append(OptiStep(obj_values=best_obj.copy(), step=step, design=best.copy()))\n", + " return best, history\n", + "\n", + " def render(self, design, config: dict | None = None):\n", + " self.simulate(design, config)\n", + " d = self.last_details\n", + " labels = self.design_space.labels\n", + " values = self.design_space.clip(design)\n", + "\n", + " fig, axes = plt.subplots(1, 3, figsize=(14, 4))\n", + "\n", + " # Schematic panel\n", + " ax = axes[0]\n", + " ax.set_title(\"Counterflow tube bundle\")\n", + " ax.plot([0.08, 0.92], [0.65, 0.65], color=\"#b91c1c\", linewidth=6, solid_capstyle=\"round\")\n", + " ax.plot([0.92, 0.08], [0.35, 0.35], color=\"#2563eb\", linewidth=6, solid_capstyle=\"round\")\n", + " for y in np.linspace(0.40, 0.60, 5):\n", + " ax.plot([0.16, 0.84], [y, y], color=\"0.25\", linewidth=1.5, alpha=0.8)\n", + " ax.text(0.08, 0.74, f\"hot in {self.conditions['hot_in_C']:.0f} C\", color=\"#b91c1c\")\n", + " ax.text(0.70, 0.24, f\"cold in {self.conditions['cold_in_C']:.0f} C\", color=\"#2563eb\")\n", + " ax.text(0.08, 0.08, f\"D={d['diameter_m']*1000:.1f} mm, L={d['length_m']:.2f} m, tubes={d['n_tubes']}\")\n", + " ax.set_xlim(0, 1); ax.set_ylim(0, 1); ax.axis(\"off\")\n", + "\n", + " # Objective panel\n", + " ax = axes[1]\n", + " names = [\"Q delivered\", \"Q required\"]\n", + " vals = [d[\"heat_transfer_W\"] / 1000, self.conditions[\"required_heat_W\"] / 1000]\n", + " ax.bar(names, vals, color=[\"#0f766e\", \"#525252\"])\n", + " ax.set_ylabel(\"kW\")\n", + " ax.set_title(\"Heat duty\")\n", + " ax.grid(axis=\"y\", alpha=0.25)\n", + "\n", + " # Constraint / tradeoff panel\n", + " ax = axes[2]\n", + " names = [\"dp\", \"limit\", \"area\", \"pump\"]\n", + " vals = 
[d[\"cold_dp_kPa\"], self.conditions[\"max_cold_dp_kPa\"], d[\"area_m2\"], d[\"pumping_power_W\"]]\n", + " colors = [\"#7c3aed\", \"#525252\", \"#ea580c\", \"#0891b2\"]\n", + " ax.bar(names, vals, color=colors)\n", + " ax.set_title(\"Pressure, size, power\")\n", + " ax.set_ylabel(\"mixed units\")\n", + " ax.grid(axis=\"y\", alpha=0.25)\n", + "\n", + " fig.suptitle(\"HeatExchangerDesignProblem.render(design)\", fontsize=14)\n", + " fig.tight_layout()\n", + " return fig\n" + ] + }, + { + "cell_type": "markdown", + "id": "a81bb6b7", + "metadata": {}, + "source": [ + "---\n", + "## 4 - Instantiate and inspect the problem\n", + "\n", + "This is the moment where a simulator starts to feel like a benchmark: we can inspect the design space, the objectives, and the operating scenario before running any optimization.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b4af766", + "metadata": {}, + "outputs": [], + "source": [ + "problem = HeatExchangerDesignProblem(seed=4)\n", + "\n", + "print(\"Design variables:\")\n", + "for label, lo, hi in zip(problem.design_space.labels, problem.design_space.low, problem.design_space.high):\n", + " print(f\" {label:18s}: {lo:.4g} to {hi:.4g}\")\n", + "\n", + "print(\"\\nObjectives:\")\n", + "for name, direction in problem.objectives:\n", + " print(f\" {name:18s}: {direction}\")\n", + "\n", + "print(\"\\nConditions:\")\n", + "for k, v in problem.conditions.items():\n", + " print(f\" {k:18s}: {v}\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "0e2400c0", + "metadata": {}, + "source": [ + "---\n", + "## 5 - FILL-IN 04-A: Simulate one candidate\n", + "\n", + "A benchmark is more than a dataset. It should let us ask: *if a model gives me this design, what happens under this scenario?*\n", + "\n", + "Your first task is to choose a candidate heat-exchanger geometry. 
Bigger is not always better: longer tubes and more tubes add area, but pressure drop and pumping power can fight back.\n", + "\n", + "Use the design-space bounds printed above:\n", + "\n", + "1. `tube_diameter_m` between 0.006 and 0.030\n", + "2. `tube_length_m` between 0.50 and 6.00\n", + "3. `n_tubes` between 2 and 40\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c7df7ed", + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 04-A\n", + "# Goal: choose one candidate heat-exchanger geometry and simulate it.\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Replace None with a 3-value numpy array:\n", + "# [tube_diameter_m, tube_length_m, n_tubes]\n", + "# Example shape only: np.array([0.014, 3.20, 14.0])\n", + "candidate = None\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if candidate is None:\n", + " raise RuntimeError(\"Set candidate to a 3-value numpy array before continuing.\")\n", + "\n", + "candidate = np.asarray(candidate, dtype=float)\n", + "\n", + "# CHECKPOINT\n", + "assert candidate.shape == problem.design_space.shape, f\"Expected shape {problem.design_space.shape}, got {candidate.shape}\"\n", + "assert problem.design_space.contains(candidate), \"Candidate is outside the design-space bounds.\"\n", + "\n", + "obj = problem.simulate(candidate)\n", + "violations = problem.check_constraints(candidate)\n", + "\n", + "# CHECKPOINT\n", + "assert np.all(np.isfinite(obj)), \"Simulation produced non-finite objective values.\"\n", + "assert \"heat_transfer_W\" in problem.last_details, \"Simulator details were not recorded.\"\n", + "\n", + "print(\"Objective vector [heat_shortfall_W, pumping_power_W, area_m2]:\")\n", + "print(np.round(obj, 4))\n", + "\n", + "print(\"\\nDetails:\")\n", + "for key in [\"heat_transfer_W\", \"cold_dp_kPa\", \"pumping_power_W\", \"area_m2\", \"effectiveness\", \"cold_reynolds\", 
\"cold_velocity_m_s\", \"hot_out_C\", \"cold_out_C\"]:\n", + " print(f\" {key:18s}: {problem.last_details[key]:.4g}\")\n", + "\n", + "print(\"\\nConstraint violations:\")\n", + "print(violations if violations else \" none\")\n", + "print(\"\\nCHECKPOINT passed - candidate simulated successfully.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e445feca", + "metadata": {}, + "outputs": [], + "source": [ + "fig = problem.render(candidate)\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "1843b62d", + "metadata": {}, + "source": [ + "---\n", + "## 6 - Why constraints are separate from objectives\n", + "\n", + "A design can have a small area and low pumping power because it simply fails to transfer enough heat. That is why `simulate()` and `check_constraints()` answer different questions.\n", + "\n", + "- `simulate()` says how the design performs.\n", + "- `check_constraints()` says whether the performance is acceptable for this benchmark scenario.\n", + "\n", + "This separation is exactly what made Notebook 02 useful: a design can look plausible and still fail engineering checks.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8a2735f", + "metadata": {}, + "outputs": [], + "source": [ + "examples = {\n", + " \"too small\": np.array([0.010, 0.70, 4.0]),\n", + " \"pressure-heavy\": np.array([0.0065, 5.50, 3.0]),\n", + " \"reasonable\": candidate,\n", + "}\n", + "\n", + "rows = []\n", + "for name, x in examples.items():\n", + " obj = problem.simulate(x)\n", + " rows.append({\n", + " \"case\": name,\n", + " \"D_mm\": problem.last_details[\"diameter_m\"] * 1000,\n", + " \"L_m\": problem.last_details[\"length_m\"],\n", + " \"n_tubes\": problem.last_details[\"n_tubes\"],\n", + " \"Q_kW\": problem.last_details[\"heat_transfer_W\"] / 1000,\n", + " \"shortfall_W\": obj[0],\n", + " \"dp_kPa\": problem.last_details[\"cold_dp_kPa\"],\n", + " \"pump_W\": obj[1],\n", + " \"area_m2\": obj[2],\n", + " 
\"violations\": \"; \".join(problem.check_constraints(x)) or \"none\",\n", + " })\n", + "\n", + "pd.DataFrame(rows)\n" + ] + }, + { + "cell_type": "markdown", + "id": "3b153a4c", + "metadata": {}, + "source": [ + "---\n", + "## 7 - FILL-IN 04-B: Run a tiny baseline optimizer\n", + "\n", + "For a real EngiBench contribution, the optimizer should be documented carefully: what it optimizes, how long it runs, and whether it is meant to be strong or just a baseline.\n", + "\n", + "Here we use random search because it is transparent. The point is not that random search is clever. The point is that every benchmark needs a reference method that everyone can rerun.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0f61c42", + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 04-B\n", + "# Goal: run the baseline optimizer from a random starting design.\n", + "\n", + "problem.reset(seed=12)\n", + "start, _ = problem.random_design()\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Call problem.optimize(...) using the random start above.\n", + "# Hint: use starting_point=start and n_candidates=800.\n", + "best_design = None\n", + "history = None\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if best_design is None or history is None:\n", + " raise RuntimeError(\"Call problem.optimize(...) 
and assign best_design, history.\")\n", + "\n", + "# CHECKPOINT\n", + "assert len(history) > 1, \"Optimization history should contain multiple recorded steps.\"\n", + "assert problem.design_space.contains(best_design), \"Best design is outside the design-space bounds.\"\n", + "\n", + "print(\"Start design:\", dict(zip(problem.design_space.labels, np.round(start, 4))))\n", + "problem.simulate(start)\n", + "print(\"Start details:\", {k: round(problem.last_details[k], 4) for k in [\"heat_transfer_W\", \"cold_dp_kPa\", \"pumping_power_W\", \"area_m2\"]})\n", + "print(\"Start violations:\", problem.check_constraints(start) or \"none\")\n", + "\n", + "print(\"\\nBest design:\", dict(zip(problem.design_space.labels, np.round(best_design, 4))))\n", + "problem.simulate(best_design)\n", + "print(\"Best details:\", {k: round(problem.last_details[k], 4) for k in [\"heat_transfer_W\", \"cold_dp_kPa\", \"pumping_power_W\", \"area_m2\"]})\n", + "print(\"Best violations:\", problem.check_constraints(best_design) or \"none\")\n", + "\n", + "# CHECKPOINT\n", + "assert problem.last_details[\"heat_transfer_W\"] >= 0.95 * problem.conditions[\"required_heat_W\"], (\n", + " \"The best design is still far below the heat-duty target. 
Try increasing n_candidates.\"\n", + ")\n", + "print(\"\\nCHECKPOINT passed - baseline optimizer produced a useful candidate.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "474be227", + "metadata": {}, + "outputs": [], + "source": [ + "fig, axes = plt.subplots(1, 2, figsize=(12, 4))\n", + "\n", + "steps = [h.step for h in history]\n", + "shortfall = [h.obj_values[0] for h in history]\n", + "pump = [h.obj_values[1] for h in history]\n", + "area = [h.obj_values[2] for h in history]\n", + "\n", + "axes[0].plot(steps, shortfall, marker=\"o\", label=\"heat shortfall [W]\")\n", + "axes[0].set_xlabel(\"candidate evaluations\")\n", + "axes[0].set_ylabel(\"W\")\n", + "axes[0].set_title(\"Best heat-duty shortfall so far\")\n", + "axes[0].grid(alpha=0.25)\n", + "\n", + "axes[1].plot(steps, pump, marker=\"o\", label=\"pumping power [W]\")\n", + "axes[1].plot(steps, area, marker=\"s\", label=\"area [m2]\")\n", + "axes[1].set_xlabel(\"candidate evaluations\")\n", + "axes[1].set_title(\"Competing costs of the best design\")\n", + "axes[1].legend()\n", + "axes[1].grid(alpha=0.25)\n", + "\n", + "fig.tight_layout()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7432cc93", + "metadata": {}, + "outputs": [], + "source": [ + "fig = problem.render(best_design)\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "66dc54ad", + "metadata": {}, + "source": [ + "---\n", + "## 8 - FILL-IN 04-C: Change the operating scenario\n", + "\n", + "Conditions are the input side of the benchmark. The same design can be good for one scenario and bad for another.\n", + "\n", + "This is what makes conditional design interesting: an inverse model should not just produce \"a heat exchanger\". It should produce a heat exchanger for *this* duty, *these* flow rates, and *this* pressure-drop limit.\n", + "\n", + "Your task is to add one new scenario to the list. 
Keep the keys the same as the existing entries.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0f8175d", + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 04-C\n", + "# Goal: add one operating condition, then optimize a design for each scenario.\n", + "\n", + "scenarios = [\n", + " {\"name\": \"base\", \"required_heat_W\": 5000.0, \"cold_mdot_kg_s\": 0.24, \"max_cold_dp_kPa\": 35.0},\n", + " {\"name\": \"harder duty\", \"required_heat_W\": 7000.0, \"cold_mdot_kg_s\": 0.24, \"max_cold_dp_kPa\": 35.0},\n", + " {\"name\": \"tight dp\", \"required_heat_W\": 5000.0, \"cold_mdot_kg_s\": 0.24, \"max_cold_dp_kPa\": 15.0},\n", + " {\"name\": \"more flow\", \"required_heat_W\": 5000.0, \"cold_mdot_kg_s\": 0.40, \"max_cold_dp_kPa\": 35.0},\n", + "]\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Add one more scenario dict to scenarios.\n", + "# Hint: choose a new name and change required_heat_W, cold_mdot_kg_s, or max_cold_dp_kPa.\n", + "# Example shape only:\n", + "# scenarios.append({\"name\": \"your case\", \"required_heat_W\": 6000.0, \"cold_mdot_kg_s\": 0.30, \"max_cold_dp_kPa\": 25.0})\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "# CHECKPOINT\n", + "assert len(scenarios) >= 5, \"Add at least one scenario to the list.\"\n", + "required_keys = {\"name\", \"required_heat_W\", \"cold_mdot_kg_s\", \"max_cold_dp_kPa\"}\n", + "for scenario in scenarios:\n", + " assert required_keys <= set(scenario), f\"Scenario is missing keys: {scenario}\"\n", + "\n", + "rows = []\n", + "for scenario in scenarios:\n", + " cfg = {k: v for k, v in scenario.items() if k != \"name\"}\n", + " problem.reset(seed=30)\n", + " best, _ = problem.optimize(config=cfg, n_candidates=700)\n", + " obj = problem.simulate(best, cfg)\n", + " rows.append({\n", + " \"scenario\": scenario[\"name\"],\n", + " \"D_mm\": problem.last_details[\"diameter_m\"] * 
1000,\n", + " \"L_m\": problem.last_details[\"length_m\"],\n", + " \"n_tubes\": problem.last_details[\"n_tubes\"],\n", + " \"Q_kW\": problem.last_details[\"heat_transfer_W\"] / 1000,\n", + " \"required_kW\": cfg[\"required_heat_W\"] / 1000,\n", + " \"dp_kPa\": problem.last_details[\"cold_dp_kPa\"],\n", + " \"dp_limit_kPa\": cfg[\"max_cold_dp_kPa\"],\n", + " \"area_m2\": obj[2],\n", + " \"violations\": \"; \".join(problem.check_constraints(best, cfg)) or \"none\",\n", + " })\n", + "\n", + "scenario_df = pd.DataFrame(rows)\n", + "print(\"CHECKPOINT passed - scenario sweep complete.\")\n", + "scenario_df\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3e9965a9", + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(figsize=(8, 4))\n", + "x = np.arange(len(scenario_df))\n", + "ax.bar(x - 0.2, scenario_df[\"Q_kW\"], width=0.4, label=\"delivered\")\n", + "ax.bar(x + 0.2, scenario_df[\"required_kW\"], width=0.4, label=\"required\")\n", + "ax.set_xticks(x)\n", + "ax.set_xticklabels(scenario_df[\"scenario\"], rotation=15, ha=\"right\")\n", + "ax.set_ylabel(\"heat duty [kW]\")\n", + "ax.set_title(\"Different conditions lead to different designs\")\n", + "ax.legend()\n", + "ax.grid(axis=\"y\", alpha=0.25)\n", + "fig.tight_layout()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "4b59b4eb", + "metadata": {}, + "source": [ + "---\n", + "## 9 - What would make this a real EngiBench problem?\n", + "\n", + "This notebook is a workshop wrapper, not a polished repository contribution. To turn it into a real EngiBench problem, we would still need:\n", + "\n", + "1. A module under `engibench/problems/heat_exchanger/` with a `v0.py` implementation.\n", + "2. A documented dataset of optimized designs across sampled conditions.\n", + "3. Tests that every design in the dataset simulates and passes constraints.\n", + "4. A stronger baseline optimizer and fixed evaluation budget.\n", + "5. 
Clear citations for the heat-transfer and pressure-drop correlations.\n", + "6. Documentation and a canonical render image.\n", + "\n", + "The important thing is that the shape is now visible. Once the simulator is wrapped, any model in EngiOpt can treat this like another conditional design problem.\n" + ] + }, + { + "cell_type": "markdown", + "id": "639d215c", + "metadata": {}, + "source": [ + "## Reflection\n", + "\n", + "Before closing the capstone, discuss:\n", + "\n", + "1. Which parts of this problem are **conditions** and which are **design variables**?\n", + "2. Is pressure drop an objective, a constraint, or both? What changes if you move it?\n", + "3. What data would you generate before training a conditional design model?\n", + "4. Which simplification in this notebook would matter most for a publication-grade benchmark?\n", + "5. How is this different from the heat-conduction topology problems already in EngiBench?\n" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.x" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/workshops/dcc26/requirements-colab.txt b/workshops/dcc26/requirements-colab.txt new file mode 100644 index 00000000..aee3142e --- /dev/null +++ b/workshops/dcc26/requirements-colab.txt @@ -0,0 +1,15 @@ +# Local convenience snapshot for workshop notebooks. +# Source of truth for Colab is the install/bootstrap cell inside each notebook. +# EngiOpt is intentionally installed from Git in notebook bootstrap cells. 
+ +engibench[beams2d] +sqlitedict +matplotlib +seaborn +gymnasium +pybullet +tqdm +tyro +wandb +torch +torchvision diff --git a/workshops/dcc26/simple/00_framing_your_design_problem.ipynb b/workshops/dcc26/simple/00_framing_your_design_problem.ipynb new file mode 100644 index 00000000..eeefc8cc --- /dev/null +++ b/workshops/dcc26/simple/00_framing_your_design_problem.ipynb @@ -0,0 +1,695 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Notebook 00 — Framing your design problem\n", + "\n", + "*A guided tour, not an exercise sheet. Just run the cells top to bottom and read the prose between them.*\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Where we are in the workshop\n", + "\n", + "You just heard a talk arguing that **engineering design needs better benchmarks** before we can seriously compare ML methods. Great — but *what does a benchmark actually consist of?*\n", + "\n", + "This notebook answers that question by walking through a real engineering problem the way you would if it were sitting on your desk: **what am I designing, under what conditions, by what measure, against what reference?** At each step we write down the answer — and that written-down answer is exactly what a benchmark is.\n", + "\n", + "We'll use [EngiBench](https://github.com/IDEALLab/EngiBench) as the place where these answers live, because it happens to be where we collected them. 
But the points we're making would apply to any benchmark for engineering design.\n", + "\n", + "**You do not need any ML background for this notebook.** Just Python.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A concrete problem to anchor the discussion\n", + "\n", + "\n", + "\n", + "Imagine a colleague walks into your office and says:\n", + "\n", + "> *\"I need a 2D bracket that holds a known load at a known point, using no more than 40% of the available material. Can ML help me design it?\"*\n", + "\n", + "That sentence is the problem. But before any ML, you — the researcher — need to pin down:\n", + "\n", + "1. **What am I allowed to design?** (What's the output?)\n", + "2. **Under what scenarios must the design work?** (What's the input?)\n", + "3. **What makes one design better than another?** (What's the metric?)\n", + "4. **When is a candidate nonsense?** (What are the rules it must obey?)\n", + "5. **What have other people already solved?** (Is there data I can learn from?)\n", + "6. **What can I look at?** (Can I see a design, not just a number?)\n", + "7. **How do I score a candidate?** (Is there a simulator?)\n", + "8. **What does a strong classical baseline look like?** (What do I need to beat?)\n", + "\n", + "These aren't ML questions. They're the questions any careful researcher would write down before touching a model. The rest of this notebook walks through each one on a real problem — the `beams2d` topology-optimisation problem — and shows where the answer lives.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` installed." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Using current environment. 
Set FORCE_INSTALL=True to install here.\n" + ] + } + ], + "source": [ + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # flip to True to force install locally\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"ipywidgets\"])\n", + " _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/opt/anaconda3/envs/EngiBench312/lib/python3.12/site-packages/gymnasium/spaces/box.py:235: UserWarning: \u001b[33mWARN: Box low's precision lowered by casting to float32, current low.dtype=float64\u001b[0m\n", + " gym.logger.warn(\n", + "/opt/anaconda3/envs/EngiBench312/lib/python3.12/site-packages/gymnasium/spaces/box.py:305: UserWarning: \u001b[33mWARN: Box high's precision lowered by casting to float32, current high.dtype=float64\u001b[0m\n", + " gym.logger.warn(\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "SEED = 7\n", + "np.random.seed(SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 1 — *What problem am I actually working on?*\n", + "\n", + "The first thing a careful researcher does is give their problem **a name and a version**. 
Not a folder on their laptop — a *shared, citable identity* that another lab can load with a single line of code and know they have the same thing you did.\n", + "\n", + "Without this, \"our method beats theirs on topology optimisation\" means nothing: *which* topology-optimisation problem? *Which* mesh resolution? *Which* material model?\n", + "\n", + "So the very first thing we need is **a reproducible handle on the problem itself**.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded: Beams2D\n" + ] + } + ], + "source": [ + "# Every problem in EngiBench has a string ID. For this workshop we pick beams2d:\n", + "# minimise the compliance (flex) of a 2D beam under a prescribed load, subject\n", + "# to a material-budget constraint.\n", + "problem = BUILTIN_PROBLEMS[\"beams2d\"](seed=SEED)\n", + "print(f\"Loaded: {type(problem).__name__}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That one line pins down a precise version of the problem — the same geometry, the same material model, the same discretisation — for you and for anyone who runs your code later.\n", + "\n", + "Every question we ask from here on will go to this `problem` object.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 2 — *What am I allowed to design?*\n", + "\n", + "Before choosing any method, you have to know the **shape of the thing you're producing**. An ML model that outputs a 3D mesh is not interchangeable with one that outputs a 2D image, and *you* need to know which you're dealing with before you sketch an architecture.\n", + "\n", + "This is called the **design space**: the set of all possible designs. 
Formally, it's just a mathematical description of the output — its dimensionality, its value range, whether it's continuous or discrete.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Design space: Box(0.0, 1.0, (50, 100), float64)\n", + "Design shape: (50, 100)\n" + ] + } + ], + "source": [ + "print(\"Design space:\", problem.design_space)\n", + "print(\"Design shape:\", problem.design_space.shape)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Reading that output.** A design here is a **50 × 100 array of floats in [0, 1]**. You can picture it as a greyscale image, where each pixel is the *density of material* at that location: 0 means empty, 1 means fully solid.\n", + "\n", + "Concretely, anything you build — a hand-drawn design, a classical optimiser's result, a neural network's output — has to live in that same 50×100 box. That's the contract.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 3 — *Under what conditions must my design work?*\n", + "\n", + "A 2D bracket isn't just \"a bracket\". It's a bracket **under a specific load, at a specific location, with a specific material budget**. Change any of those and the right design changes with them.\n", + "\n", + "The scenario you evaluate under is called the **operating condition** (or just *condition*). Two things follow from this:\n", + "\n", + "- Your method has to accept these conditions as input. 
There is no meaningful \"unconditional\" answer.\n", + "- Every reported result has to say *which* conditions it was evaluated at, or comparisons across papers are meaningless.\n", + "\n", + "Let's see which conditions `beams2d` needs.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Condition keys: ['volfrac', 'rmin', 'forcedist', 'overhang_constraint']\n" + ] + } + ], + "source": [ + "print(\"Condition keys:\", problem.conditions_keys)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For `beams2d` these are:\n", + "\n", + "- `volfrac` — the fraction of the design area allowed to be solid material (your budget).\n", + "- `forcedist` — where the load is applied along the structure.\n", + "- `rmin` — the filter radius used by the solver; effectively the smallest feature size the result is allowed to resolve.\n", + "- `overhang_constraint` — whether the design must be manufacturable by additive processes without support overhangs.\n", + "\n", + "Every design in this benchmark is evaluated **relative to a specific setting of these conditions**. The condition is the *problem instance*; the design is your *answer* to that instance.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 4 — *What does \"better\" actually mean?*\n", + "\n", + "If you cannot write down a scalar you'd like to push up or down, you cannot compare two designs, and you cannot run a benchmark.\n", + "\n", + "So: the next thing we need is **the objective** — the measure of quality the benchmark agrees to judge designs by. 
Papers are welcome to *argue* about whether this is the right measure for their application, but inside one benchmark it has to be fixed.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Objectives: (('c', ),)\n" + ] + } + ], + "source": [ + "print(\"Objectives:\", problem.objectives)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For `beams2d` the objective is **compliance** (how much the structure deflects under load) and we want to **minimise** it — stiffer is better.\n", + "\n", + "Notice that *compliance* is a physics quantity. It's not MSE between pixels, and it's not a neural-network loss. That's the whole point: to compare engineering methods you need an engineering measure.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 5 — *Do I have reference designs to learn from or compare against?*\n", + "\n", + "As a researcher, you want two things from prior work:\n", + "\n", + "- **Training material** if you're going to fit a model — you need examples of *good* designs under varied conditions.\n", + "- **Comparison points** even if you're not — \"is my method beating what's already out there?\" is the second question every reviewer asks.\n", + "\n", + "A benchmark therefore ships with a **dataset** of known-good designs, each paired with the condition it was optimised for. 
Each row is essentially \"scenario → a design that solves it\".\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DatasetDict({\n", + " train: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 3880\n", + " })\n", + " val: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 728\n", + " })\n", + " test: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 243\n", + " })\n", + "})\n" + ] + } + ], + "source": [ + "dataset = problem.dataset\n", + "print(dataset)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Design shape: (50, 100)\n", + "Scalar conditions for this sample:\n", + " volfrac = 0.188\n", + " rmin = 3.500\n", + " forcedist = 0.050\n", + " overhang_constraint = 0.000\n" + ] + } + ], + "source": [ + "# One concrete sample: the design + the scenario it was optimised for.\n", + "sample_idx = 50\n", + "design = np.array(dataset[\"train\"][\"optimal_design\"][sample_idx])\n", + "config = {k: np.asarray(dataset[\"train\"][k][sample_idx]) for k in problem.conditions_keys}\n", + "\n", + "print(\"Design shape:\", design.shape)\n", + "print(\"Scalar conditions for this sample:\")\n", + "for k, v in config.items():\n", + " if np.asarray(v).ndim == 0:\n", + " print(f\" {k} = {float(v):.3f}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Read the pairing, not the design.** The important thing above isn't the specific numbers — it's that **each dataset entry is a (design, scenario) pair**. 
That's what a benchmark dataset has to be. A pile of designs without their scenarios would be useless: you couldn't say what any of them was solving.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get a feel for the pairing\n", + "\n", + "Staring at one row tells you the *format*. Staring at many rows tells you the *structure* — how the design changes as the condition changes.\n", + "\n", + "Drag the sliders below to filter the dataset by condition range. Narrow `volfrac` and the surviving designs get thinner; slide `forcedist` and the load — and the truss pattern solving it — moves. This is what it feels like to have a conditional benchmark: the answer genuinely depends on the scenario.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e3084f2bf2dc47d4bb12713d400931fd", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HTML(value='

<b>Explore the dataset — drag sliders to filter by condition</b>

')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "55fd658a598e4cafb2f53e397f16b722", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(FloatRangeSlider(value=(0.15, 0.4), continuous_update=False, description='volfrac', layout=Layo…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ae112764ff0940a6afcd74428c58f755", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from engiopt.workshops.dcc26.notebook_helpers import interactive_condition_explorer\n", + "\n", + "interactive_condition_explorer(dataset, problem)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 6 — *Can I actually look at a design?*\n", + "\n", + "Numbers only get you so far. At some point, as an engineer, you want to **see** a candidate — does it look plausible? Is the material spread sensibly? 
Are supports where you'd expect?\n", + "\n", + "A benchmark should give you a canonical way to *visualise* a design, so that discussion across papers is about the same picture.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAmkAAAF5CAYAAADET73UAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAgX9JREFUeJztnQeYFFW2x293T845A0MekCgIIgqKLhiWRXdVBBRMqKCuirCKCTEQDAgqgrqCYVXMPgOiiPrMuoKCIDMwDMwwOec8Xe8719dDd5/bM909qbvn//u+gulT6VbVrapT997/OTpN0zQBAAAAAABcCn1PFwAAAAAAAHDgpAEAAAAAuCBw0gAAAAAAXBA4aQAAAAAALgicNAAAAAAAFwROGgAAAACACwInDQAAAADABYGTBgAAAADggsBJAwAAAABwQeCk9XKSk5PFX//6154uhsvy4osvCp1OJ44dO9Yt+3v00UfFgAEDhMFgEGPGjOmWfbpjnb3yyiu7ZNtfffWVvN70f0/xyCOPiJSUFGE0GoU70933jjPs2LFDBAUFiaKiop4uCgBK4KR5CM8884x8IE6cOLGni+Iy1NbWio0bN4rp06eL+Ph4ERwcLMaOHSs2bdokWlpahKvx2WefiX/9619i8uTJYuvWrWLVqlU9XSTQzVRWVoq1a9eKO+64Q+j1eDybQ84ePeNU07Zt29jyBw8eFOeee650wiIiIsQVV1zBnDGaP2jQILF69epuPBIA7MfLgWWBC/Pqq6/KFoaff/5ZpKenywdPbycjI0PcfPPN4uyzzxZLliwRISEh4tNPPxWLFy8WP/74o3jppZeEK/HFF1/IF/MLL7wgfHx8ero4vZIpU6aIurq6Hjv/W7ZsEc3NzWLOnDk9sn93gM7N+eefb2GbNGmSxe/s7Gx5LUNDQ+XHTnV1tXjsscfE77//Lp+R5tf3+uuvF0uXLhUrV66UH3IAuBJw0jyAo0ePiu+//168++678oFDDtuKFStEbycuLk4+lE866aRWG52fq6++WrZU3XvvvV3qzNbU1IjAwEC7ly8sLBT+/v6d6iBQa2JAQECnbc/TISfZz8+vx/ZP9fJvf/tbj5bB1Tn55JPF5Zdf3uYy5JjR/bd7927Rt29faZswYYL4y1/+Irthr7vuutZl//GPf8iPubfeeks+GwBwJdCe7gGQUxYeHi4uuOACcfHFF8vfznS10RgoejkMHz5cOnzWlJeXi1tvvVX06dNH+Pr6SgeHumasx87QF+tpp50mIiMjpdMxbtw48fbbb7PtUTfFTTfdJB+OtE9alr6IybEinn32WbkPKtOZZ57JxrYcPnxYPmDJGaNlkpKSxGWXXSYqKirk/KioKAsHzcRFF13U2h1izoEDB8S0adNkOWhbDz30kN3jgmiMFHWrHDlyRH7l0xf5vHnz5Dzaxvr162VZqJyxsbHSWSwrK7M4F/SCpheLqQuHXiYm/vOf/8jzSGWjrhs6zuPHj1uUgc7RiBEj5IuJWhHIObvrrrvkvIaGBum40/mka0fXkLpWya66Ju+//77cFi1L5aaxO9bk5OSIa665RiQkJMjl+vfvLxYtWiQaGxsdrjMqNE2T14CuBR3LWWedJa+RCnv3Q91idB7p+lDL6siRI8WGDRvaHZNG3eY0VpDOP73sv/nmG3m+abJe98033xQPP/ywLD
ddb2rJpdZtez629u3bJ8455xw2r71yl5aWytYgslM9pGXOO+88sXfvXovtmJeRWo4SExPlNum5QfcN1Qc6jzExMXI7V111lc06Qs+ZoUOHymOksn399dfCHj755BNxxhlnyA8Y2jc9t2xdV1vQfWJez6x555135Fhbk4NG0HkdMmSIPHZz6FhHjRol/ud//sehMgDQLWjA7UlJSdGuueYa+ffXX3+t0WX9+eef7Vq3X79+2pAhQ7SwsDDtzjvv1NatW6eNHDlS0+v12meffda6XE1NjTZq1CgtMjJSu+uuu7TNmzdr8+fP13Q6nXbLLbdYbDMpKUlbvHix9vTTT8vtTZgwQZbpo48+sliObLTNPn36aGvWrJFTaGio1rdvX7nu8OHDtccff1y75557NB8fH+2ss85qXbehoUHr37+/lpCQoD300EPav//9b23lypXaKaecoh07dqzNY37uuefkvr///vtWW15enhYdHa2Fh4dr999/v/boo49qgwcPluWjZY8ePdrmNhcsWKD5+vpqAwcOlH/T+Xn55ZflvGuvvVbz8vLSFi5cKO133HGHFhgYKMva2Ngol3nllVe0M844Q26D/qbpyJEjch4dH53n2bNna88884w8zqioKC05OVkrKytrLcPUqVO1uLg4eRw333yz9uyzz2rvv/++1tLSok2fPl0LCAjQbr31Vmm/6aabZJlmzZrFrsno0aO1+Ph47cEHH9TWr1+vDRgwQK5bXFzculxOTo4896Zt0nHde++92rBhw1rL5EidUUHXncpz/vnny/pw9dVXy33SsdM5NmHvfqg+0/bOPvtsbePGjXKi83DJJZe0LvPll1/KZeh/E3TOyUbX58knn9SWLFmiRUREyGtN59x63bFjx2rjxo3TnnjiCVmX6BzRPdAe//nPf+T6+/bts7DbU+7//ve/sjx0D9P1feCBB7TExER5P9G1si7jmDFjtEmTJsnj+ec//ynP1WWXXabNnTtXO++88+Q+rrjiCrks1TdzyDZixAh5HWg/a9eulc8Rf39/7ffff29dbuvWrezeoXuC9nXuuedqTz31lFyX6jE9f9q7x2g+bS8oKEj+T9sZP3689umnn1osl52dLefTtq25/PLL5bWzhu5ROh4AXA04aW7OL7/8Ih9IO3fulL+NRqN0kux5CRL0cKX133nnnVZbRUWFfEnTy8YEvbDJsTh06JDF+vRSMBgMWlZWVquttrbWYhlyROihPm3aNAs77ZecEvOHM71gyE7ORmVlZat9+fLlFg/8X3/9Vf5+6623NEcg546cP3LwmpqaWu3kaND2fvrpp1ZbYWGhfMnZ66TRcnQ+zPnmm2+k/dVXX7Ww79ixg9lpG3SOzSGHk87vww8/bGGnlyE5WeZ2chhom+SkmEMOHzndVBZzaDla/rvvvmu10W9yiNPT01tte/fulXZ6qZogJ4i2Sc6BNVQHHa0z1tC5p3JccMEFrdsjyAmjspg7afbuh+6JkJAQrbm52eZ+rZ00qi/k/JFDbV5fXnzxRbmcykkjR5XWM7FhwwZpN3dg2nJKq6qqLOz2lLu+vl464+ZQnaX7ixwp6zLS/Wj6QCDmzJkjnR5y0MwhR46eEebQ+jTRs8dEZmam5ufnp1100UU2nTQ6LnLG6GPFnPz8fHmfWdutoX3Qx8amTZu0Dz74QH5A0Acd1UPzD0Cqk7Rf00eSOcuWLZPz6HyZs2rVKmkvKChoswwAdDfo7nRzqMuBus+oK8jUFTF79mzZPWKvgpG6q0xdgAR1lcyfP1/8+uuvIj8/X9qoS5K6KKhbtbi4uHWiLgTaj3lXB3UJmaAuPepGoXX37NnD9k1dQSR4MGFSp1I3pvkgXpOdxAAEDQgmSAhA467shbpp/vjjD/H0008LL68TQzK3b98uTj31VNmVZSI6Orq1y9JeqLvPHDpvVFYaC2N+3qh7iLqTvvzyyza3R93O1GV36aWXWqxPXbyDBw9m61NXH3VRWZdh2L
BhMqyD+Taoa5ew3gZd04EDB7b+pq4gqhOmc0/loe7QmTNnivHjx7MyUx10tM5Y8/nnn8vuLBorZNoeQV1x1ti7n7CwMNlNtnPnTmEvv/zyiygpKRELFy60qC9UL2h/Kuj8m48rpLIRpvNnC9oP7YPqhTn2lJuuu0kNSsdM26LtUHek6r6j+9vb29vi/iL/y3pMFtmpW53EDObQsASqwyaoW3HWrFnyfrT13KHyU7c0Dfw3v04Ubob20969QPug7d9www2y7t1yyy3yGUX36e233966HAk/TOfEGtNYP9MyJkzXksoDgCsB4YAbQw9DcsbIQaPxLCbogff444+LXbt2yfAT7UHjd8xfhASN3SBoHBg5BDT+i8bL0APR1qB3Ex999JEcS/Tbb79ZjGex3gdhPmbE3PmisUUqu2kcF41/IsXmunXrpKNKL0IacE0Dik3LqmKQPf/88+LBBx9k6rDMzExl+BJ6ydkLvWBpHJI5dN7ISaVxL+2dNxW0Pr08ySFTYf6iJWiMkbXwgLZB4+/suXaqa2J6iZnOPYUxoFARNGatvbLbW2esoetBWB83bcvaObJ3P6TqpfFINFaLzhPdG+T8UhiG9sphLTCha23+cdHW+TOV13wMoiPYU25ynGmMGoXioWeBuaNEY0M7ct/RtqkOm29HVR/pmUEfTFQ/6JlhDV0nwvRxYA19CDgKjc8kp3jNmjVS0Un3n+kj0XosHVFfX88+JIk/GwjVzygAehI4aW4MhWzIy8uTjpoqThA5L/Y4afZAD2pqDaLB5ipMTh0NqCZniQau0wuD4pORI0GD4l977TW2Hn1Fq7BlNz1MCXJEacA+Dfgl4cM///lPGe+IwmtYO0s0CJ9iT9FX+D333CO6AvPWDPPzRg6aLTGHLcfCfH16cdBga9U5sW51sX75mLZBA8rJoVVh/WK259x3Zp3pKPbuh64DfThQawydT5qoXlKrUmeGY3H2/JETRC1WVVVVFq3I9pSb1IykVqaWMPoIIeeF6iK1PKpEGh2575zFVI5XXnlF6cSZt1Q6gqn+kniC7nt65hD0bLSGbHRurFvZTA40iY0AcCXgpLkx9OKnBzgpz1TdZO+9957YvHmz8sVtDinP6CFs/hV56NAh+b+ptYC6vyjWkEp5Zq2qoi4FeqGYPwjppdIVkPNBEzleFIaEAsHSMVNLngly4q699lrx97//XXmuiH79+rV+6ZuTlpbWofLReaOuOypXe9fB1vp0bajl0FmnhrZBKj/qWu6MlgJyLKnVY//+/e3u1546Y+t6EHRNSFVpglpprFukHNkPtTJSVxlN5DRQKxWpiG2FYzGVg+4R05ACgpwpamWmruDOgrqjCWoJs95ue+Um9TSVj2LsmUPdi13heKjuFXpmkArX1oeHqQudnlnO1AlbmLqRTful1kb6m7qqraEYaapMHnTO6Ty199EEQHeDMWluCo2pIEeMZOYkn7eeaOwVfZF/8MEH7W4rNzdXOnQmqCvr5Zdflg8z0xcvda/88MMP0vmyhl4EpjEr9CVOjoB5dwu9zGgMU2dCZbQeJ0POGrUemHdz0HgkCldBLXvk1NqK4k7dn9QCRw9xc4fAmXAm5tB5o3NBrRvWUPnp3LUFOZZ0TilcgnVrBv2msUf2lIHCZVBXr6oe0XgnR6BzeOGFF4oPP/xQ+SI0ldPeOqOCXuLUAvvUU09ZHDeFMlEdnz37sT5XdBwmZ0jVNUbQmDtq4aJzZ15eqhfOdl/awhSQ1fqc2lNuqiPW9YPG6tF17wrofJuPdaNxa/QxRC33tlrjZsyYIZ17avVrampi89tLzaSaT8dHAYDpfJha0ExjWmnYhXmYGhr+QY7kJZdcwrZDYWusA+IC4AqgJc1NIeeLnDDqWlRBg+Dpq5BeJiQkaAtqoaF4V//973+lCIEeegUFBRatX8uWLZP7JKeQuhhp0DC93CmmGX3FkyNGX6IU84i61Wi8zNy5c+V4IGq9oq99GjfUmV295I
jSA5fKTy9Q6kahFwQ9oE3jiej8kNNIjiu9tMyhB7vpZUddZbQ+lZsGJFMMp+eee062pHSk3FOnTpUx0agblrqs6CVGzge1RFB5aBwRlc0W1PpArYLLly+X55icI+oKoy9/cqwpKCfFx2oLSodDY5qoq5cGZ1OrHjmOqamp0k7OjUoA0Bb0oqUuZjo+KgMJE6griY7p22+/lYPd7a0zKqju0nHReaP1yYmmQeLU1We9jr37odZU6hKjMVHULUb1g5xA+hih8qugFqz7779fChhoPXIIaXvUfU7XpjPHMFGLIY3zo5ZX8wH89pSbjv2BBx6Q47MoRiEdO9375q2QnQmVk5wuGmJALeY0tIGgjwlbkINGKdmoPlJAWvp4ouuclZUlPv74Y1kvSdBjC7pHKQ4htQiT2ImuA7Um0rU2jxlHUHxAqovUukj3M7W00phU+pCzFtbQM4ru8RtvvLHD5wWATqfb9aSgU5g5c6aUvFOMKFtceeWVmre3t0V8K2tIXk9hDijWEMWaIsk+xV1ThbYgCT2Fwhg0aJAMj0BxhU477TTtscces5Dzv/DCCzLGmGlbJMVfsWKFlLibQ79vvPFGZSwkilNmjil0gKlcGRkZMm4WxYai80CxjyiO2ueff87WsTVRmcyh+FQUUoG2RzGmKLQDHYu9ITisw2dYx2aj2FkUSyo4OFjGovvXv/6l5ebm2rUNCpFy+umny/k00Xmlc5eWlta6DJX9pJNOUq5P14fiRtF8ui4UD47KQzGwKORKW9fEVE/Mw16YQiJQKA6Ky0bbpHhqtK55+Al764wKCilB5aNwMHTezjzzTG3//v3Kstizn7fffluGcIiJiZHLUPiG66+/XsbIaytOGkHxxGi/dJwU84zCltD5o3hf1uta3zumOk33QXtQXEGKA2YexsaeclNIidtvv731XE2ePFn74YcfZJ1QhQmxLqMpXIZ1SBXTfVtUVMTqCMV1M93nFK7H+pyp4qSZyjBjxgwZdoPuNbqH6VllHtJDxWuvvaZNmTJF1jcKP0PXmEJ+7N69W7k81RVTfEAK/TFv3jwZ7sMaCulBy5iH/AHAVdDRP53v+gEAgOdC48KoFYi6o1XdyM5CKkpq/XrkkUdk67YrQq2H1OrUVquXOzF27FiZOeKJJ57o6aIAwMCYNAAAaAMK22D9LUtjNqkL0jwtVGdAIS+oW4+65uxNSQach9Kd0dADGk4AgCuCljQAAGgDynd52223yfGPJCKgAfOkoqTxYDTg3DounafjaS1pALgyEA4AAEAbUBgaisX15JNPytYzirNFMcoogGpvc9AAAN0LWtIAAAAAANqAwjnRMARqPSclO6nrSW3fXis8ZcY5cOCA/NCjeJ6kQHcEjEkDAAAAAGgDCvUyevRomwHRraEwSRSSisLAUPglyv5B4XRU8RzbAi1pAAAAAAAOjMtsryWN0hBS/D/zzCwUG5ACbJNgxV7QkgYAAACAXkdDQ4PMXmM+2co+4kxWDuv0ZxQAmuxuKRz4buw4ZtMZuA9pbDqRbqgtjC28gbCx3DINTWMpT02i9+IRxIMHBjBbUGwgs/kG+zGbTm9fRHK9F0+l4uXLL4/ei58TzciPtaWRp9xpUZw7YzOX+Rub+XItTUY798GXa663bzm9gZ8rLz9+XgKjLJOKE0GxIcwWEBdp8dsvPoYtY4jkufq0cB4FvzE0ltkqghOZrcB4IjWNiZzKE8myTeSX8uMqLObnqaioltnKiqqZraq0itnqa+uYrbmhkdmaFDZVHQAAABXffji1x/b9sfdQp9f9791zWJaMFStWyCwjHSU/P19m8DGHfpMjSOn47M3l7LCTVlxcLNMGkTdIhSAovyOlIqEBcUhQCwAAAIDuQOftfGo2io9HA/vNoTRnroRDThrldqTmuoCAANmMRzkTCcrzSPJ0kqTbkweQmhOtmxQbjUbhYyP5NQAAAACAPb1f9kIOWVc5ZdR4Rb6ROfSbctja24rmsJNGSY
YpoOPmzZtZYmHSH1ACZ1qmvT5XSpps3cR4VWycuCY+wZHiAAAAAKAXo/N2zcadSZMmie3bt1vYdu7cKe2O4NDR7d27V0betnbQCLLRPJKa2tPESDnqzKcrYuMcKjgAAAAAejd6L53TkyNUV1dL/8bk41CIDfo7Kyur1a+hINcmqNEqIyNDpnlLTU0VzzzzjHjzzTeln+QIXo423/38888iJSVFOZ/mWQ+Us7eJEV2dAAAAAHBFfvnlFxnzzIRpLNuCBQvEiy++KAPcmhw2on///jIEBzllGzZsEElJSeLf//63HDLWZU7a0qVLxXXXXScj7p599tmtDhn1s+7atUs8//zz4rHHHhPOMPjck7hR5bgZ7VMZNlZxVVx1QaXF76p8rpJTEZ4czmwRg3nXrH88F03oVX3PiuPSeXlzm7fi8ui5KlAoQt1pzVy5qjVyFZ9oUSg+mxTrNvFzbFSpAhXrGu1UD6pC9qlabfU+/FzpFefKWjHbXMEVkC01vJ4YigqZzTckm9liIjKZLTySKz4TQrmtLJzXlaKEUGYrqOLK0IISbisp5YrU0hKu7qwsU9wXFTXMVlfJl2usr2e2FqUSmNcVDcnCAQAuJhxwhDPPPFP5jjJBjppqnV9//VV0BIecNEqqGxUVJZ544gnZdNfy/y94g8Egxo0bJwt56aWXdqhAAAAAAABdLRxwBxwOwTF79mw5NTU1yXAcBDlu3t68dQMAAAAAwN1b0noKp4PZklMWH88DdwIAAAAAdAd6tKQBAAAAALgeOkWmGk8CkkoAAAAAABfEZVrSgv46i9k0VTw2hRpR18RVZ7qyP8fLmROWkWHxu/zwcbZMcz1XJ4Ym8xhuQWNGMVtj8nBmqwrgKr4WPT/tzXofu5Zr0RQqRsHPiUFh8zJylaXByJV4BqMip6lquRbF9pp5clovxXK6Fr49nWIfOpWaRlUHFPvVVVuqebXKCraMsYYrG1UY67jaUVfCVaDeVvskIv2OMVtEIFdy9g2OYLbaCJ5vtDSGh7kpbuDbK6zmKtDicr5cSRk/n6Wl/J4qL+HnoKqcn79apTK0wa6coS0qdTDyiAIAbKDK+exJuIyTBgAAAADgCDo9nDQAAAAAAJdDZ/DsUVtw0gAAAADglujR3QkAAAAA4HroPLy707PbCQEAAAAA3BSXaUn7wp+rO/VClcuRr+vvzVWBcf1KmC0pcY/F76iQn9gy9QVFfPuJXN1Z338ksx0OOJnZjpaGMFtZFfeNVQK25mZFTk6F2FF1TgyKFJ9eimZhL0UNUHXxq2xeBs1Om2JdL76cXs9tBh23+XjxkxXqw/NUxujzLX6HVWbxclSXMZuuhSsMVaiUxqKRqyJFk0LdWsCVxT55vHw+igsUFshVm8nBPL9sQwhXhlYNUKhFBc/7WVTL621BJV+uuEyRM7SsqcvziKqUoc1WylDkEAXA89GjuxMAAAAAwPXQwUmz5ODBg+LHH38UkyZNEikpKSI1NVVs2LBBNDQ0iMsvv1xMmzat3W3QsjSZ09RoEN4+vo4WBwAAAAC9FJ3es0dtOXR0O3bsEGPGjBFLly4VY8eOlb+nTJki0tPTRWZmppg+fbr44osv2t3O6tWrRWhoqMX01ta1HTkOAAAAAPRC4YDOycnjnLQHHnhALFu2TJSUlIitW7eKuXPnioULF4qdO3eKXbt2yXlr1qxpdzvLly8XFRUVFtMlV93RkeMAAAAAQC8ck6Z3cvI4J+3AgQPiyiuvlH9feumloqqqSlx88cWt8+fNmyf27dvX7nZ8fX1FSEiIxYSuTgAAAACADoxJ0/2/lFCv1ws/Pz/ZVWkiODhYtoo5w8e7qpnN3tbIwCDu4PVPTmK2CQMCLH4PG8FVaP5+B3g5/PyYrdnbn9mKawOZLe0YV5NlHeWKwmpFDsSWFr6uZlQoXhUnynSdzDEoZJbq5fR2Lafar0EhA9WrbIp1lcsp6oBfgDezJSSEMdvAREuFYlIIV+n6h3I1Zp
MiP2qTke/TS8/Vg8H6KmYLrec5PgMqcpjNUM7zzYoavj2tnCuXdaV8XX9DBrf58ByxMUFcyTk4JJLZ6sJ5ztCKRG4rbeI5SAtqgpitUJFHtKiU1/nSUp73s6KMX7eqcksVaI1KKVqjyCta12BXvlCjQs0LtSgAPYvOTbotu6UlLTk5WRw+fLj19w8//CD69u3b+jsrK0vEx8d3bgkBAAAAAGwIB5ydPK4lbdGiRaLF7GtyxIgRFvM/+eQTu9SdAAAAAAAdRefhLWkOOWk33HBDm/NXrVrV0fIAAAAAANiFuwgAnAXBbAEAAADglug8vCXNPTplAQAAAAB6GS7TknZ4T7pdy2kaV1P5BnClZUV5Il/Ox1J1FtFvNFsmvrqc71ShpvOvzGO26EiuUA0M4KrDpkauCizJ4/toqKmz6/h1On2PfHHYO/BSpeS0ex+KYwsM47krGxu5YMXP11K1qNdxFWOLkdsKy7kKtqSMK/taFErbkCCuiowOT2a22FCuKIyN4XUvooUrQwNruM27iqs7daq6XMsVj1oJ355eYQvy4vdokC9XPicq1KIpITzHZ228QhnajytwSxr4PVRYrVCGllvmLy0u5destJSrQstLuOKzSqG2rlXkEK2vti+vaItVXlEAQOegcxMBgNs7aQAAAAAAjqDz8O5OOGkAAAAAcEt0cNIAAAAAAFwPHZw0AAAAAADXQ+fhY9I8++gAAAAAANwUl2lJq6/maip7UeXeyz3KDy0twlIFmhR5IqWVifDYgczmV13JbIb848w2KGAvs5Umn8ZsJSVcrVZeWGHXOVHlFBRCZfPcryS/IMscrISPD1dk+lqlqVSIMUV5DV/vaCZXAGYe5mrHumq+nI8/z40ZHMZzuoZF8mOIjIzmtnCudowO4wrfqCiuBI5M5PU2ormA2QJruM27UpFHtEqhFq1XKJCr+H698rKZLdSb36Oh/vxcJYfwXKCNIfxc1fSLsfhdMoCrRwtqFUrRSq48LSzltqJirtosLuT3aFkRP/6aimq7lKGqZxnygwJgGwSzBQAAAABwQXQYkwYAAAAA4HroMCbtBHv27BFHjx5t/f3KK6+IyZMniz59+ojTTz9dbNu2za7tNDQ0iMrKSovJ2MK7EgAAAAAA2mpJc3byOCftqquuEkeOHJF///vf/xbXX3+9GD9+vLj77rvFKaecIhYuXCi2bNnS7nZWr14tQkNDLabs9FedPwoAAAAA9Dp0Hu6kOdTdefjwYTF48GD59zPPPCM2bNggHTMT5Kg9/PDD4uqrr25zO8uXLxdLliyxsJ172U+OlRwAAAAAwINxyEkLCAgQxcXFol+/fiInJ0dMmDDBYv7EiRMtukNt4evrKydz9AauigMAAAAA6K1j0hxy0s477zyxadMm2dU5depU8fbbb4vRo08kKX/zzTfFoEGDRHejkqhXl3EZ/PEMyyTm+6IS2DLRI0cw29BoHn5Bl8WTTftl7me2kUO55L9kKE/sXl5qGT6AqKvhEv2aUh6qw5PRG3iIjMBQHqYhIoIn+g4LsqwXvl68njQ28e1XV/EwCBXF/LxXl1XYlRC+yIvvw8ff8iOFCAjmxxUQzEN1BIdxW2i44vjDeJ2KjuSJ6GNCeQiX6DgeWiKyLw/BEd6Qz8tckctshgpVSA9+/jRFuBtRUcZMPjr+MejrY/mhF6FI9D4gjIfWqAvj56Q8noc+KWjgYT9yKvj9nVtkmeidyMvn4VoKcivtCsVTVcrPe0MND30CQG9E5ybdlt3ipK1du1YKBchBo7Fojz/+uPjqq6/EsGHDRFpamvjxxx/Fe++913WlBQAAAADoJS1pDh1dQkKC+PXXX8WkSZPEjh07hKZp4ueffxafffaZSEpKEt999504//zzu660AAAAAAAmdDrnJ0+MkxYWFibWrFkjJwAAAACAnkLn4d2dnt1OCAAAAADgpiDjAAAAAADcEp2Hj0nzWCetqY6rqYpziix+Hz6kSHIdztVf4X
3GMFtcNVdcaaVcwRaW+zuzjerLEz+XDOdqsppqrjrLbmzu1OT0ro63dZZ0UjeGcxVkaAi/UYN8myx+G428Wbyunmddr1WoO1WJr+1Ndt/SZFkOW/WzrpIn4S5TqFu9vL3tOk9+Qf52KWNDw/l9EBbB1w0PS2K2mMi+zBYfxs9VbBRXLUZqqmTvlvco4VPJ1dX6Kq74FLU17d6PeoUtyCeD2/z48ScqEr2fpFCGFp3Un9my+3Ol7dEi/hw4coyrRXOyQvk+svl5qi6ttKvuAeBJ6Dy8u9NjnTQAAAAAeDY6tKQBAAAAALgeOrSkAQAAAAC4HjoPd9I8u50QAAAAAMBNQUsaAAAAANwTvWe3NfUqJ81aPZd7lKvLDoTyHIiRwQOZLahvCbc17eY7LeNqsr7BvzLbuP6nM1tVNVeaNtZztVbBsVyPze3nG8ivR2g4V94FBfAmb4POUrlZ2cCre1k5V8tWlnKVZWM9V2N2Niq1qMrW3NBol8K3poKr/cryuVq0QKEMVeUW9Q/kKtAghdI2PIrboqIimS0ynCseo8J4ftXoKH7uoxIVatFmyzyiATVcFepdqcohWm5XDlGtmG/PR3+I2foE8udAYkwisw1NHM5sh2N57uODiVwFmnaYn+PsI1zxWV5Yymz11bV25T8GwB3QuUnmgG5z0vLy8mSS9W+//Vb+rdfrxYABA8SFF14orrzySmFQhAwAAAAAAOhsdB7ekubQ0f3yyy8ymfr27dtFU1OTOHz4sBg3bpwIDAwUS5cuFVOmTBFVVVXtbqehoUFUVlZaTMYW3joAAAAAANCWcMDZyRk2btwokpOThZ+fn5g4caLMX94W69evF0OHDhX+/v6iT58+4rbbbhP1DvTMOOSk3XrrrXIH5Kx988034sUXXxSHDh0S27ZtExkZGaK2tlbcc8897W5n9erVIjQ01GLKTn/VkaIAAAAAoLej1zs/Ocgbb7whlixZIlasWCH27NkjRo8eLWbMmCEKC/lQCOK1114Td955p1z+4MGD4oUXXpDbuOuuu+w/PEcKSIW64oorWn/PnTtX2goKCkR4eLh45JFHxNtvv93udpYvXy4qKiospqRB8xwpCgAAAABAt7Fu3TqxcOFCcdVVV4nhw4eLzZs3i4CAALFlyxbl8t9//72YPHmy9JWo9W369Olizpw57ba+Oe2kxcTEyHFoJsg5a25uFiEhIfL34MGDRWkpH6hqja+vr1zHfNIb+OBlAAAAAICu6O5UDb0im4rGxkaxe/ducc4557TaaEw+/f7hhx+U65x22mlyHZNTRj2ONFzs/PPP7xrhAIkDbrjhBvHoo49KR+vBBx8UU6dOlX2tRFpamkhM5ComV8FawVRVwlVdmYe4mvDXYJ6zMGQMz+c5pi9XVPpl/cFs3rk8V+DQ/sHMVj10ArPV1HJFXH2tIk/pcUulm+1ck66DQZGTMjA0iNlCQ7nyMNCPq9NaNMsxBxU1/JukVKHkrKmodrtz19lqUVUe0Sovfr+U5PFrlmenMjQghNuCw7gtNEyRRzSC3wfRkZZ5NGNC+bFGx3EVbLRCqR1Tkc5sPnlHmK35eCazNeRx1bjh+HFmC487ymyn9BvCbImDTma2uAieM/VAFLdlHOb3T0EWV4GqnoXI+wncAZ3OeeEADb1auXKlhY26Ju+//362bHFxsWhpaRGxsZZqa/qdmpqq3D61oNF6p59+utA0TTZqkQ/VZd2dDz30kGzimzlzpjj77LOlx2nezEdSWDpoAAAAAIAuR69zelINvSJbZ/HVV1+JVatWiWeeeUYODXv33XfFxx9/LBu4uqQlLSgoSA56I2UCeYT02xzqbwUAAAAAcPUQHL6+vnKyh6ioKBlijIZ5mUO/4+LilOvce++9chz/tddeK3+PHDlS1NTUiOuuu07cfffdsru0PZw6OpKeWjtoAAAAAACeGILDx8dHhhzbtWtXq81oNMrfkyZNUq5DES+sHTFTLFnq/rSHXpVxAA
AAAADAGSj8xoIFC8T48ePFhAkTZAw0ahkjtScxf/58OS7fNOyLhoaRInTs2LEyplp6erpsXSO7vYH/4aQBAAAAwD3RdV/GgdmzZ4uioiJx3333ifz8fDFmzBixY8eOVjFBVlaWRcsZxY2lsfr0f05OjoiOjpYO2sMPP2z3PnWavW1uXczpM/9XuAK+gVxJFj+QqzvHjON90GeexPMJDivlx2U4zpVjwo/vt3zIacz2ddloZvvmJ67ES//tGLNVlZQJV8Y/hHeh9xvej9lGj+F5IPvHK1SLLZbN2YeyePP2/t+4Eu94WhazNdV1fe7O3jZuxODNvxENXvzr0sePK679gvj9Ehhqmc8yNJwrRcMi+HpxsTz8z9AkrmwcEsjvqdhsRb7ebK7abC7j956mUE96hf4ZzsgcfSJXbdYkKfJ+ep3EbL9nhzJb6iGucD2uyPtZmltsV45YAL79cGqP7bty3a1OrxuyZL1wddCSBgAAAAD3RO/ZuTvhpAEAAADALdHpnMvB6S54tgsKAAAAAOCmoCUNAAAAAO6J3rPbmpw+uuzsbFFdzQesNzU1ia+//rqj5QIAAAAAcIk4aW7TkkYJ1mfNmiWThlJfMOWmopQHpuC2lGD9rLPOkjmu3JGGGp5/s+DYiaTyJg768ZyFwUHRzBYykOfd61ulUHoV81ybYcf3MtuoAXwfJSlcfVpTwdWnzQo1mSpHY0+p/fyCVGo8RX7HQH5zeel57s7KWstrVFHBc1RWlfHjR87Crs+baytnaHODffdkTUUls5XlWypDC3y5atNHkVc0OonfU3X13CYGJTNTUx++vYgorkj2r+DPEH0FzxkqKlTPhkJmC6ypYraR8fwZEtOfq8Fjw/hx7A3nz5Cjh/i9V3Ccl6W6lF8L3EPAE0Nw9AQOH92dd94p44D89NNPMj7IH3/8IZ2yMjOJuYtE9QAAAACAJ6N3PnenR7akff755+K9996TEXeJ7777TlxyySVi2rRprekSPF1tAQAAAICeR4eWNEsoS3x4eHjrb0pOSpndk5OTZYtaYSFvDremoaFBVFZWWkzGFt71AQAAAADQW3HYSRswYIDYt2+fhc3Ly0u89dZbct5f//rXdrdBea1CQ0Mtpuz0Vx0tCgAAAAB6M3rP7u502Ek777zzxHPPPcfsJkeNclm1NyZt+fLlskXOfEoaNM/RogAAAACgF6PT652ePHJMGiUGra2tVW/My0u88847MpFoW1AXKU3m6A1ciQUAAAAAYBMPHwPvsJNGjlhICE8CbB6iY+XKlWLLli3CU1CFqcjP5Im5D4bx5M2RoVzaHpY0ktlC6hWObzmX6CcV/8ps45KDma2yOorZGuq5LD7/aI5dIQ86G1Vy7SCrBNlEeDgPcRDkz8M5aBq/UatqLW3lpfy4aqt4wmhjs3uGj+lNqK6RtU0V4qNJYQsK4/dPQ6MipEs9r7MF3vw+KzacGLNrQhfJk58HxPB4I/FNPIl7UMFhZhOF/L7VZ6QyW0I5T5Ie1ocnZ48eM4rZ9kbEMFuqIiROdjofh1xeWMpsSM4OugS9e7SIOUunHx3FSXvppZc6e7MAAAAAALwlzdnJE1vSPvjggzbnZ2RkdKQ8AAAAAADAGSftwgsvlHHQ2hIHIE4aAAAAALoaHbo7LYmPj5dx0YxGo3Las2dP15QUAAAAAMAcCmbr7OQGOFzKcePGybydtmivlQ0AAAAAoFPQe3acNIe7O5ctWyZqamyrdAYNGiS+/PJL4emokgpnHeYJjveF9GW2sLFc3TmuHz+n/sd+Zzav/ExmG+z3G7NVDZ3IbLW1XK3VUMsVZkXH87s8YbKPvx+zhUQEMVtYqGXSbCLAm5eloZl/b5RXWir0ykv4OW6sU2T0Bh6LbyBXYEfEhjJbTCR/NIYH1DNbi0JVnF3O1aIFilzqKhFxYgxP7D48iSdETw7hKm+f42nMppVw5WWAIjn7qCSuAo0aNJbZYsL7MNv+CP6MyzjMldqFWb
wsNeW8LEjODhxB5yYtYt3mpJ1xxhltzg8MDBRTp07tSJkAAAAAANrHTVrEnMWzXVAAAAAAgN7SkgYAAAAA4BLoPLutCU4aAAAAANwTnWd3d3bYSSMl51dffSXS09NleI4ZM2YIb2/vzikdAAAAAIAtPDxOmsNO2vnnny9ef/11ERoaKlNA0e+ff/5ZREVFiZKSEjFkyBDx9ddfi+horlLyJFQKpApFvrojqVxNFhgYz2xBI8Yw20nxPGeodybPz+efuZ/ZRg3i+61IGc9slZVc8VlXw/OIVhaVMZtm5PkN7Q026B/EcwCGRfAyh3CRmPAy8BAvpTV8H2VllsrNmgqu7lTldwSegd6LK4ODw3je4aQ+XI3ZP5bXi0g/rkSsbuL1uKSCf9mnppYzW0UJv78Ph/MKn5vCn6WjB3Bx1vCUWGYLz9nHbFqBIu/n0YPM1qeSS1LDkkYwW+xYnpc0OpKXJTWcn6ucjCJmKyso7pF8wsBN0Xm2k+bw0e3YsUM0NPz58rvnnntEVVWVOHLkiCgsLBSZmZlS3Xnfffd1RVkBAAAAAE6AOGm2+eKLL8Qjjzwi+vfvL38nJSWJtWvXioULF7a5Hjl5JkfPhLGlUegNPh0pDgAAAACAx+BUO6EpN2dZWZkYOHAgC2abm5vb5vqrV6+W3aXmU3b6q84UBQAAAAC9FR3SQjGuvPJK8fe//100NTWJo0ePWszLz88XYWFhba6/fPlyUVFRYTElDZrnTFEAAAAA0FvR6ZyfPLG7c8GCBa1/z5o1S9TWWg4yf+edd8SYMXwQvDm+vr5yMgddnQAAAABwCL17tIh1m5O2devWNuevWLFCGAxcVdUbUCmQCrN4HsyDvvy0BwVyNVToEO7s9q/iiistL5vZgo/zvJ+jBnGVWEnKn+MJzamqiGO2pgauZq0t5/lLVRi8+fEGhnIVW1gYd9SD/HiCwxYj/wKqquG2inLLXIu1VTVOK1SB++Hjx/PDhsVwdWdsNH9eRfjxuu2j4/dAXROv2+UVvM4W53J1dHEOz2VZ4m/58UpUlXO1dUlJFLMVDh3NbKP68edK39C9zOadk85sWlEBswXXcEXq6AS+XPRg/uyKDk9itt/D+TU6ls5VoEXZXAVaXVZh8duoSoYKPB+de7SIOUunu6AUlmPx4sWdvVkAAAAAAEswJs1xJ+2ll17q7M0CAAAAAPQqHO7u/OCDD9qcn5GR0ZHyAAAAAADYB8akWXLhhRfKEByUDqq9EB0AAAAAAF2GzrP9DYddUMrP+e677wqj0aic9uzZ0zUlBQAAAADoRWPSHG5JGzdunNi9e7cMv6GivVa23kZdJVdDFRzjaqiDIVzlFBmawGzhiSOZLayG5xQU5VwFGl/wK7Od3DeU2SqrI5mtvpbnMsxXqKnqq7mC0sefH1uIIkdhWAhX2fl6qRR1fLkyhaLOOjdiY52l2hN4FtY5YgPDeE7O2HhuiwzmdcdHz+tdvZErL/MruC0v11J1SFSX8Xu0SVEfVbbGOsvsLERtFVd8lpdy9XbhUK7UHpXM41imKJZT5f0UhXnMZMg4wGx9FXk/w5N4js+48dy2N5qXJTVNlfezsN3cycj52QvQeXZLmsNO2rJly0RNDX8Rm2cc+PLLLztaLgAAAACAtsGYNEvOOOOMNudTgvWpU6d2pEwAAAAAAL2eDiVYBwAAAADoKTR0dwIAAAAAuCA6z+7udOroPvroI3HfffeJ7777Tv7+4osvxPnnny/OPfdc8dxzz3V2GQEAAAAAOFB3WvLss8+Km266SYwePVps2LBBbNy4UaaBmj17tszZeeutt4q6ujpxyy23dE2JPYCaisp2lUrE/gh/ZoscO5zZxvfj2/M/xnN3Ggp5js9BfjxkSvXg03iZa2KYrbGeKz5LcnguzIBgruSMiOJqreBArgr2MnBbTTW/ucrLuIqrykpR19LUzJYBnoOPVd7LsGiuXI6P4/lhIwO5UtIoeBdKYS1Xhmbnc2VoYQ
7P01mrUHnbi0rxWZLDFeKq3LQVZfy+LSzi6u38zs77WcxzFgfXcoXr6ASekzNqCC9LTHhCu8/Ho4f5c6boeJFdz1/k/XRfNHR3WvLkk0+KZ555RixcuFCqOKkF7fHHH2/N13nqqaeKRx55BE4aAAAAALoWnXu0iDmLw0d39OhRMWPGDPn3WWedJVpaWsSUKVNa55955pkiMzOzzW00NDSIyspKi8nYwltlAAAAAAB6Kw47aZGRka1OWG5urmhubhZZWVmt82leREREm9tYvXq1CA0NtZiy0191pvwAAAAA6K1Qd6ezkyd2d1KmgWuuuUYsWLBAJlufP3++uP3224Ver5fZBijY7fTp09vcxvLly8WSJUssbOde9pPjpQcAAABA70Xv2d2dDjtpa9euFY2NjWLbtm3itNNOE0899ZQcp0bOW1NTkwxkSy1lbeHr6ysnc/QGPqAXAAAAAMAWEA4oMgpYh9lYunSpVHySkxYczBVQAAAAAACdjg4taXbh5+cnp+PHj4sVK1aILVu2dNamPQ6V3LuymMv2M1J5CI7AgHhuG3kys41M5CEpvLPSmM03m9tOGsCl7JUppzBbdRWX6GtGHjIjIJgfR1iYN7OFBtgng6/mERNEZQUPU1BvtaBm5OFBgGckUyf8g4Msfscm8BAcsRG8fgZ587rTbOSPxsIK3tqfm8vDOVQUlzNbSxNP2N7Zz5Ca0gq7krPXVNR0anL2YUP5cyAs53e7krN7H9nPbMkVPGxGROIIZosdN8zi974oXt60yEC7wh0hObv7onm4k9bpR1daWipeeumlzt4sAAAAAECvwuGWNBILtEVGRkZHygMAAAAAYB/dPCaNAvg/+uijIj8/Xwb1p3H5EyZMsLl8eXm5uPvuu8W7774rG7H69esn1q9fL2PMdomTduGFF0oVp6bxbgMTNB8AAAAAwFO6O9944w0ZmWLz5s1i4sSJ0tmiuLFpaWkiJkaRlaexUfzlL3+R895++22RmJgow5SFhfFhA7Zw+Oji4+OlR2g0GpXTnj08zRAAAAAAgDvHSVu3bp3MtnTVVVeJ4cOHS2ctICDA5hh8slPr2fvvvy8mT54skpOTZQQMaoHrMidt3LhxYvfu3Tbnt9fKBgAAAADQ0wnWGxTZj8imglrFyPc555xzWm0UH5Z+//DDDzaHh02aNEnceOONIjY2VowYMUKsWrVKZmqyF4e7OylYbU0NVweZGDRokMzpCRxDpSQqzORJilN9+CUL8OfqqqAUrvgcFMlVZ/pcnsIrKIsrs0YN4VkkyocPZTadPs6u7u/QYP594G3gCdAbmvlyVTVcpVlVziWfTQ1INeapGLz5fRAaZanmjIv3Y8tEB3Elp5eOPzCLGrgqsLCE17viXH5P1Vcp5Mc9RPckZx/DbKMVydn7hO6zLzl7EVeBhtRwFe3YBMvjiFaUIyacq+H3h/N6kXEogNmKsrkKtLacJ4mHatx946StXr1arFy50sJG0Snuv/9+tmxxcbF0rsjZMod+p6am2hyj/8UXX4h58+aJ7du3i/T0dJnnnMKV0X66xEk744wz2o2jRs15AAAAAACuynJF9iPrQPsdgYaA0Xg0ii1rMBhkT2ROTo4UHnSZkwYAAAAA4BLonBcOqLIf2SIqKko6WgUFli249Dsujvcgmcbwe3t7y/VMDBs2TCpDqfvUx6f9TEueHQUOAAAAAB6LJnROT45ADhW1hO3atcuipYx+07gzFSQWoC5OWs7EoUOHpPNmj4PW6U5aWVmZePnllztzkwAAAAAANkNwODs5CnWNPv/88zJg/8GDB8WiRYvkGH1SexLz58+XXagmaD6pO2+55RbpnH388cdSOEBCAnvp1O7OrKwsWVgqKAAAAABAl6Lrvg7B2bNni6KiInHffffJLssxY8aIHTt2tIoJyAcixaeJPn36iE8//VTcdtttYtSoUTJOGjlsd9xxR9c4aSRPbYuqKq58Ac5TX80VV/kKxefBQN6nHh
7ClVlhCTw2S4xCNSUqeB7R6Jxfme3kxBBm8zLwvvnqOt6sHOjHw7Q0tfCbrbyGV9GKCi6RrlUo6owOyJyBe+EfxNWXUXGW9TGOCxFFsA9XUTdrJ8aLmCiq4vdUXj6vY5WllV2ep7OzsTfvZ7NCHa3K+1lRrlCBDuaqytH9w5ktpRPzfvavKmHLRCScxGzRJw/nyylUoKkhPOdw3jFF3s+iUrvOHXA9dacz3HTTTXJS8dVXXzEbdYX++OOPwlkcctIoSm5b2QQoPhqyDQAAAAAAdByHnLTg4GCZg4rSIag4fPiwuP7669vdDgWLsw4YZ2xpFHqDfQPpAAAAAAC0buzudHkn7eST/wyQaisOGrW02ZNtQBVArs/gBaLv0D8H3wEAAAAAtIuH99455ILOnTtX+PnxaM0mKFaIPQHaSP1QUVFhMSUNmudIUQAAAADQy9G6Ud3p8i1plFi0LUjhYI+Tpgogh65OAAAAADiC5mC8M3cDGQfcDFXuOJXiaH8ob/GMCB7MbJP6ciVn4JE9zKYv5uqqZJ+fmM07jucMzamNZraqBu6UV9VxlV1uIc+LV5hfbVfuQZWKDbgfBm9vZguO5MrihERLxWd0MFfYGRR5OisauVK0gAsFRUEOV0DWVtrOY+yJ+YRLFKrF+mq+XHlJFLMVKWz5QxR5P5O5WjQpZC+zeedmtKsADa3m6ttxfbgaM3IYV75HhMbY9Vw9dpgrgUtyCu06n6DjaG7SIuYsDh9dXV2d+Pbbb8Uff/zB5tXX1yOYLQAAAABAdztpFDGX8k5NmTJFjBw5UgoI8vJOfL3Q2DJT5F0AAAAAgC4XDuicnDzNSaMouSNGjBCFhYUiLS1NhuSg3FQUZRcAAAAAoDvRhN7pyePGpH3//ffi888/l9ngafrwww/F4sWLxRlnnCG+/PJLERjIx3YAAAAAAHhCxoHuRu/oeDQvrxN+HWUX2LRpk5g5c6bs+qTuUAAAAACA7kBDCI4TpKSkiF9++UWOSzPn6aeflv//7W9/69zSAYZmNNqVO06lOAoOTWK2kFFc1TSmD1es+Ry1zJNHGI6nM1tSE1d/+SRwBddR0YfZcksUufLyeFlK8rkiFcopz8UvOIDZouPDmC0+2vKhG+5Xa5dcv6iaK/by8nl+2Ipi+/JbejIqxXRVCb8fG+vr7cr7WV7KFZRFQxOYbVSyIu/nEMu8n6G5B3iBi3muY590rhQdHFPMbGGJ/LkVHsQV8kHB/Lma5sMVyQXH8uzKzwwcQ/PwEBwOuZIXXXSReP3115XzyFGbM2eOXRkHAAAAAABAJzpplClg+/btNuc/88wzwqho6QEAAAAA6Gw0dHcCAAAAALgeGoQD7TNt2jSRmZnZGZsCAAAAALB7TJqzk8e1pH3wwQdK+9dffy0++ugj0afPn4PBISAAAAAAQFejuUm3Zbc4aRdeeKEMu6ESB9x8883yf5rf0oKcid2JSmFWmsvVSof/4IrPwIA4ZgsaxvNvpsTxHHi6jIPclsnDsMQomqOr4kOZLd3A1Z11NfzYaip47s7eprLzVPRePH9rcJgiT2dSMLNFhzRZ/PbRW/4mqpt5HSsq4w/5wlxe32srquxSW4OO5f2sLOO5fouKI5mtYMhYi98jk/mzrE/gbmbzyuLPKC0jldliyouYbfJAntQ1YNR4ZtPpLZWnhFHxXsxXKD6b6rgyFvReHHJBZ8yYIc477zyRn58vBQKmyWAwiP3798u/4aABAAAAoDvQPLy70yEn7ZNPPhFnn322GD9+vOzedJaGhgZRWVlpMRlb0BICAAAAAPvRPFzd6XApb7vtNjk2jfJ4Xn/99aK2lgeMbI/Vq1eL0NBQiyk7/VWHtwMAAACA3ouGljTOmDFjZOYBGn9GfzsawJbirVVUVFhMSYPmOVMUAAAAAPRSNA9vSXM6Tpq/v7/YvHmzbFWj5OqUcN1efH195WSO3uDjbFEAAAAA0AvR3KRFrMeC2VK4DYTcAAAAAADoYS
etrq5O7N69W0RERIjhw4dbzKuvrxdvvvmmmD9/fmeWETiBKnFv/jGebPigP2/BDAnicvfgZC4z71NdzmzGbB7UWJ+fxWzx4RnMFhfO9xseyZNrFwRyW30VHxvZ0sRDMADXxjeAh8iIVCVTj+GhOqIDeNgMa4preN3JK1CEsCngdbuxjiddB92TnL1akZy9rMwyOXvZ8ES2zIT+POzQEGMzs+kO/sbLkcGfUf5VvI6dPIofV9OwScxWX8fDctTX8jpVklNg17kDf4KMA2YcOnRIDBs2TEyZMkWMHDlSTJ06VeTlnYjzQmPLrrrqqq4oJwAAAACABZqmc3ryOCeNFJ0jRowQhYWFIi0tTQQHB4vJkyeLrCzeUgIAAAAA0JVoQu/05HHdnd9//734/PPPpUiApg8//FAsXrxYnHHGGVI8EBgY2HUlBQAAAADoRcIBvaPj0by8Tvh1FIJj06ZNYubMmbLrk7pDAQAAAAC6A83D46Q51JKWkpIi46PRuDRznn76afk/VJ4AAAAAAD3gpF100UXi9ddfF1dccQWbR44a5e6k2GnANamr5InJ845yJdH+YK6ICg3sw2xB/cYxW3gDV2ZplRV83YLDzDa8P1c/VaT0Z7bm5iRmO+bNq3JZQXG7SZ+BayVTDwq3L5l6TBhX6FknVK9v4fW4sILvMy+HKzlrkEzdtZKzZxcym2a0DKIeEODNlokOi2C28JgRzBZXyp+Dooo/LxtyuULe3+cnZhszmtfjsqFjmK2i3FKhaivpvEoFC/7EXVrEuqW7kzIFbN++3eb8Z555RjpqAAAAAABdjYbuTgAAAAAA10Nzk1Aa3dKS9s477ziVUB0AAAAAoLPRPLwlzSEn7ZJLLhHx8fHiuuuuEz/9xPvhAQAAAAC6Cw1OmiVLly6VCs9JkybJwLbr168XJSUlXVM6AAAAAIBeisNj0q6//npx7733yvydL7zwgli5cqW48847ZfiNhQsXir/85S9dU1LQYVTqtKoSrmzLOszzeQYq1J2BY05itvGDuOouJIvnxRNVfL/xObuZbRJPxycCfJO5LYArPtMPWh5HYWa+XTlOQffg4+fHbBGxPE9nXAxX7UUFcMWwNcW1PE9nbj6vnyX5XDnXUAslcE+h0/O2A79gfi0DQy2DpwcF83pC7SXW1AiuFq7rY5mHmvBv5Dldm/NyuK2klNnCju9lttEK9Xrx0HhmqyyLtiufKdTqf+IuLWLO4nRehHHjxkk1J+XufP7550VRUZE499xzRf/+PGQCAAAAAEBno3l47k6HWtIow4A1fn5+Mm4aTenp6WLr1q3tbqehoUFO5hhbGoXewFtwAAAAAABUGNGSdgJN403H5gwaNEg8/PDD7W5n9erVIjQ01GLKTn/VkaIAAAAAoJejQThwgqNHj4roaN5f7igUFLeiosJiSho0r8PbBQAAAEDvQUN35wn69evXKTv19fWVkzno6gQAAAAA6IC6s66uTio7IyIixPDhloqY+vp68eabb4r58+c7ulnQQ7Q0WeY7JMryeUiV9D+4csrHJ4HZ9CNGMduYZJ4vMTR3P1+3jOfnS2jh8fgC47iyL3gMV2f5+Vkqp/4w8Ibj/IxcZoPis3sUe4FhXGUXl8hzHsaEtTCbn4Er7xqNlnW0oILX2dxsruSsLuN5Oo3NfJ+g8/Hy5R/nwZFc4Zs4II7ZBg+1XG5wElevJwRXMptBx69taTBXrwenBHJb2EFmE7nHmUkrymO2PuH7mG1kv3BmKyiKZLaqMp5HtKSB3wO9sd5qbtJt2S3dnYcOHRLDhg0TU6ZMESNHjhRTp06V6k4T1G151VVXdUU5AQAAAAB6VXenQ07aHXfcIQPYFhYWirS0NBEcHCwmT54ssrKyuq6EAAAAAAC9UDjgUHfn999/Lz7//HMRFRUlpw8//FAsXrxYnHHGGeLLL78UgYG8eRgAAAAAoCvQ3KRFrFta0mg8mpeXl0XctE2bNomZM2fKrk/qDgUAAA
AA6A6MHZg8riUtJSVF5u2kcWnmPP300/J/Sg0FAAAAAAC62Um76KKLxOuvvy6zC1hDjprRaBSbN2/uhGKBnqSpjueJK84uYLZUPW9mNhp5LrrmESOZbVQfnrcxKu93ZtOXFzFbWCPPBXpyAs9j5zNijMVvg4ErxPSKY8g/xnN81pZzlRiwHx9/y5A7RFh0KLPFx3G1X0wQV9vqFTkZi+ssh1vkFXKlW0k+zxkLNW/34BfEh8NEJEQxW/JgnuNyyGB/bouzvG7xfsVsGYPguVprtCBmy6/jisoWjavXE/rx51s/3x+ZTZ99hNm8c7lt6BD+TMoeOJrZigv4eaqp4IrP3vic0tDdaRmEdvv27TbnUy5PctQAAAAAALoaDcIBAAAAAADXQ0NLGgAAAACA66F1c0vaxo0bRXJysvDz8xMTJ04UP//8s13rbdu2TYotL7zwQof2BycNAAAAAG6JUXN+cpQ33nhDLFmyRKxYsULs2bNHjB49WsyYMUPGjm2LY8eOiaVLl8pwZd3S3Ume4w8//CDy8/8cYB0XFycmTZokJkyY4MzmAAAAAAC6lYaGBjm1l1vcxLp168TChQtbMyuRUPLjjz8WW7ZsEXfeeadynZaWFjFv3jyxcuVK8c0334jyci5e6jQnjbzFf/zjH+K7774Tffv2FbGxf6pwCgoKxG233SazD7zzzjsiJibGoUIA16ehhqsnC7O4CrKlhQtHmpq4Iqp2+FBmG53IVVeJPr8ym1dxDrMFZv/BbKPiLfOSGoaP59vy4uoqbx9+W+Qd5cdaXcZziPbG3Hn25On0D+bXNjaBqztjwvnnbYB3fbt5OomiSktlaG4Ovz6VpbhmnY3B29u+vKz9uGpz4FCuqhySzLuhksP5iy3Ky1LN6dtcy5ap9uJ5QPNruC0th6uKK6sUuUBjE5lN9DmVmfrXc8WwVsCfW6EFqcw2PJ7nEc1K5mUuyQ+zS6ns6fVb64AAYPXq1dJ5Modaye6//362bGNjo8xbTgJKE3q9Xpxzzjmy0coWDzzwgPSJrrnmGumkOYpDThplFyCv8ODBg2LoUMuXLKWJuvrqq8WNN94o3nrrLYcLAgAAAADQXcKB5cuXy+5Lc2y1ohUXF0v/x9Q4ZYJ+p6ZyZ5v49ttvxQsvvCB++42HjeoSJ+3TTz8VX3/9NXPQCLI9+eST4swzz3SqidHY0ij0Bv41AwAAAACgQnNibJk9XZsdpaqqSsaUff7552UazW5x0uhgKisr2yyUPQesamLsM3iB6Dv0z35eAAAAAID2MHZTvDNytAwGgxzeZQ79pnH51hw5ckQKBihtZmtZ/z+OLKXXpN7HgQMHdq66c/bs2WLBggXivffes3DW6G+y0WC6OXPm2NXEWFFRYTElDZrnSFEAAAAA0MvRNJ3TkyP4+PiIcePGiV27dlk4XfSbhJOqNJq///677Oo0TZQ686yzzpJ/9+nDxx52uCWNlA1UqMsuu0w0NzfLQpsG1JFnSAPjHnvsMaeaGNHVCQAAAABXZcmSJbKhavz48TKaxfr160VNTU2r2nP+/PkiMTFR9hZSHLURI0ZYrB8W9qfYw9reqd2dmzZtEmvXrpUqB/MQHORhhoSEOLI54IGKz6LjPMdnU4OlypKoreWKz4ph/MtiXN8AZhvks4fZfAozmc0/P93i94gYrnLyHXYyswX4RTPbgUD+EZGdzo+1vLCE2ZobGkVvwuDNHyuhUVzJGRfP87fGBHMlJ4WdtKa8gdeLvCJLNV5JPldy1ldxBSCwH99AnkMzPJaPt0kcwO+hIUO44nNIQgNfN5DfQ4GiitlarF5f+TquvMwu4wrIQ1m8BSUttZTZqsu5UrIgMZyXzY8/y6Li+Ljt4HJ+XLpCrvhMjjjIbCn9T2O23JwIu9TLNQqbJ6F1YEyao1BvYlFRkbjvvvuk/zNmzBixY8eOVjFBVlaWVHx2Jg7HSSNl548//iib96jZjlQNGzZsEK+88oq4/PLLxbRp0z
q1gAAAAAAAKro7B+dNN90kJxVfffVVm+u++OKLXeukkcc4a9YsERQUJGpra+U4NGreo6i71A06ffp08dlnn8FRAwAAAECXY+zGlrSewKF2OQrKtmzZMlFSUiK2bt0q5s6dK6Pv7ty5Uw6eo3lr1qzputICAAAAAHSzcMAtnLQDBw6IK6+8Uv596aWXypAbF198cet8Sn2wb9++zi8lAAAAAIBiTJqzkzvg8Ag3yuIuV9TrpXohNPTEgODg4GAZTgMAAAAAAHSjk5acnCwOHz7c+pvyVVEOTxOkbIiP50oXAAAAAICuCGZrdHJyBxwSDixatEjmrjJhHevjk08+gWigl9NUx0MolOYW2RWWo6aKJ2CuqeUy8/rBPKHxsHifdkNw+OVZ/iZSonl5AweNYbbQIP7xsS+YZ9c4msZtJTlFdiVC9hQCQngy9WhFMvU4nltbhPjwEBlGjX9LFlXx652XZ3lOK4p5q35LE693QAi9l4HZ/BXXMbYvv0f7D+YhOIYO4K+WAVH8ekT78LAUPi38nqzV8/AdubWW+z1SyMODpGfwbWUf4+E2SnIsk7UT9bU8xFBDPQ+nkxqTzGz9xqYw24iYLGbTZ2cwm3/RUWYb0ncQsx0byK9FSX6EXaGSPCkskOYm3Zbd4qTdcMMNbc5ftWpVR8sDAAAAAGAX7iIA6LY4aQAAAAAAroARLWkAAAAAAK6H5uFOmsPqTk3TxNGjR2XuTlPezjfeeEO8/PLLoriY9+sDAAAAAIAubklLS0sTM2bMEMePHxcDBgyQ2QUuueQSmRqKnLeAgADx/fffi8GDBztRFAAAAAAA100L5dJO2h133CFTQH344Ydiy5Yt4oILLhBDhgyRoTgoLRQ5bJSVgPJ4AtCWoq5CkYi8qZErjpoa/2yxNaexiSdvbk4Zz2wjEiwVawG5aWwZ7zyupEqu58rLoDhLJTMREsA/RgICeJLnNEXC8YJjeR6h+DR4ezNbcEQIs8XH84ToMSH8envpTqjHTZQ3BjJbIRfoiaK8SovfddVIpq7C258ntg+N4onD4/vHMNvQoVylOySJ36N9QwqYLUQr44Xhl1uU67js91g5t6VlWnYEpaXyZ0p+ZiGzVZdZ1hNbakfNaGS2qhKuUM3MKGe21ASueE2IH8ZsUZWKpOtlvEcqKSKV2Yb15eckN5fvt6qsmtnK87ni3F0xorvzBNRKtnLlSjFy5Ejx0EMPyRa0pUuXCm9vb+Hr6yvuvPNO8fXXX3ddaQEAAAAAeknGAYda0qqrq0VExJ9xWAIDA+VkHry2T58+oqCAf0FZ09DQICdzjC2NQm/gsY8AAAAAAFS4i7PVLS1pCQkJMquAiUceeUTExJxoEi8qKhLh4bzZ3JrVq1fLdFLmU3b6q46WHQAAAAC9GKOmc3ryOCftnHPOkV2c5hkIKF+nCRISnHzyye1uZ/ny5TLHp/mUNGieo2UHAAAAAPBYHOru3Lx5c5vzZ8+eLRYsWNDudmj8Gk3moKsTAAAAAI6geXh3p8PBbA8ePCh+/PFHMWnSJJGSkiJb1jZs2CDHmF1++eXI3QnsQqWcqinlyqmcZi7/MrbwdVtauBKtaZil4nNkH65qC84/yGz6Uj6uMqaR57+bGF/FbIGjRjKbt3ccsxm8eCN2XkYus9VVcmWWK+EXzFWbUXFcARgfzY833I+rWVVJj4treE7GvHzLMa2qXJ2elJ/QWVT5N6MSuTq632CFknMQv18Gx/JrFuvL1Yh+Rr5cg55fx6JGrlA8UszzdB7K4ArxjEOWys3CrAK77h/Vs8de6hWK4aJsrtBMTefHkBjB82+GxOUwm08mV3L6FXIV+tCBfZgte/BAZisuiLTrONxRXU7ASTNjx44dYtasWSIoKEjU1taK9957T8yfP1+G5aAQHNOnT5ddnnDUAAAAANDVGD3cSXNoTBrFQFu2bJkoKSkRW7duFXPnzhULFy4UO3fuFLt27ZLz1qxZ03WlBQAAAAAwS7Du7O
RxTtqBAwfElVdeKf++9NJLRVVVlbj44otb58+bN0/s27ev80sJAAAAANDL4qQ5nLtTp/vT+9Tr9cLPz0+GzzBBSk9SagIAAAAAgG500pKTk8Xhw4dbf1M6qL59+7b+phhq5sFtAQAAAAC6ckya0cnJ44QDFBetpeWE2m7ECMt8hp988glEA6BTUamzctO5IqqlmSu2mppiLX7Xpoxhy4zsy/NMxhT8zmz6Mp4DMPg4X250AlcUGk4ay2zeXrF8HwaF4vMoz/FZW17VqYo1e9F7WeZCJUIiuJIzIYkr26JDuDrPW8dzPtY0c0VhQSk/L4W5PP9ibUVVt58TV8qZGqTImRrXj9ezQUP/zBpjTkoy30dyOFdthut40lSDkV/HKl0Ys2VX87ySh3L4caQd4vU7J4PffxVWCVwbargCu7NR1anKEp67MyuDKz7/iOMNGAkpPCdwcgU/Vl0Jt8UU/8FswxK4kjx7AL/eFcX8/mmo5efPHe4hzU2crW5x0m644YY2569ataqj5QEAAAAAsAs4aQAAAAAALogRThoAAAAAgOuhebiT5rC6EwAAAAAAuFhLGqV+otAb3v8/aPXIkSNiy5YtUtXZr18/cc0114j+/ft3VVkBAAAAAFpxA21D9zlpM2bMEDfddJMMYPvdd9+Js88+WwwdOlQMGzZMbN++XTzxxBPi888/l3k9AegqVDnm8o9yxWdTo6WisLqKK93Khw9htrEJXCXXz/c3ZvMuzGK2gNw0ZhsVy9Vv3sNO5jZvnj/R14+r33KO5DNblUJh1tLEFZUdwS8okNki47iKLyGWq0CjAqrsytNZUM33kVfAFbOlBfx4G+t4Pk9PQHXew+N4PsakgTwn59DBPHfn4IR6vm5AEbMFtvCYl016rr7Nb0lgtqOl/B5KzeBv04zDXLVYmMVt1WW8LEZFXt+eoKmOn8/SPK7uPHSYX4ukaK7GjInh+TcDy7mq1is/k9kGh/C8n1kD+LMmP5fXn+qySrfLHdwbujsdctJ+/fVXmaeTuPvuu8XixYvFunXrWuffe++9MjXUt99+2/klBQAAAADoRU6aQ2PSKEaaKU5aamqqWLBggcV8Shm1d+9eu7pNKysrLSZjC/9aBgAAAADorcFsHXLSJk6cKD788EP598CBA5lD9ttvv4mICB44z5rVq1fLdFLmU3b6q46WHQAAAADAY3Gou/Ohhx4S5513nqipqRFz5swRt99+u0wTRWPS0tLSxJNPPimWL1/e7nZomSVLlljYzr3sJ8dLDwAAAIBei9ah/k4+LtatnTQSBFDqJ3KwfvrpT6fq4Ycflv8nJCSI+++/X9xyyy3tbsfX11dO5ugNPo6VHAAAAAC9Gs1Nui27LZgtOWqUWL2oqEhkZGQIo9Eok6pT8nUAegpV3r7CTMu8lw21XP1XU80Vn5UnccXVKf25YnmI4sPCr/Aot+WnM9tJ0bwsfinjmC04kOc7/D2I7zfzELeV5ZfYpUTrSJ7OxL48T2dcGFeV+hn48dYp8nQWVfD95maXMVuNVZ5Od8kz2N45DgxV5JLty1W/A4Zwdd6Q/vxxPjCSqyKjvHlOTp8Wfv/UGnhZcuu4gjQt15/ZDqVzBXaWQsmpqqMq9ba7UaPIr5ufyY81NYErd/uNGcpswyK4klPkHmOm4MLDzDYkbgCzHU3m93JJXrhd+TxdRVVrws1u+64PZnvw4EGxdetWUVpaKseohYeHi7Vr14qrr75afPHFF11TSgAAAAAARUuas5PHtaTt2LFDzJo1SwQFBYna2lrx3nvvifnz58uwHNSiNn36dPHZZ5+JadOmdV2JAQAAAACE+6g0u6Ul7YEHHpBx0EpKSmRr2ty5c8XChQvFzp07xa5du+S8NWvWdF1pAQAAAAB6CQ45aQcOHJCx0IhLL71UVFVVyewDJubNmyf27dvX+aUEAAAAALAC3Z1W6HR/SlYph6efn5+McWYiODhYVFTwgaoAAAAAAJ2NZvTsEBwOta
SRgpPiopkglWffvn1bf1OidVJ6AgAAAAB0NUYPzzjgUEvaokWLWtNCESNGjLCYTzHUIBoArkJzg2WqsbL8IruSctfV8BRl9Q089EDjkInMNjyeJ0QPyDvEbD75PFTH4CZelqB+f+bKNSc4oA+zBQad+FgyceQPHpaj6HihXSEPlMnU43ky9cRY/giJDuAhM3SCPxFL6gKYLa+g2a5k6qrQAK6MbyAPUxESxUMeJA7g4TaGDuWhMAYn8jAnfYLymS1Y4+dO0/i3eamO7/dYGQ/zkZrJWx4Op/HQEnlHC+xKkm59j3oKLU38+lQU82uReZTfU4eSeNaePtE8jEZwKb+XdcWWYYeI5EiedH1IMn925Wbz/dZU8ATrVSX8/u5JNDdxtrrFSbvhhhvanL9q1aqOlgcAAAAAwC6M7tIk1l1x0gAAAAAAgAsKBwAAAAAAXAHNsxvSnHPSKLPAt99+K/Ly8qTKc8CAAeJvf/ubGDx4cOeXEAAAAABAAZw0MwoLC8XMmTPFL7/8Ip0zyjIwduxY8e6774o77rhDJl5/5JFHuq60AAAAAAD/j9HDvTSHnLR//vOfIiEhQZSVlQlfX1+xdOlSUVlZKZ02al2jALeJiYnilltu6boSA+AkqsTAKqWSuYLZRLNi3eZmnpy9OWU8s41I5LdZUF4as3kVHme2xIZaZguIr2S2YEVS5gD/RGZL9eXq0+IcnnDbL4irEaNiuOIzLIhnNzbo+bmqa+HJ1AsqeFnycrkCrrqsymWTPOv0fFivf0gQs0UncfVk/6FcMTx0AFfkDozmxx/jw6+Zr5ErXhv0/DoWKJTKR4p4mdOOcLXxscN8v0XZXGVYV8lVgZqnZ8JuB5WKujCHK2MPZwYz24CxKcw2IjqL2fTZGcwWWMwTsQ9K5L1eR/tzdWdZMbfVKY6jJ9E8vFo5JBygEBsPPfSQCAkJkU4apYB6/fXXpaNGoTfWr18vNm3a1HWlBQAAAAD4fzRNc3ryOCeNHDNTxgG5sl4vWx2am/+MbXTaaaeJY8e4125NQ0ODdOzMJ2OLZ8bLAQAAAIBnsHHjRhnYnzIuTZw4Ufz88882l33++efFGWecIcLDw+V0zjnntLl8h520008/Xdx3332ipqZGNDU1ibvuukuKBiIi/mwSLSoqkgVpj9WrV8t0UuZTdvqrDhUcAAAAAL0bo9H5yVHeeOMNOfZ+xYoVYs+ePWL06NFixowZcry+iq+++krMmTNHfPnllzJDU58+fcT06dNFTk5O1zhpjz32mPjtt99EWFiYCAwMFC+++KJF9+bBgwdbE7C3xfLly2WOT/MpadA8R4oCAAAAgF6O1o3dnevWrRMLFy4UV111lRg+fLjYvHmzCAgIEFu2bFEu/+qrr4rFixeLMWPGiJSUFPHvf/9bCi537drVNcIBajXbt2+f+O6772SX5amnniqioqJa59vjoJm6TWkyR2/gA2YBAAAAAGzRkYQD5MfQ1J5/QjQ2Nordu3fLRibzIV/UhUmtZPZQW1sreyFNvY9dEictMzNTZGdni0mTJkkHLTU1VWzYsEEe6OWXX47cncDtqS3n6sm8DKNd6UhUis/6lHHMNrIfV9OF5x1gNn0FV39FNvzCbKckcAVgwAie9zPAP47ZjsVwNZnZ0NNW4mL5h1SIHx9L2tTCVZuFtVwZmpPPFZol+Yo8nTWukafT4M2PKziS516MS+ZKziEp/KE8LJnXqeRQnvMyRPBzolP01VTq+FCTrMoTH9EmDmXzx/7hwzyvZk4G78IpLyxltqa6emYD9qrL+XnPzODXOy2RX8c+MVyhGV7B1bc6RY7PvlE8n3BKP57Ps7BQlc/T1dSdmtPr0tCrlStXWtioK/P+++9nyxYXF8sx+LGxls94+k1+kD1QqDKKkEGOXZc4aTt27BCzZs0SQUFB0iN87733xPz582W/LDXhUV/rZ599BkcNAAAAAF2O1oGWNGoVozFm5qha0ToDioaxbds2OU6NRA
ddMibtgQceEMuWLRMlJSVi69atYu7cubJ/dufOnbKPleZRQQAAAAAAXBlfX18ZUsx8suWkUc+hwWAQBQWWrd30Oy6O91BYj+cn34gasUaNGuVQGR1y0g4cONA67owC11ZVVYmLL764df68efPkmDUAAAAAgK7GaNScnhzBx8dHjBs3zmLQv0kEQMO/bEFZmB588EHZEzl+PA923ulj0kxx0mjAHDXZUfgME8HBwVKpCQAAAADQ1WjdGJSWukYXLFggna0JEybIAP4UkozUngQN/6KsSzTWjVi7dq0MW/baa6/J2Gr5+fnSTkPGaOp0J412cvjwYTFw4ED5mxQNffv2bZ2flZUl4uPjHdkkAAAAAIDLp4WaPXu2jAdLjhc5XBRag1rITGIC8oGoAcsEhSgjVah5j2Nb4oQOO2mLFi2yyGs4YsQIljYKogHgiahyEeZn5DJbS1MzX7eej1eoSjmJ2cYkhTBbgvduZjMU8f0GZvJhBqPjed7PkGFc8ZmVwBVcDc1c3hnq38RsYb58H01GA7OVVfNHTWEBV6RWlXBlbUsT329X4xfE1ajhcZHMljSQ58FMGcK/kIcmcIVqgn8RswW08ONvNPD8m4XNXO13tITXn0NHeX3MOMQVpIVZXAFYU1HpsjlTPYX6an7/FOdwBW1aBr+2/SK5uvPkaJ7/1yuLKzkDijOZbVBSMrMVDE5gtupKrl7uTQnWb7rpJjmpIFGAOfZkYOpUJ+2GG25oc/6qVas6Wh4AAAAAALtwlxyczuKQcAAAAAAAAHQPDgsHAAAAAABcAWNHUg54aksayU5t2WngHAAAAABAV6Npzk8e56RVVlbK+GiUXJ3UDKRwMBcSkOqhf//+XVFOAAAAAACWFsrZyeO6O++9916xd+9e8corr4jy8nLx0EMPiT179oh3331XBnrrDYP4ADBRX81z2OUfy2O2hnqe37Kmmis+a0b0Y7ZTkni+zH4GnrvTK5+rtXyz05htSCSPY5gQzvfb4B3AbDqF1l3T8e+8Ej1XPOp0ml2qLJ2eq0p1ZpL21v3aaM1vD9W2AsJ47tLYvjwH66AUflxD+nMl68BInnsxyovnVPRu4Tkvawwn4k6ayK7l+z2Uw9PKpB3mCuTsdK7aLCsodtn8qL0NVT2uKuX1J/tYGbOlJfE62jd5KLPFlfM6oFPkBE4I5fknRyRypXJ9A89X25vUnS7dkvb++++LZ599Vsb8uPbaa8Uvv/wiW89mzpzZmkneFOwWAAAAAKAr8fSWNIecNHLI+vXrZ5HL6vPPP5fpoc4//3yZdN0eyKGjrlPzydjCWxsAAAAAAHorDjlplF3g4MGDFjZKBUVJQ+vq6sRFF11k13YoZQKlkzKfstNfdazkAAAAAOjVaGhJO8H06dPF1q1bmZ1yUH366acyl6c9LF++XOb4NJ+SBs1zpCgAAAAA6OUYNecnjxMOrFy5UuTm8pQ0JBagFrWdO3dKIUF7+Pr6yskcvYEPkAYAAAAAsIW7tIh1i5MWHh4uJ2vI4SLV57Bhw8TUqVM7s3wAuBVNdVyxV5LN1VVNDTwfZVMjz5PX1BzPbf1OZbb+3jy/o29JNrPpq7lyLLiRlznIh7eKGw3ezNbsq1B/hfC8lwG+/EEaGMg/zAKC+boNtVx52Fj3p1CpLaWc3osrL4PCuXoyYSA/x8NO4nk6h/XleTCTQ7iaN1jj59io8bIU67g671gpz6N68CgzifQ0nvczP5Pn5KwudY1cqMB+VHW7JI/n80xX5PNMjuZK7fBoHrvUN/cws/kV8+UGx/PnitegYYpS95ziU/NwdadDTtqSJUuUdoqVtmbNGhEZ+eeDbd26dZ1TOgAAAACAXppxwCEnbf369WL06NEiLCyMebIkKKAgtwjBAQAAAADQzU7aqlWrxHPPPScef/xxMW3atFa7t7e3ePHFF8Xw4cM7oUgAAAAAAO3j6d2dDqk777zzTvHGG2+IRYsWiaVLl4omjG0AAAAAQA+hIQ
SHJaeccorYvXu3DGw7fvx4sX//fnRxAgAAAKDb0TzcSXOou9M8LtpLL70ktm3bJs455xyLJOsAAAAAAN2B0cO7O51y0kxcdtll4vTTT5cta+bpopzBL4hL71VoiiTPALgbDbU87EXBcS6zT/Xjt6iXgSfcbkoYz2xJAVHMFljLQzfoWhTDFhQPPs3Ay2L0si++YYAPD18RHs7XDY/lITKaGnn5qkVFuyqvwFCeOD1pkCLcxnAePmBEMg+D0DeAh7jwb6litgYvnpy+oIFfs/RCXr7UdL7fY4f5NSs6zstSV8kTrAPPSLpeXcZDqeRk8qTrqYm8ficMPYnZ+tfx+0dflMNsQcd+Y7aUGP6cEuIC0VNobtIi1iNOGpGUlCQnAAAAAADgQk4aAAAAAEBPoKG7s22OHj0q0tPTRXx8vBgxYkTnlAoAAAAAoJcHs3VI3bl48WJRXf3nuIe6ujpx8cUXi0GDBokZM2bIILcUO800HwAAAACgK9E8XN3pkJP27LPPitraWvn3gw8+KH766Sfx+eefS8fs66+/FllZWeLhhx/uqrICAAAAAFh0dzo7eVx3p/lBffjhh+KRRx4RZ511lvw9efJkmbNz2bJlYvXq1Q4XZPDJg5hNrwi/pnJ+3cUjBp6FTlVB7US1qpe3IiF4EFdAKsSOoqSOKwq9AxOZLTSUJ0T3aeEJzA1GrsbUKZTVzQZePk3wgwvx5arFfgk8YbsQ4cwSoEjEXl7CVaDWRMTwYx0yiNuGJfHj7xPAE6f7NdcwW52Bby+njidOP5THr0/qIb6940e4krOimCdsb2poZDYvX36edHq9XerB3oTqnKiw9zzZu72O7EOveGDUVPD6cyzzz0YUc9KieX2MjBzAbGHFvM43Z/Ok6+JIOrdN6El1p1F4Mg7XLlPg2vz8fDFq1CiLedTlefz48c4rHQAAAABAL8Vh4cC9994rAgIChF6vF7m5ueKkk07EYCkpKZFJ1tujoaFBTua0NDcIg5evo8UBAAAAQC/F6OE9aQ61pE2ZMkWkpaWJX3/9VSZTz8zMtJi/fft2C6fNFtQdGhoaajHt+eIJx0sPAAAAgF6LhjFpJ/jqq6+UdjpY6gadO3euuPLKK9vdzvLly8WSJUssbEs38nEhAAAAAAC28PQx6Z0SzNbX11fs3btXDBs2zO7laTLH4OXZg/8AAAAA0LlocNJOYN36ZYISrK9Zs0ZERkbK36TydJQLzuYqKb2w7+QbFWoyAFwFe+uxXs+X8zVwlaW/N1dwhXhxpVeA4DELfZv4ul7GRrtyd6rwa+GqTR89bxUP8OK5B8NjuZKzTxhXbRb25crIilqe99KayGB+7hKDS5gtVuQym28tP3f1PnyflS0hzFZU7cdsZRX8I7SlhZ/j4DA+ptcvwNchURdwTeztXlNdR4OBj0zSe3GbQaE0raxV2P7/XW1OmC+vt00lXFlccZTfL+3fjV2H0cPzeTvkpK1fv14qOMPCwljlO3jwoBQN4EEBAAAAANDNTtqqVavEc889Jx5//HGZXcCEt7e3ePHFF6WYAAAAAACgO9A8vLvTIXXnnXfeKd544w2xaNEisXTpUtHUpIiqCQAAAADQDWhIC2XJKaecInbv3i2KiorE+PHjxf79+9HFCQAAAIBuR0MIDk5QUJB46aWXxLZt28Q555wjhQMAAAAAAN2J0cPTQnUoBMdll10mTj/9dNmy1q9fvw4VZFrd/zCbpmih07mJ9wtAK/Y+RIz8Y0ensjUrhhm0cCWjaGxwWrUp7G0dV21P8dGmUn9F+fkzW78gS1ES0RgcxWz1cVwZatRZ5j71beIKTZ/yMmYzNHDFa4svV5Q2efHyehn4sYb48WvRN57nKY2O4Ir2FiO34ZHn2nTkVrF3ewpxp2hWtI148fS/IjaUPy98NUVc0lquEG8or2K20qNcIZ0seg7NTboteyxOWlJSkpwAAAAAAICLBbMFAAAAAOhuNMRJAwAAAABwPTR0d1pC6Z
9oDNqZZ54pBgwYIA4cOCA2btwoB+9ddNFFYsaMGV1TUgAAAAAAM+CkmfHuu++KSy+9VGYcaGhoEO+995645JJLZCgOg8EgLrjgAvHyyy/LROsAAAAAAF2JEd2dJ3j44YfFypUrxd133y3Db5CDRvk87733XjmfMhE8+uijTjlp1R9xdadQ5CFTKeU83ZMGnoeqzmp21m2jStbVEQmgSkWtV9nsC6to73HoFVI070CeP9AnjOfHDA7lelGdj49d5WPl8OdKTl10PLP5+/C8mhEB/BEaHMTzlAou2lRi1PFzrGn8WmiKEJc64dkvK1dFp7Pv3rP3Oup1/P42GLliuEXP616Txu8BL8HVneHlx5jNWMZVm/VlXN1ZW8zV0D2J5uHvf4eC2aalpYl58+bJv2fPni1qamrEhRde2DqfujvT09M7v5QAAAAAAL0Mh1rSgoODRUlJiUhOThbl5eWiublZ/jZBf1Og2/agrlKaLGzNLcJXFeQFAAAAAMDOlvte25JG2QVuvPFG8eqrr4oFCxaI6dOni+XLl4vU1FTZyrZs2TIZ3LY9Vq9eLUJDQy2mJ3/Y35HjAAAAAEAvQ0PuzhM89thjIiQkRNxwww2isbFRJlsn0cDw4cPllJubK9asWdPudsixq6iosJj+OWlER44DAAAAAL0wTprm5ORx3Z2xsbHis88+s7A99dRT4rbbbhO1tbUiJSVFeHm1v0lfX185mVOPrk4AAAAAOIDRTVrEejSYLTlnFD/NHgfNFod3HGA2nSJhmbEJydxB78HYYqdyzM7ldAY7Ew3aiV6xPXvLrMLLjz9DAiK44tM/nCsyfQJ921WK+oZyhaZ/Uhwvh69iXYXy0qcin9n0dTxnqKivtSvHqTAoPlb1BqeV772e7shVay+qfaiuozI3r6KueCvUzIGqLLkKinm9rc7MYbbK3Apmq8lW5AQGXYZDXhWF21DR0tIiuzkjIyPl73Xr1nVO6QAAAAAAeqlwwCEnbf369WL06NEymK05mqaJgwcPisDAQKGz94sEAAAAAKADaB7e3emQcGDVqlVykD8Fr/3yyy9bJ8o28OKLL8q/v/jii64rLQAAAABADwkHKA0mhSHz8/MTEydOFD///HOby7/11ltySBgtP3LkSLF9+/auc9LuvPNOqehctGiRWLp0qWhqUvSdAwAAAAB4WAiON954Qw77WrFihdizZ4/sWaR85YWFhcrlv//+ezFnzhxxzTXXiF9//VUG/6dp//79XeOkEaeccopMsF5UVCTDb9DO0MUJAAAAgJ4Yk6Y5OTkKjbdfuHChuOqqq2TYsc2bN4uAgACxZcsW5fIbNmwQ5557rowhO2zYMPHggw+Kk08+WTz99NNd56QRlFXgpZdekvHOKMAtCQcAAAAAANyFhoYGUVlZaTFZZ0MyQbFhqYGKfB4Ter1e/v7hhx+U65DdfHmCWt5sLa9E6yDHjx/X3n//fa26urqjm9Lq6+u1FStWyP+xbtes627lxbrds667lRfruvY+sW73rOtu5XU1VqxYQX2eFhPZVOTk5Mj533//vYV92bJl2oQJE5TreHt7a6+99pqFbePGjVpMTIzdZeywk9aZVFRUyJNA/2PdrlnX3cqLdbtnXXcrL9Z17X1i3e5Z193K62rU19fLYzCfbDmePeWkdUowWwAAAAAAd8JXkf3IFlFRUTKSRUFBgYWdfsfF8YDYBNkdWb7TxqQBAAAAAPQWfHx8xLhx48SuXbtabUajUf6eNGmSch2ymy9P7Ny50+byKtCSBgAAAADQDhR+Y8GCBTKyxYQJE2SA/5qaGqn2JObPny8SExPF6tWr5e9bbrlFTJ06VTz++OPiggsuENu2bRO//PKLeO6554RbOmnU7EjxR+xtfsS6jq/rbuXFut2zrruVF+u69j6xbves627ldXdmz54tw4/dd999Ij8/X4wZM0bs2LFDxMbGyvlZWVlS8WnitNNOE6+99pq45557xF133SUGDx4s3n
//fTFixAi796mjgWldcjQAAAAAAMBpMCYNAAAAAMAFgZMGAAAAAOCCwEkDAAAAAHBB4KQBAAAAALggcNIAAAAAAFyQHg3BUVxcLLPHU7JRkrMSFImXZKtXXnmliI6OFq5GXl6e2LRpk/j222/l3yS3HTBggLjwwgtlmSkiMQAAAABAR+mxEBz//e9/ZTb4gIAAmSXeFGeEUiZQhN7a2lrx6aefyqBxKg4ePCh+/PFHGbk3JSVFpKamig0bNsgM9pdffrmYNm2acr09e/aI8PBw0b9/f/n7lVdeEZs3b5bxTfr16yduuukmcdlllynXpSB0VNZBgwYJf39/6VzOnTtXNDY2yrIOHz5cxkwJDg4Wnk5ZWZn48MMPZfA+e6FrsnXrVnmebfHOO++I8847T9YL0D4U8do8Lo+5PTs7W/Tt29eu7Rw9elSkp6eL+Pj4dmP47N27V+zevVuceeaZ8gPlwIEDYuPGjXKfF110kbyvPQm6vym2kepjctasWTISuS1KSkrEvn37xOjRo0VERIT8MH3hhRfkc+qSSy4Rw4YNU65H187Pz0+moiG++eYbi+fUjTfeaDNqeV1dnXj99deVH5Jnn3226E3Q++TZZ5+Vca3shc4VPc8pppUtKDjpxRdf3OazzFPqE4E61YNoPcTEiRO16667TjMajWwe2Wjeqaeeqlz3k08+0Xx8fLSIiAjNz89P/o6OjtbOOeccbdq0aZrBYNB27dqlXHfUqFHazp075d/PP/+85u/vr/3zn//UNm3apN16661aUFCQ9sILLyjXnTx5snb//fe3/n7llVfkcRClpaXamDFj5Lba4vjx41pVVRWzNzY2av/7v/+r2QOdny+++EJ77rnntA8//FCu2xa0zL333qt9++238jedm/POO0+bMWOG9uyzz2rO8Ntvv2l6vV4573/+53+UE12Xp59+uvW3Cp1Op4WEhGgLFy7UfvzxR6fK9tNPP2nr16/X7rzzTjnR32Sz57xmZGRoTU1N8ndDQ4O2bds27aWXXtKKiopsrkcJec2vQXp6unbXXXdpl19+uXb33XfLbbYHXZOVK1dqN9xwg7Z48WLtscce0w4dOmRzeUoEfMkll8j6T8l66fo2Nze3zs/Pz7d5fRYtWtRaB2tra7V//OMfclk69/T/WWedpayjxDvvvCOvY2RkpLxX6F4KCwuT9x7VJ5r36quvas5A9xCda0egsh47dqzNZd5++22tpqbGqTIdPnxYGzBggDzPU6dO1S699FI50d9kGzRokFxGBdW50NBQeV7Dw8O1X375Revfv782ePBgbeDAgfLZs3v3buW6lLCZ7lvi/fffl9flb3/7m3bHHXdoF110kUzcbJpvXd5+/frJOtGnTx+57wsuuEA+p+jaUJ0x1W9btLS02LRnZmZq9kL1/rPPPtN+//13u54n9Nw9cuSI/L1//35ZT6+//nptx44ddu9TtV1b98GGDRuUE52n5cuXt/5WQeeVlqN6T88IelZ4Yn3qrDoFnKfHnDSqkAcPHrQ5n+bRMiomTZokX37E66+/LissvRRN0Iv5L3/5i3Jdqsimh/rYsWOlo2MOvWCGDx9uc13TQ8T00KLKTS9Egh5ICQkJynVzc3O1U045Rd4cVLGvuOIKixdhWy9VcqjKy8vl3yUlJfLmoBuFHFNaJyUlRSssLFSuu3nzZs3Ly0sbN26cdH7IsQwODtauvfZa+QCkYyInRuUEtDV98803NstretnT/7amttZ94IEH5LWhv0866STtiSee0IqLi7X2KCgo0E4//XS5Hj1U6MFEE/1NNppHy6hITU2Vy1G56EFJLxg6Z4GBgVpAQIAWFRVl02miB+xbb70l/yZH2NfXV34MzJ49Wx4Hrf/999/bLDOVkfZL14n+p/3GxcXJerJs2TLlevQxMGTIELlf+tigstOD0/SyoPpEx6yC9mE6D/QySkpKkk4/OTJUfnrg0z2k4uSTT9Yeeuih1nuPHDS6XibIua
SPFU9x/OklPGvWLFnnrSEbzZs+fbrNdek+q6ys1B599FF5num3iauuukq78MILletSvTM593S/r1mzxmL+U089JeuW6llB97Xp45fWIxtB9Tc5OVlbsWKFxzj+e/fubXN644032nzW0DWhc2I+kT0xMVH+TU6QrXW3bt0qrz+9A6jst9xyS7sOqbvVp47WKeDGThpd2La+mmkevXhU0APX9LVBjhK93Pbs2dM6n26U2NhY5bp0M9EXCEEPInoxmEOtIOS4qKDymFqjTI4X3az0UCKOHj1q07GcP3++vDn++9//yocQvYjHjx8vWw/ae6mS3fRSpYchOZGmG45a5mhb1AKjgpY1OaL0Iqbybdy4sXU+PWiGDRum3Cc93GxNbTla5557rnQYrB0iuk4HDhxQrqM6VrpOdLz0wCbHh14g5Ajbgl4M5MCTw2UN2U477TTt4osvVq5LD0f6sty3b59sUaVzQjZqIaOWspkzZ8qWMVv10eTAkcN22223Wcy/5557ZCusCnLk6MFKD2jaz0033STriql1jeqryonu27ev9uWXX7b+ppY+cvboAU/baeuFan6OR4wYob322msW88nZIQfQ1sOe6jlBD216QdE5M0EfMfSiVeGOjj89C9p68dKx23pe0MfjH3/8If+mekTlM2/RpVYPcgZUUIsJORmm55Tpb/PnFDn/1pDN/GOCnHa6RqZjpVYUevZ6iuPfVr1o7zlFjgdt13SNnH1O0f9r166VH8u0L/oYp2cuOVPuXp86WqeAGztp9PVLL156MNBLgb5waaK/yUYV1dyZsH4pUqUyQS8F8xYuaimz5SzRi/aaa66Rf9NLn16g5qxatUobOXKkcl36UqKXGnWv0gOIvg7PPPPM1vnULE8PIxXUwmZ+Q5le/PSQoNYxe1+qQ4cOZS0Gn3/+uc0vPjqP5l0UdHOZPyTohau6Oekc04Pnq6++Uk70ELdVXmLdunWyady8Cd3Rh5+Juro67eWXX5bnmvZp64FA9cDcWbeGnD5bDgS1Sv7666/y7+rqalkOchpMfPfdd9IxsuW4mFqF6eNA5fjb2i+dZ+raMUH7pmtk+tKmlk+65qrrat2NSi8FclKpy5/mtVWfTC2v1EJovn/T/WPrRUEtfKaPHPrAoG2ZO4s///yzXMbWft3N8Y+Pj7fZDUR88MEHcpn2HFrVc4ruS1vPKfpgMDk11Jpk3e1G9x91c6meM+ZdXmVlZfL4TQ4D1Qs6bhXu6PjTRwx1k1KdVU0ff/xxm8+pd999Vz6nqCWpo88p4uuvv9YWLFggj4kmd69PHa1TwI2dNIL68ql1iW4K09cP/U02aqa2BXUlkaNkgpwO8z5xulFsOS05OTnyJT9lyhRtyZIl8mVE3WDUFUI2GutGN7YKaq6n8QOm8lLLjPmL8tNPP9XefPNNmzeYdXcZlZlaUeh46KFkz0uVvoJUL1VbNwl90dL5MB07bcv8+MjhomWsIYeInDRbkCNi66vaBDk91JJH4wvpi9qeh5/5F7kKakE179q2fmDT8diCXkC0jD3OLD0AzT8EsrKybJ5jcooeeeQR+TfVCesWYhoTZcvBI+fQ/JxQqyydA3LcCXoIq/ZLjpuqnlIdJUdt9OjRbdYnakWgFj+qT9ZOCj2QyXmz9ZFD9+d//vMf+ZFBD3waO0pOKrVWUkuirdZKd3T8qcuPWjBo39T6QI4KTfQ32WhcrK2uHmpZMR8b+9FHH7W2uhP0Uaq69whqMaG6Sq2qDz74oKyPdO4ffvhhaaM6Qa3g1pCDQNeArgc9m0xd7iboPNM5VOGOjj85kHR+OvKcys7OlsdHHwJ5eXmd8pyijyzroTTuWJ86WqeAmztpJqjplroOaWpvEDxBg/ypgtqCmttNrWUq6EuABkySA0FfHuSYUdP+3LlzZXdke9AD3tb4CltQ6xy9rK0xOWr0Em/rAXj++efLAZ50g1t/idHNaat798Ybb5RfSNSdQF/FdMPRzU5OLrX8Ub
muvvpqth49YGwNmiXowWIuorAFPUTIIaAy0NgSZ79Q7YEG3NN1pK9j8zEf9DfZ6EVM3YkqqAXUvOXsmWeeseiuIMfF1ouCxptRdwI9XOmLnF5S1EJL42juu+8+2Wpjy+Gla0rdtNSCRnWfulppTJz5tVXt9+abb7bpDFG5yZGyVZ/ogUvOiWki58gceojTMrauO433pIc8OWg0VpLOqakVjK6zuXPr7o6/aQwOtW6YtwTS32Rr63jo/qDuO1vQPv/+97/bnE/n8bLLLpNjSE0fsdTKRB8C7733nnIdOk5ymk1lpfvBvHWZujKffPJJ5bru6PjTfU2tzbYgp+/FF1/U2oNa8KgXxTQWtCufUz1Zn8jBcqQ+Wdcp03hf85a1tuoU8BAnrTfwr3/9y+aAUHLUqDna1gvqyiuvtJisWxlpcDk92FTQy59aCakLgl5sNJ6ABp2SY0r7oxensw8bR6DuDnJA2tsXfXGrFL/2QF0yNDaPjo0efOSA00R/k426uWgZFfSCsXZWzFm9erV0lG1Bjpr5g8w00RgR1ZgyE9RSRg4iORv0wCSHzqQ+JujrVjWWh14+1q0VhOnckaPWVquiCtO6VCYa6+gItI51i7YnOP7mUCsCXWea7FHstgc5mbbqo/V1oXNj70csQa327V0Pa8jZdjfHv7OhFj26X01jhbsSd6pP5nXKXEwCup4ei5PW22hubpax30JCQmzOz8nJcSruTk1NjQyiS3Fw7KW+vl40NTV5ZEy3yspKGcfLPAbRuHHjbJ57e+OI0fmlOGJtUVRUJDIyMmTMMFo2OTm53W1Tvfjuu+9krKNTTz21NZaRM1CMJYpjZitekiuu6ywUp++LL74Qy5cvFzExMTaXy8zMlPHidDpdt5XNXWMf5ubmipNOOkk5v6qqSsaZnDp1qsPbpnuC6khSUpJD69C9QXEwvbx6NO66ywREd8d1QcdAWqhugh4ybTkJVPFXrlzp1LZLS0vF4sWLHVqHHA5y0I4fPy6uvvpqmwEM6ab8448/lE7eyy+/bHP7PbUuBTmmgLjkIM2ZM0eMHTtWvPnmm+LWW2+VL/S2oHUp2C4FRibo/0WLFsnzQ05aWw6aaV26FhMnTpQBk9euXSvXbW+/5ERQsEkKkkwOmvl+ba27ZMkS5dTS0iLWrFnT+tuV1lV9XNA5u/vuu8XTTz8tA3XaC61LgTwp6DFd37bWpQ8fcwfNkf2SU0LX3gQFv548ebLo06ePOP3008W2bdtcat2O7JMCvlL9tQU9L2w5aDfffLMMkGoLeqHbctBsrUvrUGDl9hw0uoYUVNt0bHTMFFicnLu77rpLfgC7yroUEJ0+ZLZv3y4/kg8fPiw/IAMDA8XSpUvFlClTpDOswh3XBZ1AN7TWgQ7GiOqJddPS0lrji9F8ElVQ87iJtpRePbVuR4Icu9u6dH5IGWzexUQT2SkEAP1N6mMVPbUuhTUxCSJIiEFjBGksH61Hx09jmWx1+1ivS3XE2XUd2W9Hgl/3xLod2ad59yKNm6JB9PbSU+tSNyqNsaJxnTSejNanAfI0BpfGmNG9RONCXWXdjgREd8d1QceBk9ZN2ArGaZoobpOjgTy7cl0SM1DIA5Lh08Bq+psUsyYFZFvOUk+t25Egx+62Lo2Ro/Ni7cDZM5C+p9Y1Hxs2b948OWDZFKSZBqeTYzpnzhyXWrejwa+7e92O7JPOE4XzoVBDNMifxkjSWFkSKtnKQtDT69J4TgqGa/rgpI8aEiCYCwvMhTg9vW5HAqK747qg48BJ6yY6Goyzu9el1gXzWEU02JQG5ZMKlW7Ytpylnlq3I0GO3XFdCk1Asaduv/321gHA9jhLPbWuubNEqXGsFYAUi86WlL+n1u1I8OueWLcj+zQ/T3RdSaBkivhPL2H6eLCVsqin1lXFgTQX1JDDaitIa0+s25GA6O64Lug4GJPWTdB4pnfffVcOKFdNNJbEldalcWHmY0FoTA8NHJ05c6Ycl3Lo0CGb++
ypdU3LEzSwlcbdhYaGWoypqaio8Jh1TznlFCmQILHC+PHjxf79++0eHN9T65qWo7GF1mP8EhMT5TZdad3zzjtP1j+C6t/bb79tMZ/Gw9FYQldZtyP7NMfb21tceumlYseOHXIA/8KFC8Wrr74qhg4d6lLrkijINHaVxkrR2EjzsawHDhywKSrpiXVpoP0NN9wgj+/LL78U8+bNk9fJ399fzk9LS5P1UYU7rgs6gU5w9IAdUPwfCmToTIyonliXxutQsE9bsdcoVIStFq2eWrcjQY7dcV1zqKuUWtzo3NjTGtYT61I9o7h81A1HY6Os4wb+7//+r820Nj21bkeCX/fEuh3ZZ3uhSqhV21Z2hp5al+IR0vgvymFJ9wkND6BWdxqDR3mLqYXUOk1bT67bkYDo7rgu6Dhw0roJetmav4xV8cxsxbXqiXVp8Kspia4KijlmyzHsqXU7EuTYHde1hmKbUR49uqaO0h3r0uBj84mCKZuzdOlSGbzVldbtaPDrnljX2fXIubMnn6krrUtDBChq/l//+lf57CCHjj4cyEmirl+KK2mrXvbUus4GRHfndYHzIE4aAAAAAIALgjFpAAAAAAAuCJw0AAAAAAAXBE4aAAAAAIALAicNAAAAAMAFgZMGAAAAAOCCwEkDAAAAAHBB4KQBAAAAAAjX4/8Aqj0Ym7kOpj4AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig = problem.render(design)\n", + "plt.title(f\"A beams2d reference design (sample {sample_idx})\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's a topology-optimised beam: dark pixels are material, light pixels are void. If you stared at enough of these you'd start recognising the truss-like patterns that classical solvers produce." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 7 — *When is a candidate design actually invalid?*\n", + "\n", + "Here's a failure mode that catches ML researchers by surprise: your model can have low training loss *and still emit garbage*. Pixel-MSE doesn't know about physics. The network will happily output a \"design\" with negative material density or zero material where the solver needs at least a little.\n", + "\n", + "A benchmark therefore needs an **independent validity test** — a separate function that says yes-or-no *\"this candidate obeys the rules\"* regardless of how it was produced.\n", + "\n", + "EngiBench calls these **constraints**, and tags each one by *why* it exists:\n", + "\n", + "| Category | Meaning |\n", + "|---|---|\n", + "| `THEORY` | Comes from **physics**. Values outside this are unphysical (e.g. negative volume fraction). |\n", + "| `IMPL` | Comes from the **solver implementation**. Violating it crashes or destabilises the numerical method. |\n", + "\n", + "Let's first confirm our dataset design is valid under its own scenario (it should be — it came from the solver)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Checked 16 constraints — 0 violated.\n" + ] + } + ], + "source": [ + "violations = problem.check_constraints(design=design, config=config)\n", + "print(f\"Checked {violations.n_constraints} constraints — {len(violations)} violated.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's feel the other side of it. We'll lie about the volume budget by setting it to something extreme, and ask the benchmark whether the same design still qualifies under that lie.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Under the bad config: 2 violation(s).\n", + "\n", + "check: Config.volfrac: 0.01 ∉ [0.1, 0.9]\n", + "volume_fraction_bound: Volume fraction of the design 0.1875 does not match target 0.0100 specified in the conditions. While the optimizer might fix it, this is likely to affect objective values as the initial design is not feasible given the constraints.\n" + ] + } + ], + "source": [ + "bad_config = dict(config)\n", + "bad_config[\"volfrac\"] = 0.01 # claim we only have 1% material budget\n", + "\n", + "bad_violations = problem.check_constraints(design=design, config=bad_config)\n", + "print(f\"Under the bad config: {len(bad_violations)} violation(s).\\n\")\n", + "if bad_violations:\n", + " print(bad_violations)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**What just happened.** The design itself didn't change — only the scenario we claimed it was solving. The benchmark caught the mismatch. That's the point: a benchmark's validity test is what keeps a paper honest when a flashy model happens to produce outputs that look good but break the physics." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 8 — *How well does this design actually perform?*\n", + "\n", + "Passing the validity test is necessary but not sufficient. The next question is: **what's the objective value?** For that we need a **simulator** — a function that takes `(design, condition)` and returns the physics score.\n", + "\n", + "This is the function your method is ultimately being judged by. Not training loss. Not reconstruction error. *This*.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Objective(s): (('c', ),)\n", + "Values for this design: [464.75580476]\n" + ] + } + ], + "source": [ + "problem.reset(seed=SEED) # clear cached FEM state from any earlier simulate call\n", + "obj_values = problem.simulate(design, config=config)\n", + "print(f\"Objective(s): {problem.objectives}\")\n", + "print(f\"Values for this design: {obj_values}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's a compliance value — you'd want a method to produce designs whose simulated compliance is **low** and **feasible**. Anything else is noise." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 9 — *What's the strongest non-ML baseline?*\n", + "\n", + "Before you celebrate a new method, you need to know what it has to beat. For most engineering problems there is already **a classical optimiser** that has been the workhorse for decades. A benchmark should give you direct access to it, so the comparison isn't to some watered-down version.\n", + "\n", + "Running the classical optimiser from a trivial starting point yields a *reference* design and a *trajectory* showing how quickly the solver converges. 
Your ML method has to be compared against both — not just final quality, but how fast the baseline got there.\n", + "\n", + "> **Heads-up:** this cell runs a real FEM optimiser and can take ~30s depending on hardware." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": "problem.reset(seed=SEED) # clear cached FEM state so optimize starts fresh\n\n# A \"trivial\" starting point depends on the problem. If there is a material-budget\n# condition we can fill the field at that level; otherwise fall back to the midpoint\n# of the design space (e.g. photonics2d, which has no volume fraction).\nif \"volfrac\" in problem.conditions_keys:\n fill = float(config[\"volfrac\"])\nelif \"volume\" in problem.conditions_keys:\n fill = float(config[\"volume\"])\nelse:\n low, high = float(problem.design_space.low.min()), float(problem.design_space.high.max())\n fill = 0.5 * (low + high)\n\nstarting_point = np.full(problem.design_space.shape, fill)\noptimised_design, history = problem.optimize(starting_point, config)\n\nprint(f\"Optimiser ran for {len(history)} steps\")\nprint(f\"Final objective: {history[-1].obj_values}\")\n\nfig, axes = plt.subplots(1, 2, figsize=(10, 4))\naxes[0].imshow(design, cmap=\"gray_r\", vmin=0, vmax=1)\naxes[0].set_title(f\"Dataset design (obj={obj_values[0]:.3f})\")\naxes[0].axis(\"off\")\naxes[1].imshow(optimised_design, cmap=\"gray_r\", vmin=0, vmax=1)\naxes[1].set_title(f\"Freshly optimised (obj={history[-1].obj_values[0]:.3f})\")\naxes[1].axis(\"off\")\nplt.tight_layout(); plt.show()" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice the two designs look similar but not identical — classical topology optimisers are non-convex and depend on starting point. That tells you something important: **the \"right answer\" isn't unique**. Your benchmark has to handle that with diversity-aware metrics, which is exactly what Notebook 02 will do." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Putting it together\n", + "\n", + "Look back at the engineer's checklist at the top of this notebook. Here's the mapping we just built up:\n", + "\n", + "| Researcher's question | What we need | Where it lives on `problem` |\n", + "|---|---|---|\n", + "| What problem am I on? | A reproducible identity | `BUILTIN_PROBLEMS[\"beams2d\"]` |\n", + "| What am I designing? | A design space | `problem.design_space` |\n", + "| Under what scenarios? | Operating conditions | `problem.conditions_keys` |\n", + "| What does better mean? | An objective | `problem.objectives` |\n", + "| Is there prior work to learn from? | A dataset | `problem.dataset` |\n", + "| Can I see a design? | A renderer | `problem.render(design)` |\n", + "| When is a design invalid? | A validity check | `problem.check_constraints(design, config)` |\n", + "| How does this design score? | A simulator | `problem.simulate(design, config)` |\n", + "| What do I have to beat? | A classical baseline | `problem.optimize(start, config)` |\n", + "\n", + "**None of those rows are API features we're selling you.** They are answers to questions you'd have to pin down yourself before running a serious experiment. The benchmark's job is to pin them down *once* so every paper can point to the same pin.\n", + "\n", + "That's what \"benchmarking engineering design\" means in one picture." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflect before moving on\n", + "\n", + "1. Pick one row in the table above and ask: *what would go wrong in published results if this row were left implicit?*\n", + "2. Look at the `volfrac` constraint we violated. If you were reviewing a paper, how would you check the authors didn't quietly skip this test?\n", + "3. Simulation is expensive; training loss is cheap. 
When in a research workflow would you use each?\n", + "\n", + "## Next\n", + "\n", + "In **Notebook 01** we take the same eight answers and drive a simple generative model against them. In **Notebook 02** we turn the simulator and baseline into a metrics harness. The pieces you just met are the same pieces we'll keep using — you never have to re-learn them." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "EngiBench312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/simple/01_training_a_generative_model.ipynb b/workshops/dcc26/simple/01_training_a_generative_model.ipynb new file mode 100644 index 00000000..287f44e6 --- /dev/null +++ b/workshops/dcc26/simple/01_training_a_generative_model.ipynb @@ -0,0 +1,485 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Notebook 01 \u2014 Training a generative model for inverse design\n", + "\n", + "*A guided tour, not an exercise sheet. Just run the cells top to bottom and read the prose between them.*\n", + "\n", + "> **Colab users:** click **File \u279c Save a copy in Drive** before editing so your changes persist." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Where we are in the workshop\n", + "\n", + "In **Notebook 00** we pinned down what a `beams2d` design actually *is*: a 50\u00d7100 grid of material densities, scored by a physics simulator under a scenario `(volfrac, rmin, forcedist, overhang_constraint)`, with a classical topology optimiser as the baseline to beat.\n", + "\n", + "That baseline works \u2014 and for `beams2d` it finishes in seconds. But it has two properties that get painful as soon as you leave the toy regime:\n", + "\n", + "1. **Every new scenario starts from scratch.** The optimiser has no memory. If tomorrow a colleague hands you a thousand new `(volfrac, forcedist)` pairs, you pay the full iterative cost a thousand times.\n", + "2. **Every scenario returns one answer.** The optimiser converges to a single design. If you want to *explore* the design space \u2014 \"give me five plausible beams for this load\" \u2014 the optimiser can't help.\n", + "\n", + "A **generative model** changes both of those. Train it once on a dataset of `(scenario, design)` pairs, and at inference time you hand it a scenario plus a random seed and it hands back a design in milliseconds. Different seeds give different designs.\n", + "\n", + "That's the pitch. The *question* is whether those fast designs are any good. This notebook builds the generator. **Notebook 02** is where we find out." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What this notebook is \u2014 and isn't\n", + "\n", + "This notebook is **not** a tutorial on GAN architectures. We're not going to stare at layer diagrams or debate activation functions. 
EngiOpt already ships a catalogue of conditional generators (CGANs, diffusion models, VAEs, \u2026) \u2014 we'll just pick one off the shelf and use it.\n", + "\n", + "The point we *do* want to make is this: **hooking a generative model onto an EngiBench problem takes almost no glue code.** The benchmark hands you a dataset in the exact format supervised training expects. The model hands you designs in the exact format the simulator expects. There's no adapter layer in between.\n", + "\n", + "By the end of this notebook you'll have:\n", + "\n", + "- a trained conditional generator for `beams2d`,\n", + "- a folder of artifacts (generated designs, matched baselines, scenarios used) that Notebook 02 picks up and evaluates,\n", + "- a clearer feel for *why* we're about to spend a whole notebook on metrics." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` and `engiopt` installed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # flip to True to force install locally\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"tqdm\"])\n", + " _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch # noqa: F401\n", + " except Exception:\n", + " _pip([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. 
Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import random\n", + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import torch as th\n", + "\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "from engiopt.cgan_cnn_2d.cgan_cnn_2d import Generator as CGAN2DGenerator\n", + "from engiopt.workshops.dcc26.notebook_helpers import (\n", + " TrainingConfig,\n", + " WorkshopGenerator,\n", + " generate_designs,\n", + " show_gen_vs_baseline,\n", + " show_training_curve,\n", + " show_training_progression,\n", + " train_supervised_generator,\n", + ")\n", + "\n", + "SEED = 7\n", + "random.seed(SEED); np.random.seed(SEED); th.manual_seed(SEED)\n", + "\n", + "if th.cuda.is_available():\n", + " DEVICE = th.device(\"cuda\")\n", + "elif th.backends.mps.is_available():\n", + " DEVICE = th.device(\"mps\")\n", + "else:\n", + " DEVICE = th.device(\"cpu\")\n", + "print(\"Device:\", DEVICE)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 1 \u2014 *Load the problem \u2014 exactly the way Notebook 00 did*\n", + "\n", + "Nothing new here. We ask for `beams2d` by name, and the problem object hands us everything we need: the design shape, the names of the conditions, and a train/val/test dataset of `(scenario, design)` pairs already pre-split for us.\n", + "\n", + "Notice what we *didn't* have to do: write a data loader, define a schema, decide on a train/test split, normalise conditions, or figure out what a design even looks like. The benchmark already answered all of those." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[\"beams2d\"](seed=SEED)\n", + "train_ds = problem.dataset[\"train\"]\n", + "test_ds = problem.dataset[\"test\"]\n", + "\n", + "condition_keys = problem.conditions_keys\n", + "design_shape = problem.design_space.shape\n", + "n_conds = len(condition_keys)\n", + "\n", + "print(f\"Design shape : {design_shape}\")\n", + "print(f\"Condition keys : {condition_keys}\")\n", + "print(f\"Train examples : {len(train_ds):,}\")\n", + "print(f\"Test examples : {len(test_ds):,}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 2 \u2014 *Reshape the dataset into tensors the model can eat*\n", + "\n", + "The benchmark dataset already pairs each design with its scenario. All we need to do is stack the columns into NumPy arrays so PyTorch can batch them. Two lines of real work.\n", + "\n", + "The only subtlety is that we rescale designs from `[0, 1]` (the physics convention \u2014 0 = void, 1 = solid) to `[-1, 1]` (the neural-network convention \u2014 matches the `tanh` output of the generator we'll use below). No physics changes; we're just matching conventions on the boundary." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# (N, n_conds) float array \u2014 one row per training sample, one column per scenario field.\n", + "conds_np = np.stack(\n", + " [np.array(train_ds[k]).astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + ")\n", + "\n", + "# (N, H, W) float array, rescaled from [0, 1] to [-1, 1].\n", + "designs_np = np.array(train_ds[\"optimal_design\"]).astype(np.float32)\n", + "targets_np = designs_np * 2.0 - 1.0\n", + "\n", + "print(f\"conditions: {conds_np.shape}, range [{conds_np.min():.2f}, {conds_np.max():.2f}]\")\n", + "print(f\"targets : {targets_np.shape}, range [{targets_np.min():.2f}, {targets_np.max():.2f}]\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's the whole data-prep step. Compare that to the usual ML-tutorial pain of \"find a dataset, clean it, split it, align it with labels.\" Pinning the benchmark contract in Notebook 00 is exactly what collapsed that work into three lines here." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 3 \u2014 *Pick a generator off the EngiOpt shelf*\n", + "\n", + "EngiOpt ships a catalogue of conditional generators for 2D problems. We'll use the CGAN-CNN generator (`engiopt.cgan_cnn_2d.Generator`) as a stand-in for \"any off-the-shelf conditional image model.\" The exact architecture is *not* the lesson of this notebook \u2014 treat it as a black box with one job: map `(noise, conditions) \u2192 design`.\n", + "\n", + "```\n", + "noise \u2208 \u211d^32 \u2500\u2500\u2500\u2510\n", + " \u251c\u2500\u2500 [ conditional CNN generator ] \u2500\u2500\u2192 design \u2208 [0, 1]^{50\u00d7100}\n", + "conditions \u2208 \u211d^4 \u2500\u2518\n", + "```\n", + "\n", + "We wrap it in `WorkshopGenerator` so callers can pass plain 2D tensors instead of the 4D tensors the underlying CNN expects. That wrapper is two lines of reshaping." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "LATENT_DIM = 32 # size of the random noise vector per sample\n", + "\n", + "cnn_gen = CGAN2DGenerator(\n", + " latent_dim=LATENT_DIM,\n", + " n_conds=n_conds,\n", + " design_shape=design_shape,\n", + ")\n", + "model = WorkshopGenerator(cnn_gen).to(DEVICE)\n", + "\n", + "n_params = sum(p.numel() for p in model.parameters())\n", + "print(f\"Generator ready: {n_params:,} parameters, input = noise({LATENT_DIM}) + conditions({n_conds})\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "One import, one constructor call. That's the integration point between EngiOpt and EngiBench." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 4 \u2014 *Train it, supervised, against the optimiser's answers*\n", + "\n", + "The training loop is the simplest thing that could work: for each batch, feed random noise plus real scenarios to the generator and ask it to match the optimiser's design on that scenario (MSE loss). No adversarial training, no diffusion schedule \u2014 just supervised regression on a benchmark dataset.\n", + "\n", + "This is deliberately a *weak* generative model. Real methods do better. We're using the simplest possible recipe because the point of the notebook is the *plumbing*, not the performance.\n", + "\n", + "> **Heads-up:** ~1\u20132 minutes on a GPU, ~5\u201310 minutes on CPU. Bump `EPOCHS` later if you want sharper designs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "EPOCHS = 10\n", + "\n", + "rng = np.random.default_rng(SEED)\n", + "snap_idx = rng.choice(len(test_ds), size=4, replace=False)\n", + "snap_conds = np.stack(\n", + " [np.array(test_ds[k])[snap_idx].astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + ")\n", + "snap_baselines = np.array(test_ds[\"optimal_design\"])[snap_idx].astype(np.float32)\n", + "\n", + "train_cfg = TrainingConfig(\n", + " latent_dim=LATENT_DIM,\n", + " epochs=EPOCHS,\n", + " batch_size=64,\n", + " lr=2e-4,\n", + " device=DEVICE,\n", + " snapshot_at_epochs=[1, max(1, EPOCHS // 2), EPOCHS],\n", + ")\n", + "\n", + "result = train_supervised_generator(\n", + " model, conds_np, targets_np,\n", + " config=train_cfg,\n", + " snapshot_conditions=snap_conds,\n", + ")\n", + "losses = result[\"losses\"]\n", + "snapshots = result[\"snapshots\"]\n", + "\n", + "print(f\"\\nFinal loss after {EPOCHS} epochs: {losses[-1]:.5f}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Sanity-check the loss curve\n", + "\n", + "A training loss that goes down is *necessary* but very much not *sufficient*. A low MSE means the generator is pixel-matching the dataset; it says nothing about whether the designs are physically valid, let alone stiff. We'll come back to that." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_training_curve(losses)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Watch the designs emerge\n", + "\n", + "At epoch 1 the generator outputs essentially noise. Halfway through training you start seeing dark blobs where material should go. By the final epoch the shape roughly tracks the ground-truth beam in the bottom row \u2014 same conditions, drawn from the test set, never shown during training." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_training_progression(snapshots, baseline_designs=snap_baselines, n_show=4)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 5 \u2014 *Use it to generate designs for unseen scenarios*\n", + "\n", + "The whole premise was \"train once, generate instantly.\" Time to cash that in. We grab a batch of scenarios from the held-out test set \u2014 scenarios the model has never been trained on \u2014 and ask the generator for a design on each one.\n", + "\n", + "No optimisation loop. No FEM. Just a forward pass." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "N_SAMPLES = 24\n", + "\n", + "test_idx = rng.choice(len(test_ds), size=N_SAMPLES, replace=False)\n", + "test_conds_np = np.stack(\n", + " [np.array(test_ds[k])[test_idx].astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + ")\n", + "baseline_designs = np.array(test_ds[\"optimal_design\"])[test_idx].astype(np.float32)\n", + "\n", + "gen_designs = generate_designs(\n", + " model, test_conds_np, latent_dim=LATENT_DIM, device=DEVICE,\n", + ")\n", + "\n", + "print(f\"Generated {gen_designs.shape[0]} designs of shape {gen_designs.shape[1:]} \"\n", + " f\"in a single forward pass.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generated vs. baseline \u2014 side by side\n", + "\n", + "Top row: the generator's output. Bottom row: the optimiser's output for the same scenario. Both are attempts to answer the same question.\n", + "\n", + "Stare at these and a few things jump out:\n", + "\n", + "- **Blurriness.** The generator hedges. MSE loss rewards the average of all plausible designs for a given scenario, and the average of two valid truss topologies is an invalid blur. 
That's a loss-function problem, not a benchmark problem.\n", + "- **Condition sensitivity.** Do the generated designs actually *change* when the scenario changes, or does the model output something close to \"the dataset mean\"? Eyeballing this is hard; measuring it is what Notebook 02 is for.\n", + "- **Plausibility.** Some of these look like beams. Some don't. A picture can't tell you whether the design is stiff under load, or whether the material budget is respected. *You need a simulator for that.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "conditions_records = [\n", + " {k: float(test_conds_np[i, j]) for j, k in enumerate(condition_keys)}\n", + " for i in range(N_SAMPLES)\n", + "]\n", + "\n", + "show_gen_vs_baseline(gen_designs, baseline_designs, conditions_records)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 6 \u2014 *Export artifacts for Notebook 02*\n", + "\n", + "Notebook 02 will load these three files and run the physics simulator on every generated design. We save them now so the evaluation notebook can start cold \u2014 no notebook-to-notebook Python state required." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "IN_COLAB = \"google.colab\" in sys.modules\n", + "ARTIFACT_DIR = (\n", + " Path(\"/content/dcc26_artifacts\") if IN_COLAB\n", + " else Path(\"workshops/dcc26/artifacts\")\n", + ")\n", + "ARTIFACT_DIR.mkdir(parents=True, exist_ok=True)\n", + "\n", + "np.save(ARTIFACT_DIR / \"generated_designs.npy\", gen_designs)\n", + "np.save(ARTIFACT_DIR / \"baseline_designs.npy\", baseline_designs)\n", + "with open(ARTIFACT_DIR / \"conditions.json\", \"w\") as f:\n", + " json.dump(conditions_records, f, indent=2)\n", + "\n", + "print(f\"Saved artifacts to {ARTIFACT_DIR}:\")\n", + "for p in sorted(ARTIFACT_DIR.iterdir()):\n", + " print(f\" {p.name}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Putting it together\n", + "\n", + "Every box in the \"generative model for inverse design\" picture was filled by one of two sides \u2014 EngiBench or EngiOpt:\n", + "\n", + "| What you need | Who provides it | One-line access |\n", + "|--------------------------------------------|-----------------|-----------------|\n", + "| The problem definition | EngiBench | `BUILTIN_PROBLEMS[\"beams2d\"]()` |\n", + "| `(scenario, design)` training pairs | EngiBench | `problem.dataset[\"train\"]` |\n", + "| The shape and range of a valid design | EngiBench | `problem.design_space` |\n", + "| The scenario schema | EngiBench | `problem.conditions_keys` |\n", + "| A conditional generator architecture | EngiOpt | `engiopt.cgan_cnn_2d.Generator` |\n", + "| A training recipe | EngiOpt | `train_supervised_generator(...)` |\n", + "| The yardstick to score generated designs | EngiBench | `problem.simulate(...)` (Notebook 02) |\n", + "| The validity test | EngiBench | `problem.check_constraints(...)` (Notebook 02) |\n", + "\n", + "The only code we wrote in this notebook was **plumbing** \u2014 stacking columns into arrays, reshaping tensors, saving `.npy` 
files. That's the story: once a benchmark pins down the contract, integrating an ML method against it is mostly a matter of connecting pipes that already exist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflect before moving on\n", + "\n", + "1. Look at the side-by-side figure. If you had to write a short paragraph in a paper claiming your generator \"works,\" what evidence in that figure would you cite \u2014 and what would a sceptical reviewer push back on?\n", + "2. We trained the model to minimise pixel-MSE against the optimiser's designs. Name one thing a *lower* MSE could improve, and one thing a lower MSE says *nothing* about.\n", + "3. The generator runs in milliseconds; the optimiser takes seconds. At what break-even number of scenarios does \"train a generator once\" start paying off compared to just running the optimiser each time?\n", + "\n", + "## Next\n", + "\n", + "In **Notebook 02** we run the physics simulator on every design this notebook produced, check them against the benchmark's constraints, and turn the intuitions above into actual numbers \u2014 objective gaps, feasibility rates, diversity proxies. 
*That* is what lets us say whether the generator \"works.\"" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "EngiBench312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/simple/02_evaluating_your_generated_designs.ipynb b/workshops/dcc26/simple/02_evaluating_your_generated_designs.ipynb new file mode 100644 index 00000000..e007d490 --- /dev/null +++ b/workshops/dcc26/simple/02_evaluating_your_generated_designs.ipynb @@ -0,0 +1,637 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Notebook 02 — Evaluating your generated designs\n", + "\n", + "*A guided tour, not an exercise sheet. Just run the cells top to bottom and read the prose between them.*\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Where we are in the workshop\n", + "\n", + "By the end of **Notebook 01** you had a stack of generated designs sitting in a folder. They *look* like beams — sort of. The optimiser's designs look sharper. The generator's designs look blurrier. Do those two sentences tell us anything useful?\n", + "\n", + "Not really. A picture tells you a design is *plausible*. 
A picture does not tell you:\n", + "\n", + "- Whether the design **obeys the physical rules** (the constraints).\n", + "- Whether the design is **actually stiff** under the load it was designed for.\n", + "- Whether the model is producing **varied** designs or secretly copying one.\n", + "- Whether a generated design is **a better starting point** for the classical optimiser than a blank slate.\n", + "\n", + "Those are four engineering questions, not four ML questions, and every one of them has an answer on the same `problem` object we used in Notebook 00. This notebook just asks them, one by one, and reports what the benchmark says." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What this notebook is — and isn't\n", + "\n", + "This is *not* a survey of every generative-model metric in the literature. There are many (MMD, DPP, FID, coverage, precision/recall, …) and a real benchmark report would use several. We're going to stick with the four questions above because each one maps **directly** onto a single method on `problem` that you already met in Notebook 00:\n", + "\n", + "| Engineering question | Who answers it |\n", + "|----------------------------------------------|---------------------|\n", + "| Does the design obey the rules? | `problem.check_constraints(...)` |\n", + "| Does the design actually work? | `problem.simulate(...)` |\n", + "| Is the model producing varied designs? | *(one line of NumPy)* |\n", + "| Does it help the classical optimiser? | `problem.optimize(...)` |\n", + "\n", + "That's the pedagogical point: **the benchmark's evaluation interface is the same interface we've been using the whole time.** Once the problem's methods are nailed down, scoring an ML method against them is mechanical." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` and `engiopt` installed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # flip to True to force install locally\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"tqdm\"])\n", + " _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch # noqa: F401\n", + " except Exception:\n", + " _pip([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. 
Set FORCE_INSTALL=True to install here.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import random\n", + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import torch as th\n", + "\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "from engiopt.cgan_cnn_2d.cgan_cnn_2d import Generator as CGAN2DGenerator\n", + "from engiopt.workshops.dcc26.notebook_helpers import (\n", + " TrainingConfig,\n", + " WorkshopGenerator,\n", + " generate_designs,\n", + " mean_pairwise_l2,\n", + " show_feasibility_bars,\n", + " show_objective_comparison,\n", + " show_optimization_trajectories,\n", + " show_pairwise_distance_heatmap,\n", + " train_supervised_generator,\n", + ")\n", + "\n", + "SEED = 7\n", + "random.seed(SEED); np.random.seed(SEED); th.manual_seed(SEED)\n", + "\n", + "if th.cuda.is_available():\n", + " DEVICE = th.device(\"cuda\")\n", + "elif th.backends.mps.is_available():\n", + " DEVICE = th.device(\"mps\")\n", + "else:\n", + " DEVICE = th.device(\"cpu\")\n", + "print(\"Device:\", DEVICE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 0 — *Load or rebuild what Notebook 01 produced*\n", + "\n", + "Notebook 01 normally saves three files into an artifacts folder:\n", + "\n", + "- `generated_designs.npy` — the generator's outputs on held-out test scenarios.\n", + "- `baseline_designs.npy` — the optimiser's answers for those same scenarios.\n", + "- `conditions.json` — the scenarios themselves.\n", + "\n", + "In Colab, participants sometimes open Notebook 02 in a fresh runtime or lose the `/content/dcc26_artifacts` folder. 
If those files are missing, this cell **rebuilds the same lightweight Notebook 01 artifact pipeline**: it loads `beams2d`, trains the same workshop CGAN-CNN for a short run, generates 24 designs, and writes the three files.\n", + "\n", + "That rebuild cell can take a few minutes, especially without a GPU. It is expected; it is not a problem with the notebook.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ARTIFACT_DIR = (\n", + " Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules\n", + " else Path(\"workshops/dcc26/artifacts\")\n", + ")\n", + "ARTIFACT_DIR.mkdir(parents=True, exist_ok=True)\n", + "\n", + "problem = BUILTIN_PROBLEMS[\"beams2d\"](seed=SEED)\n", + "\n", + "\n", + "def _rebuild_notebook01_artifacts(artifact_dir: Path) -> None:\n", + " \"\"\"Recreate the Simple Notebook 01 train/generate/export path.\"\"\"\n", + " print(\"Rebuilding Notebook 01 artifacts in this runtime.\")\n", + " print(\"This trains the same lightweight workshop generator and may take a few minutes.\")\n", + "\n", + " random.seed(SEED)\n", + " np.random.seed(SEED)\n", + " th.manual_seed(SEED)\n", + " if th.cuda.is_available():\n", + " th.cuda.manual_seed_all(SEED)\n", + "\n", + " train_ds = problem.dataset[\"train\"]\n", + " test_ds = problem.dataset[\"test\"]\n", + " condition_keys = problem.conditions_keys\n", + " design_shape = problem.design_space.shape\n", + " n_conds = len(condition_keys)\n", + "\n", + " conds_np = np.stack(\n", + " [np.array(train_ds[k]).astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + " )\n", + " designs_np = np.array(train_ds[\"optimal_design\"]).astype(np.float32)\n", + " targets_np = designs_np * 2.0 - 1.0\n", + "\n", + " latent_dim = 32\n", + " epochs = 10\n", + " rng = np.random.default_rng(SEED)\n", + "\n", + " cnn_gen = CGAN2DGenerator(\n", + " latent_dim=latent_dim,\n", + " n_conds=n_conds,\n", + " design_shape=design_shape,\n", + " )\n", + " 
model = WorkshopGenerator(cnn_gen).to(DEVICE)\n", + "\n", + " train_cfg = TrainingConfig(\n", + " latent_dim=latent_dim,\n", + " epochs=epochs,\n", + " batch_size=64,\n", + " lr=2e-4,\n", + " device=DEVICE,\n", + " snapshot_at_epochs=[],\n", + " verbose=True,\n", + " )\n", + " result = train_supervised_generator(\n", + " model,\n", + " conds_np,\n", + " targets_np,\n", + " config=train_cfg,\n", + " snapshot_conditions=None,\n", + " )\n", + " print(f\"Final rebuild loss after {epochs} epochs: {result['losses'][-1]:.5f}\")\n", + "\n", + " n_samples = 24\n", + " test_idx = rng.choice(len(test_ds), size=n_samples, replace=False)\n", + " test_conds_np = np.stack(\n", + " [np.array(test_ds[k])[test_idx].astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + " )\n", + " baseline_designs = np.array(test_ds[\"optimal_design\"])[test_idx].astype(np.float32)\n", + " gen_designs = generate_designs(\n", + " model,\n", + " test_conds_np,\n", + " latent_dim=latent_dim,\n", + " device=DEVICE,\n", + " )\n", + " conditions_records = [\n", + " {k: float(test_conds_np[i, j]) for j, k in enumerate(condition_keys)}\n", + " for i in range(n_samples)\n", + " ]\n", + "\n", + " np.save(artifact_dir / \"generated_designs.npy\", gen_designs)\n", + " np.save(artifact_dir / \"baseline_designs.npy\", baseline_designs)\n", + " with open(artifact_dir / \"conditions.json\", \"w\") as f:\n", + " json.dump(conditions_records, f, indent=2)\n", + " print(f\"Rebuilt artifacts in {artifact_dir}.\")\n", + "\n", + "\n", + "required = [\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\"]\n", + "missing = [f for f in required if not (ARTIFACT_DIR / f).exists()]\n", + "if missing:\n", + " print(f\"Missing {missing} in {ARTIFACT_DIR}.\")\n", + " print(\"Running the Notebook 01 artifact rebuild now so Notebook 02 can continue.\")\n", + " _rebuild_notebook01_artifacts(ARTIFACT_DIR)\n", + "else:\n", + " print(f\"Found Notebook 01 artifacts in {ARTIFACT_DIR}.\")\n", + "\n", + "\n", 
+ "gen_designs = np.load(ARTIFACT_DIR / \"generated_designs.npy\")\n", + "baseline_designs = np.load(ARTIFACT_DIR / \"baseline_designs.npy\")\n", + "with open(ARTIFACT_DIR / \"conditions.json\") as f:\n", + " conditions = json.load(f)\n", + "\n", + "print(f\"Generated designs : {gen_designs.shape}\")\n", + "print(f\"Baseline designs : {baseline_designs.shape}\")\n", + "print(f\"Scenarios : {len(conditions)} (keys = {list(conditions[0].keys())})\")\n", + "print(f\"Artifact dir : {ARTIFACT_DIR}\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 1 — *Does the design obey the rules?*\n", + "\n", + "This is the **feasibility** question. A generator can produce a beam that looks reasonable but quietly uses twice the material budget, or has density values outside the physical range, or breaks some solver-stability rule you didn't think to check. Low training loss gives you no protection against any of those.\n", + "\n", + "In Notebook 00 we met `problem.check_constraints(design, config)`: it runs every `THEORY` and `IMPL` constraint the benchmark ships with and returns the ones that fired. Here we just call it once per generated design and count how many scenarios come back clean." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def feasibility_count(designs, configs):\n", + " feasible_flags = []\n", + " for d, cfg in zip(designs, configs):\n", + " # len(violations) == 0 means every constraint passed.\n", + " violations = problem.check_constraints(design=d, config=cfg)\n", + " feasible_flags.append(len(violations) == 0)\n", + " return np.array(feasible_flags)\n", + "\n", + "\n", + "gen_feasible = feasibility_count(gen_designs, conditions)\n", + "base_feasible = feasibility_count(baseline_designs, conditions)\n", + "\n", + "print(f\"Generated feasible : {gen_feasible.sum()} / {len(gen_feasible)} \"\n", + " f\"({gen_feasible.mean()*100:.0f}%)\")\n", + "print(f\"Baseline feasible : {base_feasible.sum()} / {len(base_feasible)} \"\n", + " f\"({base_feasible.mean()*100:.0f}%)\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Same numbers, but as a bar chart so the ratio is obvious at a glance.\n", + "show_feasibility_bars(pd.DataFrame({\n", + " \"gen_feasible\": gen_feasible,\n", + " \"base_feasible\": base_feasible,\n", + "}))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Which rules are being broken?\n", + "\n", + "Counting feasibility tells us *how many* designs failed, not *why*. `problem.check_constraints(...)` returns a violation list — each entry has the constraint's name and a human-readable message. Tallying those names across the whole generator output set tells us which rule the model is breaking most often, which is the actionable signal: a `volume_fraction_bound` failure means *change the loss/conditioning*, while an `IMPL` solver-stability failure means *clamp the output range*." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from collections import Counter\n", + "\n", + "def violation_breakdown(designs, configs):\n", + " counts = Counter()\n", + " examples = {} # one example message per constraint name\n", + " cats = {} # THEORY / IMPL / ... per name\n", + " for d, cfg in zip(designs, configs):\n", + " for v in problem.check_constraints(design=d, config=cfg).violations:\n", + " name = v.constraint.check.__name__\n", + " counts[name] += 1\n", + " examples.setdefault(name, v.cause)\n", + " cats.setdefault(name, str(v.constraint.categories))\n", + " return counts, examples, cats\n", + "\n", + "\n", + "gen_counts, gen_examples, gen_cats = violation_breakdown(gen_designs, conditions)\n", + "\n", + "if not gen_counts:\n", + " print(\"No constraint violations on the generated set — nothing to break down.\")\n", + "else:\n", + " n = len(gen_designs)\n", + " print(f\"Violations on the generated set ({n} designs total):\\n\")\n", + " for name, k in gen_counts.most_common():\n", + " print(f\" {k:3d} / {n} ({k/n*100:4.0f}%) [{gen_cats[name]}] {name}\")\n", + " top = gen_counts.most_common(1)[0][0]\n", + " print(f\"\\nExample cause for '{top}':\\n {gen_examples[top]}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**How to read this.** The baseline is the optimiser's output — by construction it should pass almost every constraint. If the generator's bar is noticeably shorter than the baseline's, the model is learning *something that looks like a beam* but not *something that obeys the benchmark rules*. That's a real failure mode and no amount of prettier pictures will fix it — you need to change the loss, the conditioning, or the architecture.\n", + "\n", + "Notice the asymmetry: a design that's infeasible is disqualified regardless of how good its objective looks. **Feasibility gates performance.** We check it first for that reason." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 2 — *Does the design actually work?*\n", + "\n", + "This is the **performance** question. Among the designs that passed feasibility, how stiff are they *really* — measured by the physics simulator, not by pixel loss?\n", + "\n", + "In Notebook 00 we met `problem.simulate(design, config)`. It takes seconds on `beams2d` and returns the engineering objective (compliance — lower is better). We run it on each generated design *and* on the matching baseline design under the *same* scenario, so we can compare apples to apples." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rows = []\n", + "for i, (g, b, cfg) in enumerate(zip(gen_designs, baseline_designs, conditions)):\n", + " problem.reset(seed=SEED + i)\n", + " g_obj = float(problem.simulate(g, config=cfg)[0])\n", + " problem.reset(seed=SEED + i)\n", + " b_obj = float(problem.simulate(b, config=cfg)[0])\n", + " rows.append({\n", + " \"sample\": i,\n", + " \"gen_obj\": g_obj,\n", + " \"base_obj\": b_obj,\n", + " \"gen_minus_base\": g_obj - b_obj,\n", + " \"gen_feasible\": bool(gen_feasible[i]),\n", + " \"base_feasible\": bool(base_feasible[i]),\n", + " })\n", + "\n", + "results = pd.DataFrame(rows)\n", + "results.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The two plots below show the same numbers in two ways. The **histogram** asks: do the two objective distributions sit on top of each other, or has the generator shifted right (higher compliance = worse)? The **scatter** asks: for each individual scenario, is the generated design above or below the `y = x` diagonal? Points *below* the diagonal are scenarios where the generator *beat* the optimiser — a rare but real thing on a well-trained model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_comparison(results)\n", + "\n", + "mean_gap = results[\"gen_minus_base\"].mean()\n", + "win_rate = float((results[\"gen_obj\"] < results[\"base_obj\"]).mean())\n", + "print(f\"Mean objective gap (gen − base): {mean_gap:+.1f} \"\n", + " f\"(positive = generator is worse on average)\")\n", + "print(f\"Generator beats baseline on : {win_rate*100:.0f}% of scenarios\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**How to read this.** For a *simple* supervised-MSE generator like ours, a positive mean gap is expected — the optimiser is a very strong baseline, and ten epochs of MSE training won't catch it. The number we'd care about in a paper is *how big* that gap is relative to the typical objective value, whether it stays stable across re-training with different seeds, and whether a more sophisticated model (GAN, diffusion, …) closes it." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 3 — *Is the model producing varied designs, or one design 24 times?*\n", + "\n", + "This is the **diversity** question, and it's the one that doesn't need a physics call — it's a property of the generated set itself. A generative model that collapses to a single beam topology gets a low training loss (average over the dataset looks fine) but is useless for exploration.\n", + "\n", + "The crudest-but-useful measure is *mean pairwise L2 distance* between all generated designs: average how different any two outputs are. We compute the same number for the baseline set as a sanity reference — the baseline comes from an optimiser run on diverse scenarios, so it naturally spreads out." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gen_div = mean_pairwise_l2(gen_designs)\n", + "base_div = mean_pairwise_l2(baseline_designs)\n", + "\n", + "print(f\"Mean pairwise L2 — generated : {gen_div:.2f}\")\n", + "print(f\"Mean pairwise L2 — baseline : {base_div:.2f}\")\n", + "print(f\"Ratio (gen / base) : {gen_div / base_div:.2f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# The heatmap makes collapse visible — a big dark block means a cluster of\n", + "# near-duplicates. A mostly-uniform warm plot means healthy variety.\n", + "show_pairwise_distance_heatmap(gen_designs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**How to read this.** If the generator's diversity is ≳ the baseline's, the model is exploring at least as widely as the optimiser. If it's much lower — say under half — the model is partially collapsing, and the scatter plot in Part 2 is lying to you: \"objective close to baseline\" might mean \"objective close to *one* baseline, because the model is always outputting that one beam.\"\n", + "\n", + "This is why diversity has to be reported *alongside* the objective, not instead of it." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 4 — *Does the generator actually speed up the optimiser?*\n", + "\n", + "This is the question that, if the answer is yes, justifies the whole pipeline.\n", + "\n", + "Recall the argument from Notebook 01: the optimiser is slow, the generator is fast, and the dream is to amortise. But there's a gentler version of the same dream — even if the generator's designs aren't quite optimal, they might be *a much better starting point* for the classical optimiser than a blank slate. 
If so, then running *(generator → optimiser)* gets you the optimiser's quality in a fraction of its normal iterations.\n", + "\n", + "We test this with a tiny demo: pick three scenarios, feed the generator's design into `problem.optimize(...)` as the starting point, and watch the compliance curve. We compare where the trajectory *starts* (that's what the generator gave us for free) and where it *ends* (that's where the optimiser drives it) against the baseline's compliance for the same scenario.\n", + "\n", + "> **Heads-up:** `problem.optimize(...)` runs the real FEM topology optimiser — each scenario takes ~30 seconds depending on hardware." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "N_WARMSTART_DEMO = 3\n", + "\n", + "opt_data = []\n", + "for i in range(min(N_WARMSTART_DEMO, len(gen_designs))):\n", + " cfg = dict(conditions[i])\n", + "\n", + " # Run the optimiser starting from the GENERATED design — this is the warmstart.\n", + " problem.reset(seed=SEED + i)\n", + " _, history = problem.optimize(gen_designs[i], config=cfg)\n", + " trajectory = [float(step.obj_values[0]) for step in history]\n", + "\n", + " # The compliance of the ORIGINAL baseline design — the target to hit.\n", + " problem.reset(seed=SEED + i)\n", + " base_obj = float(problem.simulate(baseline_designs[i], config=cfg)[0])\n", + "\n", + " opt_data.append({\n", + " \"sample_idx\": i,\n", + " \"obj_trajectory\": trajectory,\n", + " \"base_obj\": base_obj,\n", + " })\n", + " print(f\"Sample {i}: start = {trajectory[0]:8.1f}, end = {trajectory[-1]:8.1f}, \"\n", + " f\"baseline = {base_obj:8.1f} ({len(trajectory)} optimiser steps)\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_optimization_trajectories(opt_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**How to read this.** The three numbers printed at the top of each 
panel are the **optimality gaps** a serious benchmark would report:\n", + "\n", + "- **IOG** (Initial Optimality Gap) — how far the *generated starting point* is from the baseline. Small or negative means the model already gave the optimiser something close to the answer.\n", + "- **FOG** (Final Optimality Gap) — how far the *optimiser's output from that start* is from the baseline. Close to zero means the warmstart didn't trap the optimiser in a bad local minimum.\n", + "- **COG** (Cumulative Optimality Gap) — the shaded area. Small means the optimiser converged quickly from this start.\n", + "\n", + "For our MSE generator, IOG is usually awful (pictures are blurry) but FOG recovers fast — the optimiser does its job. The interesting question for a better model is whether you can shrink *both*: a generator whose outputs are already near-optimal *and* stay there after a handful of optimiser steps would be a genuine speedup." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Putting it together\n", + "\n", + "Four questions, four numbers (or small sets of numbers). 
If you were writing a one-row benchmark table for a paper, this is essentially what would go in it:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "summary = pd.DataFrame([{\n", + " \"feasible %\": f\"{gen_feasible.mean()*100:.0f}%\",\n", + " \"mean obj gap (gen−base)\": f\"{results['gen_minus_base'].mean():+.1f}\",\n", + " \"win rate vs baseline\": f\"{(results['gen_obj'] < results['base_obj']).mean()*100:.0f}%\",\n", + " \"diversity (L2)\": f\"{gen_div:.2f}\",\n", + " \"baseline diversity (L2)\": f\"{base_div:.2f}\",\n", + " \"warmstart IOG (mean of demo)\": f\"{np.mean([d['obj_trajectory'][0] - d['base_obj'] for d in opt_data]):+.1f}\",\n", + " \"warmstart FOG (mean of demo)\": f\"{np.mean([d['obj_trajectory'][-1] - d['base_obj'] for d in opt_data]):+.1f}\",\n", + "}]).T.rename(columns={0: \"value\"})\n", + "summary" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Look at those seven numbers side by side. A row like this is what reviewers, collaborators, and future-you actually need — **not** a pretty grid of designs, and **not** a training loss. Every field answered a question the design-pictures-alone could not.\n", + "\n", + "More importantly: every single field came from a method we already had in Notebook 00. No new infrastructure. The benchmark contract *is* the evaluation contract." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## What we deliberately skipped\n", + "\n", + "A publication-grade evaluation would add, at minimum:\n", + "\n", + "- **Distributional metrics** (MMD, DPP, FID, coverage/precision-recall) — do the *two distributions* of designs match, not just pairs?\n", + "- **Novelty against the training set** — is the model generalising, or quietly copy-pasting?\n", + "- **Multiple seeds** — any single training run lies; report mean ± std over 3–5 seeds.\n", + "- **Many more warmstart scenarios** — 3 is a demo, not a statistic.\n", + "- **Per-scenario breakdowns** — does the generator fail uniformly, or only on certain condition ranges?\n", + "\n", + "Every one of those additions uses the *same* `problem` methods, just in different aggregations. The full pipeline in the companion notebook `02_evaluate_metrics.ipynb` walks through them." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflect before moving on\n", + "\n", + "1. Look at the seven-row summary. Which single number would change your mind the most if it were very good, or very bad? Why that one and not the others?\n", + "2. Suppose the feasibility rate is high but the mean objective gap is also high. What does that mean about the generator — is it being too *safe*, or too *dumb*, and how would you tell the difference?\n", + "3. The warmstart demo compared *generator-started* optimisation to a baseline. What would be a fairer comparison for claiming the generator *helps* the optimiser? (Hint: Notebook 00's `problem.optimize(start, cfg)` started from a uniform field.)\n", + "\n", + "## Next\n", + "\n", + "You've now seen the full workflow of benchmark-driven research in engineering design: **consume** a benchmark (Notebook 00), **train against** it (Notebook 01), and **evaluate on** it (this notebook). 
**Notebook 03** flips the perspective and shows what it takes to *build* a new EngiBench problem of your own — answering the same eight researcher's questions from Notebook 00, but in code you write yourself." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "EngiBench312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/simple/03_writing_your_own_problem.ipynb b/workshops/dcc26/simple/03_writing_your_own_problem.ipynb new file mode 100644 index 00000000..f686e48d --- /dev/null +++ b/workshops/dcc26/simple/03_writing_your_own_problem.ipynb @@ -0,0 +1,700 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Notebook 03 — Writing your own design problem\n", + "\n", + "*A guided tour, not an exercise sheet. Just run the cells top to bottom and read the prose between them.*\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Where we are in the workshop\n", + "\n", + "In **Notebook 00** we walked through the `beams2d` problem and wrote down the eight things any engineering benchmark has to pin down. Notebooks 01 and 02 then consumed that benchmark to train and evaluate a model.\n", + "\n", + "This notebook flips the perspective. Instead of *consuming* a benchmark, we're going to **write one**. 
That means we, the researcher, have to answer the same eight questions in code — and package the answers so another lab can import our problem with a single line and get the exact same thing we did.\n", + "\n", + "We'll deliberately pick a problem that is **as simple as possible while still being engineering**: no FEM, no images, no meshes. Just a closed-form physics formula you could do on a napkin. That way nothing distracts from the point of this notebook, which is the *contract* — the interface a benchmark has to honour to be reusable.\n", + "\n", + "**You do not need any ML background for this notebook.** Just high-school algebra and a willingness to look at a few lines of Python." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A tiny engineering problem to anchor the discussion\n", + "\n", + "Imagine a colleague walks into your office and says:\n", + "\n", + "> *\"I have a cantilever beam bolted to a wall with a weight hanging off the end. I want the **lightest** rectangular cross-section that doesn't break and doesn't flex more than the spec allows. Can ML help me?\"*\n", + "\n", + "Picture it:\n", + "\n", + "```\n", + " wall\n", + " |\n", + " |==================== | ← end of beam\n", + " | length L |\n", + " ↓ P (tip load)\n", + "\n", + " cross-section:\n", + " ┌─────────┐\n", + " │ │ height h\n", + " └─────────┘\n", + " width b\n", + "```\n", + "\n", + "We're going to turn that sentence into a proper benchmark problem. The eight things we pinned down in Notebook 00 are exactly what we need to answer now — but this time we're the ones supplying the answers:\n", + "\n", + "1. **What am I allowed to design?** — a cross-section `(h, b)`.\n", + "2. **Under what scenarios must it work?** — a tip load `P` and a length `L`.\n", + "3. **What makes one design better?** — less **mass**.\n", + "4. **When is a candidate invalid?** — if the beam breaks (stress too high) or flexes too much (tip deflection too large).\n", + "5. 
**Is there prior data?** — no, we're the first ones to define this benchmark.\n", + "6. **Can I see a design?** — yes, we'll draw the cross-section and annotate its physics.\n", + "7. **How do I score a candidate?** — plug `(h, b, P, L)` into three high-school physics formulas.\n", + "8. **What do I have to beat?** — a trivial random-search baseline we'll write ourselves.\n", + "\n", + "The rest of this notebook walks through those answers one at a time, turning each one into a piece of Python, and finally bundles them into a single reusable `Problem` class.\n", + "\n", + "### The physics, on a napkin\n", + "\n", + "For a rectangular cantilever of height `h` and width `b`, loaded with force `P` at the tip of a beam of length `L`, three numbers matter (from Euler–Bernoulli beam theory — you do not need to derive these):\n", + "\n", + "- **Max bending stress** at the wall: `σ = 6 · P · L / (b · h²)`\n", + "- **Tip deflection**: `δ = 4 · P · L³ / (E · b · h³)`\n", + "- **Mass**: `m = ρ · L · b · h`\n", + "\n", + "where `E` is Young's modulus and `ρ` is density — both constants of the material (steel)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` installed." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # flip to True to force install locally\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + " _pip([\"engibench[all]\", \"matplotlib\"])\n", + " _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. 
Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Annotated\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from gymnasium import spaces\n", + "\n", + "from engibench.core import Problem, ObjectiveDirection, OptiStep\n", + "from engibench.constraint import bounded, constraint, THEORY\n", + "\n", + "SEED = 7\n", + "rng = np.random.default_rng(SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 1 — *What am I allowed to design?*\n", + "\n", + "First we have to describe the **shape of the output**: what, precisely, does a designer (or a model) hand back to us? For this problem a design is just two numbers — the cross-section's height `h` and width `b`, in metres.\n", + "\n", + "EngiBench (like Gymnasium / OpenAI Gym) represents this with a **`Box` space**: a bounded box in ℝⁿ. Every valid design has to live inside this box. Pinning this down matters because the moment you hook up a generative model, its output layer has to target *exactly* this shape and these bounds — no ambiguity allowed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Bounds chosen so \"obviously sensible\" cross-sections (1–20 cm) all fit.\n", + "design_space = spaces.Box(\n", + " low=np.array([0.02, 0.01], dtype=np.float32), # h_min = 2 cm, b_min = 1 cm\n", + " high=np.array([0.20, 0.10], dtype=np.float32), # h_max = 20 cm, b_max = 10 cm\n", + " dtype=np.float32,\n", + ")\n", + "\n", + "print(\"Design space:\", design_space)\n", + "print(\"Shape: \", design_space.shape, \" (two scalars: [h, b])\")\n", + "print(\"Sample: \", design_space.sample())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Reading that output.** A design is a **length-2 float array** in fixed bounds. That's the contract: anything — a hand-picked design, a classical optimiser's result, a neural-network output — has to land inside this box. Nothing else counts as a valid submission." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 2 — *Under what conditions must my design work?*\n", + "\n", + "A cantilever beam isn't just *\"a beam\"*. It's a beam holding **a specific load at a specific length**. Change either one and the right cross-section changes with it.\n", + "\n", + "EngiBench asks us to declare those *operating conditions* up front as a **dataclass** — one field per scenario parameter, with bounds attached via `bounded(...)`. The bounds aren't a suggestion: `check_constraints()` will refuse any scenario that falls outside them, the same way it does for `beams2d`'s `volfrac`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class CantileverConditions:\n", + " # Tip load in Newtons (≈ 10 kg to 1 ton hanging off the end).\n", + " load_N: Annotated[float, bounded(lower=100.0, upper=10_000.0)] = 1000.0\n", + " # Beam length in metres.\n", + " length_m: Annotated[float, bounded(lower=0.2, upper=2.0)] = 1.0\n", + "\n", + "\n", + "cond = CantileverConditions()\n", + "print(\"A default scenario:\", cond)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Two fields, two annotated ranges. That's the whole scenario description. Every evaluation from here on will be *relative to a specific `(load_N, length_m)` pair* — never \"just a beam\"." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 3 — *What does \"better\" actually mean?*\n", + "\n", + "The colleague asked for the **lightest** beam. Mass is a single scalar we want to push **down**. So the objective is one name plus one direction." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "objectives = ((\"mass_kg\", ObjectiveDirection.MINIMIZE),)\n", + "print(\"Objectives:\", objectives)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that `mass_kg` is a **physics quantity**, not an ML loss. It has units. Two people comparing methods on this benchmark will always be comparing the same thing, measured in the same unit — which is the whole point of fixing an objective." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 4 — *How do I score a candidate?*\n", + "\n", + "This is the **simulator** — the function that takes `(design, conditions)` and returns the objective value. In the `beams2d` problem this is a real FEM solver that takes seconds. 
In our toy problem it's **three lines of algebra**, but the *role* it plays is identical: it is the final arbiter of quality, and the thing any method — ML or not — is ultimately judged by.\n", + "\n", + "We'll write it as a plain function first, then fold it into the `Problem` class at the end." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Material constants for structural steel — fixed, not designable.\n", + "E_PA = 200e9 # Young's modulus [Pa]\n", + "RHO_KGM3 = 7850.0 # Density [kg/m^3]\n", + "\n", + "\n", + "def cantilever_physics(h: float, b: float, load_N: float, length_m: float) -> dict:\n", + " \"\"\"Closed-form cantilever beam response. Returns stress, deflection, and mass.\"\"\"\n", + " stress_Pa = 6.0 * load_N * length_m / (b * h**2)\n", + " deflection_m = 4.0 * load_N * length_m**3 / (E_PA * b * h**3)\n", + " mass_kg = RHO_KGM3 * length_m * b * h\n", + " return {\"stress_Pa\": stress_Pa, \"deflection_m\": deflection_m, \"mass_kg\": mass_kg}\n", + "\n", + "\n", + "# Try a hefty cross-section under a 1 kN tip load on a 1 m beam:\n", + "r = cantilever_physics(h=0.06, b=0.03, load_N=1000.0, length_m=1.0)\n", + "print(f\"stress = {r['stress_Pa']/1e6:7.2f} MPa\")\n", + "print(f\"deflection= {r['deflection_m']*1000:7.2f} mm\")\n", + "print(f\"mass = {r['mass_kg']:7.2f} kg\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That's our \"simulator\". It is deterministic, fast, and trivially reproducible — exactly the properties a benchmark's scoring function needs. A bigger benchmark might hide a PDE solver behind this function signature, but the *contract* `(design, scenario) → objective` is the same." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## 5 — *When is a candidate design actually invalid?*\n", + "\n", + "Here's where ML reviewers tend to get caught out: a generative model can hit low training loss and still emit designs that are **nonsense** in the physical world. A benchmark has to protect against this with an *independent* validity test that runs regardless of how a design was produced.\n", + "\n", + "For our cantilever, two physical rules must hold:\n", + "\n", + "| Rule | Formula | Category |\n", + "|---|---|---|\n", + "| Beam must not break under the load | `stress ≤ 250 MPa` (steel yield) | `THEORY` |\n", + "| Beam must not flex more than the spec | `deflection ≤ L / 250` (standard serviceability limit) | `THEORY` |\n", + "\n", + "EngiBench calls each of these a **constraint**, written as a tiny function that *asserts* the rule must hold. If the assertion trips, `check_constraints()` collects it as a violation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "SIGMA_ALLOW_PA = 250e6 # Steel yield stress [Pa]\n", + "DEFLECTION_LIMIT_RATIO = 1.0 / 250.0 # Serviceability limit: delta <= L/250\n", + "\n", + "\n", + "@constraint(categories=THEORY)\n", + "def stress_ok(design: np.ndarray, load_N: float, length_m: float, **_) -> None:\n", + " \"\"\"The beam must not yield under the tip load.\"\"\"\n", + " h, b = float(design[0]), float(design[1])\n", + " stress = 6.0 * load_N * length_m / (b * h**2)\n", + " assert stress <= SIGMA_ALLOW_PA, (\n", + " f\"stress {stress/1e6:.1f} MPa exceeds yield {SIGMA_ALLOW_PA/1e6:.0f} MPa\"\n", + " )\n", + "\n", + "\n", + "@constraint(categories=THEORY)\n", + "def deflection_ok(design: np.ndarray, load_N: float, length_m: float, **_) -> None:\n", + " \"\"\"The tip must not deflect more than L/250.\"\"\"\n", + " h, b = float(design[0]), float(design[1])\n", + " delta = 4.0 * load_N * length_m**3 / (E_PA * b * 
h**3)\n", + " limit = length_m * DEFLECTION_LIMIT_RATIO\n", + " assert delta <= limit, (\n", + " f\"deflection {delta*1000:.2f} mm exceeds limit {limit*1000:.2f} mm\"\n", + " )\n", + "\n", + "\n", + "print(\"Constraints defined:\", stress_ok.check.__name__, \",\", deflection_ok.check.__name__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Each constraint is just a function that takes the design + the scenario and asserts a physical rule. No ML here — it's a signed-off physics test that any design has to pass, no matter how it was produced.\n", + "\n", + "Crucially, these live **separately from the simulator**. If we only checked the rules inside `simulate()`, a clever-but-wrong model could game the scoring function. An independent validity test is what keeps papers honest." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "---\n## 6 — *What's the strongest non-ML baseline?*\n\nA benchmark without a reference number to beat is useless: nobody can tell whether a new method is *better* than something that already existed. So before we wrap everything up, we owe the benchmark two more pieces:\n\n- a way to **sample a random design** from the design box (a sensible starting point for anything, and the building block for generating a reference dataset later);\n- a **simple optimiser** that takes a starting design and drives it toward lower mass while still obeying the rules.\n\nWe'll write both as standalone functions first. The only reason they end up as class methods later is so every `Problem` in EngiBench exposes them under the same names (`random_design`, `optimize`)." + }, + { + "cell_type": "markdown", + "source": "### 6.1 A uniform random sample from the design box\n\nThis one is almost embarrassingly simple: pick `(h, b)` uniformly inside the bounds we declared in Section 1. 
That's the whole idea.", + "metadata": {} + }, + { + "cell_type": "code", + "source": "# Three uniform random picks from the design box:\nfor _ in range(3):\n d = rng.uniform(design_space.low, design_space.high).astype(np.float32)\n print(f\" random design: h = {d[0]*1000:5.1f} mm, b = {d[1]*1000:5.1f} mm\")", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": "That's all `random_design()` will do inside the `Problem` class — it just returns a uniform sample from the design box. Most of these won't satisfy our physical rules, and that's fine: *fixing them up* is what the optimiser is for.\n\n### 6.2 A reference optimiser — feasible random-perturbation search\n\nThe simplest possible classical optimiser that still respects constraints is:\n\n> *Start somewhere. Nudge the current best design by a small random vector. Throw the nudge out if it breaks a rule. Keep the nudge if it lowers the objective. Repeat.*\n\nNo gradients. No solver. Just **feasibility** and **comparison**. Written as a recipe:\n\n1. Start from a design; record its mass if feasible, else `∞`.\n2. For each step, sample a Gaussian perturbation of the current best and clip back into the design box.\n3. If the candidate violates a constraint, keep the current best and move on.\n4. Otherwise score the candidate; if its mass beats the incumbent, adopt it.\n5. Append the best-mass-so-far to a history list — that list is the curve you plot.\n\nWe first need a tiny helper that asks *\"does this design pass both `@constraint` rules?\"*. 
Then the search loop itself is about a dozen lines.", + "metadata": {} + }, + { + "cell_type": "code", + "source": "def is_feasible(design, load_N, length_m):\n \"\"\"Run both @constraint functions; return True iff neither raises.\"\"\"\n kwargs = {\"design\": design, \"load_N\": load_N, \"length_m\": length_m}\n return not any(c.check_dict(kwargs) for c in (stress_ok, deflection_ok))\n\n\ndef random_search(start, load_N, length_m, n_steps=500, step_scale=(0.01, 0.005)):\n \"\"\"Nudge, clip, reject-if-infeasible, keep-if-better. Returns (best_design, mass_curve).\"\"\"\n step_scale = np.array(step_scale, dtype=np.float32)\n best = np.clip(start, design_space.low, design_space.high).astype(np.float32)\n if is_feasible(best, load_N, length_m):\n best_mass = cantilever_physics(best[0], best[1], load_N, length_m)[\"mass_kg\"]\n else:\n best_mass = float(\"inf\")\n curve = [best_mass]\n\n for _ in range(n_steps):\n cand = best + rng.normal(size=2).astype(np.float32) * step_scale\n cand = np.clip(cand, design_space.low, design_space.high)\n if is_feasible(cand, load_N, length_m):\n m = cantilever_physics(cand[0], cand[1], load_N, length_m)[\"mass_kg\"]\n if m < best_mass:\n best, best_mass = cand, m\n curve.append(best_mass)\n return best, curve\n\n\nstart = np.array([0.15, 0.08], dtype=np.float32) # deliberately heavy starting point\nbest_standalone, curve = random_search(start, load_N=1000.0, length_m=1.0, n_steps=500)\n\nplt.figure(figsize=(5, 3))\nplt.plot(curve)\nplt.xlabel(\"step\"); plt.ylabel(\"best mass so far [kg]\")\nplt.title(\"Standalone baseline search\"); plt.grid(alpha=0.3); plt.tight_layout(); plt.show()\n\nprint(f\"Lightest feasible beam found: {curve[-1]:.2f} kg at h={best_standalone[0]*1000:.1f} mm, b={best_standalone[1]*1000:.1f} mm\")", + "metadata": {}, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": "A few hundred random nudges already drove the mass from the starting point's ~94 kg down to a much smaller 
number — not bad for an algorithm with no gradients and roughly a dozen lines of code. That final plateau is our **reference**: any ML method proposing designs for this scenario has to reliably land below it to count as doing something useful.\n\nWhen we fold this loop into the `Problem` class in the next section, two cosmetic things change — the method is called `optimize()` (to match EngiBench's contract), and each step's history entry is wrapped as an `OptiStep(obj_values=..., step=...)` so other tooling can consume it. Same algorithm, richer wrapper.", + "metadata": {} + }, + { + "cell_type": "markdown", + "source": "---\n## 7 — *Putting it all together: a `Problem` class*\n\nWe now have every piece we need:\n\n- a `design_space` (what the output looks like)\n- a `Conditions` dataclass (what scenario we're evaluating against)\n- an `objectives` tuple (the scalar we're pushing on)\n- a simulator function (the scoring rule)\n- two `@constraint` functions (the validity rules)\n- a uniform sampler `random_design` and a random-search baseline `random_search`\n\nThe last step is to stitch them into one `Problem` subclass. That's the object other code (and other people) will import. Everything below gets wired into it as class attributes or methods. 
There's nothing new here — we're just collecting what we already have, under the method names EngiBench expects.", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class CantileverBeamProblem(Problem[np.ndarray]):\n", + " \"\"\"A minimal analytical benchmark: size a rectangular cantilever for minimum mass.\"\"\"\n", + "\n", + " version = 0\n", + " objectives = ((\"mass_kg\", ObjectiveDirection.MINIMIZE),)\n", + "\n", + " # The scenario (what the designer doesn't get to pick).\n", + " @dataclass\n", + " class Conditions:\n", + " load_N: Annotated[float, bounded(lower=100.0, upper=10_000.0)] = 1000.0\n", + " length_m: Annotated[float, bounded(lower=0.2, upper=2.0)] = 1.0\n", + "\n", + " # `Config` = the scenario plus any solver-only knobs (we have none).\n", + " @dataclass\n", + " class Config(Conditions):\n", + " max_iter: Annotated[int, bounded(lower=1, upper=10_000)] = 500\n", + "\n", + " dataset_id = \"IDEALLab/cantilever_toy_v0\" # placeholder — no dataset published\n", + " container_id = None\n", + "\n", + " def __init__(self, seed: int = 0, **kwargs):\n", + " super().__init__(seed=seed)\n", + " self.config = self.Config(**kwargs)\n", + " self.conditions = self.Conditions(\n", + " load_N=self.config.load_N,\n", + " length_m=self.config.length_m,\n", + " )\n", + " self.design_space = spaces.Box(\n", + " low=np.array([0.02, 0.01], dtype=np.float32),\n", + " high=np.array([0.20, 0.10], dtype=np.float32),\n", + " dtype=np.float32,\n", + " )\n", + " self.design_constraints = (stress_ok, deflection_ok)\n", + "\n", + " # --- The scoring rule ---------------------------------------------------\n", + " def simulate(self, design: np.ndarray, config: dict | None = None) -> np.ndarray:\n", + " cfg = {**self.config.__dict__, **(config or {})}\n", + " h, b = float(design[0]), float(design[1])\n", + " mass = RHO_KGM3 * cfg[\"length_m\"] * b * h\n", + " return np.array([mass], dtype=np.float32)\n", 
+ "\n", + " # --- A uniformly random sample inside the design box -------------------\n", + " def random_design(self):\n", + " design = self.np_random.uniform(\n", + " self.design_space.low, self.design_space.high\n", + " ).astype(np.float32)\n", + " return design, -1 # -1 = \"not sampled from any dataset\"\n", + "\n", + " # --- The classical baseline: simple random-perturbation search --------\n", + " def optimize(self, starting_point, config=None):\n", + " cfg = {**self.config.__dict__, **(config or {})}\n", + " x = np.clip(starting_point, self.design_space.low, self.design_space.high).astype(np.float32)\n", + " best = x.copy()\n", + " best_obj = self.simulate(best, cfg)\n", + " feasible = not self.check_constraints(best, config=cfg)\n", + " best_score = float(best_obj[0]) if feasible else float(\"inf\")\n", + " history = [OptiStep(obj_values=best_obj, step=0)]\n", + "\n", + " step_scale = np.array([0.01, 0.005], dtype=np.float32) # 1 cm / 0.5 cm jitter\n", + " for i in range(int(cfg[\"max_iter\"])):\n", + " cand = best + self.np_random.normal(size=best.shape).astype(np.float32) * step_scale\n", + " cand = np.clip(cand, self.design_space.low, self.design_space.high)\n", + " if self.check_constraints(cand, config=cfg):\n", + " history.append(OptiStep(obj_values=best_obj, step=i + 1))\n", + " continue\n", + " cand_obj = self.simulate(cand, cfg)\n", + " if float(cand_obj[0]) < best_score:\n", + " best, best_obj, best_score = cand.copy(), cand_obj, float(cand_obj[0])\n", + " history.append(OptiStep(obj_values=best_obj, step=i + 1))\n", + " return best, history\n", + "\n", + " # --- How a human looks at a design ------------------------------------\n", + " def render(self, design: np.ndarray, *, open_window: bool = False):\n", + " cfg = self.config\n", + " h, b = float(design[0]), float(design[1])\n", + " phys = cantilever_physics(h, b, cfg.load_N, cfg.length_m)\n", + "\n", + " fig, ax = plt.subplots(figsize=(4.2, 4.2))\n", + " ax.add_patch(plt.Rectangle((-b/2, 0), b, 
h, facecolor=\"#4c78a8\", edgecolor=\"black\"))\n", + " pad = 0.12\n", + " ax.set_xlim(-pad, pad); ax.set_ylim(-0.02, 0.22)\n", + " ax.set_aspect(\"equal\"); ax.set_xlabel(\"width b [m]\"); ax.set_ylabel(\"height h [m]\")\n", + " stress_ratio = phys[\"stress_Pa\"] / SIGMA_ALLOW_PA\n", + " delta_limit = cfg.length_m * DEFLECTION_LIMIT_RATIO\n", + " delta_ratio = phys[\"deflection_m\"] / delta_limit\n", + " ax.set_title(\n", + " f\"h={h*1000:.1f} mm, b={b*1000:.1f} mm\\n\"\n", + " f\"stress = {phys['stress_Pa']/1e6:.1f} MPa ({stress_ratio*100:.0f}% of allow)\\n\"\n", + " f\"tip δ = {phys['deflection_m']*1000:.2f} mm ({delta_ratio*100:.0f}% of allow)\\n\"\n", + " f\"mass = {phys['mass_kg']:.2f} kg\",\n", + " fontsize=9,\n", + " )\n", + " plt.tight_layout()\n", + " if open_window:\n", + " plt.show()\n", + " return fig, ax\n", + "\n", + "\n", + "problem = CantileverBeamProblem(seed=SEED)\n", + "print(\"Created problem:\", type(problem).__name__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "That single class **is** our benchmark. From this point on, anything that worked on `beams2d` in Notebook 00 works on `problem` — same method names, same return shapes.\n", + "\n", + "Let's prove it by running through the same checklist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "---\n## 8 — *Use it exactly like `beams2d`*\n\n### 8.1 Inspect the interface" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(\"Design space: \", problem.design_space)\n", + "print(\"Objectives: \", problem.objectives)\n", + "print(\"Condition keys:\", problem.conditions_keys)\n", + "print(\"Conditions: \", problem.conditions)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you squint at those four lines next to the same four lines from Notebook 00, you'll see the same API — we just wrote a different problem behind it." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "### 8.2 A feasible design — the scoring function + validity test agree\n\nLet's pick a sensible beefy cross-section, check it obeys the rules, and read off its mass." + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "feasible_design = np.array([0.06, 0.03], dtype=np.float32) # h=6 cm, b=3 cm\n", + "scenario = {\"load_N\": 1000.0, \"length_m\": 1.0}\n", + "\n", + "violations = problem.check_constraints(feasible_design, config=scenario)\n", + "print(f\"Checked {violations.n_constraints} constraints — {len(violations)} violated.\")\n", + "print(\"Objective:\", problem.simulate(feasible_design, config=scenario), \"kg\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "Zero violations, mass ≈ 14 kg. That's a valid benchmark entry." + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "### 8.3 An infeasible design — validity test catches it\n\nNow let's try a noticeably *thinner* beam and watch the benchmark reject it." + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "thin_design = np.array([0.04, 0.02], dtype=np.float32) # h=4 cm, b=2 cm\n", + "bad = problem.check_constraints(thin_design, config=scenario)\n", + "print(f\"Violations: {len(bad)} of {bad.n_constraints} constraints.\\n\")\n", + "print(bad)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The thin beam's stress is still OK, but its tip deflection blows past the `L/250` limit — so the benchmark flags it, loudly and independently of whatever objective score we'd compute.\n", + "\n", + "This is the *whole point* of having a separate validity layer: a generative model that happened to produce this design would score it low on mass (hooray!) but the validity check would veto it before it got counted as a win." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "### 8.4 A visual\n\n`problem.render(...)` gives us the canonical picture of a design — here, the cross-section annotated with its physics." + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem.render(feasible_design)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "### 8.5 The classical baseline via the class API\n\nWe already ran the random-search baseline as a standalone function in Section 6. Let's confirm `problem.optimize(...)` runs the same algorithm through the class and produces a similar curve — just packaged as `OptiStep` records instead of raw floats." + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem.reset(seed=SEED)\n", + "start = np.array([0.15, 0.08], dtype=np.float32) # start heavy and shrink down\n", + "best, history = problem.optimize(start, config={**scenario, \"max_iter\": 500})\n", + "\n", + "print(f\"Optimiser ran for {len(history)} steps\")\n", + "print(f\"Start mass : {history[0].obj_values[0]:.3f} kg\")\n", + "print(f\"Final mass : {history[-1].obj_values[0]:.3f} kg\")\n", + "print(f\"Final design : h = {best[0]*1000:.2f} mm, b = {best[1]*1000:.2f} mm\")\n", + "\n", + "curve = [s.obj_values[0] for s in history]\n", + "plt.figure(figsize=(5, 3))\n", + "plt.plot(curve)\n", + "plt.xlabel(\"step\"); plt.ylabel(\"best mass so far [kg]\")\n", + "plt.title(\"Baseline optimisation curve\"); plt.grid(alpha=0.3); plt.tight_layout(); plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A monotonically decreasing curve that plateaus at the lightest feasible cross-section the baseline found. That plateau is the **number to beat**: if your future ML method proposes designs whose mass is reliably below it (and still feasible), that's a meaningful claim. If it isn't, it isn't." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Putting it together\n", + "\n", + "Look back at the eight questions we had to answer. Here's the mapping we just built — but this time the right-hand column is **code we wrote ourselves**:\n", + "\n", + "| Researcher's question | What we wrote | Where it lives on `problem` |\n", + "|---|---|---|\n", + "| What problem am I on? | `class CantileverBeamProblem(Problem[np.ndarray])` | `type(problem).__name__` |\n", + "| What am I designing? | A 2-D `spaces.Box` | `problem.design_space` |\n", + "| Under what scenarios? | The `Conditions` dataclass with `bounded()` fields | `problem.conditions` |\n", + "| What does better mean? | `objectives = ((\"mass_kg\", MINIMIZE),)` | `problem.objectives` |\n", + "| How does this design score? | 3 lines of algebra in `simulate()` | `problem.simulate(design, cfg)` |\n", + "| When is a design invalid? | Two `@constraint` functions | `problem.check_constraints(...)` |\n", + "| Can I see a design? | `render()` draws the cross-section | `problem.render(design)` |\n", + "| What do I have to beat? | A random-search `optimize()` | `problem.optimize(start, cfg)` |\n", + "\n", + "The payoff: **every generative model in EngiOpt** — the CGAN from Notebook 01, the diffusion models, the VAEs — could, in principle, train on `CantileverBeamProblem` **with zero model-code changes**, because they only talk to `problem` through the exact API above. That's what the Problem contract is *for*: decouple the engineering problem from the ML method so the two sides of the research can move independently." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## What we deliberately skipped\n", + "\n", + "A real, publishable benchmark needs two more things we didn't do here:\n", + "\n", + "1. **A dataset of reference `(design, scenario, objective)` rows**, large enough for generative models to train on. 
The standard way to produce one is to run `problem.optimize(...)` thousands of times with scenarios sampled from the condition ranges, and package the results on HuggingFace under the `dataset_id` the class declares.\n", + "2. **Metadata for reproducibility** — which version of the physics you used, what units everything is in, which solver settings are baked in, how to cite the dataset.\n", + "\n", + "For a toy problem with a one-line simulator these feel optional. For a real benchmark they're what lets another lab trust your numbers a year later." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflect before moving on\n", + "\n", + "1. Which piece of the `Problem` contract felt the most obvious to write? Which felt the least? Why?\n", + "2. We put the stress and deflection rules in `@constraint` functions instead of inside `simulate()`. What would go wrong if we merged them into `simulate()` and returned \"invalid\" as a huge penalty value?\n", + "3. Imagine an engineering problem from **your** research — fluid flow, circuits, chemistry, robotics. What would its `design_space`, `Conditions`, objective, and one constraint look like? Sketch them in pseudocode.\n", + "\n", + "## Next\n", + "\n", + "You've now seen the full loop: **consume** a benchmark (Notebook 00), **train against** it (Notebook 01), **evaluate on** it (Notebook 02), and **build** a new one (this notebook). Those four steps are the whole workflow of benchmark-driven research in engineering design — everything else is detail." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "EngiBench312", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/slides/benchmarking-ai-for-engineering-design-dcc26.pptx b/workshops/dcc26/slides/benchmarking-ai-for-engineering-design-dcc26.pptx new file mode 100644 index 00000000..3f98ec98 Binary files /dev/null and b/workshops/dcc26/slides/benchmarking-ai-for-engineering-design-dcc26.pptx differ diff --git a/workshops/dcc26/slides/contact-sheet.png b/workshops/dcc26/slides/contact-sheet.png new file mode 100644 index 00000000..036a232b Binary files /dev/null and b/workshops/dcc26/slides/contact-sheet.png differ diff --git a/workshops/dcc26/solutions/00_setup_api_warmup.ipynb b/workshops/dcc26/solutions/00_setup_api_warmup.ipynb new file mode 100644 index 00000000..298a51a3 --- /dev/null +++ b/workshops/dcc26/solutions/00_setup_api_warmup.ipynb @@ -0,0 +1,577 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Welcome to the DCC'26 EngiBench Workshop!\n", + "\n", + "In the next 20 minutes you will **load an engineering-design benchmark, explore its data, and break its constraints on purpose**. No ML required yet — just Python and curiosity.\n", + "\n", + "> **Colab users:** click **File ➜ Save a copy in Drive** before editing so your changes persist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "Skip this if your local environment already has `engibench` installed." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + "\n", + " def _pip(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " _pip([\"engibench[all]\", \"matplotlib\", \"seaborn\", \"ipywidgets\"])\n", + " _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " _pip([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is EngiBench?\n", + "\n", + "\n", + "\n", + "EngiBench is an **open benchmark suite for engineering design** with ML. 
Three things it gives you:\n", + "\n", + "- **Standardised problems** — beams, heat sinks, photonic crystals, and more, each with the same Python API\n", + "- **Ready-made datasets** — thousands of optimal designs with their operating conditions, hosted on HuggingFace\n", + "- **Built-in evaluation** — constraint checking, simulation, and metrics so results are comparable across papers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exercise legend\n", + "\n", + "| Marker | Meaning |\n", + "|---|---|\n", + "| `PUBLIC FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n", + "| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1 — Choose a problem and set up\n", + "\n", + "EngiBench has many problems, all with the **same API**. Pick one by name from the list below.\n", + "\n", + "**Your task:** set `PROBLEM_ID` to one of the available problem strings (we recommend `\"beams2d\"` for this workshop)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-A\n", + "import importlib\n", + "import random, sys, os\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# Import workshop helpers from the installed EngiOpt package\n", + "import engiopt.workshops.dcc26.notebook_helpers as notebook_helpers # noqa: E402\n", + "\n", + "importlib.reload(notebook_helpers)\n", + "from engiopt.workshops.dcc26.notebook_helpers import * # noqa: F401,F403\n", + "\n", + "import engibench\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "print(\"Available problems:\", list(BUILTIN_PROBLEMS.keys()))\n", + "\n", + "SEED = 7\n", + "set_global_seed(SEED)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "PROBLEM_ID = \"beams2d\"\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if PROBLEM_ID is None:\n", + " raise RuntimeError('Set PROBLEM_ID to a problem name, e.g. PROBLEM_ID = \"beams2d\"')\n", + "\n", + "# CHECKPOINT\n", + "assert PROBLEM_ID in BUILTIN_PROBLEMS, f'\"{PROBLEM_ID}\" not found. Choose from: {list(BUILTIN_PROBLEMS.keys())}'\n", + "print(f\"\\u2705 Checkpoint passed — using problem: {PROBLEM_ID}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 2 — Instantiate the problem\n", + "\n", + "One line. Every EngiBench problem uses the same constructor — just pass a seed for reproducibility." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 83, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Problem class: Beams2D\n" + ] + } + ], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\n", + "print(\"Problem class:\", type(problem).__name__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 3 — Explore the API contract\n", + "\n", + "Every EngiBench problem exposes the **same fields**. This is what makes the benchmark fair — algorithms can only change the *method*, not the *problem definition*." + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Problem class: Beams2D\n", + "Design space: Box(0.0, 1.0, (50, 100), float64)\n", + "Design shape: (50, 100)\n", + "Objectives: (('c', ),)\n", + "Condition keys: ['volfrac', 'rmin', 'forcedist', 'overhang_constraint']\n", + "Dataset ID: IDEALLab/beams_2d_50_100_v0\n" + ] + } + ], + "source": [ + "print(\"Problem class: \", type(problem).__name__)\n", + "print(\"Design space: \", problem.design_space)\n", + "print(\"Design shape: \", problem.design_space.shape)\n", + "print(\"Objectives: \", problem.objectives)\n", + "print(\"Condition keys: \", problem.conditions_keys)\n", + "print(\"Dataset ID: \", problem.dataset_id)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Key takeaway:** `design_space`, `objectives`, and `conditions_keys` are the **contract**. Any method you build must respect them." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 4 — Load and inspect a dataset sample\n", + "\n", + "The dataset lives on HuggingFace and downloads automatically. Your job:\n", + "1. Grab one training sample's **design** (a 2D numpy array)\n", + "2. 
Build a **config** dict mapping each condition key to the sample's value\n", + "\n", + "We give you the dataset loading — you extract the fields." + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DatasetDict({\n", + " train: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 3880\n", + " })\n", + " val: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 728\n", + " })\n", + " test: Dataset({\n", + " features: ['optimal_design', 'volfrac', 'rmin', 'forcedist', 'overhang_constraint', 'c', 'optimization_history'],\n", + " num_rows: 243\n", + " })\n", + "})\n", + "design shape: (50, 100)\n", + "config: {'volfrac': array(0.2375), 'rmin': array(3.5), 'forcedist': array(1.), 'overhang_constraint': array(0)}\n", + "✅ Checkpoint passed — dataset sample loaded correctly.\n" + ] + } + ], + "source": [ + "# PUBLIC FILL-IN CELL 00-B\n", + "# Goal: extract a design array and build a config dict from one training sample.\n", + "\n", + "dataset = problem.dataset # <-- this is provided for you\n", + "print(dataset) # inspect the splits and columns\n", + "\n", + "sample_idx = 0\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "design = np.array(dataset[\"train\"][\"optimal_design\"][sample_idx])\n", + "config = {k: np.asarray(dataset[\"train\"][k][sample_idx]) for k in problem.conditions_keys}\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "if design is None or config is None:\n", + " raise RuntimeError(\"Uncomment / fill in `design` and `config` above.\")\n", + "\n", + "print(\"design shape:\", np.array(design).shape)\n", + "print(\"config: \", config)\n", + "\n", + "# 
CHECKPOINT\n", + "assert tuple(np.array(design).shape) == tuple(problem.design_space.shape), (\n", + " f\"design shape mismatch: expected {problem.design_space.shape}, got {np.array(design).shape}\"\n", + ")\n", + "missing = [k for k in problem.conditions_keys if k not in config]\n", + "assert not missing, f\"config missing condition keys: {missing}\"\n", + "print(\"\\u2705 Checkpoint passed — dataset sample loaded correctly.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 5 — Design gallery\n", + "\n", + "Eight random training designs with their conditions. Notice how different conditions produce very different structures." + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABjUAAAKCCAYAAACZA1vXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAzWNJREFUeJzs3QeYFFXW+P9LhiEPQ4YhSVRAkpEgGAATJsxhzTntu+qurznguu6a1lVXVsWc3l3MCUWiASQpoOQw5DDAkBmg/s+5++/+1T12V3fPDDNdPd/P86B9u6qrq6un76mu2/ecCp7neQYAAAAAAAAAACDNVSzrHQAAAAAAAAAAAEgGgxoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAABIO8ccc4xp3bq1CaOlS5eaChUqmPvuu6/I2/jd735nt5GuSmP/SuI4AgAAAMg8DGoAAABkiMWLF5urrrrKdOrUyWRlZZn69eubzp07m0suucR88803Zb17aeHJJ580o0aNKuvdAFIif7MywOP/V7NmTdOhQwdz0003mby8PJNpNm3aZJ566ilzwgknmJYtW5oaNWqYjh072j4u1uuVwS//8alatapp2LChOeKII8ytt95qfvrppzJ5HQAAACh5lQ/ANgEAAFDKfvzxRzNgwABTpUoVc/HFF5uDDz7Y7Ny50yxYsMB8+eWXpnbt2mbgwIGmvJNBDZkBIjMNDpRWrVrZY1+5ctFPtUeOHGmef/55U56VxHHMNDKA0adPH3t769atZurUqeYf//iHGT16tJk7d679nGeKH374wfzP//yPOfbYY80NN9xgcnJyzOzZs80///lP8+6775pvv/3WdOnS5TePe+CBB0ybNm3Mvn377MDIzJkzzYsvvmgHSH7/+9+bv/71r2XyegAAAFBy+IYAAACQAe6//36zY8cOewGve/fuv1m+Zs2aMtmvsJMLx6leKJZfiVevXr1YzyuDU/KvPCuJ45hp+vXrZ84666xo+9prrzX16tUzTzzxhPnqq6/M6aefbjKFzDibN2+eadeunXP/SSedZI4//nhzzz33mP/7v//7zeOGDh1qevfu7dz
3+OOPm+HDh5u//e1vdvbGHXfcccD3HwAAAAcO6acAAAAygMzIaNCgQcwBDdGkSROn/c4775hTTz3V5ObmmmrVqtlfQZ922mkxU7TIzAapcTFr1ixz3HHHmVq1aplGjRrZX1Hv3bvX7Nq1y/zhD38wzZs3txeh+/fvb3755ZeY6XPkwqukiZFf4cvzduvWzbz99tspvc6LLrrING3a1KaXkX277bbbzPbt2xM+Vp5/2bJlZvz48U6aGqnd4H+dM2bMMIMHDzZ169a1+xcZ3LjrrrvM4Ycfbo+V7PtBBx1k/vjHP9rBpES1IPz3ffzxx/bX9nKs5HXI/stxTFSzInLfli1b7MVseQ9kG0cffbT9Vbu2ceNGc9lll9m/C3nPBg0aZF9bKvVK5L2V/WvWrJlN/3PYYYfZmT/FfX8kfZDsW+TvQF7LUUcdZV555ZXA4yjkeMsv7uU5ZJ8kvdDXX38d85hFXuuqVavMeeedZ1OySWo2eX/nz5//m9cqzyUpjmQdGSzo2rWr3f90Ju+NkOPt53meee6550yvXr3s65G/AZmtFSsV3bPPPmvTPMlnWLYjx/bCCy+Mfjb85BjLsR47dqw58sgj7bZbtGhhHn30UbtcZkdcfvnl9j2VZSeffLI9/n75+fk2JZQMWMjfsPyNyn4+9thj0XXkfdMDGkL6oOzsbDtrI1my/nvvvWfq1KljHnnkkaT6CwAAAKQvZmoAAABkALn4J79q/s9//mPOOOOMhOs/88wz9kKi5KeXAY9FixaZF154wV4gnz59umnfvr2z/ooVK+yvo8855xz7S3G5sC2/fpbUQHPmzLFpguQC/4YNG2x6FxkgkYGNihXd39DIL6TlguJ1111n2y+//LK92CwXlBOlhJo2bZq9MC8Xm6+++mp7AVYGWp5++mkzefJkO1gRNLvhtddesxdSZVDif//3f6P3yy+3I5YvX26fQ37VfeaZZ5pt27bZ+1euXGn+9a9/2fvOP/98+7rl+f7yl7/YgYIvvvjCJOPTTz+1F5CvueYae1H/gw8+sMdLLrbfeeedSW1DLsjLPssv1WXgQt4H+fX6kiVLorNKdu/ebS/+yswdOa4yGCEDVpELwsmS9+b99983p5xyin1e+TuRvy9J71PU90cGcORvSY6p/B1IXQgZqJH9mzhxoq0BE0TeGzmO8jcmr0det8xQiLVPQv7eZKBNBj9GjBhh15dURMOGDbMXxitVqmTXu/76681LL71k07fJoInspwzSyMX7dCGDa/IZE/K3KWnnZCBABtjk2PvJ4NJbb71lP6+XXnqp/Zt444037LGXfkIGNSPkb1COj6S3igwYyN+7vPaff/7Z9hV+8jf/0Ucf2f5Djpekg5LPvwxQyMCUDEjIANHChQvt+y/ryICm/z2cMGGC/RzIwKH0H9JfjBs3LuEgkvytyHE45JBDUjp28rrk70T2b9KkSfbvGQAAACHlAQAAIPS+/fZbr0qVKp6c3rVv39679NJLvWeffdabO3duzPW3bdv2m/tk3apVq3rXXnutc3+rVq3sdt99913n/p49e3oVKlTwTj31VG///v3R+5966im7/ueffx697+WXX7b35ebmeps3b47eL7flvvr163s7duyI3j9gwAD7vH7dunXzOnbs6BUUFDj3/+c//7HbludIRLYp2463TLYzcuTI3yzbvXu3t2fPnt/cf9ddd9nH/PDDD9H7lixZYu+79957f3NfVlaWvR0hx+3ggw/2mjRp4mz3kksusevHuk+/P/K+yP3PP/989L5//OMf9r6HHnrIWTdyvz62sXzxxRd2XXlev9GjR9v79f4l+/7MmjXLth999NHA5491HD/55BN73xVXXOGsG7lf75O817Ge6y9/+ctv/kblb3Do0KFeOop8fmL9O/roo73Vq1fHPOb//Oc/nfsLCwu9Xr16ea1bt3Y+s7H6g6+++irmsZP75HP//fffO58P+RuW+2+88UZn/VtvvdU+5tdff41
+5mP9HSfrtttus49/8cUXnfvl70Tunzp1atzH/u1vf7PrPP3000V6bgAAAKQH0k8BAABkAEkDI7+Ul1+5yy+ZZQaE/ApeCunKr9QXL17srF+zZk37f7lGWVBQYH/9Lb/+l9Q7sVIZya/u5dfVfn379rWPv/HGG520P5L3X8iv3DVJmyRpnSLktvxaW1LWyK+045Ffi8sv+WWWhPziXPY38k/2Q15PUFqkVH7NLb9q1yQlT2QWiPyCX/ZXnltmCohYxywWmV3gT/0kx01SAknNk8iskERktolf5Bf6/uMtv6KXGQg333yzs+4VV1zhHP8gMkND6F/Oy2uQv5Oivj+R55c0SOvWrUtqX/yvS8hMCr8TTzzRdO7cOeZjZLaQzEBIdMxkv2TWUSppjUqbzM4ZM2aM/Sfvj7TluMtMHfmbjHj99dftrB15r/zvxebNm+2sG0kr5X/tkf5g//79tv+QdSWVnRyTWH/b0t9IKjb/50NmA0l/oI+17g8kZZikHJPtxkpvFURqaMiskiFDhsT8nCYi6aeE9HkAAAAIL9JPAQAAZAjJ/y+1K0SkdoSkkJGUPpJqRwY9Inn3JX3M3XffbQcSdH75WGl8Yt0nKZNiLYvcL6mRtFgXnmXgReiBF79IjY57773X/otl7dq1piTSeEXSEWmSNur555+3F77l4q+f/4JykLZt2/7mvkhqHzleUvcg1W34Hx8hKZak1oLenrz/8n4ls7/yfsiAgKSHivU+Srqzorw/UkdD0n9JbQOp3XDooYeaY4891g6aSa2RIPK6ZJ8k3ZImAy26louQ46ALjsc6Zk8++aRN2SSfIznGMtgkAwDyT6dR02RQqriDabomRiyyb5GBNCGfa/n8nHvuubamxZ///Gd7vxwHSdHUuHHjuNuS9yPy3kqaqQceeMAONEgqOL9Yfyux/o6T7Q/kdcqxlgE3WVf2XwaZZABG/g7ikZRjF1xwga29ITWBdP2UZEQGMyKDGwAAAAgnBjUAAAAykFw4ljz2cpFWfiktNQ2mTJlifzUvdSNk9oZc2JOBDbkYLL/UlouEt9xyS8wZA/Eu9Act+2+mmpIR2ZYUJ5dfaccSuXhaHFLYOBapWyHPLcWU5ZfocqFcLs5KXQipWaEHOeIJOo7JHq/SON4H+v156KGHbE2RTz75xA66yeCb1Ia4/fbbowWng6RyQTvZYy4DBDJzQC6ey4Cg1IB48cUX7edHbgcNOsjgTHHIrBUpal4UkdoQ/tof8rpk5tWbb74Z93GRmhRTp061f9cyUCSDIjLQILMp5BjLYEmsv+3i9gcyO0uOt7z/cqxlBobU+ZGaPW+//fZvHvv555/bWi4HH3ywnfFT1EEJmdUi9EwjAAAAhAuDGgAAABlMLkxKmhgZ1JAL8GL06NF24OLDDz+0v0b3k19TS2qYA0V+QS4XM/3mzp0b99ffEZHC5XLB1P9L9VQV5dfdkSLjkjbqs88+c361Lxdb05Hsq1yIl/fZP1ujsLDQznaQYt6JyPshF7Tnz59vLyb76RkRRXl/ZPuSukz+yewAuTgvhddlYKRRo0ZxX5fsk6Qy0rN+/DNHijNj4sILL7T/5CK8FL+WfZKC7jr9mp+kgyoOSfVUVPKeCpmZ4X8/5H2T4t+JZv/IwMe+ffvs37Z/loXM4Ep2BlJRyECQpEOTf/L8kcLm8v77Z+zIZ0xmcXTq1Mn+TRd18DI/P9/2fZJSSwZ3AQAAEF7U1AAAAMgAclFVaj1oO3fujNYyiKR5ivySWv+yf+TIkcVOo5PIc889Z3P2R8htSekkF9kHDBgQ93E9evSwvyyXdWOlqZLXLhctE5ELvMmsp8kxkwER/zGT54yk+0k3kjJJLhQ/9dRTv3mP/cc/SGTwSWZQ+EktBz2AkMr7I88fuRAfIemhIoMUQRfS5XWJJ554wrlfZlfESj2VLDlWUm/CT95veV0i0d+MDOQU519xZhlFap9IWqYImaUlgz9/+tOfEqZqi9cfjBg
xIukZSKnYsWOH/ecn+9CtW7ffHGvpu04//XQ7s+Lrr7+2g05FIduUQSlJPyWpz+LNyAIAAEA4MFMDAAAgA0jxaJllceqpp9q8+3LRLi8vz/4KW36xLRc55X4xdOhQu1x+GX3DDTfYC6oyk0MuDEtNiViDIyUlJyfHzhyJFPmVguaSDkvSDwVdaJQLzDJbQnLvy8VPSV0kswfk4ujChQvNf/7zH1ujQVJBBZFfrktKIUm7JRfRZdaFXCiPFEqO56yzzrIXiOXYSRocuTgqxzZSPDzdyK/f//nPf5q77rrLHh8p4iypd959912bZiiZ91hmTsixeeWVV+xFYUkrtWjRIrtdGcDwF9RO5f2RVEtXXXWVOfPMM+3Fahloknov8jcgfxtBqYGkILjslwzORAq1y8yTF154wT5vJL1QqmSWg8wckM+PDGTITBHZrgzCyecjMphS1iRVV6TmhRxbOW7yGZJ0THKx3v/3Kp8xSek0ffp0c/LJJ9vP3ooVK8x3331n35PI4JMMGsggkRxbeV8kzZYMksqxlMeUNOmPZABTnlf+juT4yoCUHGuZKRIpLP7jjz/agTUZbJHXIjNJNJlRo8l6v/76qx2QkQEyqR8kMzTkPZai97rwPQAAAMKHQQ0AAIAMIDUfJEXOpEmTzL///W/7q3NJsyIXeu+44w7nYr8MXMiFvzvvvNP+Glt+JX300Ufb3PYyyCF1BQ4UqZcgF2b/8Y9/RAsVv/HGG+b8889P+FgpKC0XKOXiuKTOklkBtWvXtimJ5PUFFRmOePjhh+0Fenl+OUZywVQuXica1JALobKuDIhIgeMmTZrY/P9ysTUyAyadSAox+WW77Lf8XchghgwYyH0y4KF/KR+PFGSWgRF5j+RCtwyMyQCFDOj4BzVSeX8k1ZIMDEmRetmuzJLIzc21f4+SeiiIDJ7I37dcwJdURfJ3LH/jctFaCrlLWqqikAE1qScjxyeStisyyCGDWVJDJR08/fTT0dvyuZVC4PJ3KIN0uqD7Sy+9ZNPLyYCPvCd79uyxf7c9e/a07Qj57MsxffDBB+12pJ6GDBZJfyC1d0pay5Yt7aCXDG7JLJPdu3eb5s2bmyuvvNL2VZHBTfn7igzgyKBtLLEGNe655x77fxlwlMEeSXMmzyef1chsEAAAAIRbBa8sKwoCAACgXBg1apS9qFicgsgoPhlAiMyWSdd6IEUlAy6S1kp+pQ8AAAAgc1FTAwAAAMhAUk9Fk9kTMkPl+OOPN5n0uj755BP7y/4wvy4AAAAAySH9FAAAAJCBJJ2PpO856qijbDoqqaUgaaOkpobUTgirBx54wKa5ktRKkmJt5syZNtVSgwYNbPoiAAAAAJmNQQ0AAAAgA51wwgm2dojUSpAaEVJ/QeppSFtqXYSVFJKWwvaPPfaY2bJli8nOzrZFx+V1tWjRoqx3DwAAAMABRk0NAAAAAAAAAAAQCtTUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAA
AAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqIHQat26tXn//fej7ZEjR5qmTZuaWrVqmRkzZpTpvgEA0gsxAwCQDOIFACAZxAugbDGogYxQWFhobrrpJvPuu++abdu2mR49epTp/kyePNl0797dZGVlmUMPPdR89913cdedPn266dWrl8nOzjb16tUzRx11lJkwYYKzzooVK8zw4cPtcvk3ePDgUngVAJCZMjlmfPbZZ6Zr166mfv36dp3jjz/e/Pzzz6X0SgAgs4Q5XnzyySemf//+Nh40atTInHXWWfY7hZ9cjGvfvr3dXt++fc2vv/5aCq8CADJPJseLcePGmQoVKtjBmsi/G264oZReCRAfgxrICGvWrDG7du2yF3Ji8TzP7Nu3r1T2JT8/35x88sm2k9+0aZO5/vrrbXvz5s0x12/VqpX5z3/+YzZu3GjX/8Mf/mBOOukks3PnTrt8+/btZuDAgTYg5eXlmQ0bNpiHHnqoVF4LAGSiTI4Z8qXlyy+/tMvWrVtnl51++uml8loAINOEOV5s2bLF3HHHHfb7w5IlS0ydOnXM2WefHV0+b948c8EFF5gnnnjCbnvQoEFm2LBhZu/evaXyegAgk2RyvBB169a1gzWRf88880ypvBYgCIMaKFNyEi0n0H7vvPOO6dSpk+30//a3v5l27drZX5sOGTLELF68+DfbkGl9sr5o0aKFXT8yFfCRRx4xRxxxhB2dnjt3rnn99dfNIYccYmrXrm1yc3PN3XffbZ/HH4guvPBCO2VQfgEro9WRC0XJGj16tGnevLm58sorTbVq1ez/mzRpYu+PpUGDBvYilYx8y75UqlTJBgnZFzFq1CiTk5Nj7rrrLrvflStXNn369ElpnwAgExAzEscM2Rf5JyLLly5dan89BgDlBfHCmPPPP98ObMsvamvWrGluueUW88MPP0QHLWSf5YdTcqGrevXqdp9lMHzixIkp7RcAhBnxInG8ANIVgxooU9J5Tpo0yY4IR7z22mvmoosusv9//PHH7bToVatWmYMPPticcsopv+lYZVrfnDlz7G2ZIrdo0aLoMhkQeOWVV+wFn44dO9qLQfIL14KCAvPhhx+aF154wbz55pt23f3799vty6CBBBuZETFixAhTseJ/PyZywh9J/xTr3/Lly+16P/30k/2lrJ+05f4gso2qVaua0047zVx88cWmTZs29v7x48fbwDh06FAbSCXtyKefflrMIw8A4UPMSBwzhGxblstFqptvvtn86U9/MlWqVCnGkQeAcCFe/JZ8p+jcubPdj1jbkzjRpUuXpLcHAJmAeJE4XgjZ/2bNmtlrUzLLb+XKlUU42kDJYlADZapx48bmuOOOM2+88YZty6+DxowZEw0gkpNQpu/JhRnpzCXQTJkyJentX3vttTZwyC9V5eKPDAx06NDB/sJVOvXzzjvP5gcUU6dONb/88ot57rnnbC5B6cAlt6yMbIuPP/7YTteL909G2SOdvQQUP2lv3bo1cF9lG7KOvO5+/fo5Uwcl6F199dVm7dq1diRfchwuXLgwhSMNAOFHzEgcM4RsO/I8Tz/9tOndu3fSxwAAMgHx4re/IpbvEPKL5IjibA8AMgXxInG8kFkoM2fOtK/9xx9/tDNLZPBFBmGAssSgBsqc/MJUgoV46623bNFT6YxlhFum60VIRy4jw7rAXZBIpx7xxRdf2O1LOifJCfj888/b0W+xbNkyO0W
vRo0axXo9MmVPchL6SVumFyYizy1TDSWAyK8FItuTfZZf48ovqOT/MltDcqYDQHlDzAiOGX6yjeuuu85ceumlNj8uAJQnxIv/+vnnn+1FNMl/fvzxxxd7ewCQaYgXwfFCUldJyiwZmJHbMrtk1qxZZv78+cXaT6C4GNRAmZOCdBIUpk2bFp3mJ2Ram+QBj9izZ4+d8if3JysyTS/y+DPOOMPOeJCpctKpX3PNNdH8hZKjXO6X4k6xSOcuwSHev8hUv27dutlRbD9pxysYFYvkPl+wYIG9LQXCAQD/RcwIjhma7K/so//YAEB5QLz47wUq+QWy5HSXQXA/vT2JJZLuJJX4AwCZgHgRHC80mWUCpAUPSAOXXXaZN3jwYK9GjRpeQUGBvW/UqFFeixYtvDlz5ni7du3ybrvtNq9Tp05eYWGhXd6qVStv9OjR9vaSJUskCnibNm2KbtO/XMh2K1as6H300Ue2/f3333sNGzb0hg0bZtv79u3zevbs6V166aV2O/I8EydOtM+dio0bN3r16tXz/vWvf3m7d++2/8/Ozvby8/Njri/7M2vWLPt827dv9x5++GF7HBYuXGiXy/+zsrLserKP8n9pR5YDQHlDzIgfM9566y1vwYIFdv9kv66//novJyfH27x5c4pHGQDCrzzHi9mzZ3uNGjXyXnjhhZjLf/31V/ud4pNPPrH7cu+993rt27ePHgcAKE+IF/HjxdixY73Fixd7+/fv9zZs2OBddNFFXteuXb29e/emtF9ASWOmBtJmup9Mw5PUSpEpcXLfjTfeaIshyRQ3md720UcfOcWKUiHb/cc//mGuuuoqU6dOHfPwww+bc845xxlBl+3v2LHD5jyU6YB33XVXynkCpZi3bOepp56y0wkln7m0JSeikNFz/yi6TDUcPny4zXEoUxMlf+Mnn3xi2rVrZ5fL///v//7P3H777Xa/ZZ/+/e9/R5cDQHlDzIgfM+TXZDJdXPZf8vVKW9aRbQNAeVOe48Vf//pXs379enPrrbfG/CWv7Mvrr79ubr75ZhtTJFZI0dqiHgcACDPiRfx4IXU2+vfvb++TNFRSKF3qe0g6KqAsVZCRjTLdAwAAAAAAAAAAgCQwUwMAAAAAAAAAAIQCgxoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1kNEOPvhg8/HHH5f1bgAAQoCYAQBIBvECAJAM4gVw4DCogYw2Z84cc/LJJx+w7e/cudMcdNBBpl69eoHrFRQUmPPPP9/UqVPHNG7c2Dz44IMpLQcAhDNm7N6921x55ZWmTZs2pnbt2qZTp07mpZdeirv+8uXLTa1atZx/lStXNqeeemp0nWOOOcZUq1bNWWfVqlUlut8AgPSOF4LvGABQPq9J3XjjjaZly5a2f2/evLm55ZZbzJ49e+KuT7xAJqpc1jsAJMvzPLN//35TqVIlky7uuece06pVK7Nhw4aEASc/P99erFq3bp057rjj7OMuvvjipJYDAMIZM/bu3WuaNm1qvvrqK9O2bVvzww8/mKFDh5oWLVqYE0444Tfr5+bmmm3btkXb8uWkWbNm5txzz3XWe/TRR+2XFwBA+YwXgu8YAFD+4oW47rrrzJ///GdTs2ZNez1q+PDh5i9/+Yu56667Yq5PvEAmYqYG0lrr1q3NI488Yo444giTlZVlf636zDPPmC5dutjO+6KLLjKbNm0y55xzjh1R7tGjh/n111+dx7///vv29qhRo8yhhx5qR5wbNWpkR5+ffPLJIu/btGnTzOeff27uuOOOwPV27Nhh3n77bfPQQw/ZGR0dOnSwAePFF19MajkAILwxQ573gQceMO3atTMVKlSw+zZw4EAzadKkpB4v+yNfns4444yUnxsAkLnxgu8YAFA+44Xo3Lmzff7IYEvFihXNggULYq5LvECmYlADaU86/ldeeSX6y9UPPvjAntxLh/3ll1+aAQMGREeVJUDcfvvtgVP/JBCtXLn
SvPPOO+a2224zixYtsstkm9KBx/snI+H+X1LJ9PB//OMfpmrVqoH7P2/ePPtLW9m3CLn9008/JbUcABDumOG3a9cuM2XKFNOtW7ekXo98mbjgggtM9erVnfvlS0d2drb94vTqq6+mcIQAAJkQL/iOAQDlO17ITA1JQysDJLNmzbL7EAvxApmK9FNIe9dee63p2LFjtP2HP/zBXsgREjxk6l/fvn1tW6bcXXXVVXG3lZOTY/7nf/4nmpNcRs1nzpxpfxEl29i8eXNS+/TYY4/ZC0n9+/c348aNC1xXAp+MoMuIfoQEpK1btya1HAAQ7pgRIb+iuuKKK0z79u2TmnmxbNkym4ZEppL7ya/F5Ndh8oVo7Nix5uyzz7b5108//fSU9gcAyrOwxwu+YwBA+Y4Xf/zjH+2/X375xbzxxhumSZMmMdcjXiBTMVMDaU/yi/vJFL0IuaCj2/5c5Jp/XSEdd6od9cKFC83zzz9vBzaSISPnMp1PZndEbNmyxV6ASmY5ACC8McN/gUp+XSW/hJIp6DJFPJGXX37ZDqB3797duf/II480devWNVWqVDGDBw82V199tf2lFwCg/MQLvmMAQPmOF/5UVPJ94Xe/+13M5cQLZCoGNZD2krnwUxImTpxoO/N4/6655prolMC1a9faPIMyyj5s2DBTUFBgb0tBP01G9OXCk0wHjJCR+K5duya1HAAQ3pgRuUB1/fXX2xghU9RlQCIRqaMhgxryS910ec0AkEnCHi/4jgEA5TdeaIWFhXFrahAvkKlIPwX8//r16xc4oh4haT6OO+64aPu7776zF52k05dchpqM1EvRqLvvvtu89dZbZt26debvf/+7LQ6VzHIAQHhjhrjhhhvM5MmTbaqo+vXrJ/WYMWPGmA0bNpjzzjvPuV+mpH/77bd2unq1atVsCkSZPThy5MgivQ4AQDjjBd8xAKB8xgtZ57333rOpZ2Xwe/bs2bbenszgjoV4gUzFT/uAFEmH36JFi+i/hg0bmgoVKtjbkaLhQ4cONSNGjIg+5plnnrHBRtY5+uijzeWXX24uvvjipJcDAMJJ6mI8++yzNo1Iq1atYv7SSseMSIHws8466ze/0pVfYd1///02Z65c8Lr11lvN448/bvP3AgDKV7zgOwYAlD9y/enNN9+0dTgkRZRkDznppJPMk08+GV2HeIHyoIInc1wBAAAAAAAAAADSHDM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFBgUAOlZv369WbQoEGmTp06Zvjw4WW6L7fccov53e9+Z28vX77c1KpVy2zZsqVM9wkA8P8QMwAAySBeAACSQbwAMguDGig1//znP02lSpXM5s2bzXvvvWfSRW5urtm2bZupW7du4HpLly41FSpUsPtfnGMgz1ezZk1z0kknmdWrVweuP3fuXDN48GBTu3Ztk52dbS6//PLosvvuu89UrlzZBr/Iv3feeafI+wYA6YSYQcwAgGQQL1KPFxHnn3++fe6ZM2c6999///2mcePG9sLfBRdcYF8HAIQd8SK1eDFq1Ch7vPzfH/7yl78UeXtASWNQA6VmyZIl5uCDDzYVK6b+Z7d3714TdmPHjjV33HGHDZ7r1q2zXxTkS0I8q1atsr8iOPvss+36Ehyuv/56Z52TTz7ZBr/Iv3POOacUXgkAHHjEDGIGACSDeJFavIj45JNPzNq1a39z/8svv2xefPFFM3HiRPvr4Y0bN5qbbrrpAO09AJQe4kXq8aJr167O94fbb7+9WNsDShKDGigVMrXv1VdfNc8++6wd3ZUT5ddff9107tzZ1KtXz/Tt29dMnz49uv4xxxxjO8sTTjjBjvh+9tlnpqCgwNxwww2mVatW9ldDffr0MXl5eXZ96VxlmYwQN2rUyFx88cXO1L0JEybYzlie+4wzzjBbt26NO9o9ZswY061bN/tLV+mUr732Wnv/YYcdZv/fokULu5033ng
jpWMgXxAuvPBCc/jhh9vX9Mgjj5jx48ebxYsXx1z/iSeesBeo5Je2NWrUMNWqVTM9e/ZM6TkBIIyIGcQMAEgG8SL1eCFkP2+99Vbz/PPP/2bZSy+9ZAcxOnToYI/hgw8+aN566y2zc+fOlPYLANIJ8aJo8aI0twekzANKySWXXOLdfPPN9vb48eO9WrVq2f/v2bPHe+KJJ7yGDRt6mzdvtssHDBhg2z/88IO3f/9+b8eOHd7pp5/uDR482Fu5cqW3b98+b/r06d769evt+sOHD/fOO+88b9OmTd62bdu8c88917vwwgvtsvz8fK9u3bre888/7xUWFnoffvihV7VqVbs/YsmSJZ58FOSxomnTpt6rr75qb8u2Jk+eHHO9CNl2vH9du3aNrtetWzdv5MiRzmObNWvmvf/++zGPV58+fbwrr7zSO+qoo7zs7Gyvb9++3vfffx9dfu+993q1a9e2y9q3b+/deeed3s6dO4v9PgFAOiBmEDMAIBnEi9Tihbj++uu9Bx54wN6W554xY0Z0WZ06dbwxY8ZE23IcZZ2ZM2cW6f0BgHRBvEgtXrz88ste9erV7XFo3bq1d+211zrPXZT4A5SkyqkPgwDF99prr9kR3f79+0eLJD333HN2GrTkdhXy/8hItIyIjx492ixbtsw0a9bM3tejR49osad///vfZsOGDXaEXTzwwAN2WqHkAPz444/tY66++mq77JRTTrG/Zo2nSpUqZuHChXa7DRs2NEcddVTga0k2n6GM3Ef2L0La/hF6v/z8fPurqM8//9z+AuCFF16wqUPmz59v6tevb39pcMUVV9jXJnnU5XjKczz11FNJ7Q8AhAUx47+IGQAQjHiROF58++23Zty4cc4vkoO2J/udlZUVd3sAEEbEi8TxQo7Nzz//bNq2bWtf95VXXmkuueQS88EHHxRpe0BJI/0UysSKFStM69atnfvatGlj74+QaXsR0oFKKg3/ff6pevv377ePlw5U/skFHcmTuGbNGptnXKYH+um2nwSq2bNnm44dO9og9e6775qSINMD/dMPhbRlSmG89U877TRz9NFHm6pVq9qpjNWrVzffffedXS4BUqYdyus85JBDzIgRIyj6CiAjETP+i5gBAMGIF8HxYs+ePeaqq66yF+4kViSzPckjv2PHjrjxBwDCiHiR+PuFDGYcdNBB9nXIa3v66aftAI3EhKJsDyhpzNRAmZALK9Lx+0lb7o/wF2+SDn/37t02X2HLli2dx0lb1pVAIb8i0mREXAKQnxS9kzyHsUgOchlll6D0/vvv26KrAwYMiFtMSjryeGS/58yZY29LTsSZM2dGl0UKuUpexVi6d+9u9yFZRSl2BQBhQMwgZgBAMogXwfFCXssvv/xiTj/9dOf+gQMHmrvvvtv8/ve/j27v2GOPtcvktlzIkxobAJApiBeJv19okef/b+bC4m8PKC6+0aJMyDQ/KWo0efJk++ufv//972bjxo3mxBNPjLm+FEcaNmyYueaaa2wnKZ37jBkz7GOaNGlif50qv0qV6X5CRsNldFucdNJJZuXKlWbkyJH2uWQ64dixY2M+j/x6SaYhbtq0yXbYkal0lStXttP+5L5FixY5j5Epd/H+RYKHuPTSS20hqilTptiR7TvvvNMGJhn9jkWm9sm0vh9++MHs27fPFvKTIBqZeiivT16/mDdvnt3emWeeWYR3AwDSGzGDmAEAySBeBMcLufAmF9bkIlTkn5CZexJHItuTX+MuWLDA/uL2nnvusSlYatSoUcR3BQDSD/Ei8feLTz/91L5WITNYbr75ZjNkyBBbFLwo2wNKGoMaKBPS0UnQuPzyy02DBg3M22+/bT777LPf5OPze+WVV+yJeO/eve16Ekx27txpl0mewsgUvzp16ph+/fqZadOm2WXZ2dn2Qo/kDZd1/vWvf5kLLrgg7vO8+eabdoqdTJm78cYbbVv2UU7k7733XjN06FC7Hbk/FZIz8ZFHHjFnnHGGDUY
yii9BNEJuS3qQiL59+9pjdO6559rne/XVV23wixyj9957z05HlIAi+zR48GDz17/+NaV9AoAwIGYQMwAgGcSL4HhRqVIl+ytk/z8hvxaOpAu57LLL7IUqSWcoy2WfqL8EINMQLxJ/v/jmm29s+iuZfXLkkUfawQoZcEl2e8CBVkGqhR/wZwEAAAAAAAAAACgmZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBopt3Lhxpl69emW9Gxll6NCh5tlnny3r3QCAEkW8KHnECwCZiHhR8ogXADIVMaPkETMQBgxqACXovvvuM6eddlqxt/PZZ5+Z6667Lql1R40aZQ499NAiP1dhYaG54YYbTP369U12dra58cYbzd69e4u8fqrbA4DyiHhBvACAZBAviBcAkCxiBjGjPGFQA8UinQXCfcweeughM2nSJDN37lwzZ84cM3HiRDNixIgir5/q9gCUD+nW94VBuh0z4gWA8tj3hUG6HTPiBYDy2v+FQbodM2IGisxDxlqzZo03fPhwLycnx2vZsqV35513eoWFhV63bt28V155xVl3yJAh3ogRI+ztrVu3etdff719TMOGDb2LLrrI27x5s122ZMkST/5sXnrpJa9du3Zeo0aNvG+++carW7euN3LkSK9FixZedna2d9ttt0W3vWzZMu+4446z+1GvXj3vxBNPtNuJuOSSS7wrrrjCO+ecc7xatWp5HTp0sNuM2LRpk3fWWWfZ5+jYsaP39NNP231Ixptvvmlfb+3atb3c3Fzv5Zdftvfv37/f++tf/+q1bdvWq1+/vjd48GBv0aJF0ce1atXKe/TRR73DDz/c7lP//v295cuXRx97++23e40bN7bbbd++vffRRx95o0eP9qpUqeJVqlTJq1mzpv0XeX2XXXaZfS9kfdn/6dOne0cffbR9bjku5557rrdhw4bo8w8YMMB74okn7O2g4yvbqVatmlexYsXoc8rxToVs87333ou23333XXusirp+qtsDUPaIF8SLZBAvABAviBfJIF4AEMQMYkYyiBkoKgY1MtigQYO8888/3waEpUuXel26dPEefvhh77HHHrMdesTq1attxxfpIKWjO++882zHvW3bNtu5XXjhhU4AOe200+zy7du32w5OOrBbb73V27lzpzd37lwvKysrGgTkMZ9++qldtmXLFhsM/M8vHax0rLL+3r17vQcffNB24BHy3EOHDrVBbNWqVV6fPn2SCiAffvih7Wy//vprb9++fd7atWtthyskgDZr1sz76aef7H79/ve/t8dHAqyQ5+/atau3ePFiu1yeX/ZTfPHFF7aTXLlypW1Lhz1v3jx7+9577/WGDRvm7Ic8rkaNGt7nn39u90OO2cyZM72JEyd6e/bssYG+X79+NojGCyBBx1eCYvfu3Z3nlG1L0In379prr7Xr5efn22O5YMGC6GPnz59v74ucNPglWj/V7QFID8QL4gXxAkAyiBfEC+IFgGQRM4gZxAwcSAxqZKgVK1bYD610ThFvvPGGHcGVTlgChqwjHn/8cRtsxLp162xnJR2BvwOQ9aVzjwSQGTNmRJdLR1ahQgXbMUZIgJBR51jksTKSK51ppIOVEXG97zJKLM8pzz116lRnlDWZACIj/ffff3/MZbJ/f/7zn6PtXbt22SA2efLkaAB57rnnostff/1175BDDrG3x44da0eyv/zySxsA/OIFEH2fJiPqBx10UNwAEnR8YwWQZMlJgxzL9evXR++TvwG5Ly8vL+X1U90egLJHvCBeJIN4AYB4QbxIBvECgCBmEDOSQcxAcVBTI0OtWLHCVK9e3TRu3Dh6X9u2be39TZs2NYMGDTJvvPGGvf/VV181F198sb29dOlSs3//ftOmTRtTr149+69Pnz6mYsWKZs2aNdFt5ebmOs9Xp04dk5WVFW3XrFnTbN261d5ev369Of/8803Lli3tev379ze7d++
OLhdNmjRxHitk+YYNG2y+P3lsvOeOZ9myZaZ9+/Zxj0/r1q2j7WrVqplmzZrZ++PtU2R/Bw4caO6//35z9913m5ycHHPmmWeaJUuWBO6L3ueFCxeaYcOG2eeUY3LhhRfa1xpP0PEtjlq1atn/b9myJXpf5Hbt2rVTXj/V7QEoe8QL4kUyiBcAiBfEi2QQLwAIYgYxIxnEDBQHgxoZqkWLFmbXrl1m7dq10fskOMj94qKLLjKvvfaamT17tpk/f77tBIV01BIsVq1aZTZv3hz9J9tq3rx5dFuyTrL+9Kc/mR07dpjp06ebgoICM2HCBHu/zBRKRDroKlWqmLy8vOh9y5cvT+p5W7VqZTvqWOQ4yPGI2LNnj33NkeOTyHXXXWe+//57uy8SfG666abA46Lvv+aaa+zxlMJFckxef/31pI5HMtsWUghJOvN4/+T5Rf369e1rnjlzZvSxclv+DurWrfub7SZaP9XtASh7xAviBfECQDKIF8QL4gWAZBEziBnEDBxoDGpkKOmcZPT2D3/4g9m+fbvt6B5++GFzySWX2OWnn366HTWW5XI7MpopI8GnnXaaueGGG6KjtDIaPnr06CLvi3SQMqIrI+wbN260I8rJqlSpkjn77LPNfffdZ0dXZV/+9re/JfXYq6++2jz11FNm/PjxdqR/3bp1ZsaMGXaZjEI/88wztgOXEfq77rrLHrPDDjss4XanTp1qvv32Wxt0atSoYUeoK1eubJfJrxDkuO7duzfhMZFRYhntluD42GOPmaKS51y9erXZuXNn9L5+/fqZbdu2xf33/PPPR9e99NJL7d+GHFv5N2LECHPFFVfEfb5E66e6PQBli3hBvCBeAEgG8YJ4QbwAkCxiBjGDmIEDjUGNDPbmm2/aTkVGh48++mhz0kknmdtvv90ukw5dRsK/+OKL6DS/iFGjRkWn+EkHJ53RtGnTirwfEjBkdFpGTGU/hg4dmtLj//73v9uRZ5kud8wxx9iAUrVq1YSPk0D4+OOPm+uvv96OyMrr+fnnn+0yec033nijOfnkk23QnDVrlvnoo4+igSBR5y+j4g0aNLCPldF0CVRi+PDh9pg1bNjQHsN4ZL8+/vhju65M+Yv8KqEoZNrmEUccYQOgPGeyvxqIkCmLRx55pOncubP9J+/RnXfeGV0uI+iRUfRk1k+0HED6IV4QL5JBvABAvCBeJIN4AUAQM4gZySBmoKgqSGGNIj8aKANvvfWWueeee8yCBQvKelcAAGmMeAEASAbxAgCQLGIGkB6YqYG0J4Hixx9/tPn95PZDDz1kR58BAPAjXgAAkkG8AAAki5gBpCcGNZD2JP+i5BuUHIsDBgyweRkl36CIV3RIcuYBAMoX4gUAIBnECwBAsogZQHoi/RQAAAAAAAAAAAgFZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAoVA52RUrVKhwYPcEQNqqWrWq065fv77Tbt26dfR29+7dnWW9evVy2l27dnXa7dq1i7vtKlWqFGOvUVaIF0D5VbNmTafdtm1bp33MMcdEbx911FHOsh49ejjtFi1aBG4bmYGYgbJ00EEHOe1TTz3VaQ8ePNhpd+vWzWk3adLkAO4dAD/iBRBb7dq1o7fbt2/vLDv22GOd9qBBgwLPvxs2bOi0K1bkt/BIX/x1AgAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAILNqagAov/bu3eu0d+/e7bS3bNkSvb127Vpn2apVqwJzNPrzP+o6GnpZ5cp0WQCQzvbs2eO08/Pznfbs2bPj1sioVKmS0963b5/Tzs3Nddq1atUq9v4CKN/Wr1/vtMeMGeO0N27cGHheq3OR+/spXYMOAIADff6tz70XLlzotFu2bOm0dazS11zq1asX91wdKGvM1AAAAAAAAAAAAKHAoAYAAAAAAAAAAAgFBjUAAAAAAAAAAEAokKAeQEL79+932rt
27XLaW7dujVtTY/ny5XFzMoqsrCynXa1atejtihUrBuZPJ6cjAKSXwsLCwHz0/ry+umaGvz5TrPpNOha1adMmbn0OhMfAgQPLehcQcps2bXLaS5cuddqbN2+O+1jd7/z8889OOy8vz2n/9NNPTrtXr15O+6STTorePvLII51lOTk5TpvzWABASfCfM+uYuGTJEqddt27duNdfhOd5cc+39bUc/VigtDFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBRIPwUgZTolSEFBQdz0UzVq1HDa1atXd9qVK1eO265QoYKzTLd1uhGm8QNAetHpCleuXBkzdaGOJWLnzp1Oe+/evXGfxz81XpCOKjzuuuuust4FhIxOjbFs2TKn/cEHHzjtCRMmJJ2OStPrzpgxI/C5/X3cihUrnGWHHXaY027fvr3Trl+/ftL7BZRHTZs2ddo6VbFOUwmEJZbp8+VE7SA6raJOP6U/J/razvbt2532tm3borfbtWvnLGvUqFFganF9/QYoaczUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQGbV1Khdu3ZgbjSd2xTAgaVzIe7bty8w93hQLvJU6c+7P89icbcVJFFORmpspIeGDRsGvm/kuwXK1p49ewJrVxQWFh6w5/Z//nWu+gULFpRY3KLGRngMGjSorHcBIZefn++0GzRoEHgeMn78+CLV10jmuSdPnhy9vXz5cmfZ1KlTnfZxxx3ntHv16uW0W7RoEb1dp06dYu0nkAn69evntMnXjzDz17LYtGmTs2z16tVOW9ct1XUzguh1ly5dGnc/YtXU8O+brofXqVOnuHFL1KpVy2nzmUVJY6YGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumhs5NHJQLjfoaQMnTdQh0TvQdO3YE1rnw50zXeROLy/+ZL059jZJGjY2y0bdvX6dNDSYgsQOZY1bHiw0bNjjtlStXOu1169YdsHgRROfwXbJkSZGPX8WK7u92Wrdu7bSzsrKKtI8A0k92drbT7t+/f9KP9dfXKIkaGwUFBdHbc+bMCexr582b57QPP/xwpz1kyJC4y/RrBsqDM88802nznQJh5r8+s2LFCmfZjBkzAh+rz8937dpVpDiVTK09//r6sfrcXdfDy83Nddr++lDU10BJYKYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumRq9evQ7sngAIpHMd6vyF+fn5Tnvt2rVOe/369XHrcxQWFpbYfurcpsWpsZEoR7penqjtr7Ght4WSc/755ye9Lrk0gQNv69atTnvp0qVO+/vvv48bE3QsKc381YlqbPj7D92nV69ePbDdsmVLp12lSpVi7y+A9FC/fv3AGhv+fmzfvn3OskmTJpVojY2gbU2dOtVpL1u2zGnn5eVFb69evdpZ1qNHj8C6QfoYAJlYt0+fk/C9AmHir4uh+3993qrrXOhze//5ur7Wk4iux6HjjX+53g/9WF1TQ++LP1b562sIPr8oCq7qAQAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAILNqavTr1+/A7gmAQDpfoc7Lu3LlSqddo0aNuNvS+YO3bNliDpSSrLGh6RzqlSpVCmz718/KynKWkcOx5Ojc1Rr1TIDSpfvdBQsWBNZs8scXXddC5/AtTXpf/LVBdO7h7OzswHbt2rWddsOGDUtwTwGkE11fwp+Xf926dc4ynddc9zs5OTlOW/c9mzZtSroeh841vmbNGqf9zTffxK0ppGtqnHLKKYG1B/R+c96LMGrWrFlZ7wJwQK6T6Gs3+tqPvz5qrHN7//UdfxwqSv1Uvf6GDRvifmfQbf1YHef8r7lNmzbOMmpsoCi4sgQAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIDMqqnRq1evwDz5fuQ+A0qezqu4cePGwHzBOn+h//E7duwI3Pbu3bt
NOtTY0H2Jbleu7HZhVapUcdpVq1aNu1w/tlq1agn3Hclp1KhRWe8CgID4oOl44q9VsXbtWmfZzp07nfbevXtNWfHX99B58H/++Wen3bRp08BjovPi65obADJHgwYNorc7d+7sLNM5vvPy8px2p06dAmtb+NefOnVqYM0Mff6t+WtyzJw5M24/Has2iO67e/fu7bTbtWuXdIwAAJQ8/7WNevXqxe2jY10z0bUs/LVEFy9eHBgfEsUezX9dSdeK0s+lr/UEXTfWqLGBomCmBgAAAAAAAAAACAUGNQAAAAAAAAAAQGaln9JTgUg/BZSuwsJCp52dnR2YakmnkPJPWdyyZYuzbPv27YHPpVNZlVY6Kt2X+KdVxkoZVaNGDaedlZUVN72IPl562zo9FQCEle7fcnJyAqe4d+/ePXp79erVzjI97Vy3y4rejwULFjhtnV6lbt26gfGkbdu20ds1a9YswT0FUNb855fNmjVzlnXt2tVpz507N3Bbev2BAwdGb3fp0sVZNm7cOKc9a9asIvenet3Jkyc77eXLlzvtnj17Ou0hQ4ZEbx922GGB6fr0+TUAoGTp89DGjRs77YMPPjjw3N6fNlWnUJ03b57T1uf2/nSuqdLXlZYsWVJi14Vat24deO5esSK/0QczNQAAAAAAAAAAQEgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKSSeN13nZAJQunWOwSpUqTnvv3r2BuRE3bNgQ87auYxGrHoeuuVFar1Pvl86bqOti6Jy/tWrVctr+vOj++hqxjqd+rM5bCQBhpWtEtGjRIm7e3kWLFjnL1qxZk1JNptKyZ8+ewHzBP//8c2DuYt3H+3Pu67py1NgAMoeuH9G7d2+nPXHiRKedl5fntJcuXRr38ToHeqdOnZz2l19+6bRnzJgRd9uJ6m0kqnek99uf91zXDfHXBdF1lmLV9QMAFI+uJaqvv+bm5jptfS3Dv76uPaHPW2fPnh1Yg6mgoMAcqBob/ms9+vrWvn37Ar9T6Bobul6evp6D8oGZGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKHAoAYAAAAAAAAAAMismhoA0ivPos6jqPPbNmvWzGlv3Lgxbk0NnXdX17LQ+Qx17vIDZf/+/YG529evXx9YU0Pnj/S3dT51nYNRH2+9LWpsAAgr3b/p+OHPWdutWzdn2bJly5y2jie6XVZ27twZmPdeHwPdrly5ctz6Ta1atXLaOp4ACI+srCyn3aFDh8B6Ejr3+HfffRe3psagQYOcZYMHDw6ssaFraowZMyZ6e+rUqc6ylStXBvZ5mj7XnzJlStzXNH/+/MD97tGjR9w+Uec4BwCkLtG1CH2tx38dRK+r41yiunL6nFnXyUiFfqx/27qmhr7mpOOabrdt29Zp5+TkxL0uhMzFTA0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAjU1gJDy5/sWtWvXdtqNGjVy2i1btozezs/Pd5bptq6psWvXrsB8hzof4oGin7egoMBpr1mzJrDuiL+tc6Trmho6t2TFihUDc1Pq5QAQFjq3bpMmTeLml+/SpUtgbSOd71bXQiorej+WLFkSGFP9+Yjr1asXGG/9xytWHmQA4dGiRQunPXDgQKf966+/Om1dj+Lbb7+NWzPjoIMOctq6/kRubq7Tbt++ffT2xIkTnWVfffWV054+fXpgDQ3Nf26v86frx86bNy+wpsapp54avd23b19nWYMGDZw2/SMAlPy5e8OGDeNe50h03SPRdQx/jChOfQ39eB17dN3WHTt2BF770ef2HTt2jHturq/dEIsyB1fhAAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArU1ABCSucB1HkV69at67SbNm0aN1fuxo0bnfbWrVsDc6Tv3bs3bj7D0qqvIXbv3u209etasWJF3PyROrekPn6JclHq/Ou6fgcAhJU/frRt29ZZ1qtXL6e9du1ap61
z7ebl5QXWRiorOg+vzuvrr6PRvHlzZ1lOTk7c+huiTp06JbinAEqT/vz27NnTaffv399pv/nmm0570qRJ0dtdu3YNrC+ha2pkZ2c77T59+kRvt2rVylnWrl07p/3555877RkzZjjtRHUzgpbNnDkzcFv+2krr1q0LjBk6puhjAABInb8eqK4Fp+NFotqhQdeddE264tTY0DUy9u3bF3itR1+T0jU3/LWi9PUqfS6vz92psRFezNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBdJPARlCTxvUU+oaNmwYN+2Gnvqn29u2bYs7tU9P79PLDiSd6kq/Lj2N0J8yKlH6qRo1agS2g9JR6dRUABAm/lR9/tSFokuXLk57w4YNgekMdTzR66cLHef80+unT58emH4qKysrcJq/jscAwqNJkyaB6afmzJnjtKdMmRK9PWbMGGdZ586dU0q95D83bdmypbOsVq1aTrt9+/aB6ac+/PBDpz1x4sTo7fz8fJMKnZ7Kn3Jr+fLlzrIePXo47SFDhjjtww47zGnrmKPPvwEAqV0X0mnJ27RpU2LPVZLpqPS1nFWrVgWmsNXXnfxtva5ObZUoppKOKjyYqQEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQIPE7UA5yoovatWvHzQ+s8xfq3OK6vXPnTqe9e/fuuPkKdT7D0qyxoffbn19SHx+ds9d/vGLlRNfr+7enH1upUqUkXwEApBfd9+Xm5gbW2Fi6dKnTXr9+fdz4oWNPWfLXhhLr1q2L3p49e3ZgDQ0dT3QuY527mBobQHjommuHHHKI0z7uuOOc9i+//BK3roVut2rVKqUaG0Hr6rbetr+2nm5PmzYtMEe6rqGh+ZfrdXWNDb3tuXPnOu2BAwc67e7du0dvZ2dnB+4HAOC3dH2IOnXqhKLGhv8ak1i7dm3gdSb/+nv27AlcV1+z0jHTfz2H+hrpjZkaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAVqagAZSuf+q169ety8u82aNQusRVFQUBC43J8XXecr1HkUdd2LA0k/l38/N2zYEPf4xMp5XqtWraRrauh86npb1NgAEFZ169Z12u3atXPahx12WGAM8Oe41fU30qnGhr/2R15enrOscmX39Fn3+Xq5zsnvz9tbtWrVEtlfAKVD13Xo2bOn0+7Ro0f09oQJE5xl48ePd9rdunUL3FZx8njr/ezXr1/cfmj69OnOsq+//tpp//jjj0575cqVTnvr1q1x90PX2JgyZUpgzY358+c77cGDB8c8tvo1pFqTBADKq7DW2NDXmTZu3Bi3ToaulZeopsb+/fudduvWreMeH2pspBdmagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAAAAAAAAACAUqKkBlBP+Og66xkPDhg2d9q5duwLznOtciP7lu3fvdpbpfIZ6W6VZY8O/Lzr/77p16wJrbOic6Hq5Py96onzrWVlZgcsBIF356weJJk2aBOaI1zHB396xY4ezbMWKFYH5b8uKjls6P7Du82vXru20GzRoELcuSaNGjUpwTwGUNn/ebTFw4MDo7dmzZzvLZs2aFVirIjc3N/D8vDjq1asXt92yZUtnWadOnZz2d99957S/+OILp/3DDz/EraGh6e8YuraSfvy8efPi1tQ49dRTnXbfvn0D+17yoANA+tTYKE59jVjXmfLz8+MuS9TWNTX816j066fGRnrhShoAAAAAAAAAAAgFBjUAAAAAAAAAAEAoMKgBAAAAAAAAAABCgZoaQDmUKP+3zvGt84lv27bNafvzou/cudNZtmfPnsD8hTq3bmnRed51Dl+dN17X1NBtf00N/+1Y29LHX9fnAICw0DWadH553ddu2rQpenvDhg3OMp1b158bN53omLh8+XKnPWfOHKet89Xn5OREb9eoUSMwHgNIb/Xr13faPXv2jN7u06ePs+zrr78OrE2h+8+jjz4
6ertWrVqmtF5D7969A2t96D6tWbNm0dszZsxIqWaGppfPnDkz7rbWr18fWB+vV69eTrtt27ZxXzMAIHGNjQNVX6Mkamz462Lobenn0nVdE7X9qLGRXpipAQAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFCgpgaA39R8qFu3btxcubHqYPjzi+tc44lqbOzbt89pFxYWmtKg8yT664KIjRs3Ou2KFd0xYF03w18XQ+dI1zUz9GMrVaoU+H4AQFjovO86F3uXLl3i1qLQNTZ0PQ4dX9LF1q1bnfbixYud9rRp05x2dnZ23Pjgz/keq2YJgPTmrzdx/PHHB/YNc+fOddpfffVV3LzdHTp0MKVF137T3wOOPfZYp33QQQfFranx4YcfOu1JkyYF9vup1NvQ21q2bFnc+iZi8ODB0duHH364s6xp06ZOW5/LA0B55a8RoetHpGuNDX2tR8cP/VzFQY2NssVMDQAAAAAAAAAAEAoMagAAAAAAAAAAgFAg/RSA36RW0lOuGzRokHRKEJ2GY9u2bSmlo/JPM9TTBg8k/Vx6vxOln/Ifs9q1awemD0mUjsr/eJ2aCgDCRKcz9Kcp6d27d+A0c52OUE8VT5d0VHo/165d67Rnz54dNwboNC96irpOR5WVlVXs/QVQOn1enz59nGULFixw2q+++qrTnjx5stPu3r179HbDhg2dZfXr1zdlRT+3v61TDur9btSokdP+8ccfA1N06ZQhQct0Oy8vL24M+eWXX5xlAwcOjHvsddpAACiv9HlqcdJR6W3payI6paDu4/U1qVTo7xykowovZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFKipAeA3dB0HXRNC58fdtWtX3BznusbGjh07AnMh7tu3L25di7KssaFf14YNG+LmSNc5z2vVqhVYs0Tnj/TX79DHnhobAMJE929NmzaN3j700EMDa1ME1W8SK1asiPvYsqRrRy1dujRuH6/79GrVqjltHU9atmzptKtUqVLs/QVwYOj6EUcddZTTnjlzptP+4YcfnPbYsWOjtw855JC0qakRRNee6Nu3r9PWNTdmzJjhtL/++munPXXq1OjtVatWOcsKCgoC90XnX58yZUrcXO3z58932oMHD3baPXr0cNqtWrUKxfsBAOlaY0N/R9CP1f2qrrmka9glignpWGOD+hrFx0wNAAAAAAAAAAAQCgxqAAAAAAAAAACAUGBQAwAAAAAAAAAAhAI1NQAkVLmy21XUrl07bs5gnUtc50DXbb3+nj17orf37t0buG5p0vuia4X4czr662vEqqGhl+uc6P4c6zrPoq6x4c/NDgDpzt+HtW7dOrCGxqZNmwJrGfnz3+bn55t0peOev8aGjgf16tVz2g0aNAjMN5yTk1OCewqgJOl84V26dHHaxx13nNOeO3du3HoTuvZEWGo66D5Nt3WdoE6dOsWtM/LZZ585y77//vvAGhqavwagrqmh86nPmzcvsKbGKaecErd2iO6XyZkOoLxIpcaGvkai40OzZs0CazbpmOk/v9Z9eqr8jy/J+hr6GOjjQ7xIHVfDAAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArU1ACQMl0Dwp//0F8TI1YdjB07dgS2/fludX71ffv2OW39XKVJP3dBQUH09urVq51l1apVC8yxrGuW+Nt6ma6hkZWV5bTJwwggLGrVquW0c3NznfbBBx/stJcvXx63xoaOF7qORTrx12TSed11DQ2db17nE/bX5NDHE0B60Z9fXadBtydMmBC9PW7cOGdZ165dnXbPnj1DWXNN1wLp3bt33NohzZs3d5Y1bdrUaeu6I/786olqbuhlM2fODNzW+vXrnfa6devivoa2bduGov4JAJRmjQ19TUTXbdX1iXTNDV2Xzv9cug5GcWps6McWt8aG53kx62vEqrERllheljhCAAAAAAAAAAAgFBjUAAAAAAA
AAAAAocCgBgAAAAAAAAAACAVqagBImc7t589nqPOBJ6qxofOe+9t6XZ0zfe/evU57//79prT4cyHq2iD5+fmBx6tSpUqBNUr8+SV1PQ69rq65odcHgLCoW7eu027Xrp3T1nnK/TluCwsLA/PdpmuNDZ3Hff78+YG513WuXX+80Hnba9asWYJ7CqCktW7d2mkPHDjQac+dOzd6e9asWc6yqVOnBtYkatSokQkjfY7cpEmT6O1BgwYFxghdU+PDDz902pMmTYpZk6kofbV/W7o+kq5vcsIJJzjtPn36OO1mzZo5bfpuAJnKX/eiRo0agdcxdO1QXUNDx4sgpVljQ18n0vzXrPT1K31eoOuIpPKaywtmagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQC6acAFJt/GpyeMt2wYUOnvWvXrsCUIAUFBXGX6cfu27fPaW/bti2lqX8lyf9cej/80yyTSRnln1qpp2XqaZf+1COxpiTq5wKAdKX7s6ZNmzrtQw891Gn7U07pdIQ6HZU/NUiseFJWdIrGtWvXOu2ffvopMF740xvqWNOmTRunTUoTIL3o9HI6bdFhhx0Wvf3ll186y3Rbp6zo27ev065du7bJtOOl2zoFl/4O0rhx47jpuxYvXhyYbkrTy/3tvLw8Z9miRYuc9s8//+y0jz32WKfdo0cPp61T+wJAJtIpu3VaWn1eWxwHMh3V0qVLA69J+a9h6e8r+nuBTi2r415V9d2pPGKmBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBZOsASpSu6VCrVi2n3ahRI6e9c+dOp+2vo5GopobOQbh//36nvWPHjjKpsaGfR9fY0PkidS5Ef90Mffx0TnSdX13X0NA5lPX7AwDpSvd3Ome8v46GjiVbt24N7IdXrVpVZjWYgui4p/Py6roZ/nZQLImVb548vEB60Z/R4447Lm5dhrlz5zrtMWPGBOYe79Spk8l02dnZgXVFWrVqFb09ffp0Z9nXX3/ttH/88UenvXLlyrg1ABPV25gyZUpgjaeFCxc67SFDhsSttaLjoM6vDgCZQp/z1qlTp8g1NvS29PUYfb7t7+N13b7i1tjwX7PS17P09S7dPuigg5x2Tk5O3HN//ZozFTM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArU1ABwQFWpUsVp161b12k3bdo0bt5AXRMjUY5BnZNw3759geuXFl3rQ+dMX79+fdw6GTqnfKKaGvp463yR/sdTXwNAmOgaQ/786Dq/+Zo1a5z2hg0bAmts6Py36SJRjQ1/n69zDetc67rGkq5xVV5y7wLpSn+Ge/fuHb29YMECZ1leXp7T/u677+LWYRCNGzcud7UY6tWrF7fdokWLwJojP/zwg9P+7LPPnPb3338fWEfDb8+ePYHvna4Bpd/rHj16RG8PGjQo7t9IrNel+30AKI81NvR1D30NRX/H8J9vb9y4MfDcPBH9HcVfV0lfvwqqNxur3bFjx7jX1vR1o0w9z2emBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBmhoADiidu6969eqBOX39eWcT1dDQOQX18t27dzvtvXv3xrxd2nTuRJ1ncd26ddHbNWrUCDx+VatWddqVK1cOrKnhp/MsUmMDQJj4c+nqPLr+HOS6X41VU2PhwoXFypdbWvR++/Pyzpkzx1nWvHlzp92gQQOnnZWV5bTJvQ6kF3/dm6OOOspZNnPmzMCaGmPHjnXahxxySLmrqRFEv35dm8JfsylWf6prAs6YMSNu7aOgehuxluu2v5//5ZdfnGVHHHGE0x4yZIjTPvzww512eX/fAZSPGhv6e4G+RqJraDRs2NBp//TTT9Hb8+bNc5atWrWqWHX5/HWUli9fHnj9Sn8f0deNtqvlBx98cPR2y5YtA2uQZEqNDWZqAAAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAA
AAAAAIBSoqQGgVOkaD7qugz+foc4puHPnzrj5CMWOHTsC1/fXstD5CD3PM2VFv05/Ll2d/7FKlSpOO9Fy3fbXzdDvhc6vHlSPAwDKmr8/y8nJcZZ17tw5MF7o+OCv56Rzout104n/dem6INnZ2YG51HWNpnbt2gXGZwCly38O16lTJ2fZoEGDnLauqeOv8RCrnZubG7evKI90XbkmTZoEHm/dX/qP7wcffOAsGz9+fEo1NjT/+tOmTYtbb0Pk5eUFfsfo379/9Db1NQBkEn+NCF0/onXr1oE1NXQc9F+T0jXn/PU2YtVRSqXGhv6OsXr16sCasbq23lb1/ca//r59+wKPQabU2OCKFQAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBmhoA0iqHrT+Hd+PGjQPzwibKKahrbPgfr3MMbt++vcxqbOjn8u+Lzm2o27qmRtWqVZ12tWrV4uZn1sc+UY0NAEhXNWrUcNotW7YMrJmRqGaTP36sXLnSWbZ3716TLvy1otauXess+/nnnwOPkY4fOga0adMmepv6GkDZ0vm+e/To4bQPPfRQpz1hwgSnPW7cOKd98MEHR2/36tXLWabPD/Hb+hO6HVSjRLd1fROdjz2o5oaOP2vWrHHaY8eOddr6+068+hqCGhsAMoW+ZqJraOhrJPo8119HI9H5s36uJUuWFLnGhv5+smHDhsDlu1TNDf/3Hd3/62tO/vP8MNfYYKYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKJB+CkBa8adH8k/7i5WOSqeX0m2dUsqfXkSnItHTufVUvtLknxqoU2zpaYD+45XMVEr/cp1qRCP9FICw0n1f69atA+OFnt69fv366O2CggJn2aZNm0w60im0li9fHphSJlE6Q3+88KdW0csAlD7dpw0aNMhp//LLL0571qxZTnvKlCnR261atQo830Zi/hRTOq2TPr46/dQHH3zgtMePH590OipNr6u3FYR0VADKC30NJScnJ25K70TpphIpTjoqfY1KfwfZv39/3OtIidKpp5KOKtF1o7KUvnsGAAAAAAAAAADgw6AGAAAAAAAAAAAIBQY1AAAAAAAAAABAKFBTA0Da8ucyFPXq1XPazZo1C8wnrutR+Gts6HobiWps6HZp0bkO9Wvy530X1atXT7qmhs7BWFhY6LRbtmxZxL0GgPSiazTpfPRdu3Z12itXroybo1zHCx1P0oXeL53TV+cI9ufOFQ0aNEi6xlWq+YUBFI+ud9CrVy+nffjhhzvtL774wmmPGTMmbn84YMAAp60//0jtvdFtXaNIf7/R3zkmTZpUpFzsiWps6Odt0aKF0+7Zs6fTpp8HkKl0/+Y/J9a1JorLfz6eap+ubVGP9287UU2NRDVl/ecGOo7p2nplGR+YqQEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQoKYGgLRVsaI77lqjRo24+b7F7t27nfaOHTuc9tatW+PmGtf1OHTO9IKCAqedKEfhgaLrYOjXsW7dusBjVqlSpbh5FPXx6tevX7H3FwDSgc71qnPDdujQIW4ect3/65yzS5cuDVyeLnS8WL58udOePXt23LpKOvd6lSpVAuMxgNKl6yEcd9xxTnvBggVO+9dff43e/uqrr5xl7dq1c9qdO3cuwT2Fjke6/p2uGZiVlVVi+df9sW3GjBnOsmnTpjntVq1aOe2cnJxiPTcAhLGf1jXnSrLGhq53V5I1Npaq7yf6+pX+vqK/72zatCnueUHTpk0Da2+VZo0NZmoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFKipASA0/PUgRK1atZx2w4YNA+tkbNu2LW6+Qt3Wj9X1OtIlZ7qu/aFfx5o1a+I+Vr/GjRs3lvDeAUB6qlatmtNu3ry50+7atWvM2JFMe9WqVYG1kNKFv86UWLx4cdz86jqfsM6VS00NoGzpz2ivXr2c9vHHH++
08/Lyore///77wMc2adIksCYRTGCfr8+vJ02a5LTff/99pz19+vS4Ne9036u/G+3bty/pGoC6rtJnn30WWNujf//+gX9zAJCJdL8blhobW9RjdY0NfS3IX0NDf5/Jz893lh1yyCGBNZjq1q0bWCu3JDFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKFATQ0AoVW5stuF1a5d22k3btzYaW/fvj16e/PmzYE5B/3rxso5qHPWFhYWmnSga33o3Ij+PL/6NVJTA0B5VbNmTafdunXruLUn1q1b57Q3bNgQWGNDx5t0oePW+vXrnfavv/4avV29evXAOlNHHHHEAdlHAEXTqFGjwM+ov27Dt99+6yz75ptv4tYYEtTU+C3/+faiRYucZVOnTnXan3zyidOePHlyYMzIycmJ3j7ssMMC6wmuXbvWaS9YsCDutvXzfPfdd067RYsWceNirJzqAFAehLXGRkFBQWBtVv19xx/XEtWf1W1dY0OfN/i/V+jjmSpmagAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAAAAAAAAACAUqKkBIGNUrVrVadetW9dpN23aNG5OQZ1XVrf1+jqfuH+553mmrOjn1rnd/bVAdE2NdM37DgClzV+jSefG7d69u9NetWqV09Z5Z3W80Hln04WOCcuXL4/erlixYmA9jhtvvPEA7x2A4tSd69ixo9MeOHBg9Pbs2bOdZTNmzAhs5+bmOu3s7GyT6XQ/rmtXfP/999Hbn376qbNs2rRpTjsvL89p63ziPXv2dNq9e/eO3u7Xr1/ge/HTTz857ddffz1ufQ9/nb1YdZWmTJnitPv06eO0mzdv7rSptQKgPAprjY1dqharjmv+5XrdHTt2BNZx1bVaDzrooLjX5fTxSrXGBjM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoH0UwAyhk6PkZWVFXd6vJ4yrVMv6SlzerlOH+Kflq6n55UlPbXcP1Vwz549cZcBQHnmjycNGjRwlnXo0CFwyrVu6/SFK1asiN7eu3evSVdbt26NmYoqVmpDAOlNp4jq0aNHzNti/PjxTvubb75x2l26dImbHqlSpUomE+h+XKfomjhxotP+6quv4qbr0lq1ahWYbuqYY46Ju7xly5bOsnr16jntnJwcp71gwQKnvWzZsujtNWvWBH5nmD9/vtMeN26c0+7WrZvTJv0UAIQ3HZWn0pj7r3/p+KBT1ubn5zvt1atXx92W6Nq1a9w0ijqFvL7GpzFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKFATQ0AGUvn9a1Vq1b0dsOGDZ1lOpefzvun2zqPoL+mhs7D619W1vy5EgsLC51lug0AMKZ69epOu0WLFnFrT4h169Y57fXr18fNd6vjRTrxxwSdo1fHQADh0rp16+jtQYMGOct++eUXpz1r1iyn/cMPP8StEdGkSRMTBkG1jsS0adOc9meffea0p06d6rRXrlwZvV2tWjVnWb9+/Zz2Kaec4rR79eoVmG89lVoVOj4NGTLEaefl5cWtlZLou4+uFaLb/u9SuoYLAJRXQTU2SrK+hq6xUZz6Golipr6+pb8L6fiha/H5v2Poa1C67pS+bqcxUwMAAAAAAAAAAIQCgxoAAAAAAAAAACAUGNQAAAAAAAAAAAChQE0NAOVGlSpVYuYyFI0aNXLaLVu2dNobN2502jpH4Y4dO6K39+zZE7eORazlAIDw8NdnipX7tUuXLk578eLFTnv16tVxa1Oka3ygBhOQWfx1Gnr37u0sO+KIIwLrSYwZM8Zp+3OCDxgwwFmmz7fLUn5+fty6IPo16uXz588P3HbHjh2jt3v06OEsO/XUU5123759nXaDBg0C86+nQh/vww47LG7tkEWLFgXmQNeWLl3qtL/44ou4dVr031CNGjUS7jsAlAf+Pl732anW2PDUdab9+/fHrK8Rq+5FceiaGmvXrnXau3btSvr
7jV5XX2c79thjA/eFmRoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAADIrJoaOv+Wzt1VUnkgAeBA8fdbOq9fxYruGG/NmjWddnZ2ttPOyclx2hs2bIjbX+rn2rt3b9zchwCA9KbPc+vVqxc3r7jo1q2b0165cmXceKFz0up4AQAlrXnz5oH5qxcuXOi0582b57S//PLL6O127doF1hg6kDZt2uS0ly1b5rRnzJgRvf3RRx85y7799tvAWno673m/fv3i1s3QNTV0TPDXMznQ9PeXXr16RW/37NnTWbZ8+fLAGhu6/f333zvt9u3bx6011bZt25T3HQDK23eKRDU29HX4QlXjbufOndHb27ZtC7wmpetilCRdF0PXZPJfe9P76a89KKipAQAAAAAAAAAAMgKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAJlVU2PJkiVOm5oaANKd7qf87X379jnLduzY4bQLCgoC85pXrux2n1WqVInerlq1auC6lSpVctrU1ACAshV0XptoXd3H6xzmubm5cfPjrlmzxlm2ffv2wFhEvABQ0mrXru20e/fu7bQXLVoUWKtiypQp0duHHXaYs6xp06YHrJ5Efn6+0544caLT/uCDD+LW1ND5vbVDDjnEaev6E8OGDXPaffv2jRsDypK+LuOPPwMHDnSWzZ0712n/+OOPgfFH14CaMGFC3FoqWVlZTrtJkyZJvgIAKD8S1djQNZq2q+8N/tpSus6Uvt61bt26UvuOoWts+McX9Gvw1x5MBjM1AAAAAAAAAABAKDCoAQAAAAAAAAAAMiv91LRp0w7sngDAAU4f4l+up9ft3r3baW/bts1pr1+/3mnr6Xx79uyJm9qqvKXo09PR9ftSsWLFcntsgExOy5RJr8O/vn6s7uN1ekI9xVrHlxo1asRNyVG9evXAqeL+WAMAB0JOTo7TPuKIIwLTEn377bfR219//bWzTKch0mmcEp33+c+3ddorfzqpWOmmxo8f77R37doVvd24cePA1zhkyBCn3aNHj8C0giWZVutA8u+nfi906jB9vPX5vf94ip9++il6+8svv3SW6eNN+ikASEzHSJ0usmXLlk67ffv2cftwff1KXw/TaaD831/0d51U6e9S/u9K+ruNvu6WCDM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAkFk1NSZMmOC0yXsOIJNqaug8gYWFhYE5Bjdv3hw3R6HOga63rZ870+gcxhrxA0Bp1+9IJT7otl6m+3SdC3br1q1Oe/Xq1U574cKFcWOJ3nam1DQBEB6VK1eOm6NbDBo0yGnPnj07bp0LXX9D16LQ9Ts2btzotCdNmhS9/eGHHzrL9HMtXbrUBPHXjOjfv3/ga+revbvTzs7ONplGvxcnnHBC4PHU14MKCgqctj+ezZw501nWtGnTwOOdrFWrVgXGSL5jAMgkier6Fah+2N8H6lpPOt7q7ytB9PWr4l7P8j9eX2fT19ISYaYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumxvTp05POV0j+XwBlIZW+J1F+Qp0nUNfY0O1du3bFvB0r37p+rkzz9ttvl/UuAChninvuGfT4RHlkdTzYuXNnYM5af/5bXVND55XV2waA0qbrSfTo0cNp9+zZM3p73LhxzrJPP/3UaVevXt1pt2zZ0mkvWrTIaX/00UfR2xMnTgw8n27evLnT7t27t9M+8cQTo7cPP/zwwJoPNWrUMJmuTp06TrtPnz5Oe8mSJYHvjc7lHlSPY/To0U57xIgRpij8NVYE150AZLJEdf12qetOa9asid5eu3Zt4GOrVasWGJ/9265YsWKp1YhNtV9npgYAAAAAAAAAAAgFBjUAAAAAAAAAAEAoMKgBAAAAAAAAAAAyq6aGzqmoa2qQzxBAugvqp/QynScwUR/nX18/VucvzHQ657GOF/4cyDo/I4D0diBzqKarRP1/ovg
RVFcpUT2n8uCbb75x2nynANJbfn6+065Zs2bcc97vvvvOaesc37Vr13ba69atc9rLly+Pux9HHXWU0x48eLDTPvLII512hw4d4tYJgTENGzZ02rruyNSpU5326tWrnba/RpSuF6XbRfXvf/876TqvADJLSZ4fFqfvKM3z1ER1YPeqmLtjx46kavrFquPnf6x+rnT+fsLVJAAAAAAAAAAAEAoMagAAAAAAAAAAgFBgUAMAAAAAAAAAAIRCBS/JhGDkKwSA8qWo+SKJFwBQvhQnv/CgQYNKdF8AlF2NjWXLlgXWUtC11HSe7sqV3ZKfDRo0iN4+4ogjnGXnnnuu0z766KOddqNGjZx2lSpVQnmuWlb7umHDBqf90UcfOe1nnnnGac+YMaPItaiS1axZs5T+ngCUH/5+JZ36+ANZg2O/6vMKCwvjLktUM1bX5/C3y7JGbKLjx0wNAAAAAAAAAAAQCgxqAAAAAAAAAACAUGBQAwAAAAAAAAAAhIKbtBIAAAAASsk333xT1rsAoJQkqnlQqVIlp52dnR293apVK2eZzvE9Z84cpz137lyn7c+xXpr51tMpt3vQvui85bq9adOmwPon/m0fqBzyq1evPiDbBQCEEzM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQqGCl+TcwHSaNgkAOPCKOnWceAEA5UtxUo0QMwDES2nUoEGD6O2mTZs6y+rUqZNSX5JKX0O/9Fs6/dTKlSud9oYNG5KOCXzHAAAkI1G8YKYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoGaGgCAmMh3CwBIBjU1AADJ4jsGACAZ1NQAAAAAAAAAAAAZgUENAAAAAAAAAAAQCgxqAAAAAAAAAACAzKqpAQAAAAAAAAAAUJaYqQEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAUGNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEKBQQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADodW6dWvz/vvvR9sjR440TZs2NbVq1TIzZswo030DAKQXYgYAIBnECwBAMogXQNliUAMZobCw0Nx0003m3XffNdu2bTM9evQo0/2ZPHmy6d69u8nKyjKHHnqo+e677+Ku+8knn5j+/fub+vXrm0aNGpmzzjrLrFixIua6d955p6lQoYITOAEAqcnkmLF3717zv//7v6Zly5amTp065vTTTzfr1q0rpVcCAJklzPHC74UXXrDfIZ588knnfvlO0b59e7u9vn37ml9//fUA7TkAZLYwx4vp06ebXr16mezsbFOvXj1z1FFHmQkTJjjryPeN4cOH2+Xyb/DgwaXwKoBgDGogI6xZs8bs2rXLdO3aNeZyz/PMvn37SmVf8vPzzck
nn2xuuOEGs2nTJnP99dfb9ubNm2Ouv2XLFnPHHXeYvLw8s2TJEnsR6uyzz/7NerNmzTIfffSRHfkHABRdJseMxx57zA58fP/992bt2rWmbt265sILLyyV1wIAmSbM8SJi1apVNjbo1zBv3jxzwQUXmCeeeMJue9CgQWbYsGF2cBwAUH7iRatWrcx//vMfs3HjRrv+H/7wB3PSSSeZnTt32uXbt283AwcOtIMk8h1kw4YN5qGHHiqV1wIEYVADZUpOouUE2u+dd94xnTp1sp3+3/72N9OuXTs7YjxkyBCzePHi32xDpvXJ+qJFixZ2/chUwEceecQcccQRdnR67ty55vXXXzeHHHKIqV27tsnNzTV33323fR5/IJKLPzJwIKPP8mvYSEeerNGjR5vmzZubK6+80lSrVs3+v0mTJvb+WM4//3wbMGSKYs2aNc0tt9xifvjhB+cLhQS/K664wjzzzDOmatWqKe0PAGQKYkbimCGPk1+JyTZr1Khh7r//fjNmzBizdOnSlPYLAMKMePH/yMUs2R95rX6yz3KRSi50Va9e3a4jM/smTpyY0n4BQJgRL4xp0KCBHdiQGX2yL5UqVbKzTWRfxKhRo0xOTo6566677H5XrlzZ9OnTJ6V9Ag4EBjVQpuTizKRJk+xob8Rrr71mLrroIvv/xx9/3E6Lll8YHXzwweaUU075za+HZFrfnDlzolPiFi1aFF0mne8rr7xiO+SOHTvazlpGoAsKCsyHH35op2K/+eabdt39+/fb7UsHLcFGRp9HjBhhKlb878dETvgjU+1i/Vu+fLld76effrLT+/ykLfcnY/z48aZz5852P/yBtlu3bmbAgAFFOMoAkBmIGYljhuyX/4uRtCPPAwDlBfHiv/7v//7P7tPFF1/8m2V6e1WqVDFdunQhXgAoV4gX/49sQ35Ee9ppp9m40aZNm+j3DRmsGTp0qB3ckVRVn376aTGPPFB8DGqgTDVu3Ngcd9xx5o033rBt+XWQ/KI0EkDk16YyfU9+PSSduQSaKVOmJL39a6+91gYOGWmWzlk64Q4dOtgRaOnUzzvvPDNu3Di77tSpU80vv/xinnvuOZurXAKJ5JaVkW3x8ccf2+l68f7JKLuQYCXBwE/aW7duTbi/MsIvI/UyiBEhvwSQGRoybRwAyjNiRuKYIbM4nnrqKfulRrZ9zz332P2XL04AUF4QL4xNIXLbbbeZ559/Puby4sQfAMgUxIv/R7Yh68jr7tevn5POSgZirr76apveVr5/SF2/hQsXpnCkgZLHoAbKnIwAS6cp3nrrLVuUSDpjGeGW6XoR0pE3a9YsbhHtWCKdesQXX3xhty9T5yTPuJzky+i3WLZsWTRdR3FIShDJee4nbZmmF+Tnn3+2AU4GMI4//vjo/VdddZXNV6injANAeUTMCI4Zf/rTn+wXM/kiIl+Y5MuSPIf8KgwAypPyHi9kQOPyyy+3hcBLYnsAkKnKe7zwk+eW9FfyoymZwRLZnuyzzOCQWX3yf5mt8eWXXxZrP4HiYlADZU4K0klQmDZtWnSan5Dpbf4c4Hv27LFT/uT+ZEWm6UUef8YZZ9jR5ZUrV9pO/Zprromm6ZAcgnK/FHeKRS4eSWce719kqp+kiZo5c6bzWGnHKxgVuTglF6Ek36Iu6Pr111/bnOkS9OSf/DJAgu6tt96a9HEAgExBzAiOGfIrMpkmL1+K5PWfeOKJ9rUcfvjhSR8HAMgE5T1efPXVVzYeRL5DTJ482eZDP/PMM2Nur7Cw0KY7CYo/AJCJynu8iEViwoIFC+xtKRAOpCUPSAOXXXaZN3jwYK9GjRpeQUGBvW/UqFFeixYtvDlz5ni7du3ybrvtNq9Tp05eYWGhXd6qVStv9OjR9vaSJUskCnibNm2KbtO/XMh2K1as6H300Ue2/f3333sNGzb0hg0bZtv79u3zevbs6V166aV2O/I8EydOtM+dio0bN3r16tXz/vWvf3m7d++2/8/Ozvby8/N
jrj979myvUaNG3gsvvBBzeV5envNPjolsM972ACDTETPix4xVq1Z5S5cu9fbv3+/Nnz/fO/LII70//elPKe0TAGSK8hwvVq9e7XyHOOKII7z77rvPW79+vV3+66+/ellZWd4nn3xi9+Xee+/12rdvHz0OAFCelOd4Ifsza9Ys+3zbt2/3Hn74YXscFi5caJfL/yVeyHqyj/J/aUeWA2WFQQ2khXHjxtkAcN5550Xvkwsyjz76qNemTRvbIZ9wwgneggULostTDSDiueee85o2berVrl3bO+WUU7wbbrghGkDEypUrvXPOOcdeMKpbt643YMAAb8eOHSm/Hgk8Xbt29apXr+5169bNmzx5cnTZsmXLvJo1a9r/i9/97ndehQoV7H3+f5HlWqzXBQDlCTEjfsyQL0dt27a1X0Ryc3PtlxI5NgBQHpXneKHJcz7xxBPOff/5z3+8gw46yG7vqKOO8n755ZeU9wkAMkF5jhcvv/yy16FDB3tfgwYNvGOOOcYbO3ass71PP/3U69y5s12ne/fu3meffZbyPgElrYL8p6xniwAAAAAAAAAAACRCTQ0AAAAAAAAAABAKDGoAAAAAAAAAAIBQYFADAAAAAAAAAACEAoMaAAAAAAAAAAAgFBjUQEY7+OCDzccff1zWuwEACAFiBgAgGcQLAEAyiBfAgcOgBjLanDlzzMknn1zi273xxhtNy5YtTZ06dUzz5s3NLbfcYvbs2RNz3eXLl5tatWo5/ypXrmxOPfXU6DrHHHOMqVatmrPOqlWrSny/AQDpHTOSiQkFBQXm/PPPt9tr3LixefDBB0t8nwEApR8vInbu3GkOOuggU69evcD1EsUD4gUAlC3iBXDgMKiB0PA8z+zbt8+kg+uuu878+uuvtuOfNWuW/feXv/wl5rq5ublm27Zt0X/5+fk24Jx77rnOeo8++qizXrNmzUrp1QBA5glrzEgmJsggicQSGTSfOHGiGTlypHn11VdL4ZUAQOZJp3gRcc8995hWrVolXC9RPCBeAEDJIV4A6YVBDaS11q1bm0ceecQcccQRJisry85weOaZZ0yXLl1MzZo1zUUXXWQ2bdpkzjnnHDui3KNHD3vhyP/4999/394eNWqUOfTQQ+2Ic6NGjezo85NPPlmk/ercubN9/khgq1ixolmwYEFSj5X92b9/vznjjDOK9NwAgPITM7QdO3aYt99+2zz00EN2gLxDhw72S8iLL75YpO0BQHmUrvFCTJs2zXz++efmjjvuKFY8IF4AQPERL4D0xaAG0p50/K+88or9par44IMPzKRJk+wFoS+//NIMGDAgOqosAeL2228PnPongWjlypXmnXfeMbfddptZtGiRXSbblA483j/5pa3fn//8Z5sSRIKR/OpW9iEZEhguuOACU716ded+CSDZ2dk2CDIiDgDlO2bEiwnz5s2zqatk3yPk9k8//VTkYwYA5VE6xou9e/eaK6+80vzjH/8wVatWDdz/RPGAeAEAJYN4AaSnymW9A0Ai1157renYsWO0/Yc//MFe6BESPCpVqmT69u1r28OHDzdXXXVV3G3l5OSY//mf/4nmLJdR85kzZ5p27drZbWzevDnp/frjH/9o//3yyy/mjTfeME2aNEn4mGXLlpmvvvrqN2lHZORfRvoluI0dO9acffbZpnbt2ub0009Pen8AAJkRM4JignyZkl+Fya/EIuRLztatW5PeFwBAesaLxx57zA5m9+/f34wbNy5w3UTxgHgBACWDeAGkJ2ZqIO1JTQo/maIXIRd8dDsyeh6Lf10hHXdxO2pJK9K9e3fzu9/9LuG6L7/8sg08sr7fkUceaerWrWuqVKliBg8ebK6++mo7ag8AKH8xIygmyGwPmSIuv86K2LJlix30AACEN14sXLjQPP/88/ZCVTISxQPiBQCUDOIFkJ4Y1EDak9zjpUGKIUlnHu/fNddcE/exhYWFCfOjSx0NGdS44oor0uY1A0CmyZSYEe81ya/EZLB
DUlhFyK+7unbtWoxXAwDlT7rFC0k7snbtWpvLXH7JO2zYMFNQUGBv//DDD7/ZbqJ4QLwAgJJBvADSE+mngP9fv379AkfUI2Sd9957z6YBkV/Szp492+Y+l1/TBhkzZozZsGGDOe+885z7ZXrht99+a6ceVqtWzU4dlFH3kSNHFvs1AQDCFTMSxQT59ZcUIrz77rvNW2+9ZdatW2f+/ve/24KDAIDwxgtJNXjcccdF29999539MZRcWJJ6TFqieEC8AIBwIV4AqeHn4ECKKlSoYN58802b81Cm48mo+EknnWSefPLJ6DpDhw41I0aM+E2B8LPOOste1NK/2L3//vttfvX69eubW2+91Tz++OM2FyMAoHzFjGRiwjPPPGNjSYsWLczRRx9tLr/8cnPxxReXyesDAJQMuagk/XrkX8OGDW0MkduRIrD6O0aieEC8AIDMQ7wA/quC53ne/38bAAAAAAAAAAAgbTFTAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBQY1AAAAAAAAAAAAKHAoAYAAAAAAAAAAAgFBjVQatavX28GDRpk6tSpY4YPH16m+3LLLbeY3/3ud/b28uXLTa1atcyWLVvKdJ8AAP8PMQMAkAziBQAgGcQLILMwqIFS889//tNUqlTJbN682bz33nsmXeTm5ppt27aZunXrBq63dOlSU6FCBbv/xTkG8nw1a9Y0J510klm9enXcdUeNGmWPlwS3yL+//OUvRd4eAIQJMSO1Pv7ll182HTt2tPuVk5NjzjjjDPsFKeK+++4zlStXdmLKO++8U+R9A4B0QbxILV588803ZuDAgXa/6tWrF3Od+++/3zRu3Nhe+Lvgggvs6wCAsCNepBYv9u7dawdfmjVrZvetb9++Ztq0aUXeHlDSGNRAqVmyZIk5+OCDTcWKqf/ZSWcadmPHjjV33HGHDZ7r1q2zXxTkS0KQrl272uAW+Xf77bcXa3sAEBbEjNT6ePnV2eTJk+0vvFasWGHatWtnLrvsMmedk08+2Ykp55xzTim8EgA4sIgXqcULufAk8eHxxx+PO0j+4osvmokTJ9rB8Y0bN5qbbrrpAL4CACgdxIvU4sUzzzxjPvroI/Pdd9+Z/Px8M2TIEHPqqacaz/OKtD2gpDGogVIhU/teffVV8+yzz9pfh8qJ8uuvv246d+5sfyEkI77Tp0+Prn/MMcfYC/gnnHCCPfH+7LPPTEFBgbnhhhtMq1at7K+G+vTpY/Ly8uz6cnFGlskIcaNGjczFF1/sTN2bMGGCHSCQ55Zfr27dujXuaPeYMWNMt27dTO3atW2nfO2119r7DzvsMPv/Fi1a2O288cYbKR0D+YJw4YUXmsMPP9y+pkceecSMHz/eLF68uEjHtKS3BwDpgpiReh8vr1NmaAj5oiFf1hYsWJDScwJA2BAvUo8X8nwXXXSRHfyO5aWXXrKDGB06dLDH8MEHHzRvvfWW2blzZ0r7BQDphHiReryQ+4899lj7emWGy6WXXmpWrVplB7uLsj2gxHlAKbnkkku8m2++2d4eP368V6tWLfv/PXv2eE888YTXsGFDb/PmzXb5gAEDbPuHH37w9u/f7+3YscM7/fTTvcGDB3srV6709u3b502fPt1bv369XX/48OHeeeed523atMnbtm2bd+6553oXXnihXZafn+/VrVvXe/75573CwkLvww8/9KpWrWr3RyxZskSGme1jRdOmTb1XX33V3pZtTZ48OeZ6EbLteP+6du0aXa9bt27eyJEjncc2a9bMe//992Mer5dfftmrXr26PQ6tW7f2rr32Wue5U90eAIQJMSP1Pn7ixIl2O/K8lStX9v75z39Gl917771e7dq1vezsbK99+/benXfe6e3cubNY7xEApAPiRdG+E3zzzTd2W1qdOnW8MWPGRNtyHGX/Zs6cmcK7AgDph3iRWryQfr9nz57ewoUL7TG67777vCOPPLLI2wNKWuWSHyYBEnvttdfsiG7//v1tW/L0Pff
cc+aTTz4x559/vr1P/h8ZiZYR8dGjR5tly5bZfH6iR48e0WJP//73v82GDRuieWEfeOABO61Q6lJ8/PHH9jFXX321XXbKKafYNB3xVKlSxSxcuNBut2HDhuaoo44KfC3J5jOUkXudt1ba/hF6Pzk2P//8s2nbtq193VdeeaW55JJLzAcffFCk7QFAWBEzkuvj5Rdmsn15bf/6179Mly5dnF+nXXHFFfa1zZ071x5PeY6nnnoqqf0BgDAgXhT/O4Henux3VlYW3zEAZBTiReJ4IdeiDj30UHPQQQfZmRoya0RmrBR1e0BJI/0UyoTk+27durVzX5s2bez9ETJtL0ICR7Vq1Zz7/FP19u/fbx8vHaj8k2mAknpjzZo1dnqcTJfz020/CVSzZ8+2BVclSL377rumJMj0QP/0QyFtmVIYL4BI8JDXIa/t6aeftsFwx44dRdoeAIQVMSO1Pl7SUF1++eW2hsb27dvtffKlSqaqy+s85JBDzIgRIygUDiDjEC+K/51Ab0/yyMv3D75jAMgkxIvE8eK6666zr1v2f9euXfbHUDIYI+2ibA8oaczUQJmQCyvS8ftJW+6P8Bdvkg5/9+7dNl9hy5YtncdJW9aVjlV+RaTJiLh0xH5S9E7yHMbSs2dPO8ouQen99983Z599thkwYEDcYlLSkccj+z1nzhx7W3Iizpw5M7pMCimtXr3a5lVMRuT5I0WZirs9AAgLYkbqfXxhYaH9UiGPky9YWlEKJAJAuiNeFP87QWR7kkddyG25kCc1NgAgUxAvEseLGTNmmD/+8Y+madOmtn3WWWfZwuDffvutvc01KZQ1vtGiTMg0PylqNHnyZPvrn7///e+22NCJJ54Yc32Z5jZs2DBzzTXX2E5SOnfpYOUxTZo0MaeddpotyiTT/YSMhsvotjjppJPMypUrzciRI+1zyXTCsWPHxnyePXv22GmImzZtsgEjMpWucuXKdtqf3Ldo0SLnMTLlLt6/SPAQUlRJClFNmTLF/trpzjvvtIFJZmTE8umnn9rXKuTXAjfffLMZMmSILcBUlO0BQFgRMxL38VKoT2KFDHzL64kUeY38Ak1eX6So37x58+z2zjzzzCK8GwCQvogXieOFvEb5xa3sk5Db8s+/PZkhvmDBAjs4fs8999gULDVq1Ejx3QCA9EW8SBwvjjzySFtcXdJgyeuV1yPfNyKDFlyTQlljUANlQjo6CRqSHqNBgwbm7bfftrn5dD4+v1deecWOgPfu3duuJ8Fk586ddpnkKYxM8atTp47p16+fmTZtml2WnZ1t61DIVDlZR/KMX3DBBXGf580337Rpn2TK3I033mjbso9yIn/vvfeaoUOH2u3I/amQaXqPPPKIOeOMM2wwklF8CaIRclvSg0R88803dqqhjPRLMJHAIMEt2e0BQKYgZiSOGfIrqcMPP9z+Ukt+3SW5eOUYVahQwS5/77337BR2GRiXfRo8eLD561//mtI+AUC6I14kjhcTJkywzylxQAYt5LZ/wOKyyy6zF6qOPvpo+4tl2SfqLwHINMSLxPFCvivI65UZGfJ88twyyCHfKZLZHnCgVZBq4Qf8WQAAAAAAAAAAAIqJmRoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCogWIbN26cqVevXlnvRkYZOnSoefbZZ8t6NwCgRBEvSh7xAkAmIl6UPOIFgExFzCh5xAyEAYMaQAm67777zGmnnVbs7Xz22WfmuuuuS2rdUaNGmUMPPbTIz1VYWGhuuOEGU79+fZOdnW1uvPFGs3fv3iKvn+r2AKA8Il4QLwAgGcQL4gUAJIuYQcwoTxjUQLFIZ4FwH7OHHnrITJo0ycydO9fMmTPHTJw40YwYMaLI66e6PQDlQ7r1fWGQbseMeAGgPPZ9YZBux4x4AaC89n9hkG7HjJiBIvOQsdasWeMNHz7cy8nJ8Vq2bOndeeedXmFhodetWzfvlVd
ecdYdMmSIN2LECHt769at3vXXX28f07BhQ++iiy7yNm/ebJctWbLEkz+bl156yWvXrp3XqFEj75tvvvHq1q3rjRw50mvRooWXnZ3t3XbbbdFtL1u2zDvuuOPsftSrV8878cQT7XYiLrnkEu+KK67wzjnnHK9WrVpehw4d7DYjNm3a5J111ln2OTp27Og9/fTTdh+S8eabb9rXW7t2bS83N9d7+eWX7f379+/3/vrXv3pt27b16tev7w0ePNhbtGhR9HGtWrXyHn30Ue/www+3+9S/f39v+fLl0cfefvvtXuPGje1227dv73300Ufe6NGjvSpVqniVKlXyatasaf9FXt9ll11m3wtZX/Z/+vTp3tFHH22fW47Lueee623YsCH6/AMGDPCeeOIJezvo+Mp2qlWr5lWsWDH6nHK8UyHbfO+996Ltd9991x6roq6f6vYAlD3iBfEiGcQLAMQL4kUyiBcABDGDmJEMYgaKikGNDDZo0CDv/PPPtwFh6dKlXpcuXbyHH37Ye+yxx2yHHrF69Wrb8UU6SOnozjvvPNtxb9u2zXZuF154oRNATjvtNLt8+/bttoOTDuzWW2/1du7c6c2dO9fLysqKBgF5zKeffmqXbdmyxQYD//NLBysdq6y/d+9e78EHH7QdeIQ899ChQ20QW7VqldenT5+kAsiHH35oO9uvv/7a27dvn7d27Vrb4QoJoM2aNfN++uknu1+///3v7fGRACvk+bt27eotXrzYLpfnl/0UX3zxhe0kV65cadvSYc+bN8/evvfee71hw4Y5+yGPq1Gjhvf555/b/ZBjNnPmTG/ixInenj17bKDv16+fDaLxAkjQ8ZWg2L17d+c5ZdsSdOL9u/baa+16+fn59lguWLAg+tj58+fb+yInDX6J1k91ewDSA/GCeEG8AJAM4gXxgngBIFnEDGIGMQMHEoMaGWrFihX2QyudU8Qbb7xhR3ClE5aAIeuIxx9/3AYbsW7dOttZSUfg7wBkfencIwFkxowZ0eXSkVWoUMF2jBESIGTUORZ5rIzkSmca6WBlRFzvu4wSy3PKc0+dOtUZZU0mgMhI//333x9zmezfn//852h7165dNohNnjw5GkCee+656PLXX3/dO+SQQ+ztsWPH2pHsL7/80gYAv3gBRN+nyYj6QQcdFDeABB3fWAEkWXLSIMdy/fr10fvkb0Duy8vLS3n9VLcHoOwRL4gXySBeACBeEC+SQbwAIIgZxIxkEDNQHNTUyFArVqww1atXN40bN47e17ZtW3t/06ZNzaBBg8wbb7xh73/11VfNxRdfbG8vXbrU7N+/37Rp08bUq1fP/uvTp4+pWLGiWbNmTXRbubm5zvPVqVPHZGVlRds1a9Y0W7dutbfXr19vzj//fNOyZUu7Xv/+/c3u3bujy0WTJk2cxwpZvmHDBpvvTx4b77njWbZsmWnfvn3c49O6detou1q1aqZZs2b2/nj7FNnfgQMHmvvvv9/cfffdJicnx5x55plmyZIlgfui93nhwoVm2LBh9jnlmFx44YX2tcYTdHyLo1atWvb/W7Zsid4XuV27du2U1091ewDKHvGCeJEM4gUA4gXxIhnECwCCmEHMSAYxA8XBoEaGatGihdm1a5dZu3Zt9D4JDnK/uOiii8xrr71mZs+ebebPn287QSEdtQSLVatWmc2bN0f/ybaaN28e3Zask6w//elPZseOHWb69OmmoKDATJgwwd4vM4USkQ66SpUqJi8vL3rf8uXLk3reVq1a2Y46FjkOcjwi9uzZY19z5Pgkct1115nvv//e7osEn5tuuinwuOj7r7nmGns8pXCRHJPXX389qeORzLaFFEKSzjzeP3l+Ub9+ffuaZ86cGX2s3Ja/g7p16/5mu4nWT3V7AMoe8YJ4QbwAkAziBfGCeAEgWcQMYgYxAwcagxoZSjonGb39wx/+YLZv3247uocffthccskldvnpp59uR41ludyOjGbKSPBpp51mbrjhhugorYy
Gjx49usj7Ih2kjOjKCPvGjRvtiHKyKlWqZM4++2xz33332dFV2Ze//e1vST326quvNk899ZQZP368Helft26dmTFjhl0mo9DPPPOM7cBlhP6uu+6yx+ywww5LuN2pU6eab7/91gadGjVq2BHqypUr22XyKwQ5rnv37k14TGSUWEa7JTg+9thjpqjkOVevXm127twZva9fv35m27Ztcf89//zz0XUvvfRS+7chx1b+jRgxwlxxxRVxny/R+qluD0DZIl4QL4gXAJJBvCBeEC8AJIuYQcwgZuBAY1Ajg7355pu2U5HR4aOPPtqcdNJJ5vbbb7fLpEOXkfAvvvgiOs0vYtSoUdEpftLBSWc0bdq0Iu+HBAwZnZYRU9mPoUOHpvT4v//973bkWabLHXPMMTagVK1aNeHjJBA+/vjj5vrrr7cjsvJ6fv75Z7tMXvONN95oTj75ZBs0Z82aZT766KNoIEjU+cuoeIMGDexjZTRdApUYPny4PWYNGza0xzAe2a+PP/7YritT/iK/SigKmbZ5xBFH2AAoz5nsrwYiZMrikUceaTp37mz/yXt05513RpfLCHpkFD2Z9RMtB5B+iBfEi2QQLwAQL4gXySBeABDEDGJGMogZKKoKUlijyI8GysBbb71l7rnnHrNgwYKy3hUAQBojXgAAkkG8AAAki5gBpAdmaiDtSaD48ccfbX4/uf3QQw/Z0WcAAPyIFwCAZBAvAADJImYA6YlBDaQ9yb8o+QYlx+KAAQNsXkbJNyjiFR2SnHkAgPKFeAEASAbxAgCQLGIGkJ5IPwUAAAAAAAAAAEKBmRoAAAAAAAAAACAUGNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKFROdsUKFSoc2D0BQqJKlSpOu379+tHbrVu3dpb16tXLaffu3dtpd+vWzWm3adPGaderVy96u1KlSsXYa6D0EC+QqsqV3dOROnXqOO1WrVo57UMPPTRuX9qlSxdnWW5urtNu0KCB065du7bTrlatmtPm7xk4sPiMpa/q1asH9pe6P23cuHH0dnZ2trMsJyfHaevl/vPpWG3/ObHeD92uWbOm065Vq5bTrlGjRtw+X5/nV61aNbCtz8/5ewYOHD5fADKZPsfQ50L6emP37t3jXn/U1xrbtm0beB6mv4+HBTM1AAAAAAAAAABAKDCoAQAAAAAAAAAAQoFBDQAAAAAAAAAAEApJJ80ixzQONM/zivzYVP8e/c9VWFjoLNu/f3/gY/ft2+e0d+/eHb1dUFDgLFu1apXTXrlyZWAu4qysLKddsWLFuPmCw5rzDgAS9at79uxx2ps3b3baS5cujZv3XceDnTt3Ou1mzZo57YYNGzrtunXrxu2XdZ5TzoUAZLKgc16xZcuWuOfX27dvD+zHdV/rr5kRq+1fP1FNDV2XSdfU8Ld1/Q1/vY1k2jou+Gty6HP1RG3q5wEAUH7t3bs3pfOutWvXOu01a9bErHMW67xKX+PX51L+a5HpLBx7CQAAAAAAAAAAyj0GNQAAAAAAAAAAQCgwqAEAAAAAAAAAAEIh6aT8TZo0cdrkkUZZ1tjQf3+6rXPSBuVr1zl/df51ncdO19zYtWtX3HzB/px2YtGiRU5b5/H15+HV9PPqnHc6py9QVnR+xpKsn4P0od9H3c8mqk8UtC3d727atCnpvnLHjh1Oe+PGjU57/fr1gTU29PlOTk5OUvU2Eu0XAGRabmfNHwf0+bSuO6f7dX1OHNQOqpERq6aGbvvPofVj9fMmWh5Uc0OfD/nrP8Vq6xjir7mhv9skqs8RllzYAAAg8bVGsXXrVqe9bt06p71s2bKY32Fj1dTQ5zP6eqI+v0lXnO0AAAAAAAAAAIBQYFADAAAAAAAAAABkVvqpLl26BE5pDUpHRaoqJCOVlDT6709PyU40fX7btm3R2/n5+YFTuHRKqaD0VFu2bHGWrV69OnBauX4derqZf7/1a9ApUnRalEQpgIADJTc3NzA
G+D/rpKIKD90/FRYWBk6P1X2lTgsVlJ5Kb9vfZ8d6rP+5dJ+u++Hly5c77ZYtWwb+/bZo0SJ6u2nTps4yPa1X98N62q5ODwLgt+k0g2JGqnRfodPkBZ1n4bfH3p++Ndbx9S/Xfb7u//Q5sT5v1WkQ/Mt136pTAeqUCkHLE6W90v26/nsNeq5E29b7pdv+9FT6+CRqB6Wy0t+dEqW2IpUVAAClT6f91Kk816u0yv50mzrdlE7FqdNrBqXETOfvsJyhAAAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBSSTozVv3//wOX+/Lfk3URp19TQOd50Pmadi27Tpk3R28uWLQvMLbdixYrAmhv+XO46h7zOcZdqTnp/PmK9TOc1btasmdPOzs6Om2uXOjc4kA4//HCnHfT3Rk2N9OZ/f3S+eZ0v3d+vig0bNgS2/TlBdX+m6efW+UT9fa+ubaT77FWrVgXW2Fi6dKnTbtWqVfR2mzZtAutvJKq54c/FrnPCc+6E8kp/rooTM4Jqk8U6l9q+fXvc2j163ZKMV4nOY4Nqx6V6Dqf3219XRB+fRHVF9Lb0eaxuB9GvI6jmg16u1w2qvxGr7c8brXNIJ6pzkah+h7+tv1Mkymet2/44oZfp501UZ0S/Tv8x08sS1efQ743/7zVRfQ69nO8kSJcaTADKr6AabKmeK5Uk3af5z1tjXW+s7Iu5+rxA95/6vEKfN/jPE/Rj0+l7a/rsCQAAAAAAAAAAQAAGNQAAAAAAAAAAQCgwqAEAAAAAAAAAADKrpsaRRx4ZuJyaGihN+m9MtxPV1Ni4cWP0duPGjZ1lOl+czgWr+fO165oauq1zyut8fHo//fmc9bb0ujqPsc4L6K+xofPrkc8WJWnw4MFJ57flby+9+fsR3cfouhZr1qxx2kuWLAmsVZGXlxe3Hofu3xLx75veT137Y+vWrYH9sn4d/hocK1euDKwFoNstW7aMW3ND1z3SeU11TnPOrZCpevXqVWLb8udAjnXupPstfy5i/7mhyM/Pd9q65kYqudv1uaSuUaDPy4LqJejHJsopH3SuqeuG6L5XH89E55r+5XpZadbn0MdI95/+5brGQ6J6HEH1OXQ7lfc1Ud0M/Vhdn0N/f0n0XEHb1nm19WN1DRN/O9X6JsWppUJ9jvKnJGswASi/9DmKru/o//6oz/8SXZcrzf3ermps+L/XJqodXLdu3cDzCP+5gI7t+tynLOMv35ABAAAAAAAAAEAoMKgBAAAAAAAAAABCgUENAAAAAAAAAACQWTU1OnXqFLjcn0NL59Miv2XmSJSbsjjvdSp5L/XzJKqpofPybtmyJW4uueLkMffX14iVb0/nLtb5mnUuv1TyHuvXqPMg+/Pv5eTkOMuosYGSdPjhhwd+toPiBdKL/73TfYyuTbF27Vqn3aRJk8A8nv7c2Dovtq5zUZxcpTqfvM6LqvtWvXzz5s0x8+/Hqr+ha260bdvWabdu3Tp6u3nz5oHHq379+k5b5zzXsYrPEsKqX79+JbYt/XkPyjWs8w3rOkCJtq3P8YI+j7oWgD7v0jV29HlagwYN4uY81nmOdb5l3X/691sfH12DKNF5rN62v52oPkeitj7euh10nqHPpzX9OoLO+1Op+aCXl2Z9jkR1MYLW1/U5dLxOVGPDv5963USP1e2gY6LX1dvWx0+/N9SmCr+SrMEEoPxK9L3WX2dNf8fV3wcTnb8cSHsDvufq/Vy+fLnT1tdBg2psJIqvupZZaSKyAwAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAILNqavhzucZCLmeUpUR1XHSuXX8eWZ3/LVF9jlQkqrGhc/4WFBTEzYucau7hoJoa+njo3M06Ry2fb6QiNzc3cDl/T+Gk87TrnOQNGzZ02jpHd1AuzkR5rkuyxkZxam7o/PP+ehuxcq6uXr3aaefl5UVvt2vXzlnWsmVLp61rbjRq1Mhp16tXL25co94Gwpw
jPajGWqK/ZX3uoz+z+rzM/znS+YF135CoRkRQHQe93/ozqmvo6P7A327atKmzTNfY0PR++/s4fd6p+z99/HRbr++vyVGcehyJ1k9Ub0PHK507Wy8PWpaoPkcQHdsSfedIpT6Hjqmp1uvw16fQ9TYS1a4Iqouht6X/PhPV69CP96+v416itn5u/zHQtWiIk+WvBhOA8kufY/hraOj6E7qWlj4v0NsqzZoamv9cSX9P1d9LdYzUMdRf701/r9exW5+/lGZMZaYGAAAAAAAAAAAIBQY1AAAAAAAAAABAKDCoAQAAAAAAAAAAMqumhs6RBYSJzunmzwGn85QfyPxviWps6Px7/lzHOsevXlfnCw7KLxyUr1pQYwPFofM4IzMlyt+tc4fr84igOhqJagbl5+fHXR6Uk7wo+dP929b553UO1a1btzrtTZs2xa25sWrVqsAc+m3btnXaLVq0SLrmRlC9DUEucaSTNm3aOG19jpLK36d+rP6M6s+G/7OgP/tbtmwJbPvrR8R6fNB+6X5J91s6z7H/867r8ejzWN336vND/34nqpmRqK37PH8fqett6HNeffx0O+i5i1uvw9/WMSDVGnZBubP1totTD6ok63PomJ0onuuYoZ/bX6tC19vQNTT0ch2fdLtu3bpxa3fpv/0mTZoEfp/x167RnzG9X/o1EiczrwYTgPJLn3Pouoj+uKbXTXR+qM9JSpPn6xP1fusalTre6vpu/hjrr68RK4YmOm84kJipAQAAAAAAAAAAQoFBDQAAAAAAAAAAEArklEK55J+Oqqcb66nKB5KeAqan2vunresp/npKu27r6bj+dtCyWG093Zt0VAD05173pbrf0Ov7+xmdHkVP202UWsS/3D9dONbUWp2SQ6cWCUqZop9Xp5vRbZ0ixT89ef369c6y1atXO+28vDynnZubG5ieyp++Kig1lU7nEesY+dNu0L/jQNNT2EtS0N+2/vzr9HI6ZahOe6fPy/z9kO4L9DlaKilD9X7r46U/3zrFlj6n8+9bKn1rMimk/OexxUkvFSudg//90esmSnWl10+lX08Uj4JSFqZrKiudrirVVFZBqbD0thKlttIxW7fr1KkTvd2gQQNnWePGjVNK0+hPT6Ufq1Nr+J831n4nSm2J9E9XCKD80rFcny/6Y6z+jrZy5crAuKVjZlBsP5D2q/OGROmo9Ovyf5fX6R0TpaMqzRSOzNQAAAAAAAAAAAChwKAGAAAAAAAAAAAIBQY1AAAAAAAAAABAKFBTA+Wezu+m60WUZI0N/VyJ2v4cwDpnqM4fnGqdjGSXxRJUY4NcpgBi5RfVfak/z6fODa77t82bNye9PFFtj1q1agXmxdZ58v35RnWOd523PVF+en87qN5GrHz+ifK5+mtq6Hobuh6HzjOuj5G/5oaOiTpHKpDO9Odb50z290vNmjVzlrVu3TrwM6n7Jf85m86frOsu6LzGup7Hpk2b4rb1Y3XOZJ37X/eJ/vUT1XjQy3UtkKAaHKnWpkil5oaOAbqt+/Ggvlv3xYnqhiSq3xFUryNd6nMkisGp8p/768+czi2ulyda3x+DdM5uHbtWrVrltPVnulWrVnHjoq7HkSh/uI6N1NgIfw0mAOWHPjfS5zf+80P9HVb3Q+laU0PTsV6fKwXVe9TfO3U9t/r16wcek6pVq5oDhZkaAAAAAAAAAAAgFBjUAAAAAAAAAAAAocCgBgAAAAAAAAAACAVqagAp1tjQ+VuDHpuoXbFixcC2P5e7zuGrcyjr3MN6eZBENTUS1efwHxN9vPRrAlA+6dyaDRo0iFkPQmzcuNFp6zye/r5R5xbXOTv9zxMrb7bOi6rzp/ufS9e10Dn2dd52vS1//vRU88vr/P36GPj3TecV9+cRj1VzQx9/fx5ynUfWX28jVk5anUcWSCe6Joz/86/zA+vPRaKaGv7PrK53kKgOg66hoT/D/nz++jy0Xr16TjtRHRx/zY1Ua+Toc8ugug6p1oBIpf5Eov4
yUX2OoJoaiR6r+3n93vrXT7RfZVWfQ+cP1++rzi2e6DuF/3tBqrU9EvEfI33s9WdQ5wNfu3Zt3M+wXldvWx9P/Tr059Bfr4f6GgCQ3nQ/reuR+ft0/d1H12fU9dr0uZWOJ+liZ4JzUf93y7y8vMCaVYnqjuhjUpJ1eLnaCAAAAAAAAAAAQoFBDQAAAAAAAAAAEAoMagAAAAAAAAAAgFAg8TGQgM73pvOHp1JjQ9eX0Lnlgto6L6zO8avz3+oceToXdCo1M3QeZJ1b198OyjEryDMLQPeluu6FzmWva0DoGhv+/lD3V7rP0fnnc3NzA2t/+PN5L1++3Fm2YsUKp52o5oY//3dQvY1YbZ1PXT/enw9c19tYs2aN005Uc6NNmzZJ1duI9d7VqVMn8HgSA1CW9HmZ/+/TX7ciVv0dnXN/69atcT+ziWoS6HM0vS2d79/f19SvXz8wb7GuqaHrDPn7wFRraiSqBZfK9vS5pq5ZEFSDI6g+RDL9p7/ehK49kahehz7/Dlqu1y3N+hx6uf6b87/ORPU69HLd9r9X+r1IVLcvkVTeK30MdM0Nf1sv0++Nfi799xiE7z4AEK7zQX2u5O/H9Xcb/V0yUU2NdLVHxTl9Lur/Pqm/O+p248aNnbY+V9XfB/XxLg5magAAAAAAAAAAgFBgUAMAAAAAAAAAAIQCgxoAAAAAAAAAACAUqKkBlGCNDV1PIlFNjcqVKwfmlvPn49O5+XSO9IKCgsAcef5cujrPe6KaGkE1NHSeWZ1ztlGjRk67Vq1agccAQPng7w91LlLdb+i6DjrXvT///KZNmwLzZOsc3Pq59XP5+97WrVsH1vZYvHhxYM0Nf/5RXSdJ51YvTs0NnVdcb1vHAJ0X1f+69Gv219uIdbyaNm3qtHWdAn/+/2rVqgXGSOBA83++dc7kJk2aBNYkCKpxoD+fus6APlfSNQp0vn//Z1SfR9WtWzewT9Pnlv7zLl2PozTPyfQ5cqI6c0H0eWmi+hz+tn6sfq8S1ZMIqkdRlvU5dG7soHodibatv2Oksp+J6nMUp+aGfq8SvXf+z3Ci/UpUFycV1NgAgPSmzzn8/bY+zyrOeVesc5R0sUvFRf+5qL72mJeXF1hzMScnx2nr803/8dbng6ni2yMAAAAAAAAAAAgFBjUAAAAAAAAAAEAokPcFKCb/dCl/KqpY6ah0ao1EU+2D0k/paWyrV68OTF3gn1atUyj4U7ckk44qaBp/oin/jRs3TnoqGoDyQachqlevXuCU1rZt28ZNl6TTZOiUHLq/0yk69JRh/3PrVEvNmzcPTIul00/501Ppabs6BZROT6X7dJ3+w59yUPfxOq2GTkOit+0/Rjq26Nek3wudrkofE39Kn/r16zvLdFod/V6QngoH8hxO90P671On9dTT9P2fu6CURLG2lejz7f9M6v3Myspy2tWrV3faev2gKf/6nCwsaXJ036D7jlQkOudNJbVVWaay0m3d7/vbell+fn7g36du+9M+bty4Me6yWDFZx6uSTMuhP2f+bev3Tb9XxUmLpenPmf7MhuVzBgCZSl9b85876XMjnVpXf2/V32d0zEzX9FN7VMz0f0fesGFD0qmLY13z0+fU/nPV4pyzCb4dAgAAAAAAAACAUGBQAwAAAAAAAAAAhAKDGgAAAAAAAAAAIBSoqQGUIJ0zVec1zsnJCcyhGpTLL1GOZP1YvS/+XH5BeaCTqbERlE9Y56TV6+q2P7+6qFu3bonl1wNgQpkPvWbNmk67UaNGTjs3N9dpt2rVKm7OT92frVmzxmkvW7YssAaEv6ZGgwYNnGW6rfdT1wLxb1vX1PDX2xDLly932np9/br8fbzOpb5//36nrfOn65jgf7zOh65rfejjqfezXbt2cd87XZNE51/VOWr134U/DuqYB6RKn5PpvzddJ03XP/CfS+l6B7qd6NxIf4b9tYL0Z1CfKyVq+88Xdd+bqC8uD7n/dV+iz691O13rcyQ61/f/TepliWp
T6boZ/tzaOnbpOkz6+Oq23m//MdKxLFX+x+s4WFz+z47+HOm/Gb1c19ggngFA6dLnN/5auXXq1EmppoauwaGv4+k4l652+fZT1wXR3/90rNff8fR1UH/dEV1TN9UYyEwNAAAAAACA/6+9O9mu4rrfBuz/soOxjcBg04MB0zl2JpnkLnIHmeYeM8sgo8QrmcQNYIHpO5vedmLyhXyLCetXr0/trUISqKTnGWmvOjo6TdXeVTrrvC8AMAs+1AAAAAAAAGbBhxoAAAAAAMAs6NSA19ixkXl8meVX8+Uya66VkbxoXHPOMws3c/1ynBn1mflbM2oz8/fp06eDca9zo95X5hNmjwiwOeWxnnPl0aNHB+MzZ86Mdk1kln3maGenRvZg1AzQzEjNfNDMn885rHZuHDx4sPl383EtLy8PxpcvXx6Mb9y4MfoaZD56K7M8b5+3zdev17lx69at0ffu448/Ht226DXJzpL6+rb6Np6TUc5ad2xkB0w938mOghznuVCOM6u4/n7mGuf5Xp4v5vb6vHqdGmkrdmzMtZ+jN677XKsfZtG8n9cRdS2sOeSLHmeuKbnv5+1bz2E1HRv5ek7t2Gi9t3mNlutRbu9d4wGwvnJOr/N2r1MjOxZrR+yi/wHW9SLXtY3kaTmvffLkSfP/g7Vba9E4z5nra9TrDu7xTQ0AAAAAAGAWfKgBAAAAAADMgg81AAAAAACAWdCpAa9R5sXt3r17NKu4l8Pby1CuOYGZmZz5672OjcxQr7m0vZzoKR0bmaub+YUy02Fzyjmql2V/4sSJFz/fvXu3OV/V7olFHRDnz58fzQrPjox33313MO71ANXnkXmr2c+RzzHHBw4cGIwvXrw42seR+fz3799v9o7UOT/n7Bxn9nr2d+T6Ujs2rl+/3uzU6HVuHD58+MXPe/fubb4XuQ9lZrn1g548z8qOnXpM9s51ej0Cmfdf56k8XrPfIPfl7L2o47xt7zjI7TkH6tiYRz9H9lHk/pf7Z14H7NixY/S+cs3NtS47N/K4yn2oPrbclsdJjtezYyNf77re53PM4yTHea6Qr5njCuDVqtcJOWfn/+yy8y+355pZu9E2cqdG6zwg+93yWrP2+S7qSaz/18vXJ88Len1vvqkBAAAAAADMgg81AAAAAACAWfChBgAAAAAAMAs6NWADyfy4mqnaykRe9Lu5vWbR5bbM6c0M9MxMz0y9ms+euby9nN7WOH+3ZvY+98EHHwzG27dvH4xlpMPW6B+q3QqnT59uzmdPnjwZjB89etTMAK0dG9nrkxmg+Tgzg7XOUdnpkLfduXPnYJx/Ozskap5/Zrvmc8oui+whqTmprb6N537++efmuNW5kX+39m0sepzZqXHy5MnRbXWfWPSaZGZ5vv51TbWWsEgew3Wfyv0vM5N75zetcfYA5THW69io49yWucW9fT+Pq3oc6QHY2Op7ne9775rixx9/HN2e+26vPyZv39pf83FO7UaqvRn5OPIYzY6NfM55blHX/+xwyv6dPHfI65fs66j357gCWH91rs2epN41Wvb65RpQ5/y8rtqonkZXXF5Pf//994PxzZs3m+PaJZmvT+96OvmmBgAAAAAAMAs+1AAAAAAAAGbBhxoAAAAAAMAs6NSADaxmqmbWXObGtjo0cvvUzORex0bN2KtZ7IsyajNbN7fXPPbcluPM4a3ZfIvyD+Wiw+aQ2Zr79+9fccZn5s3nfFY7gp5bXl4enWMyMzVzsjOXvI5zHs7nNLVzoz622q+xqFPj4sWLze6Kmnva6ttYSedGa5y/m/0m+bdb+azZqXHq1KnBOLcfOnSo2VFSX9987TPv3NqyNeUxXM85Ml85z1cyr7/XR1bHuS3P0fKYyzlwit6+ndvrcZS9Avl6sXHlPpZ9Eg8ePBjdx3J/zDUj1+jWNUXKnO3eOOfq+rzy7+ZzzOMoj+FWx0auGXks5Pqdt8/nUa/h8rbWH4C1V+fWnJNzDs+e13pdumh7XS/yWqi
1Bm4k/441Mtf627dvN69F6/lib03UqQEAAAAAAGwKPtQAAAAAAABmQfwUzETGS2UcVS9Sqm5vbVu0fUocVcZL5dfMe5EL9fdzWy9+Ku87o2LqV9d8XRs253x48ODBwbaTJ082o1jy67K3bt0aHZ8/f775deKMo8oIvLq9NwdlbEbO+flV6Lo9o2/yNTl8+PBgfOPGjdF4qlY01WrjqXpRVfm7jx8/Ho0Sa71vzx07dmwwPnHiRDOeqr5muZbk16TzfRZPtTXVc6ncJ3IfyvOVPJ/J85+6Pbf1ztFqrOdq46hS7tutfV0c1XzjpnKfunr16mB86dKlFz9fuXKlORdndFXO82n79u0vft61a9dg2+7du5trcMY41muMXKsymjIfZ74meb1T4xMzdiPng4yqzGMjb1/X+4y1zOcIwNrKeTbn8DzHy+uu3F7Xsry2mUv81NN4nL3459Z1Wl635jVvxnklZ5MAAAAAAMAs+FADAAAAAACYBR9qAAAAAAAAs6BTA2Yqs4gzjzWz56Z0avTk79dM9dqvsagHI3MDM0e63j5/N/Nr83d7HRt79+5d2K+x6DkB81HzpjOX86OPPhqM79+/38zR/uGHH0bnrMwRz06N/Ns5L9fHmXNQT85R2alR817zvjNnvM6Fizo26rjVt7Hazo1W38aicXYD1Peq5pmvJMs1H3ercyNfn8zKzf0gs2AzH71molt7Nqc8r8p9IPeZKeczuS3luU8eC/W4ym29jox8Xq1x77b5mujYeLXqfpRzcXZoXLt2rbkOXLhw4cXPly9fHmzLfomcq3N/zvWr9mbkXJxdSAcOHGiuwXUNyTUh1/fs7kp5rlDvO/s4cr3O55iPM/Pa6+3zvrJnpPe4AZgm59Wp11k5rrfP69Bcj/P/YRvVv+OaLdf6vB6s15e9To1PPvmk+bedPQIAAAAAALPgQw0AAAAAAGAWfKgBAAAAAADMgk4N2CIdGy29DOXeuMp85h9//LGZnfvkyZPR7VM7M3o5061sQx0bMF/1eM1jed++fYPxxx9/PKljo2aE5m2Xl5cH4w8//LCZsVpzsmuvwqLxal6D7du3D7ZlBneuD60s2Fbfxmo7N+7cuTPat7EoVzY7Nep78/Tp0+bak/edfzufR82Qz76NHOdrkp1W2Z9QM9Bzf13tfsA8ztHyfc9zkjy/qePcNlU9T8sM5DyH6+VK5/itt95a8W3zNcl5y3nY2sr9pr732QGR82GudefOnRuML126NNpVkbna//3vf5vdKrVDI3uxTp48Odh29uzZZsdGrnW19y/7n3o9V/m4M+e83neuP9lR0luTsyejbs/jpB5zi+5LxwbA6uT5SM7DOWfn9WB28dXtvWuhuXRqPI3rsOydymvo2reVr9fOnTsn/W3f1AAAAAAAAGbBhxoAAAAAAMAs+FADAAAAAACYBZ0asEWy/2rG6mo6M3q3791X5sxmxm/N32tlSq9knFqZ1L2Ojcx+BjamzJfOXM7MNT116lSzU6PmgWdWeI7Pnz/f7FKo3RWZI569FmuZJ9/Lgm3le7f6NlbSuZGdGjV7vfZWLMo47+XM1vXkP//5z+i2RX0cmfWamfI1Az0f15UrV5o57jUD/rkjR46Mvkb5emYub+4n1qLNIXPue11AU0w5V8oOjdoLsGh7zq853rZt28KfF43zNcguhZyXdGysTs6RtdOuZlvnPL2oQyM7NupcnnNp5oHnnJZZ2sePHx+Mz5w58+Lnzz77bLDt9OnTzfUnj6u6f+f+lnNrvl65hmR+eO3c6HU8ZcdGPs7av5XjvG0eJ725xXEEsDrZf5ddRnn9l+d0e/bsGZ3v8xotu8/m4t/xuLNfq66D2eOV5wk9rowAAAAAAIBZ8KEGAAAAAAAwCz7UAAAAAAAAZkGnBmwRNUM181Uz07snc2frfee2zFvOHNnMma3Zu7nt2bNnkx5nK0e6lzmdr0lmJco1h3nIvOnMOc3+g/v374/OWdnD8Pjx48H
46tWrzb9VM1RzTsnHmfP0emp1brT6NlbSuXHo0KHRfons1Lh48WKzuyJvXzs38r346aefmrnu+V5m9mtdf7JnJbtU8nHl4z527Nhg/PHHH4/mx2cmfL6emb2bPQXMUy8Hv3Welucvea7UGufv9jo27t6923zcNWc6M6dznOeHeV6Vc0vd1/UC9NWOh0Xn1PW9zDnrwoULzbk557w6R2b3ROaD576c8+Mnn3wyGH/66aej2/J3877zb9fHlnNn9mDkGpJrRm6va0iuN9nPketVdppknngd59yQzzGfVx5nuaYDsLZdhUtLSyvu2MheqbzmyLUm16qNqtctVc83e91QPf4rBwAAAAAAzIIPNQAAAAAAgFkQPwVbUH5lrhdzkLfPiID6FbH82nN+nSy/Bl3jQ/Lr3fl17V4MQmpFTPXiGabGUU39mhywMea7+hXgjAbKOKoHDx4056T8yvDy8vJgXL9inNEqGSvUi27ZKF+xnhpPVV/vgwcPDrblOKOYMhLl8uXLL36+efNm873oxU1lXMiTJ09G3+e6bdHfyiiRfGx1nOvY6dOnB+OMp8o4rxppJlZk88hzijyuWucrGTuU43pu1btt7tt5LGSkaJ2n8nyvF4uTzznPNXft2jV6X/TjpvK9qhFSGS916dKlZrRizlt1Ps15KKM1enFTv/nNbwbjs2fPjv5uL56vFY+Wr1euCRn58ejRoxWvKXnfvfUnzy1y/67xU/kcc27Ic5zWdZhrF4DVyzk746dyHazn8hn/mBG3ufbMJX7qWfxvLc8f67q32lh339QAAAAAAABmwYcaAAAAAADALPhQAwAAAAAAmAWdGsDkjo3Mvav5rJn7nlmumTmY45pFnhmCP//886SOjVanRm7rdWxkPm7m8NeM28yJBjaOPD537tzZ7HWoHRvff//9YFvOUZmDmuNz5869+PmDDz5odk/UDO1F23Pefl2mdm7U9SVf+3xNcu3JeXf//v0vfv7222+bGfDZ3/Tw4cNmfnrNPM++jcxHz9/N7fm3ak9L/XlR7n32d+Q6WNeqzOzVsbF51Oz7zNDP42Q1nRq5r+f2zP7Pfb0eZ5nX3+vU6HVs1HHOHVvxvCvfm5yHci7JObF2PmVfUWZ85/l1nn/XuSbnoewF6nVo5Pbao7Hafru6Pbspcn3pzevZWVJfk8w8z+MqX7+8rzzXqK9vXqPlsZDPK9fkep2mHxBg9XLuzHk6r+Hq9Uted+a1Y15DZCfT05l0bOTjfvz48ej/3fK2Pb6pAQAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALWy+AFJickZ7Z7pmXWzs2Mtc4M717eev197O7o5cpmBm1mSdcezR6nRlTxzUbcWlpabAte0aAjSPnoOx1qHneOQdl7nVmbte80OeuXbv24ufz588Ptu3Zs2cwzqzrVjfFXNeT3nPKuTRfo5qvfvjw4cG2ixcvNvPkb9682cyMr90BmXee+bW5nuR+0OqD6u0zua61snMz0zf3ZWvR5pDvc84V2Q3Q6tSoP69knPt69grVcR77vQ6NKZ0b+Rpkj8Bm7AbodWjkHJa9GNmb8fXXX7/4+dKlS81M75ynci6pc01dM587e/bsS3do5P6ca8Rq3ud8Dr1+rV4PRh3nbXMNyPcy5/W873o9k69B7vs5zmu4uu7mdVbed24HYHq/4O7du0f/b3T06NHm9Ulea2bX3tOZdGrk+WRdJ7N3KtfAHisVAAAAAAAwCz7UAAAAAAAAZsGHGgAAAAAAwCzo1ABWnRNYs3R7Gck5zkz1uj1ve/369WbG4JSOjdqvsZLOjF7OdL39gQMHmjm9+byAjTO/ZaZ0zfM+fvz4YNu9e/cG4/v37zfnoHr75eXlZlfR+++/38zJzjzwOfYlZF53dgPkXJnvza5du0Z7BA4dOjTaZ7IoQz47N+p6k/nyvfc55XpRu1ly3crs9fzdVF+jXJvz9ct9Sl765pDnXTlX5LFRs4tz/8ps5ta5zqJ
xzXrODqLsP8hxzmGt88f83ZzHcy6Za8dGPVfNuSLnoTxHzg6NL774YjCuvU45/9VOoXwcOfdmp9GpU6cG2z777LNmx0arQyPn/bV8H3OfyeuR7HBqdSNlBnjmgWf/Sb6XmSeex13tWrp9+3azMyPXyRzXdaLXY5NrCgB9eT6THYH1mi+vV3Kcc36vY/bpTDo26uPMNXDqc3A1AwAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALOjWANe3YyIzuzL/tdW7UTNvMI+xlKGfHRubd1szBvG3mQj979mwwnpI7nbfNbMTMIs4c33xewKuTc1Q9Xmtu+KJc0+zYePTo0WguavY0nDt3brSraFEfQuZo1+2bZQ7JNSDnzvoaZHdRdpTkPJzv5ZUrVwbjL7/8cvRxpHzfM9+2JW979+7d5u3zsdTugHwNslcg15rczuaQ+0juF/X8ppfl3+t4yXOhen/ZI5AdEL1OjZyL6zjPNXv9MHPt2Kivd+1VWLSGXLx4cTD++uuvRzs0nrt8+fLoOXG+r7kPZe9F7ZvKzozs2Pjoo48G47179zY7IF7Ve5V/Jx9HPs48Nn744YfRtb92zSz63bwGqfeVt8/zjuxSyR6MVsdGbut1bOQYgOl9tLt3737x8/79+wfbjhw5MhjfvHlzMM71+kmsL3Pp1Gj93y3HPb6pAQAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALghGBNZV5rJkD38tQruNep0ZmKGd+4XfffTfasZE55g8ePBiM//e//zVzpFs51HnbHGeW+549e0ZzFzdLNj7MVe0hyJ6LY8eODcaZdZ25pzUnOzNQr1271sw/z3mi1ZeQOdmbRc6H9Tnn2pOvT65F+Xru2LFjNOM8uwEy276Vf75oPWnJ3819KHN5a9Z9r4cln2O+Zr3uEDZfx8bBgwcH23odG7k9s5trDnLtEFpJx0avk63VrTD1XKnODxupX6PVrZDntNkDdOHChcH4m2++aa4xdW7JeSfXkOwoyrWv9mZM7dDYqH0n+TjycebzqPv3jz/+ONiW47wGaR1H+ft5X3ncZHdSvpf1+M81IdeXXCPy9hvlvQKY03nY0tLSijsAc5znVg/j2rOuL3Ps13gZvqkBAAAAAADMgg81AAAAAACAWRA/Bayrt956a/Rrz714gV7cVC9+Ksd3795dGEW1kjiqjATIcf16X36NP8f5VcC8rxohkl8bF0cFr1Y95vJ43Ldv32B8/PjxwfjevXujcSsXL15s3nZ5eXkwzq8nt6KFWrF+m1XOjRmjkdtzzs9okfpe533lupb3nWtXRvZMkWtVK4ImI7V6+0zuz7nderP5Y9vyPc/zkV4cVet8Ju+rnoMt2rdzDuyd07Vk5FuOa3RQHvt5fK+nfI0yWqge71evXh1su3TpUnN8/fr10fvKc9Oc4zJaqRU3leNe3NRcI4xaMW7PHThwYHTf7sVR5e3zuqEeh7kt76sXWVhf/3wv3nnnnWb8VF535bGT2wFoX6PkeVhdS547cuTIYHzjxo3mnP+kxBuLnwIAAAAAANhAfKgBAAAAAADMgg81AAAAAACAWdCpAbxSmZ2bea41c7DXoTG1U2NKtnNm1j58+HDFnRqZX5jZ7b1OjTrOLOLMQJdfC69OZq3v2rVrMD506FBz3qi5p48ePRpsu3XrVnN87ty50e6dzGTNXOyt2JWQc3j2JOUacPv27cH4zp07o1n/uV5kXn+uc62M/pz/s7MgZX56fdzffvtts1Mj95mlpaVm9nruR2wO9fiv/RrP7d69e1LHRo7r7XPbs2fPmh0Pea6UOdGrkX+7PrY8z8rjIrsUVjN/5uv5008/NV+Ta9eujfYwXbhwYbRfZ9Ecl69vfe9zbjh69GizQ+Ps2bOjfVL79+9vvp6vsrPkVXXTZKdRnufnmtEb53tV17NcI/I4q3nqi/aDeh2Rc3yOc03Ijo1c6+rvb4XzDICXUc8rsp8pz93z2vLw4cOj1yt57dn
7H9Rm4b9hAAAAAADALPhQAwAAAAAAmAUfagAAAAAAALMwz1BLYNPITojasdHryJjSodHT69jIDNvHjx+vOEc6s90zz7CVSZ050Jn9/N577zXzbYH1k5namUt+7Nix0V6H7Gn44YcfmnNMzVZ/7vz586N53jkv5OPMbp65ql0WOWfn65uv3zfffDMYf/XVV6N59ZlVn5nlmTOeXVH5+lf5uLMzIzP3cy2q3Sw3btxo9iPs27dvdJ/pdQnMNQefaedgmamfc1qek7TG2R+R3TM5zg6NKR0beV+9rpp6XpbnaHmcZN519gy0zrt6HRp57plzTZ2Hvv7668G2S5cuNXuBci7Jeaoe/7UTY1Fnxqefftrs2Dhy5Mho11T+3c0i3/e6rua5eu5juR/kep/nA/VY6HVo5N/K/q66TuR7s9qOjbpObNb3HWC16v+scl7NHsQDBw6MrrfP3bx5c/RcKdcHnRoAAAAAAACvkQ81AAAAAACAWfChBgAAAAAAMAsCcoENmzGYufDr2akxtWMjM21rHm5mKLc6MxZtr9nPmQOd48x+ziz3mom+1q8RbHV5POWclcfniRMnRjsfcpyZ27l9eXl5MP7www9H81h780KON6qc/2ru+HfffTepg+SLL75obq+545l3nq9Xvs85zoz5ugY8fPhwsO3WrVuD8Z07d5r56HVtys6BzOfPnPccZwdHzfnN52At2RodG9m/U+eZRV0WtVMj+zZ6pnRs5LZep0bmSNe+iZxr83wv86yzi6Z1rpr3lY/76tWrK56nar/Gojku54Z8L/P4/uijj178fObMmWaHRm6vv5vdK7nP5OPYrGrHRu9coNellJ0adXt2zfR6W/K+6/uRvRf53uXzyHHevt5fdtHoZQL4pbymyLkzz7sOHz48GB86dGi0XyuvMXL92CwdG1vjLAMAAAAAAJg9H2oAAAAAAACz4EMNAAAAAABgFoQbAhtWZnZndmvmgU+5v14eeG7PvPbMqK15zpmF28qc7nVq/Pzzz80sxBxn9nPmoNeMdJnosLYyMzqPv5p7eurUqWZnRuaj14zURd0L586dW5hvvqhj45133mlu3yhzQ85/Dx48GH0NLl++PNj2zTffDMZfffXVYJz59Pl61rk1157Ms/34449Hu1MWvR91DWi9j4vei8xPr30fuS5lN9SlS5eajyvHtYvl7bffbu5DbM2OjTwPq+c7vXOf3JezMye31/OwXldF/m7OJfVYyXO2HOffyseZHRv1Ncs86+zQyHnoyy+/HO3YyDku14x8zpnLffDgwcH45MmTox0aZ8+eHYyPHTvWzPiu+8VW6dCYci6wtLTUPFfvdWzUcd429+28psjtrS6qeo2wqEMj96mcD2o2fK5d+Rro2AD45VyZ83D2Ye3fv7/ZcXW70amRa0uez+R521w46wAAAAAAAGbBhxoAAAAAAMAs+FADAAAAAACYBWGGwJbt2GjpZQLnY6kZhZlHmPmFmXvc6tjIzoxWzu6i29cM/8x+ztdPDjKsrewhqJ0FmYGafRGZEZ/Heu1SeO7atWsLc9gXZb5nTnY+zpwb1kvOfTl/ZWZ8fY7Zm5FZ9MvLy4Pxt99+23y987HUzPjMkz99+vRg/MknnzQ7NfL1r3P89evXmznjmYfeylPP22aWbv6t7NDIzP3WelGz0xc9bjanN998c8XnYXku1OvQyOM/9+enT5+OHr8//fRTs1eg/m4eR73zqjzmcrxv377Rc6l8nNlr0+v6qR0cOR/mc+r1NuS8VOet7HjKOS/Pr3MNyf1iq8trhFxjs8cqz9VbHRtTO1/yuKvHRq+HKbuT8n1vnUv0rikyJx6AX55f59qe5xxHjx4djO/cufPi5/v37zevHXvXGHPhP1gAAAAAAMAs+FADAAAAAACYBd8VB7ZMHFW9ff5ub9z721V+nTsjGDJiIWNmWvFTGbHw5MmT5vb8WmG97xqv8pw4AVhbOU/UY2z//v3NaJCMGsmvEOexXm+
f0Ut5rO/atas5d9YooW3btr2xlmo0RsZofPfdd824qYzV+uKLL0a33bhxYzDOv5Vf787opZMnT47GS509e7b53mWUSL7e9TXIeTcjZTK+JveD+rzyd3MtyrXmypUrza+013iqfJz5+mWUFVtDnifU/STPwTISqhev2YrVybk1j+/8W7m9RvLkeVI+rhzneVceN/U1yeM3Y/AybiqPyTon5uPIeTvn+YyQOnPmzGBcI6cyDjHfux07dgzGzg+nySim3nvXOjZ65/35uxlHVeNHcs149OjRYHzr1q1JcVS5LrRisMRPAfxSnt9s3769GWl7KK456jVJXjPkHJ/nWTlP5xqxUfmmBgAAAAAAMAs+1AAAAAAAAGbBhxoAAAAAAMAs6NQAtkzHRmbWtn43x5mHm2pvRnZoZE50yvzbmn/Yy3rOLN1WBnWOMycxM5RlpMPaql0V2bOQmag173xRx0bmot6+fXs0B/vcuXOD8fvvvz8YLy0tjea3Zu711Cz1nMNqxnw+zsuXLw/G33zzzWD81VdfDca1R+POnTvNuTIz4Q8fPtzMm//0009Ht2VWfWbq79y5czDOXpKaWZvZ/5mXnj0j2YtR94N8ztkjkOO6zyzK+69rZmb6Pnv2bDC2XpDzQ+bt5zlYnq/k/lmz//P2mftc59aV9ArUeSmPsbxtnis9fPiwue/X88V8Djdv3mx2/9y9e3f0ebz99tvNc7acl3INyXG9fXY86dB4fV00i96Puh/kNUXuY7mG5LFQ9+c8BvO22QmT+2t2bNTn1evQ+fWvfz0YA/BL2VWU1xj7Y72oa0J2auT5Tp7P5P+VdGoAAAAAAACsIR9qAAAAAAAAs+BDDQAAAAAAYBZ0agBbsmMjs4h7nRop88SzR6Ml8wrzd2t+YWbS5jjz6jMLOnOn6+17+dUy0mH9ZD56Hm8fffTRYJy5qNmtUI/nPJazKyH7PDKfteZk5+PMrPWUudn5OK9duzbamfHll18Oxrk9Ozdq3nfOoznHHz9+vJkv/9lnnw3Gp0+ffvHz0aNHm+9V9jf18ufr9nztDxw4MBifPHmymYdb94uc0zMLt5eXfvXq1WbOe2sd++1vf/vGy8oM+Hwve+sx85Dva84teSxkB0c97nJfz3OyXsdGHedxknNtnldlBnXOiXV/zb+bv5vj7DioWdo57xw5cqQ5V5w9e7Y5B9a5JnuV8vXj1Wam5z5V36uce3P+zPU/b1/3ybxGyGMh+zty/cljuPbJ5PVK3tfvf//7wRiAX8pz4Jx3d0f3YV0v8jwhu/TyfKfVybSR+zV8UwMAAAAAAJgFH2oAAAAAAACz4EMNAAAAAABgFgRmAlsyk7Bmxi/Kbk6Z15w5tHV7r18j8wszQ7nmN+ffzazDzKzN2+e4dmo8evSomZX7u9/9rvk8gLXLSM3+gn379g3GJ06cGIzv3bs3Os45JXPbL168OBi///77o+Neh0bOMdmhkT0N586de/HzP//5z9Ftz924caM5/9Vc2YMHDzY7M379618PxmfOnGm+vvX+9uzZM9i2ffv2N15Xt0q+73Xezm2ZYd7LS79z585orn72N+Xf+sMf/vDGy8rOl9Yaql9jvvJ8JbP/e+c3Na9/27Ztg215Ttc7N6rbszMjezBy388+j3wsdR/N+877ymMyuyxqVnZmY+cc98knnzQ7NvL36zzfeg68erku1Pfq0KFDzXm8d1zV2+e+nvtrHje9jo2qdx4CQF+vZ257XJO01ou8prh7926za6/O+bk+TOmTXW++qQEAAAAAAMyCDzUAAAAAAIBZ8KEGAAAAAAAwCzo1gC0p8wgzjzlzzbNDozeufvWrXw3G169fb+YX1rzmzF/OnOjMrG3l2+b9ZbdH5tf/8Y9/bN4XsHYyS33Xrl2DcXZGZF56PZ5/+umnwbabN28254lLly6Nzn+Z7Z353JmxmvPIhQsXBuMvv/xytEPj1q1bg3Hmfb/77ruD8dGjR1/8fPbs2cG23/z
mN4Nxbs9c2b179w7GS0tLo3nzr7Nb5fjx46PvZb6v+V7lepGvbyvzPPep27dvv7FW/v73v6/ZfbFxZT5/7o95LpTzQZ3jct/OfTm7Klo9aKl3fpf3/eabb674vlLOLdnfc/jw4RX3BJ0+fXp0flx03/W8t/aV8Prl+1Hfq7w+ybk5j42c1+s1Rh6DeY2RHRq53mc3X81Yz8eV1xwvq/U3c03VDQPMTc5pOe71vP6/Mo/n/6Dqtc2ia82dO3eOXpPkeVaOXydnMAAAAAAAwCz4UAMAAAAAANhc8VP5FcLWV/0A5i7nuIyn+vDDD0ejTDIyqhczs3379tGvaD9+/HiwLb8qnn+rF0dVYxPyK9wZG/Oyel9HbK0X1hJe9bE9l68bZ/RSzkE1auTOnTuDbXms1/iJ565duzb6tzJuYvfu3c3j/erVq4Px8vLyYHz58uXR+Sm/Qp0xGxkZVSOlenFTGcWS952v7+uKY+nFkB06dGg0hizf93v37jVjSb7//vvmelLjgPJ9zqig1fjLX/6yZvfFxpWRCRnj1ItDq+Ocw/LcJ+et1jgjd/JxTo2CaMk4vzy+9+/fPxifOHFidE7LuKljx44114iMtptL5NSUNbt329b21fzuau+7p/5+L7KwdX3SO45yDcg1uXcNUq9ZcluuPy8r4zJbr+1c9nGAl42behbb69yb5+o5h2d8ZsZV1XFen+Ra9Dqvr830AAAAAADALPhQAwAAAAAAmAUfagAAAAAAAJurU6NmIC/K7qqZhTLRgbnp5QBm5nLm0Nbfz/6NPXv2NPMMW3nMU+fTXsdG/VvZlbRt27Y31sKVK1dW3MEk75aNbr0yQqfmtOexnXNQvX329OSxndn1t2/fHr2v7OPYsWNH83Fkr0OOa+Z2ZrkePHhwNE9+UYZ8zZiv3RLPHT58uNmhkdn2+XpPyclfz30o14ClpaXB+MCBA6OvV76vta9p0RqQHU618yDXwPzd1fj73/++4nVvo3bi0JfvXe5TOc55qs6B2ceRv5t/q5VB3evUWI08x8l5J4/nvXv3jh7f9edF55Y57+fzWMtj9lWuhXU85bZT72tql8paPs7UylDPfTnf1/zd1jVJ7kOZv57re3bTtI7ZPEZzvV+r9aL1WvqfFLAR1blp6vVgrgH/bYx7fXjZXZZz/FzOuf03CQAAAAAAmAUfagAAAAAAALPgQw0AAAAAAGBzdWp8/vnn6/tIADawzDPMHNkff/xxNIO+l2OcGcvvvffeijKlF2Uf5riVy79eebd//etfV9ypIe+WjW4t80Rb99XLgG/NOc/dvXv3xc/fffdd87aZk53zSr39999/P9j2q1/9qvm7T548af7t2u+RGfHZg3Hq1KnBOHsz6u/XeXPR65mPK8dT8tHTavPTp3R75Lxd95PsO/nwww8H48xPz/c2O5jqerKenSPffvvtYJzrwlwyfZlmSu9Fb3/cqLJTI8+zcj7NXox6fpivV86tOe/nuehGOd9aTVdFbx6akkU+Ncd8ynhqX0dve13/e3nqed2Q+0G9r9z/cpx9XLn/5nlJtV5rxl/+8peX/t2NcgwAjJnaqfGssX7ktjzPz/UhrwvqetKa718339QAAAAAAABmwYcaAAAAAADALPhQAwAAAAAA2FydGn/+85/X95EAbCC9DO9Wx0ZmnvfGmX+b26fkw2Z+cyvDtpXZuxp/+tOfmo+pPuZW3wash1eZz7+aDOlehmr259Tunnv37g223b9/v9mpkRmrtW+il8uejyvnr7x9zYx/5513BtuyEyLzvfM518eWf/fOnTtvTDElk7aXf76WGfG9fag+73yf873p5fnn9ny910v2m8BW7RHJufjBgwcvfr5+/XrzttnHsVHOp6Z2Eq1mLp6SPb6azoxF910fZ2vbonHvvuv5+dRz9bx9nW+zl6X3uDeCf/zjHyvezzfi4wdYy/OG/02Y5/K2ee2UPbB1nGvJRppffVMDAAAAAACYBR9qAAAAAAA
As+BDDQAAAAAAYHN1avztb39rZqRPyf/dSPlbAGuhzmu9OS7zX/P2Ncc8sw5//vnnwTjzDVeT4b9WPv/888FY3i1b1WqOx6kZ3HWu6M0bvXlk7H5f5jm+/fbbo38r87yzEyI7H7Lfo973avPjW/npq81Hb22fmtuez/Ott94a7cDIbpXsrsjbm49hbfW6kHIOzC6gOh/k/JidRNmJs1FM7dRozZe9uXbK3Lvaeb01Xm3vUj62Os59KP8nk/tBbq9rcPZr5Rqb6/9GuMa4fPnyYOwaA9hKplxz/N/E7tUc1/VmI8z/Y3xTAwAAAAAAmAUfagAAAAAAALPgQw0AAAAAAGAW/u9/Kwwb3LFjx4rvdCPnbQGsh9ZU2spA7+Ugt3LzV5LXvBovm0W7ffv2dblf2Mqm5JL3MlJfpcxvfffdd1/8vGvXrsG2999/fzBeWloajLdt27biLqOpHRutTPO17NDoPe7ee5fPq/aO5FqS3SmZl/7w4cNmvn+ra2Ut5/XV9qHAXGRPUPZi1Pkxr7/z3CrPJedqSt/EaufiKfc1dW6ecl+9+bLVrdRbz3PNzX2uzrd5TfHvf/+7Oc41ZTXz/sv+rvUCYGv5X2e98E0NAAAAAABgFnyoAQAAAAAAbK74KV/1A1gf+VXxVhxVxk2tZ6yMr4YD6znfvf3224NtGa/Sis1Y7/ipVpzX1CiR1vbeffUioGokV8ZzpYwa+de//tXcPoX4KZguI6Ra4zw3nOtxM3WumDJ/9ra3bt87n57yuFcbs9p63HkdkLfN/STHdQ3uRWxlpO1axse6xgBgJcRPAQAAAAAAm4IPNQAAAAAAgFnwoQYAAAAAADALOjUAWEjeLbCR+oV6+d+t+5pqNX1Fq8kdX83vZh5/vgb5nHp9HVPo1ABgpVxjALASOjUAAAAAAIBNwYcaAAAAAADALPhQAwAAAAAA2FydGgAAAAAAAK+Tb2oAAAAAAACz4EMNAAAAAABgFnyoAQAAAAAAzIIPNQAAAAAAgFnwoQYAAAAAADALPtQAAAAAAABmwYcaAAAAAADALPhQAwAAAAAAmAUfagAAAAAAAG/Mwf8H71hTAst2I5QAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "show_design_gallery(dataset, problem, n=8, seed=SEED)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 6 — Explore the dataset interactively\n", + "\n", + "**Drag the sliders** to filter designs by condition range. This is the dataset your generative model will learn from." + ] + }, + { + "cell_type": "code", + "execution_count": 87, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b436e2c3c6084c0da018e6a249e36c25", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HTML(value='

Explore the dataset — drag sliders to filter by condition

')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4943216cde4543099009c1189602749e", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(FloatRangeSlider(value=(0.15, 0.4), continuous_update=False, description='volfrac', layout=Layo…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e5454d8cb5624b19900ed1c66c0ad5b0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "interactive_condition_explorer(dataset, problem)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 7 — Render a single design\n", + "\n", + "EngiBench problems have a built-in `render()` method that draws the design with physics-aware styling." + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAmkAAAF5CAYAAADET73UAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjEsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvc2/+5QAAAAlwSFlzAAAPYQAAD2EBqD+naQAAXrhJREFUeJzt3Ql4U1X6P/A3SfedUvaygyyCgGyyKyKIDgIzyKosKgqIozLwk4qAqFBARVBZ1BFQBwEXcGTEKusgICqLMOxUlrIVSind1+T+n/f4T0l6btLcpGmT9Pt5nvu0OclNTrZ735zlPTpFURQCAAAAAI+ir+gKAAAAAIAMQRoAAACAB0KQBgAAAOCBEKQBAAAAeCAEaQAAAAAeCEEaAAAAgAdCkAYAAADggRCkAQAAAHggBGkAAAAAHghBGoCXevXVV0mn01VoHR566CEaP348ebuxY8dSgwYNyv1xU1NTKTQ0lDZv3lzujw0Ang9BGoCTVq9eLYIk8xYUFES1a9emfv360bvvvkuZmZnky/bs2UM//vgjvfTSSxVdFY9069Ytevrpp6latWoiELvvvvvo4MGDVrepWrUqPfXUUzRz5swKqycAeC4d1u4EcD5IGzduHL322mvUsGFDKiwspOTkZNq5cydt2bKF6tWrR99++y3dddddbnn8oqIisXFwWBEGDRpEubm59MMPP5AvtKTx+3b+/PkyuT+TyUQ9evSgw4cP07Rp0ygmJoaWLVtGFy9epAMHDlDTpk2Lb3vixAlq2bIlbdu2jXr37l0mjw8AvsGvoisA4O369+9PHTp0KL4cFxdH27dvp7/85S/0yCOPiJNwcHBwmT+un5+f2CrC9evX6bvvvqMVK1ZUyON7uq+++or27t1LX375JQ0ZMkSUDR06lO644w6aPXs2ff7558W3bdGiBbVq1UoE/QjSAMASujsB3IBPttyFdeHCBfrXv/5ldd3JkyfFiTs6Olq0gnGAxy1ulrhVbs6cOaLFhW/D3WLdu3cXLXT2xqRxy9bf//530XITHh4ugsTLly+L2/HtS+6bmJgoWpGioqIoMjJStAzm5OSU+vw4QONWvD59+miu95EjR8RjNmrUSNymZs2a9MQTT4jxWZbMdTx9+jQ99thjon7cdcivK3cAcKvUwIEDKSIiQtzH22+/bbU/t4zx/uvXr6eXX35Z3Ia7Hfk14X0daQ1bvHgx3XnnnaKeNWrUoGeeeYbS0tIcCtL49n/961+Ly7juHKj9+9//pvz8fKvbP/DAA7Rp0ybxvAAAzBCkAbjJ448/Lv7yuC2zY8eO0T333CNa16ZPny4CCw4cuOtw48aNVgEKBzs8jun999+nGTNmiO7TkmOaSuLg57333hMD+hcsWCBa8B5++GGbt+eggcfOxcfHi/+5NYcftzTcSsQBWP369a3KHak3B2xnz54VASHXdfjw4bRu3TpRZ7UgZdiwYSJgmj9/PnXu3JneeOMNETxxYFOnTh3xPJs0aUJTp06lXbt2SfvPnTtXBJU8do4DWH58Di45oLWHAzLuquzWrRstWbJE1HfNmjVizCEHo/YcOnSI7r77btLrrQ+xnTp1EkEwB56W2rdvL8aw8ecDAKAYj0kDAO1WrVrFEYXy22+/2bxNZGSk0q5du+LL999/v9K6dWslLy+vuMxkMildu3ZVmjZtWlzWpk0b5eGHH7b7+LNnzxaPb3bgwAFx+YUXXrC63dixY0U5377kvk888YTVbQcPHqxUrVq11OfevXt3pX379lK5I/XOycmRytauXSvqs2vXLqmOTz/9dHFZUVGREhsbq+h0OmX+/PnF5WlpaUpwcLAyZsyY4rIdO3aI/evUqaNkZGQUl3/xxReifMmSJcVlvF/9+vWLL//000/iNmvWrLGqZ0JCgmp5SaGhodJry7777juxP9+Ppb1794ry9evX271fAKhc0JIG4EZhYWHFszxv3rwpxqqZW69u3LghNu7m49aZM2fOiK5Jxt2
P3KrCZY5KSEgQfydNmmRV/txzz9ncZ8KECVaXebA71ycjI8PuY/FtqlSpIpU7Um/L8Xl5eXniNeDWRabWUsizH80MBoPoHuYWtyeffNLqcZs1ayZa6EoaPXq06Po1467mWrVq2U17wWPJuHuVW+vM7xNv3OLF7+mOHTvIHm6lCwwMlMrNkzxKtuKZX0t+DAAAMwRpAG6UlZVVHCDw+C8OLnhMFY9Pstx4MLl5QD7jGaPc/cUDzVu3bi263Xgslz08/o2713imqSXuCrSFuyLVggVHxl2pdU06Um8OVp9//nkxZosDNn7+5jqnp6eXWkcOnjjY4XF3JcvV6m05k5LxODV+TezN5OQgk+tSvXp16b3i99T8PtnCz6vkuDNzUGq+Xu21rOi8dwDgWTC7E8BNLl26JE705iCJx1UxHjvFLWdqzLft2bMn/fHHH2KQOY9p++c//0nvvPOOmE1p2bLkKm6ZUlPaAHYej6YWEDlSb25J5DFtHMC1bdtWtEzxa/Pggw8Wv0al1dHZejuK68EBGo9BU8PBmj3cUnf16lWp3FzG+fQsmV/LkoEnAFRuCNIA3OSzzz4Tf80BGc9mZP7+/tKsSDU8+5MHq/PGrTccAPHAfFtBGg/i5+Di3LlzVq1H3IJX1po3b05ff/215npzMML5wHhywaxZs4r30dKtq1XJ++ZAjl8Te/nrGjduTFu3bhWTBpxJn8LB508//STeD8vJA7/88guFhISIlkZL/J6Z03EAAJihuxPADXjs2euvvy668UaNGiXKuGXm3nvvpQ8++EC1lSUlJaX4/5LpKLi1iVvZ1LrQzMzBICdNtcQzKMtaly5dRMBVcgxYafU2t4CVbPHi2Zru8umnn1qt/sDpMfj15/x2tnBrn9FoFO9hSZx6hLt07eFxb9euXaMNGzYUl/F4Mx7rNmDAAGm8Gie45e5aTvcBAGCGljQAF33//fci9xmfvPnEzAEap3ngli3Of2a5IsDSpUtF3jAer8VrXnLrGu/z888/i+5RzlDPOAM9B3Q8UJ1bpvbv3y+Ci8mTJ9usB9/2b3/7mwh4OFjiwfj//e9/i9M9lOV4J07rwYl0ubWJlz4yK63enNOMW9YWLlwo0lhwCg3uFjW3JLkD14Nfc27Z49eaXx8OHO2tOdqrVy+RgoNTk/z+++/Ut29f0QLKrXIcaHFKDnOSWjV8Hb/+/JjHjx8vXnGAAz+1FCf8eeHgDWPSAMASgjQAF5m77QICAkRAwAEYBwJ8gracVWgOYjhw4RM15yTjYIpb2Nq1a2fV/cf5vDjA4wCGW6E44OP8YDyOq7RWI07aunbtWpF3jbtVOZkrz3wsy+WjeNA/5zX74osvrII0R+rN2fZ5xikHrNyixgEQB7olx2mVFU5ky5MXOODiFrX7779fBEzc7WgPj6PjYJNbPvk+OCjlRdg5sS53g9rDLYY8e5SfN6/jyrM5O3bsKN5zfi8scYB/9OhRt7YmAoB3wtqdAD6OW4I4COSVD8xdr2WBx1xxqxkHGSVnUHoCXnGAk+paLs3kiV544QWRhJe7PNGSBgCWMCYNwIeoZdHnFhoevM7djGWJc6pxKxh3XYJzuCWVZ8ByayMCNAAoCd2dAD6EAyZukeEWJO6e425E3rhLsm7dumX+eHzf4DxOZcIzYAEA1CBIA/AhXbt2FYPQeVYin/w5ESynv+A1NAEAwLtgTBoAAACAHTxu9M033xQ9FZzChydmDRo0qNRxsVOmTBFL5XFPxiuvvEJjx44lLTAmDQAAAMCO7OxsatOmjZiV7ghOK8SpinjoCU/e4glCnND7hx9+IC3QkgYAAADgIJ7kU1pL2ksvvUTfffedSK9jNnz4cJEIOyEhwdGHQksaAAAAVD75+fmUkZFhtdlb1UULTlBecvk/XhWGy71y4kD3Af+t6CoAAEA5Co2OlMrq3hErlbW7W154vmuTG1aXm13ZKt0mZ9sWqSxpzympLPWovMxXwc1CG7WGkh4ulF/
T8vKdv3VyaC1+mzFCWgFk9uzZYrKVq5KTk0XSb0t8mQNBTpXk6JrAmoM0Xn9u5cqVIhrkSjDOcM6zynhAXLVq1bTeJQAAAIBmOn/n8wvGxcWJgf2WSq6rW9E0BWm//fabaK7j5VS4Ge+OO+4Q5bweHi99Mn/+fDEorkOHDnbvh5sTSzYpmowFpDcEOPMcAAAAoBLS+zkfpHFA5q6gjBuvODayxJd5/WJHW9E0B2m83t6jjz4q1rQrmR2b5x9MmDBB3Ka0PldeQ69kE2PdpmOoXrNxWqoDAAAAlZjO3zOH1nfp0kWs32uJc1hyuRaant3hw4fpxRdfVF2+hMv4Op5q6kgTY3p6utUW26Ts1hQEAACAytGSpndy04KTg3N8Y45xOMUG/5+UlFQc14wePbr49txodfbsWfq///s/sb7xsmXL6IsvvhBxkhZ+Wpvvfv31V2revLnq9XxdyYFyjjYxoqsTAAAAPNH+/ftFzjMz81i2MWPG0OrVq0WCW3PAxho2bChScHBQtmTJEoqNjRXr9PKQMbcFaVOnThVrAHLG3fvvv784ION+1m3bttFHH31Eb731lqYKAACA7wuOCJPKajWsJZU1bxEtlbWuly2VNcw6YnVZOfk/6TYZSdZjgljuzVypzJhrtFFr8OWJA1rce++9YliXLRyoqe1z6NAhcoWmIO3ZZ5+lmJgYeuedd0TTndH45wfbYDBQ+/btRSWHDh3qUoUAAAAA3D1xwBtoTsExbNgwsRUWFop0HIwDN39/f3fUDwAAAKBCW9IqitPJbDkoq1VLbqoGAAAAKA96tKQBAAAAeB6dwbeDNM9MMAIAAABQyaElDQAAylRQWKhUVqtRbamsRSt5GcEOd8gLXLcu2i+VBR7da3U59X+npdvcPJcqleVcke/fmGuSysA76H28JQ1BGgAAAHglnR5BGgAAAIDH0Rl8e9QWgjQAAADwSnp0dwIAAAB4Hp2Pd3f6djshAAAAgJdCSxoAAJTpTM6aKjM572zz51rPlu5pkSeVtcvfI5UZftsplV377ajV5eT/XZVuk54or/lZlFEklYH30qO7EwAAAMDz6BCkWTtx4gTt27ePunTpQs2bN6eTJ0/SkiVLKD8/nx577DHq3bt3qffBt+XNkslYQHpDgNbqAAAAQCWl0/v2qC1Nzy4hIYHatm1LU6dOpXbt2onLPXv2pMTERLpw4QL17duXtm/fXur9xMfHU2RkpNV2KXGNK88DAAAAKuHEAZ2Tm88Faa+99hpNmzaNUlNTadWqVTRy5EgaP348bdmyhbZt2yaumz9/fqn3ExcXR+np6VZbbJNRrjwPAAAAqIRj0vRObj4XpB07dozGjh0r/h86dChlZmbSkCFDiq8fNWoUHTlypNT7CQwMpIiICKsNXZ0AAAAALoxJ0+n+jD71ej0FBQWJrkqz8PBw0SoGAAC+xz84SCqrVre6VNbyLsdmct6du0sq0/0iD5m5vFv+8Z/8v2tWlzNP5dioNfgynZd0W5ZLS1qDBg3ozJkzxZd//vlnqlevXvHlpKQkqlWrVtnWEAAAAMDGxAFnN59rSZs4cSIZjcbiy61atbK6/vvvv3dodicAAACAq3Q+3pKmKUibMGGC3evnzZvnan0AAAAAHOItEwCchWS2AAAA4JV0Pt6S5h2dsgAAAACVDFrSAABAojawOjKmilTWsJk8k/PuZrfHLpu1Kdjn2EzOXYelsisHk6WynPPybFGofHReMgHAWQjSAAAAwCvpfLy7E0EaAAAAeCUdgjQAAAAAz6NDkAYAAADgeXQ+PibNt58dAAAAgJdCSxoAAEiCwkKkshr1YqSyFk0DpbK2QYeksoB9P0lll/ce9aqZnIZguV3DEGyQykxFilSmFJocvJ1cBrYhmS0AAACAB9JhTBoAAACA59FhTNptBw8epHPnzhVf/uyzz6hbt25Ut25d6t69O61bt86h+8nPz6eMjAyrzWQs0F57AAAAqNQtaTonN58L0sa
NG0d//PGH+P+f//wnPfPMM9ShQweaMWMGdezYkcaPH08rV64s9X7i4+MpMjLSaruUuMb5ZwEAAACVjs7HgzRN3Z1nzpyhpk2biv+XLVtGS5YsEYGZGQdqc+fOpSeeeMLu/cTFxdGUKVOsyh4c/ou2mgMAAAD4ME1BWkhICN24cYPq169Ply9fpk6dOlld37lzZ6vuUFsCAwPFZklvCNBSFQAAAKjkdD4+Jk1TkNa/f39avny56Ors1asXffXVV9SmTZvi67/44gtq0qSJO+oJAABuovczOLSYer0GkVJZixppUlnUuQNS2Y39crqNa0fdm25D5y93aen95DK/CPlU6B8uvyb+of4OpYAwFsgLzOenFUplBTflMiNScGii85Juy3IJ0hYsWCAmCnCAxmPR3n77bdq5cye1aNGCTp06Rfv27aONGze6r7YAAAAAlaQlTdOzq127Nh06dIi6dOlCCQkJpCgK/frrr/Tjjz9SbGws7dmzhx566CH31RYAAADATKdzfvPFPGlRUVE0f/58sQEAAABUFJ2Pd3f6djshAAAAgJfCigMAAADglXQ+PiYNQRoAQCUXGBIslVWtFSWVNa0vdy01KjgmlRUcl2dy3jgtz+TMOJND7lz8PLiOvPh7UJRcFhgup4AKipRfE0OAfMpUTPLC6fmZ+VJZpiFLKjPmYtF1V+l8vLsTQRoAAAB4JR1a0gAAAAA8jw4taQAAAACeR+fjQZpvtxMCAAAAeCm0pAEAAIB30vt2WxOCNACASr5OZ3hVeU3O2PoRUlmj6HSpLCRRZU3Ok+elsvSLmWU6a7HkbM6QekHSbcJqhMpl1cOkspCq4VJZQKS8r94gv3bG/AKpLPuavJ5pQbZ8u7xgeRZoofwygR06L1k5oNyCtKtXr4pF1nfv3i3+1+v11KhRIxo0aBCNHTuWDCofYgAAAICypvPxljRNz27//v1iMfXNmzdTYWEhnTlzhtq3b0+hoaE0depU6tmzJ2Vmlv4zID8/nzIyMqw2k1H+lQEAAABgb+KAs5szli5dSg0aNKCgoCDq3LmzWL/cnsWLF1OzZs0oODiY6tatSy+++CLl5eW5J0h74YUXxANwsPbTTz/R6tWr6fTp07Ru3To6e/Ys5eTk0CuvvFLq/cTHx1NkZKTVdilxjZaqAAAAQGWn1zu/abR+/XqaMmUKzZ49mw4ePEht2rShfv360fXr11Vv//nnn9P06dPF7U+cOEEff/yxuI+XX37Z8aenpYJcqccff7z48siRI0XZtWvXqEqVKrRw4UL66quvSr2fuLg4Sk9Pt9pim4zSUhUAAACAcrNo0SIaP348jRs3jlq2bEkrVqygkJAQWrlypert9+7dS926dROxEre+9e3bl0aMGFFq65vTQVr16tXFODQzDs6KioooIuLPAaZNmzalmzdvlno/gYGBYh/LTW+Ql+UAAAAAcEd3p9rQKy5TU1BQQAcOHKA+ffoUl/GYfL78888/q+7TtWtXsY85KOMeRx4u9tBDD7ln4gBPDpgwYQK9+eabItB6/fXXqVevXqKvlZ06dYrq1Kmj5S4BAKAcBQTJsyCja8jrdDauK08Ca1h4QiorOHNaKks7nyqVZV9wfByOIwzBhlLX3wyJDpHKQqvLM1mDVGa3+qmsZ6oqI8vpwexYp9N1Op3zEwd46NWcOXOsyrhr8tVXX5Vue+PGDTIajVSjRg2rcr588uRJ1fvnFjTer3v37qQoimjU4hjKbd2db7zxhmjiGzBgAN1///0i4rRs5uOpsPykAQAAANxOr3N6Uxt6xWVlZefOnTRv3jxatmyZGBq2YcMG+u6770QDl1ta0sLCwsSgN56ZwBEhX7bE/a0AAAAAnp6CIzAwUGyOiImJESnGeJiXJb5cs2ZN1X1mzpwpxvE/9dRT4nLr1q0pOzubnn76aZoxY4boLi2NU8+Op56WDNAAAAAAfDEFR0BAgEg5tm3btuIyk8kkLnfp0kV1H854UTIQM+e
S5e5PR2DFAQAAAIBScPqNMWPGUIcOHahTp04iBxq3jPFsTzZ69GgxLt887IuHhvGM0Hbt2omcaomJiaJ1jcsdTfyPIA0AAAC8k678VhwYNmwYpaSk0KxZsyg5OZnatm1LCQkJxZMJkpKSrFrOOG8sj9Xnv5cvX6Zq1aqJAG3u3LkOP6ZOcbTNzc26D/hvRVcBAMDnx+tE1YyRytp0aSyV9e8krwLT/uJ6qSzl35ulsrM7/pDKshJzyVk6f7lrKiDa3+pyeF15JmdkrDxrM6yGXOYfKs94NQTKs0VNRqNUVpCRI5VlXpHX7ky7cEsqyzgtzww15prI2zxceKrCHjtj0QtO7xsxZTF5OrSkAQAAgHfS+/banQjSAAAAwCvpdM6twektfDsEBQAAAPBSaEkDAAAA76T37bYmp5/dpUuXKCtLHvRYWFhIu3btcrVeAAAAAB6RJ81rWtJ4gfWBAweKRUO5L5jXpuIlD8zJbXmB9fvuu0+scQUAABVHr5KLKSRcngVZs7r1TElWJ+C8VGZMOufQOp25l9UXqS6rdTr/LLNuY1BMcqKCvHR5RqmpSD43GQLkU6HeT35MY0GRVFaQLc+CzUmVZ3zmJOf5xEzOypyCoyJofnbTp08XeUB++eUXkR/k+PHjIihLS7s95dhDsnoAAACAL9M7v3anT7akbd26lTZu3Cgy7rI9e/bQo48+Sr179y5eLsHXZ1sAAABAxdOhJc0arxJfpUqV4su8OCmv7N6gQQPRonb9+vVS7yM/P58yMjKsNpNRbjIGAAAAqKw0B2mNGjWiI0eOWJX5+fnRl19+Ka77y1/+Uup98LpWkZGRVtulxDVaqwIAAACVmd63uzs1B2n9+/enDz/8UCo3B2q8llVpY9Li4uJEi5zlFttklNaqAAAAQCVf+kzn5OaTY9J4YdCcnBz1O/Pzo6+//losJGoPd5HyZklvkNdJAwAAALDJx8fAaw7SOBCLiIiwm6Jjzpw5tHLlSlfrBgAALvBTSS0RFimn4KgqrzlOUZmXpLK8q/KY46zr2U6nllBbOD24jvUPeBZeW65zcFRwqSkz1NJtFOYWSmW5t+T0GIpJfg5FeSoLrGfL95efIpcVZsrpO6AM6L2jRcxZZf7sOE/aJ598UtZ3CwAAACC3pDm7+WJL2rfffmv3+rNnz7pSHwAAAABwJkgbNGiQyINmb3IA8qQBAACAu+nQ3WmtVq1aIi+ayWRS3Q4ePOiemgIAAABY4mS2zm5eQHMt27dvL9bttKW0VjYAAACAMqH37Txpmrs7p02bRtnZ8mwesyZNmtCOHTtcrRcAALjI4C8vnB5RxXpWJKsWLq/4Eph6VSpLvX57jWaz/HTnV4tRm8lZo1WMVBbTvI68b81qVpd1Ks/VmCMvsF5wK0Muy5TTShVmyzM+c1KzpLKMK5kOze5UCtF44Q46L2kRK7cgrUePHnavDw0NpV69erlSJwAAAIDSeUmLmLN8OwQFAAAAqCwtaQAAAAAeQefbbU0I0gAAAMA76Xy7u9PlII1ncu7cuZMSExNFeo5+/fqRv8oATgAAAIAypUdLmpWHHnqI1q5dS5GRkWIJKL7866+/UkxMDKWmptIdd9xBu3btomrVrGfeAABA+Sb1DAwJksqiouQZldFB8oxH3Y1kqSwnNdPpdTr9IuTTTXRjedHQWh3ukMqCO3WWyjJrt7C6nO8fKt0mqECub1CuPEM1NEd+/vo0eZ3SgvPnpbLUI4lSmcl41aG1OzHjswzofDtI0/zsEhISKD8/X/z/yiuvUGZmJv3xxx90/fp1unDhgpjdOWvWLHfUFQAAAOA25Emzbfv27bRw4UJq2LChuBwbG0sLFiyg8ePH292PgzxzoGdmMhaQ3hDgSnUAAAAAfIZT7YTmtTnT0tKocePGUjLbK1eu2N0/Pj5edJdabpcS1zhTFQAAAKisdFgWSjJ27Fj661//SoWFhXTu3Dmr65KTkykqKsru/nFxcZSenm61xTY
Z5UxVAAAAoLLS6ZzffLG7c8yYMcX/Dxw4kHJyrJfT+Prrr6lt27Z27yMwMFBsltDVCQAAAJrovaNFrNyCtFWrVtm9fvbs2WQwGFypEwAAaKRXOe6GhIdIZVWj5cN+jE6eyViUkiKV5aTK6zYXZcizFtWExsqzSqs2rSmVBbdrJ5Wda/SAVHbsRi2ry2mZ8snaT+VUFBYsz0aNjJKfQ+06N6Wy+rG/S2VV5YdQXeMz+6LKOqI35TU+QSOdd7SIOavMQ1BOyzFp0qSyvlsAAAAAaxiTpj1I++STT8r6bgEAAAAqFc3dnd9++63d68+ePetKfQAAAAAcgzFp1gYNGiRScPByUKWl6AAAAABwG51vxxuaQ1Ben3PDhg1kMplUt4MHD7qnpgAAAACVaEya5pa09u3b04EDB0T6DTWltbIBAEDZ8wuQD+dhkfLszugIed+ILHmtyfwb8uzGnJt5Dq3daQiWT4AhMXJdIhrWlsoy67eRyn5Plm/36yHrmaYpV1XW31RZ+icwWE73FBElr3HatLE885Qay+mlmjQ4LZWFVJXX8zQE35DvD1yn8+2WNM1B2rRp0yg7W56GbbniwI4dO1ytFwAAAIB9GJNmrUePHnav5wXWe/Xq5UqdAAAAACo9lxZYBwAAAKgoCro7AQAAADyQzre7O516dv/5z39o1qxZtGfPHnF5+/bt9NBDD9GDDz5IH374YVnXEQAAAECG2Z3WPvjgA5o8eTK1adOGlixZQkuXLhXLQA0bNkys2fnCCy9Qbm4uPf/88+6pMQAAOLR2Z1iEPGsxOkxeLzIgXV6782bKLaksP73AoboYglXWEY2WZ3cG1LJef5MlBdaXys5flmeQXjidbF3fK/JaozqVQeVqMz6DwuS6ETWUSprUqiKVNY6UV+/0C/JXuT9wBwXdndbeffddWrZsGY0fP17M4uQWtLfffrt4vc577rmHFi5ciCANAAAA3EvnHS1iztL87M6dO0f9+vUT/993331kNBqpZ8+exdffe++9dOHCBbv3kZ+fTxkZGVabyejYLzQAAACAykBzkFa1atXiIOzKlStUVFRESUlJxdfzddHR0XbvIz4+niIjI622S4lrnKk/AAAAVFbc3ens5ovdnbzSwJNPPkljxowRi62PHj2a/vGPf5BerxerDXCy2759+9q9j7i4OJoyZYpV2YPDf9FeewAAAKi89L7d3ak5SFuwYAEVFBTQunXrqGvXrvTee++JcWocvBUWFopEttxSZk9gYKDYLOkN8lIdAAAAALZg4oDKigIl02xMnTpVzPjkIC08PLws6wcAAABQKScOlFky26CgILFdvHiRZs+eTStXriyruwYAgFIEhgRLZeGR1j0WrGpwjlSmPy+n4MhPz3ZoMXU1agusB0XK9aNIOaXFrQL5h37aTXliWW6G9fMoynd+8plO5USflyOnKik0BlS6IMHTKT7++pf5s7t58yZ98sknZX23AAAAAJWK5pY0nixgz9mzZ12pDwAAAIBjynlMGifwf/PNNyk5OVkk9edx+Z06dbJ5+1u3btGMGTNow4YNohGrfv36tHjxYpFj1i1B2qBBg8QsTkVRbN6GrwcAAADwle7O9evXi8wUK1asoM6dO4tgi/PGnjp1iqpXry7dnidZPvDAA+K6r776iurUqSPSlEVFRTn8mJqfXa1atUREaDKZVLeDBw9qvUsAAAAAj86TtmjRIrHa0rhx46hly5YiWAsJCbE5Bp/LufXsm2++oW7dulGDBg1EBgxugXNbkNa+fXs6cOCAzetLa2UDAAAAqOgF1vNVVj/iMjXcKsaxT58+fYrLOD8sX/75559tDg/r0qULPfvss1SjRg1q1aoVzZs3T6zU5CjN3Z2crDY7W571Y9akSROxpicAALiHwV9ewDs4TJ49GV1Fvl20/02pzJgiz+7MvuH87E41ej950XXyl2dLmlR+4/v5y+0JgSHWi8eblEjpNorKnSkm+TmERIRJZeGR8uL
0IQHyjE9dQZ5UZipy/CQMFZcnLT4+nubMmWNVxtkpXn31Vem2N27cEMEVB1uW+PLJkydtjtHfvn07jRo1ijZv3kyJiYlinXNOV8aP45YgrUePHqXmUePmPAAAAABPFaey+lHJRPuu4CFgPB6Nc8saDAbRE3n58mUx8cBtQRoAAACAR9A5P3FAbfUjW2JiYkSgde3aNatyvlyzZk2bY/j9/f3FfmYtWrQQM0O5+zQgoPSVlnw7CxwAAAD4LIV0Tm9acEDFLWHbtm2zainjyzzuTA1PFuAuTr6d2enTp0Xw5kiAVuZBWlpaGn366adleZcAAAAANlNwOLtpxV2jH330kUjYf+LECZo4caIYo8+zPdno0aNFF6oZX8+zO59//nkRnH333Xdi4gBPJHBUmXZ3JiUlicpyRQEAAADcSld+HYLDhg2jlJQUmjVrluiybNu2LSUkJBRPJuAYiGd8mtWtW5d++OEHevHFF+muu+4SedI4YHvppZfcE6Tx9FR7MjMztdwdAACU0UzJkHCV2Z2RcpdOVE6yVFZwPVUqy03LlcqKMoo01tRi3zyVtTUz0qSiao1uSWVNG9WRynS6+laXc1TW2nQ0G1RoqDwLtmkjeaxSraAkeee0FKmoIFt+rqYipKbytNmdzpg8ebLY1OzcuVMq467Qffv2kbM0BWmcJdfeagKcHw2rDQAAAAC4TlOQFh4eLtag4uUQ1Jw5c4aeeeaZUu+Hk8WVTBhnMhaQ3uDYQDoAAAAApRy7Oz0+SLv77rvFX1t50LilzZHVBtQSyNVtOobqNftz8B0AAABAqXy8905TCDpy5EgKCpKzMJtxrhBHErTx7If09HSrLbbJKC1VAQAAgEpOKcfZnR7fksYLi9rDMxwcCdLUEsihqxMAAAC0UDTmO/M2WHEAAMDL6PXyiSkgSJ6hGBEir1MZmC3P5My6Jc/Mz7ulvtC0I9TW+FRbCzT/3HmprF7t36QyQz15Lcw7akRbXS4wyjNeTQ6ewIMM8szQGoFXpLLal+W65Saek8qyUzLLdN1TsM1bWsScpfnZ5ebm0u7du+n48ePSdXl5eUhmCwAAAFDeQRpnzOV1p3r27EmtW7cWEwiuXr1afD2PLTNn3gUAAABw+8QBnZObrwVpnCW3VatWdP36dTp16pRIycFrU3GWXQAAAIDypJDe6c3nxqTt3buXtm7dKlaD523Tpk00adIk6tGjB+3YsYNCQ0PdV1MAAACAClxxoLzptY5H8/O7Hdfx6gLLly+nAQMGiK5P7g4FAAAAKA8KUnDc1rx5c9q/f78Yl2bp/fffF38feeSRsq0dAAA4xGCQTzr+BnlGoT4nTyorys0v09mIhZnyGp8Zl+W1n28cSZTKYhT5cevVkxsA6kVaz+4kvTy70+FxR0aVNUnTVNYzPSvPRr1+5KxUlnElq0zXPYXKm4JDUyg5ePBgWrt2rep1HKiNGDHCoRUHAAAAAKAMgzReKWDz5s02r1+2bBmZTMgFAwAAAO6noLsTAAAAwPMomDhQut69e9OFCxfK4q4AAAAAHB6T5uzmcy1p3377rWr5rl276D//+Q/VrVtXXMYEAgAAAHA3xUu6LcslSBs0aJBIu6E2OeC5554Tf/l6o1FeZw0AAMqXak+QST4+K2U8llgplM8RWUnyrNJrhutSWUF2gVQW+sdlqcwQKK9V6gidXu/Q81eb8ZpxJV0qS78ol2WdzXWqbgAlaQpB+/XrR/3796fk5GQxQcC8GQwGOnr0qPgfARoAAACUB8XHuzs1BWnff/893X///dShQwfRvems/Px8ysjIsNpMRvnXEwAAAEBlnd2puZYvvviiGJvG63g+88wzlJOTo/lB4+PjKTIy0mq7lLhG8/0AAABA5aWgJU3Wtm1bsfIAjz/j/7UmsOV8a+np6VZbbJNRzlQFAAAAKinFx1vSnM6TFhwcTCtWrBCtary4Oi+47qjAwECxWdIbApytCgAAAFRCipe0iFVYMltOt4G
UGwAAAABlS3N7X25uLu3evZuOHz8uXZeXl0effvppWdUNAAAAwO6KA85uPheknT59mlq0aEE9e/ak1q1bU69evejq1avF1/PYsnHjxrmjngAAAABWFEXn9OZzQRrP6GzVqhVdv36dTp06ReHh4dStWzdKSkpyXw0BAAAAVCikd3rzuTFpe/fupa1bt4pJArxt2rSJJk2aRD169BCTB0JDQ91XUwAAAIBKNHFAr3U8mp/f7biOU3AsX76cBgwYILo+uTsUAAAAoDwoPp4nTVNLWvPmzUV+NB6XZun9998XfzHLEwAAAKACgrTBgwfT2rVr6fHHH5eu40CN1+7k3GkAAFDxVPOM6w0OLTruCp2/3EoRVi9IKqvRsrpUFtO6kVQWWK+u/CCR0aU+L/UV5lUYi+SytFSpKOLseaksIPSsVGYyygvHZ57SvjoPlM5bWsScpde6UsDmzZttXr9s2TIRqAEAAAC4m4LuTgAAAADPo3hJKo1yaUn7+uuvnVpQHQAAAKCsKT7ekqYpSHv00UepVq1a9PTTT9Mvv/zivloBAAAAlAJBWglTp04VMzy7dOkiEtsuXryYUlPlAZYAAAAAUI5j0p555hmaOXMmHThwgD7++GOaM2cOTZ8+XaTfGD9+PD3wwAMuVAcAAJxhNMqTtgqN8u9wU4A8y9IvOFAqMwQ7P+PTP1w+tUTUiZDKYu5qItel231SWVL1u6WylHzr2Z0FRnl2p8nB1pIggzy7s0bTFKmsdp3fpDJ5fipRbpo8LCj3aoFUVpShMqsUNFG8pEXMWU5/C9u3by9mc/LanR999BGlpKTQgw8+SA0bNizbGgIAAABUwrU7NbWk8QoDJQUFBYm8abwlJibSqlWrSr2f/Px8sVkyGQtIbwjQUh0AAACoxExoSbtNUc2MeFuTJk1o7ty5pd5PfHw8RUZGWm2XEtdoqQoAAABUcgomDtx27tw5qlatmssPyklx09PTrbbYJqNcvl8AAACoPBR0d95Wv379MnnQwMBAsVlCVycAAACAC7M7c3NzxczO6OhoatmypdV1eXl59MUXX9Do0aO13i0AADjIZJKHnhTkFUplGTlyZ0l+TFWpLDAqXCoLipJnfGZRrkP1U5sZGhoTKj9uwwZS2bnqHaWyfUl1pLLEc9bjmnNy5Odfygid23UL9ZfKmjaSJ8F1rSvPIK3XRF7PM/T4BanMEHxLKivKcKx+YJu3dFuWS3fn6dOnqUWLFtSzZ09q3bo19erVS8zuNONuy3HjxrmjngAAAACVqrtTU5D20ksviQS2169fp1OnTlF4eDh169aNkpKS3FdDAAAAgEo4cUBTd+fevXtp69atFBMTI7ZNmzbRpEmTqEePHrRjxw4KDZWbswEAAADcQfGSFrFyaUnj8Wh+fn5WedOWL19OAwYMEF2f3B0KAAAAUB5MLmw+15LWvHlzsW4nj0uz9P7774u/vDQUAAAAAJRzkDZ48GBau3atWF2gJA7UTCYTrVixogyqBQAAtpiKjFJZTqY88/Jmujy98Va9mlJZzeryjM/gKsFSmV9EltPrT/oFqaRZiqgiFaXkRUllZ85az+Rkxw9Yz6DMzZbXy1RUZsEqJrkNJSQiTCozGuWUUw2rxUhl9arIuUMDQuXnqvfz7W65iqKgu9M6Ce3mzZttXs9reXKgBgAAAOBuCiYOAAAAAHgeBS1pAAAAAJ5HKeeWtKVLl1KDBg0oKCiIOnfuTL/++qtD+61bt05Mthw0aJCmx0OQBgAAAF7JpDi/abV+/XqaMmUKzZ49mw4ePEht2rShfv36idyx9pw/f56mTp0q0pWVS3cnR44///wzJScni8s1a9akLl26UKdOnZy5OwAAAIBylZ+fL7bS1hY3W7RoEY0fP754ZSWeKPndd9/RypUrafr06ar7GI1GGjVqFM2ZM4d++uknunVLXh6szII0jhb/9re/0Z49e6hevXpUo0YNUX7t2jV68cUXxeoDX3/9NVWvXl1TJQA
AwHHGQnmdytwsldmdafLtbhZGS2V1qlV3aK1NQ3Cq0+tPqs1IpcICqUiv0gtVVChPSMvPybO6nH0znZylNgs0M936/llOgfyaKAFBUpneT17jE9xDcWECQHx8vAieLHEr2auvvirdtqCgQKxbzhMozfR6PfXp00c0Wtny2muviZjoySefFEGaVpqCNF5dgKPCEydOULNmzayu42WinnjiCXr22Wfpyy+/1FwRAAAAgPKaOBAXFye6Ly3ZakW7ceOGiH/MjVNmfPnkyZOq++zevZs+/vhj+v33352uo6Yg7YcffqBdu3ZJARrjsnfffZfuvfdep5oYTcYC0htU8ugAAAAAqFCcGFvmSNemqzIzM0VO2Y8++kgso1kuQRo/mYyMDLuVcuQJqzUx1m06huo1+7OfFwAAAKA0pnLKd8aBlsFgEMO7LPFlHpdf0h9//CEmDPCymcV1/f95ZHl5Te59bNy4cdnO7hw2bBiNGTOGNm7caBWs8f9cxoPpRowY4VATY3p6utUW22SUlqoAAABAJacoOqc3LQICAqh9+/a0bds2q6CLL/PESbVlNP/3v/+Jrk7zxktn3nfffeL/unXrln1LGs9s4EoNHz6cioqKRKXNA+o4MuSBcW+99ZZTTYzo6gQAAABPNWXKFNFQ1aFDB5HNYvHixZSdnV0823P06NFUp04d0VvIedRatWpltX9U1J9LnpUsL9PuzuXLl9OCBQvELAfLFBwcYUZERGi5OwAAKCP5OfLszsx0ec3L1NwQqcxURZ7dGRipNrvTsc4XY648GzMvXa4fpadJRVEBmVJZlWh5fczgCOvnkZuVLd1Gp5frq1eZPhoYKs/QDArxl8r8DSrLHipYCtFbx6Rpxb2JKSkpNGvWLBH/tG3blhISEoonEyQlJYkZn2VJc540ntm5b98+0bzHzXY8q2HJkiX02Wef0WOPPUa9e/cu0woCAAAAqCnvNTgnT54sNjU7d+60u+/q1avdG6RxxDhw4EAKCwujnJwcMQ6Nm/c46y53g/bt25d+/PFHBGoAAADgdqZybEmrCJra5Tgp27Rp0yg1NZVWrVpFI0eOFNl3t2zZIgbP8XXz5893X20BAAAAynnigFcEaceOHaOxY8eK/4cOHSpSbgwZMqT4el764MiRI2VfSwAAAACVMWnObt5A8wg3XsVd7KjXi9kLkZGRxdeFh4eLdBoAAAAAUI5BWoMGDejMmTPFl3m9Kl7D04xnNtSqVcvFKgEAAAA4lszW2c0baJo4MHHiRLF2lVnJXB/ff/89Jg0AAFQAk8Wx2SwrQ14k/GZWmFRWECGn4Aiu9mdOJ0uBkXI+yxySH8OYK9cl52aO/LhXr0plNfIvSGUN6livl8iu32Gd5T0kPMSxdBvB8nOIiJJTcDRtLKcgqRnyZ9opS7qzKovO58kL24N7KF7SbVkuQdqECRPsXj9v3jxX6wMAAADgEG+ZAFBuedIAAAAAPIEJLWkAAAAAnkfx8SBN8+xORVHo3LlzYu1O87qd69evp08//ZRu3LjhjjoCAAAAVDqaWtJOnTpF/fr1o4sXL1KjRo3E6gKPPvqoWBqKg7eQkBDau3cvNW3a1H01BgAAAKDyXxbKo4O0l156SSwBtWnTJlq5ciU9/PDDdMcdd4hUHLwsFAdsvCoBr+MJAADlp6jgz94NS1np8ozKmxnyvhk15NRJ0THRUllItDwLMiM4y6EF1nNuyHXJOHdFKou5cFgqa9ssVirz72hd57RMedaqn0EqorBguW6RwfJrVztUnslZ/9bvUlnB+fNSWU6qY68JuM6E7s7buJVszpw51Lp1a3rjjTdEC9rUqVPJ39+fAgMDafr06bRr1y731RYAAACgkqw4oKklLSsri6Kj//x1FRoaKjbL5LV169ala9eulXo/+fn5YrNkMhaQ3iDnrwEAAABQ4y3BVrm0pNWuXVusKmC2cOFCql79dhLElJQUqlKlSqn3Ex8fL5aTstwuJa7RWncAAAC
oxEyKzunN54K0Pn36iC5OyxUIeL1OM55IcPfdd5d6P3FxcWKNT8sttskorXUHAAAA8FmaujtXrFhh9/phw4bRmDFjSr0fHr/GmyV0dQIAAIAWio93d2pOZnvixAnat28fdenShZo3by5a1pYsWSLGmD322GNYuxMAwEPW7szJlGdUpt6UZzLeUOS1O6tXqyaVhVSV17P0i7gllRlzC6Sy7Ev5cl3OyDMoQ2seksoa+ss/4mNqt7C6nF9FrltQQaZUFpibJpUZbslTXvXnrjs0kzP1SKJUln5ZftzCTPl1B9cpCNJuS0hIoIEDB1JYWBjl5OTQxo0bafTo0SItB6fg6Nu3r+jyRKAGAAAA7mby8SBN05g0zoE2bdo0Sk1NpVWrVtHIkSNp/PjxtGXLFtq2bZu4bv78+e6rLQAAAIDFAuvObj4XpB07dozGjh0r/h86dChlZmbSkCFDiq8fNWoUHTlypOxrCQAAAFDJ8qRpXrtTp/sz+tTr9RQUFCTSZ5jxTE+eqQkAAAAA5RikNWjQgM6cOVN8mZeDqlevXvFlzqFmmdwWAAAAwJ1j0kxObj43cYDzohktZhC1atXK6vrvv/8ekwYAACqAYpLXhszPyZPKbt2SZ1nezJPXvVRiakplIVVv58U0MwQ79lu/KEOe3XjzD7nnxT/4tFQWk5UtlQXX/J/1ZX9/6TbGnFypLE9lJmeByizYwuw8h9bkzLgiz+TMvig/rlLoJVGBl1F8/GXVFKRNmDDB7vXz5s1ztT4AAAAADkGQBgAAAOCBTAjSAAAAADyP4uNBmubZnQAAAADgYS1pvPQTp97w//8DNP/44w9auXKlmNVZv359evLJJ6lhw4buqisAAABAMZX5MpU3SOvXrx9NnjxZJLDds2cP3X///dSsWTNq0aIFbd68md555x3aunWrWNcTAAAqlrGwUCrLSJNnHqZkyute5kfJ6ZSCq1eRygIj5XU1c0ieGakm97I80/Qa3ZDv76Y8+zI46pLVZb2fQbqNqUhez7QoX55lWpRvdGi2bFGefLuCbPk1VqPzlzPcY8an6xQffwk1BWmHDh0S63SyGTNm0KRJk2jRokXF18+cOVMsDbV79+6yrykAAABAJQrSNI1J4xxp5jxpJ0+epDFjxlhdz0tGHT582KFu04yMDKvNZCzQWncAAACoxEw+nsxWU5DWuXNn2rRpk/i/cePGUkD2+++/U3R0dKn3Ex8fL5aTstwuJa7RWncAAAAAn6Wpu/ONN96g/v37U3Z2No0YMYL+8Y9/iGWieEzaqVOn6N1336W4uLhS74dvM2XKFKuyB4f/or32AAAAUGkpLvV3yuMEvTpI4wkBvPQTB1i//PJnUDV37lzxt3bt2vTqq6/S888/X+r9BAYGis2S3iAPPgUAAACorGPSNCez5UCNF1ZPSUmhs2fPkslkEouq8+LrAADgOYoK5JmMWenyTMlUeQlNulUjViqrVqu6VBZWXZ4ZmhEsr3FpzDU5NLsx57w8M7Tgpvw8AqKtn0dwtPUPfxYUKZcFhMoNAkGRwVKZ2mxRo8rrWZAtj6fOCZVf46zL8qza/GsYi+0qk4+n4NCczPbEiRO0atUqunnzphijVqVKFVqwYAE98cQTtH37dvfUEgAAAEClJc3Zzeda0hISEmjgwIEUFhZGOTk5tHHjRho9erRIy8Etan379qUff/yRevfu7b4aAwAAAJD3zNIsl5a01157TeRBS01NFa1pI0eOpPHjx9OWLVto27Zt4rr58+e7r7YAAAAAlYSmIO3YsWMiFxobOnQoZWZmitUHzEaNGkVHjhwp+1oCAAAAlIDuzhJ0uj+nrPIankFBQSLHmVl4eDilp6uMQAUAAAAoY4rJt1NwaGpJ4xmcnBfNjGd51qtXr/gyL7TOMz0BAAAA3M3k4ysOaGpJmzhxYvGyUKxVq1ZW13MONUwaAADwDCaL47VZTqacHiL5urxI+OUGtaWymvUaSmVVGpyQytLO3ZL
KshLlFBSOMubKz8OYa93GoNPrHEqtEVbjdu+PmX9okFRmCAxw6PUsyJBfT//gNJV95aigKKPIoVQlYJu3dFuWS5A2YcIEu9fPmzfP1foAAAAAOMTkLU1i5ZUnDQAAAAA8cOIAAAAAgCdQfLshzbkgjVcW2L17N129elXM8mzUqBE98sgj1LRp07KvIQAAAIAKBGkWrl+/TgMGDKD9+/eL4IxXGWjXrh1t2LCBXnrpJbHw+sKFC91XWwAAAID/z+TjUZqmIO3vf/871a5dm9LS0igwMJCmTp1KGRkZImjj1jVOcFunTh16/vnn3VdjAABwiKKy+nRuRrZUlnw5Qyq7cLOqVNa8TgupLPKOY3JZ4nWpLPtCnkMLrKtRu52j+5akM8hDsf1C5VmgfiFymRq9QV6IvTBbfq6B4fIsUL8I+RRszMWi61ooPj4ZVtPEAU6x8cYbb1BERIQI0ngJqLVr14pAjVNvLF68mJYvX+6+2gIAAAD8f4qiOL35XJDGgZl5xQGxs14v8qYVFf2Z66Vr1650/vz5Uu8nPz9fBHaWm8mIXw8AAADguZYuXSoS+/OKS507d6Zff/3V5m0/+ugj6tGjB1WpUkVsffr0sXt7l4O07t2706xZsyg7O5sKCwvp5ZdfFpMGoqOjxfUpKSmiIqWJj48Xy0lZbpcS12iqOAAAAFRuJpPzm1br168XY+9nz55NBw8epDZt2lC/fv3EeH01O3fupBEjRtCOHTvECk1169alvn370uXLl90TpL311lv0+++/U1RUFIWGhtLq1autujdPnDhRvAC7PXFxcWKNT8sttskoLVUBAACASk4px+7ORYsW0fjx42ncuHHUsmVLWrFiBYWEhNDKlStVb79mzRqaNGkStW3blpo3b07//Oc/xYTLbdu2uWfiALeaHTlyhPbs2SO6LO+55x6KiYkpvt6RAM3cbcqbJb1BXoIDAAAAwBZXFhzgOIa30uITVlBQQAcOHBCNTJZDvrgLk1vJHJGTkyN6Ic29j27Jk3bhwgW6dOkSdenSRQRoJ0+epCVLlogn+thjj2HtTgAAD1aQJ888vHlNXmvzj4tRUtm5avLszlZN75DKqjQ4K5WlX8x063qe+ZnyuOacm/KMSr2f3IGkGOW+r4DIAodmchrzCxyaVatG7yevN6rz15XZTNbKQHEhSuOhV3PmzLEq467MV199VbrtjRs3xBj8GjVqWJXzZY6DHMGpyjhDBgd2bgnSEhISaODAgRQWFiYiwo0bN9Lo0aNFvyw34XFf648//ohADQAAANxOcSF+5VYxHmNmSa0VrSxwNox169aJcWo86cAtY9Jee+01mjZtGqWmptKqVato5MiRon92y5Ytoo+Vr+OKAAAAAHiywMBAkVLMcrMVpHHPocFgoGvXrlmV8+WaNWuWOp6fYyNuxLrrrrs01VFTkHbs2LHicWecuDYzM5OGDBlSfP2oUaPEmDUAAAAAdzOZFKc3LQICAqh9+/ZWg/7NkwB4+JctvArT66+/LnoiO3TooPn5aR6TZs6TxgPmuMmO02eYhYeHi5maAAAAAO6mlGNSWu4aHTNmjAi2OnXqJBL4c0oynu3JePgXr7rEY93YggULRNqyzz//XORWS05OFuU8ZIy3Mg/S+EHOnDlDjRs3Fpd5RkO9evWKr09KSqJatWppuUsAAAAAj18WatiwYSIfLAdeHHBxag1uITNPJuAYiBuwzDhFGc8KtexxtDc5weUgbeLEiWJ2g1mrVq2kZaMwaQAAwHOZiqxnRbLMVLkH5NIFeT3Psw3kGZ+N6lmfB1iV5vJst7TzqWW6nqcx1/rsnJOkcl9G+b4KcwqlspxUeT3ToEh57U5DgJ9DMznzM/NVygpKfQ7g+QusT548WWxqeFKAJUdWYCrTIG3ChAl2r583b56r9QEAAABwiLesweksTRMHAAAAAKB8aJ44AAAAAOAJTK4sOeCrLWk87dRWOQ+cAwAAAHA3RXF+87kgLSMjQ+R
H48XVeTYDz3CwnEjAsx4aNmzojnoCAAAASMtCObv5XHfnzJkz6fDhw/TZZ5/RrVu36I033qCDBw/Shg0bRKK3yjCIDwDA1+TnyGtopl6V1/M8c+F2XkyzRu3ulMpatZRnd8acvSyVZV2TZ1WmH5fLHKE2U1Jt9mjuZXnmpV+EfCr0D5fXGvUP9ZfK9AZ5rU1jgTyDNj+tsNT1RxnW6fTs2Z0e3ZL2zTff0AcffCByfjz11FO0f/9+0Xo2YMCA4pXkzcluAQAAANzJ11vSNAVpHJDVr1/fai2rrVu3iuWhHnroIbHouiM4oOOuU8vNZJRzyAAAAABUVpqCNF5d4MSJE1ZlvBQULxqam5tLgwcPduh+eMkEXk7KcruUuEZbzQEAAKBSU9CSdlvfvn1p1apVUjmvQfXDDz+ItTwdERcXJ9b4tNxim4zSUhUAAACo5EyK85vPTRyYM2cOXblyRSrnyQLcorZlyxYxkaA0gYGBYrOkN/w58QAAAADAEd7SIlYuQVqVKlXEVhIHXDzrs0WLFtSrV6+yrB8AAFTAep7pN9KksqTz8tqdJ2pHS2WxDdpLZdEd5NmduWnyTM7CHPl2OeflWZqOUJspaVQry5XHROdfk+/PECzPDDUEG6QyU5H8GEqhyaHbgTaKj8/u1BSkTZkyRbWcc6XNnz+fqlatKi4vWrSobGoHAAAAUElXHNAUpC1evJjatGlDUVFRUiTLEwo4yS1ScAAAAACUc5A2b948+vDDD+ntt9+m3r17F5f7+/vT6tWrqWXLlmVQJQAAAIDS+Xp3p6bZndOnT6f169fTxIkTaerUqVRYKGdQBgAAACgPClJwWOvYsSMdOHBAJLbt0KEDHT16FF2cAAAAUO4UHw/SNHV3WuZF++STT2jdunXUp08fq0XWAQAAAMqDyce7O50K0syGDx9O3bt3Fy1rlstFAQCAd8vLkpf5u5Z0Qyo7US1UKqsWIS+6fk8bOaVHrexsh9KBXKHkMkvL4Qq1RdzVyqD8KF7SIlYhQRqLjY0VGwAAAAB4UJAGAAAAUBEUdHfad+7cOUpMTKRatWpRq1atyqZWAAAAAJU8ma2m2Z2TJk2irKws8X9ubi4NGTKEmjRpQv369RNJbjl3mvl6AAAAAHdSfHx2p6Yg7YMPPqCcnD8Hk77++uv0yy+/0NatW0VgtmvXLkpKSqK5c+e6q64AAAAAVt2dzm4+191p+aQ2bdpECxcupPvuu09c7tatm1izc9q0aRQfH1/2NQUAgHKjmEwOLbp+7lSQVBYWVkcqC251j1R2d2d5YXN5TyKdXs7FmRxovQJ65il5NipUzs+pL9GczNacuDY5OZnuuusuq+u4y/PixYtlVzsAAACASkrzxIGZM2dSSEgI6fV6unLlCt155+18OKmpqWKR9dLk5+eLzZLJWEB6Q4DW6gAAAEAlZfKSsWXl0pLWs2dPOnXqFB06dEgspn7hwgWr6zdv3mwVtNnC3aGRkZFW26XENdprDwAAAJWWgjFpt+3cuVO1nJ8sd4OOHDmSxo4dW+r9xMXF0ZQpU6zKHhz+i5aqAAAAQCWn+HhLWpkksw0MDKTDhw9TixYtHL49b5bQ1QkAAABaKAjSbivZ+mXGC6zPnz+fqlatKi7zLE8AAPAthbnyepkpF69LZcf95VOLwVBTvsMWPaWidl0MUlkdP3+pLCD0qNXl5NCr0m3SE+W1QYsyiuR6gNcyKb49u1NTkLZ48WIxgzMqKkrq7jxx4oSYNGCe/QkAAAAA5RSkzZs3jz788EN6++23xeoCZv7+/rR69WoxmQAAAACgPCg+3t2paXbn9OnTaf369TRx4kSaOnUqFRYWuq9mAAAAAHZgWagSOnbsSAcOHKCUlBTq0KEDHT16FF2cAAAAUO4UpOCQhYWF0SeffELr1q2jPn36iIkDAAAAAOXJ5OPLQrmUgmP48OHUvXt30bJWv379sqsVAAB4hbwseQZl8tkrUpleZf1NohpSidK8u1T
W5h45RVONkGCry35Bh6Xb+AVdksrSTmZKZQU3MXTHWyle0m1ZYXnSYmNjxQYAAAAAHpbMFgAAAKC8KciTBgAAAOB5FHR3WuPln3gM2r333kuNGjWiY8eO0dKlS8XgvcGDB1O/fv3cU1MAAAAACwjSLGzYsIGGDh0qVhzIz8+njRs30qOPPipScRgMBnr44Yfp008/FQutAwAAALiTCd2dt82dO5fmzJlDM2bMEOk3OEDj9TxnzpwprueVCN58800EaQAAlZjajM+rajM+DXKqToO+mny7Zh2ksrtaWad+qpKTK92mIEsuy88okMqMuXIaKWOub5/8fYXi4y1pmpLZnjp1ikaNGiX+HzZsGGVnZ9OgQYOKr+fuzsTExLKvJQAAAEAlo6klLTw8nFJTU6lBgwZ069YtKioqEpfN+H9OdFsa7irlzZLJWEB6g5wLBwAAAECN4uPJbDW1pPHqAs8++yytWbOGxowZQ3379qW4uDg6efKkaGWbNm2aSG5bmvj4eIqMjLTaLiWuceV5AAAAQCWjYO3O29566y2KiIigCRMmUEFBgVhsnScNtGzZUmxXrlyh+fPnl3o/HNilp6dbbbFN/uxGBQAAAHA0T5qzm891d9aoUYN+/PFHq7L33nuPXnzxRcrJyaHmzZuTn1/pdxkYGCg2S+jqBAAAAC1MXtIiVqHJbDk44/xpjgRoAABQ+eRmZEllV89dlcr8/A1SWXBwjFQW3uQuq8vNml+XbhNxOVkqu3XxllSWc8V6jDTD7E7wBJqiKk63ocZoNIpuzqpVq4rLixYtKpvaAQAAAFTSiQOagrTFixdTmzZtRDJbS4qi0IkTJyg0NJR0Ol1Z1xEAAABA4i0TAMpl4sC8efPEIH9OXrtjx47ijVcbWL16tfh/+/bt7qstAAAAQAVNHOBlMDkNWVBQEHXu3Jl+/fVXu7f/8ssvxZAwvn3r1q1p8+bN7gvSpk+fLmZ0Tpw4kaZOnUqFhYWaHgwAAADAG1NwrF+/Xgz7mj17Nh08eFD0LPJ65devy+Mh2d69e2nEiBH05JNP0qFDh0Tyf96OHj3qniCNdezYUSywnpKSItJv8IOhixMAAAAqYkya4uSmFY+3Hz9+PI0bN06kHVuxYgWFhITQypUrVW+/ZMkSevDBB0UO2RYtWtDrr79Od999N73//vvuC9IYryrwySefiHxnnOCWJw4AAAAAeIv8/HzKyMiw2kquhmTGuWG5gYpjHjO9Xi8u//zzz6r7cLnl7Rm3vNm6vSrFRRcvXlS++eYbJSsry9W7UvLy8pTZs2eLv9jXPft6W32xb/ns6231xb6e/ZjYt3z29bb6eprZs2dzn6fVxmVqLl++LK7fu3evVfm0adOUTp06qe7j7++vfP7551ZlS5cuVapXr+5wHV0O0spSenq6eBH4L/Z1z77eVl/sWz77elt9sa9nPyb2LZ99va2+niYvL088B8vNVuBZUUEass8CAABApROosvqRLTExMSKTxbVr16zK+XLNmjVV9+FyLbcvszFpAAAAAJVFQEAAtW/fnrZt21ZcZjKZxOUuXbqo7sPllrdnW7ZssXl7NWhJAwAAACgFp98YM2aMyGzRqVMnkeA/OztbzPZko0ePpjp16lB8fLy4/Pzzz1OvXr3o7bffpocffpjWrVtH+/fvpw8//JC8MkjjZkfOP+Jo8yP21b6vt9UX+5bPvt5WX+zr2Y+JfctnX2+rr7cbNmyYSD82a9YsSk5OprZt21JCQgLVqFFDXJ+UlCRmfJp17dqVPv/8c3rllVfo5ZdfpqZNm9I333xDrVq1cvgxdTwwzS3PBgAAAACchjFpAAAAAB4IQRoAAACAB0KQBgAAAOCBEKQBAAAAeCAEaQAAAAAeqEJTcNy4cUOsHs+LjfJ0VsaZeHna6tixY6latWrkaa5evUrLly+n3bt3i/95um2jRo1o0KBBos6ckRgAAADAVRWWguO3334Tq8GHhISIVeLNeUZ4yQTO0Ju
Tk0M//PCDSBqn5sSJE7Rv3z6Rubd58+Z08uRJWrJkiVjB/rHHHqPevXur7nfw4EGqUqUKNWzYUFz+7LPPaMWKFSK/Sf369Wny5Mk0fPhw1X05CR3XtUmTJhQcHCyCy5EjR1JBQYGoa8uWLUXOlPDwcPJ1aWlptGnTJpG8z1H8nqxatUq8zrZ8/fXX1L9/f/G5gNJxxmvLvDyW5ZcuXaJ69eo5dD/nzp2jxMREqlWrVqk5fA4fPkwHDhyge++9V/xAOXbsGC1dulQ85uDBg8X32pfw95tzG6n9mBw4cKDIRG5LamoqHTlyhNq0aUPR0dHih+nHH38sjlOPPvootWjRQnU/fu+CgoLEUjTsp59+sjpOPfvsszazlufm5tLatWtVf0jef//9VJnw+eSDDz4Qea0cxa8VH885p5UtnJx0yJAhdo9lvvJ5YvhMVSClgnTu3Fl5+umnFZPJJF3HZXzdPffco7rv999/rwQEBCjR0dFKUFCQuFytWjWlT58+Su/evRWDwaBs27ZNdd+77rpL2bJli/j/o48+UoKDg5W///3vyvLly5UXXnhBCQsLUz7++GPVfbt166a8+uqrxZc/++wz8TzYzZs3lbZt24r7sufixYtKZmamVF5QUKD897//VRzBr8/27duVDz/8UNm0aZPY1x6+zcyZM5Xdu3eLy/za9O/fX+nXr5/ywQcfKM74/fffFb1er3rdv//9b9WN35f333+/+LIanU6nREREKOPHj1f27dvnVN1++eUXZfHixcr06dPFxv9zmSOv69mzZ5XCwkJxOT8/X1m3bp3yySefKCkpKTb34wV5Ld+DxMRE5eWXX1Yee+wxZcaMGeI+S8PvyZw5c5QJEyYokyZNUt566y3l9OnTNm/PCwE/+uij4vPPi/Xy+1tUVFR8fXJyss33Z+LEicWfwZycHOVvf/ubuC2/9vz3vvvuU/2Msq+//lq8j1WrVhXfFf4uRUVFie8ef574ujVr1ijO4O8Qv9ZacF3Pnz9v9zZfffWVkp2d7VSdzpw5ozRq1Ei8zr169VKGDh0qNv6fy5o0aSJuo4Y/c5GRkeJ1rVKlirJ//36lYcOGStOmTZXGjRuLY8+BAwdU9+UFm/l7y7755hvxvjzyyCPKSy+9pAwePFgs3Gy+vmR969evLz4TdevWFY/98MMPi+MUvzf8mTF/vm0xGo02yy9cuKA4ij/3P/74o/K///3PoeMJH3f/+OMPcfno0aPic/rMM88oCQkJDj+m2v3a+h4sWbJEdePXKS4urviyGn5d+Xb8uedjBB8rfPHzVFafKXBehQVp/IE8ceKEzev5Or6Nmi5duoiTH1u7dq34wPJJ0YxPzA888IDqvvxBNh/U27VrJwIdS3yCadmypc19zQcR80GLP9x8QmR8QKpdu7bqvleuXFE6duwovhz8wX788cetToT2TqocUN26dUv8n5qaKr4c/EXhwJT3ad68uXL9+nXVfVesWKH4+fkp7du3F8EPB5bh4eHKU089JQ6A/Jw4iFELAuxtP/30k836mk/2/NfWZm/f1157Tbw3/P+dd96pvPPOO8qNGzeU0ly7dk3p3r272I8PKnxg4o3/5zK+jm+j5uTJk+J2XC8+UPIJhl+z0NBQJSQkRImJibEZNPEB9ssvvxT/cyAcGBgofgwMGzZMPA/ef+/evTbrzHXkx+X3if/y49asWVN8TqZNm6a6H/8YuOOOO8Tj8o8NrjsfOM0nC/488XNWw49hfh34ZBQbGyuCfg5kuP58wOfvkJq7775beeONN4q/exyg8ftlxsEl/1jxlcCfT8IDBw4Un/mSuIyv69u3r819+XuWkZGhvPnmm+J15stm48aNUwYNGqS6L3/uzME9f9/nz59vdf17770nPltqxwr+Xpt//PJ+XMb489ugQQNl9uzZPhP4Hz582O62fv16u8cafk/4NbHcuLxOnTrifw6CbO27atUq8f7zOYDr/vzzz5cakHrb58nVzxR4cZD
Gb6y9X818HZ941PAB1/xrgwMlPrkdPHiw+Hr+otSoUUN1X/4y8S8QxgciPjFY4lYQDlzUcH3MrVHmwIu/rHxQYufOnbMZWI4ePVp8OX777TdxEOITcYcOHUTrQWknVS43n1T5YMhBpPkLxy1zfF/cAqOGb2sORPlEzPVbunRp8fV8oGnRooXqY/LBzdZmL9B68MEHRcBQMiDi9+nYsWOq+6g9V36f+PnyAZsDHz6BcCBsC58YOIDngKskLuvatasyZMgQ1X354Mi/LI8cOSJaVPk14TJuIeOWsgEDBoiWMVufR3MAxwHbiy++aHX9K6+8Ilph1XAgxwdWPkDz40yePFl8Vsyta/x5VQui69Wrp+zYsaP4Mrf0cbDHB3i+H3snVMvXuFWrVsrnn39udT0HOxwA2jrY8+ec8UGbT1D8mpnxjxg+0arxxsCfjwX2Trz83G0dL/jH4/Hjx8X//Dni+lm26HKrBwcDarjFhIMM83HK/L/lcYqD/5K4zPLHBAft/B6Znyu3ovCx11cCf3ufi9KOUxx48P2a3yNnj1P8d8GCBeLHMj8W/xjnYy4HU97+eXL1MwVeHKTxr18+8fKBgU8K/AuXN/6fy/iDahlMlDwp8ofKjE8Kli1c3FJmK1jiE+2TTz4p/ueTPp9ALc2bN09p3bq16r78S4lPaty9ygcg/nV47733Fl/PzfJ8MFLDLWyWXyjziZ8PEtw65uhJtVmzZlKLwdatW23+4uPX0bKLgr9clgcJPuGqfTn5NeYDz86dO1U3Pojbqi9btGiRaBq3bELXevAzy83NVT799FPxWvNj2jog8OfAMlgviYM+WwEEt0oeOnRI/J+VlSXqwUGD2Z49e0RgZCtwMbcK848DtcDf1uPy68xdO2b82PwemX9pc8snv+dq72vJblQ+KXCQyl3+fJ29z5O55ZVbCC0f3/z9sXWi4BY+848c/oHB92UZLP7666/iNrYe19sC/1q1atnsBmLffvutuE1pAa3acYq/l7aOU/yDwRzUcGtSyW43/v5xN5faccayyystLU08f3PAwJ8Lft5qvDHw5x8x3E3Kn1m17bvvvrN7nNqwYYM4TnFLkqvHKbZr1y5lzJgx4jnx5u2fJ1c/U+DFQRrjvnxuXeIvhfnXD//PZdxMbQt3JXGgZMZBh2WfOH9RbAUtly9fFif5nj17KlOmTBEnI+4G464QLuOxbvzFVsPN9Tx+wFxfbpmxPFH+8MMPyhdffGHzC1ayu4zrzK0o/Hz4oOTISZV/BamdVG19SfgXLb8e5ufO92X5/Djg4tuUxAERB2m2cCBi61e1GQc93JLH4wv5F7UjBz/LX+RquAXVsmu75AGbn48tfALi2zgSzPIB0PKHQFJSks3XmIOihQsXiv/5M1GyhZjHRNkK8Dg4tHxNuFWWXwMO3BkfhNUelwM3tc8pf0Y5UGvTpo3dzxO3InCLH3+eSgYpfEDm4M3Wjxz+fv7rX/8SPzL4gM9jRzlI5dZKbkm01VrpjYE/d/lxCwY/Nrc+cKDCG//PZTwu1lZXD7esWI6N/c9//lPc6s74R6nad49xiwl/VrlV9fXXXxefR37t586dK8r4M8Gt4CVxgMDvAb8ffGwyd7mb8evMr6Eabwz8OYDk18eV49SlS5fE8+MfAlevXi2T4xT/yCo5lMYbP0+ufqbAy4M0M2665a5D3kobBM94kD9/QG3h5nZza5ka/iXAAyY5gOBfHhyYcdP+yJEjRXdkafgAb2t8hS3cOscn65LMgRqfxO0dAB966CExwJO/4CV/ifGX01b37rPPPit+IXF3Av8q5i8cf9k5yOWWP67XE088Ie3HBxhbg2YZH1gsJ1HYwgcRDgi4Djy2xNlfqI7gAff8PvKvY8sxH/w/l/GJmLsT1XALqGXL2bJly6y6KzhwsXWi4PFm3J3AB1f+Rc4nKW6h5XE0s2bNEq02tgJefk+5m5Zb0Pizz12tPCbO8r1Ve9znnnvOZjDE9eZAytb
niQ+4HJyYNw6OLPFBnG9j633n8Z58kOcAjcdK8mtqbgXj99kyuPX2wN88BodbNyxbAvl/LrP3fPj7wd13tvBj/vWvf7V5Pb+Ow4cPF2NIzT9iuZWJfwhs3LhRdR9+nhw0m+vK3wfL1mXuynz33XdV9/XGwJ+/19zabAsHfatXr1ZKwy143ItiHgvqzuNURX6eOMDS8nkq+Zkyj/e1bFmz95kCHwnSKoP/+7//szkglAM1bo62dYIaO3as1VaylZEHl/OBTQ2f/LmVkLsg+MTG4wl40CkHpvx4fOJ09mCjBXd3cABS2mPxL261Gb+O4C4ZHpvHz40PfByA88b/cxl3c/Ft1PAJpmSwYik+Pl4EyrZwoGZ5IDNvPEZEbUyZGbeUcYDIwQYfMDmgM88+ZvzrVm0sD598SrZWMPNrx4GavVZFNeZ9uU481lEL3qdki7YvBP6WuBWB32feHJmxWxoOMm19Hku+L/zaOPojlnGrfWnvR0kcbHtb4F/WuEWPv6/mscLu5E2fJ8vPlOVkEnC/CsuTVtkUFRWJ3G8RERE2r798+bJTeXeys7NFEl3Og+OovLw8Kiws9MmcbhkZGSKPl2UOovbt29t87R3NI8avL+cRsyclJYXOnj0rcobxbRs0aFDqffPnYs+ePSLX0T333FOcy8gZnGOJ85jZypfkifs6i/P0bd++neLi4qh69eo2b3fhwgWRL06n05Vb3bw19+GVK1fozjvvVL0+MzNT5Jns1auX5vvm7wR/RmJjYzXtw98NzoPp51ehedc9JiG6N+4LrsGyUOWEDzL2ggT+4M+ZM8ep+7558yZNmjRJ0z4ccHCAdvHiRXriiSdsJjDkL+Xx48dVg7xPP/3U5v1X1L6c5JgT4nKANGLECGrXrh198cUX9MILL4gTuj28Lyfb5cTIjP9OnDhRvD4cpNkL0Mz78nvRuXNnkTB5wYIFYt/SHpeDCE42yUmSOUCzfFxb+06ZMkV1MxqNNH/+/OLLnrSv2o8Lfs1mzJhB77//vkjU6SjelxN5ctJjfn/t7cs/fCwDNC2Py0EJv/dmnPy6W7duVLduXerevTutW7fOo/Z15TE54St/fm3h44WtAO25554TCVJt4RO6rQDN1r68DydWLi1A4/eQk2qbnxs/Z04szsHdyy+/LH4Ae8q+nBCdf8hs3rxZ/Eg+c+aM+AEZGhpKU6dOpZ49e4pgWI037gtloBxa68DFHFEVse+pU6eK84vx9TypgpvHzezN9KqofV1Jcuxt+/LrwzODLbuYeONyTgHA//PsYzUVtS+nNTFPiOCJGDxGkMfy8X78/Hksk61un5L78mfE2X21PK4rya8rYl9XHtOye5HHTfEgekdV1L7cjcpjrHhcJ48n4/15gDyPweUxZvxd4nGhnrKvKwnRvXFfcB2CtHJiKxmneeO8TVoTebpzX57MwCkPeBo+D6zm/3nGrHkGpL1gqaL2dSXJsbfty2Pk+HUpGcA5MpC+ova1HBs2atQoMWDZnKSZB6dzYDpixAiP2tfV5Nflva8rj8mvE6fz4VRDPMifx0jyWFmeqGRrFYKK3pfHc3IyXPMPTv5RwxMQLCcWWE7Eqeh9XUmI7o37gusQpJUTV5Nxlve+3LpgmauIB5vyoHyehcpfWHvBUkXt60qSY2/cl1MTcO6pf/zjH8UDgB0JlipqX8tgiZfGKTkDkHPR2ZrKX1H7upL8uiL2deUxLV8nfl95gpI54z+fhPnHg60liypqX7U8kJYTajhgtZWktSL2dSUhujfuC67DmLRywuOZNmzYIAaUq208lsST9uVxYZZjQXhMDw8cHTBggBiXcvr0aZuPWVH7mm/PeGArj7uLjIy0GlOTnp7uM/t27NhRTJDgyQodOnSgo0ePOjw4vqL2Nd+OxxaWHONXp04dcZ+etG///v3F54/x5++rr76yup7Hw/FYQk/Z15XHtOTv709Dhw6lhIQEMYB//PjxtGbNGmrWrJlH7cuTgsxjV3m
sFI+NtBzLeuzYMZuTSipiXx5oP2HCBPH8duzYQaNGjRLvU3BwsLj+1KlT4vOoxhv3hTJQBoEeOIDz/3AiQ2dyRFXEvjxeh5N92sq9xqkibLVoVdS+riQ59sZ9LXFXKbe48WvjSGtYRezLnzPOy8fdcDw2qmTewP/+9782l7WpqH1dSX5dEfu68pilpSrhVm1bqzNU1L6cj5DHf/Ealvw94eEB3OrOY/B43WJuIS25TFtF7utKQnRv3BdchyCtnPDJ1vJkrJbPzFZeq4rYlwe/mhfRVcM5x2wFhhW1rytJjr1x35I4txmvo8fvqVblsS8PPrbcOJmypalTp4rkrZ60r6vJrytiX2f34+DOkfVMPWlfHiLAWfP/8pe/iGMHB3T8w4GDJO765byStj6XFbWvswnRvXlfcB7ypAEAAAB4IIxJAwAAAPBACNIAAAAAPBCCNAAAAAAPhCANAAAAwAMhSAMAAADwQAjSAAAAADwQgjQAAAAA8jz/Dyly+8ecEBSPAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "fig = problem.render(design)\n", + "plt.title(f\"Design (sample {sample_idx})\")\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 8 — Break constraints deliberately\n", + "\n", + "### The EngiBench constraint API\n", + "\n", + "Every EngiBench problem declares **design constraints** — rules a design must satisfy\n", + "to be physically valid. The `@constraint` decorator wraps a function that `assert`s\n", + "what must be true; `check_constraints()` catches failures and returns a `Violations`\n", + "object.\n", + "\n", + "Each constraint is tagged with a **category** that tells you *why* it exists:\n", + "\n", + "| Category | Import | Meaning |\n", + "|---|---|---|\n", + "| **`THEORY`** | `from engibench.constraint import THEORY` | The constraint comes from **physics**. Values outside the domain are unphysical (e.g. negative volume fraction) but may not crash the solver. |\n", + "| **`IMPL`** | `from engibench.constraint import IMPL` | The constraint guards the **implementation**. Violating it causes runtime errors or undefined behavior in the solver (e.g. mesh resolution too small). 
|\n", + "\n", + "Constraints also have a **criticality** level:\n", + "- `Criticality.Error` — hard violation, design is infeasible\n", + "- `Criticality.Warning` — soft violation, solver may still run but results are suspect\n", + "\n", + "The `Violations` object returned by `check_constraints()` supports filtering:\n", + "```python\n", + "violations.by_category(THEORY) # only physics violations\n", + "violations.by_category(IMPL) # only implementation violations\n", + "violations.by_criticality(Criticality.Warning) # only warnings\n", + "```\n", + "\n", + "For example, in `beams2d` the volume fraction has a `THEORY` constraint (physically,\n", + "volfrac must be in [0, 1]) **and** a stricter `IMPL` warning (the solver works best\n", + "with volfrac in [0.1, 0.9]).\n", + "\n", + "---\n", + "\n", + "### Exercise: force a constraint violation\n", + "\n", + "A design is only valid if it **satisfies all constraints for its operating conditions**. Let's see what happens when we lie about the conditions.\n", + "\n", + "**Your task:** copy the valid `config`, change one **scalar** condition to an extreme value, and call `problem.check_constraints(design=design, config=bad_config)`.\n", + "\n", + "The function returns a `Violations` object — `len(violations) == 0` means no violations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 00-C\n", + "# Goal: force a constraint violation and inspect the result.\n", + "\n", + "# Find a scalar condition to perturb\n", + "scalar_keys = [k for k in problem.conditions_keys if np.asarray(config[k]).ndim == 0]\n", + "perturb_key = scalar_keys[0]\n", + "original_val = float(config[perturb_key])\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "bad_config = dict(config)\n", + "bad_config[perturb_key] = original_val * 10 # push it far outside the valid range\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "print(f\"Perturbing '{perturb_key}': {original_val} → {bad_config[perturb_key]}\")\n", + "violations = problem.check_constraints(design=design, config=bad_config)\n", + "\n", + "print(f\"\\n--- All constraint checks for {PROBLEM_ID} ({violations.n_constraints} total) ---\")\n", + "print(f\"Violations triggered: {len(violations)}\\n\")\n", + "if violations:\n", + " print(violations)\n", + "else:\n", + " print(\"No violations. 
Try a more extreme value.\")\n", + "\n", + "# CHECKPOINT\n", + "assert hasattr(violations, \"__len__\"), \"violations should be a Violations object\"\n", + "print(\"\\n✅ Checkpoint passed — constraint checking explored.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 9 — Simulate and optimise\n", + "\n", + "Beyond constraint checking, EngiBench problems expose **`simulate()`** and **`optimize()`** methods — the same solvers used to generate the dataset.\n", + "\n", + "| Method | What it does | Returns |\n", + "|---|---|---|\n", + "| `problem.simulate(design, config)` | Evaluate objective(s) for a given design | `np.ndarray` of objective values |\n", + "| `problem.optimize(starting_point, config)` | Run the full optimiser from a starting design | `(optimised_design, optimisation_history)` |\n", + "\n", + "> **Colab note:** some problems (e.g. `heatconduction2d`, `heatconduction3d`, `airfoil`) require a **Docker container** for their solver and will not run on Colab. Problems like `beams2d` and `thermoelastic2d` use pure-Python solvers and work everywhere." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simulate: evaluate the objective for an existing design\n", + "obj_values = problem.simulate(design, config)\n", + "print(f\"Objective values for sample {sample_idx}: {obj_values}\")\n", + "print(f\"Objectives defined: {problem.objectives}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Optimize: run the solver from a uniform starting point\n", + "starting_point = np.full(problem.design_space.shape, float(config[\"volfrac\"]))\n", + "optimised_design, history = problem.optimize(starting_point, config)\n", + "\n", + "print(f\"Optimisation ran for {len(history)} steps\")\n", + "print(f\"Final objective: {history[-1].obj_values}\")\n", + "\n", + "# Compare: generated design vs. 
dataset design\n", + "fig, axes = plt.subplots(1, 2, figsize=(10, 4))\n", + "axes[0].imshow(design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[0].set_title(f\"Dataset design (obj={obj_values[0]:.4f})\")\n", + "axes[0].axis(\"off\")\n", + "axes[1].imshow(optimised_design, cmap=\"gray_r\", vmin=0, vmax=1)\n", + "axes[1].set_title(f\"Re-optimised (obj={history[-1].obj_values[0]:.4f})\")\n", + "axes[1].axis(\"off\")\n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection\n", + "\n", + "Before moving on, think about:\n", + "\n", + "1. **The API contract** — what is *fixed* by the benchmark (design space, conditions, objectives) vs. what is *yours* to choose (model, hyperparameters, training strategy)?\n", + "2. **Constraints as a test** — why is it important that `check_constraints` exists as a separate function, rather than just training on feasible data?\n", + "3. **Simulate vs. optimise** — `simulate` is cheap (one forward pass), `optimize` is expensive (iterative solver). How might you use each when evaluating a generative model?\n", + "4. **What surprised you?** — anything about the design shapes, condition ranges, or dataset size that you did not expect?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Next\n", + "\n", + "Proceed to **Notebook 01** where you will train a generative model on this exact benchmark and produce new designs. The API you just learned carries over unchanged." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/solutions/01_train_generate.ipynb b/workshops/dcc26/solutions/01_train_generate.ipynb new file mode 100644 index 00000000..57a9a9d5 --- /dev/null +++ b/workshops/dcc26/solutions/01_train_generate.ipynb @@ -0,0 +1,1478 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 01: Train a Generative Model for Inverse Design\n\n**Can we learn to skip the optimizer?**\n\nIn Notebook 00 you saw that EngiBench bundles an optimizer with every problem.\nRunning that optimizer produces an optimal design — but it takes time. For\nBeams2D it runs in seconds, but for complex 3D problems it can take minutes or\nhours *per design*.\n\nGenerative AI offers a different approach: **train a neural network once on a\ndataset of optimal designs, then generate new designs instantly.** The trade-off\nis quality for speed — and the central question of this workshop is *how do we\nmeasure that trade-off rigorously?*\n\n### What you will do\n\n| Step | What happens | Key concept |\n|------|-------------|-------------|\n| **Prepare data** | Extract conditions and designs from EngiBench | The standardised data API |\n| **Train a model** | Fit a neural network to map conditions → designs | Supervised learning on design data |\n| **Generate designs** | Produce new designs for unseen conditions | Instant inference vs. slow optimization |\n| **Inspect results** | Compare generated vs. ground-truth designs visually | Setting up evaluation (Notebook 02) |\n\n> **Heads up:** We deliberately train a simple model with limited data and few\n> epochs. 
The results will be imperfect — **that is the point.** Understanding\n> *why* they are imperfect motivates the rigorous benchmarking we explore in\n> Notebook 02 and the discussion session.\n\n---\n\n### Exercise legend\n| Marker | Meaning |\n|---|---|\n| `FILL-IN CELL` | Your turn — edit the code between `START FILL` / `END FILL` |\n| `CHECKPOINT` | Automated check — if it fails, fix before moving on |" + ], + "id": "cell-0" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **Colab users:** click **File > Save a copy in Drive** before editing so your changes persist." + ], + "id": "cell-1" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. Install dependencies" + ], + "id": "cell-2" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab / local dependency bootstrap\nimport subprocess, sys\n\nIN_COLAB = \"google.colab\" in sys.modules\nFORCE_INSTALL = False # Set True to force install outside Colab\n\nif IN_COLAB or FORCE_INSTALL:\n def _pip(pkgs): subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n _pip([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n _pip([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n try:\n import torch\n except Exception:\n _pip([\"torch\", \"torchvision\"])\n print(\"Install complete.\")\nelse:\n print(\"Using current environment. 
Set FORCE_INSTALL=True to install here.\")" + ], + "id": "cell-3" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## The inverse design problem\n\nTraditional topology optimization works like this:\n\n```\nConditions (volfrac, loads, …) ──► [ Optimizer (iterative) ] ──► Optimal design\n ⏱ seconds to hours\n```\n\nA **learned generator** replaces the optimizer with a neural network:\n\n```\nConditions ─┐\n ├──► [ Neural network ] ──► Approximate design\nRandom noise ─┘ ⏱ milliseconds\n```\n\nThe noise input lets the model produce **diverse** designs for the same\nconditions — useful for exploring the design space. But the designs are only\n*approximate*: the network has to generalise from training examples rather than\nsolving the physics directly.\n\n**Key question:** How close can a learned generator get to the optimizer? That\nis what benchmarking measures." + ], + "id": "cell-4" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 
Imports" + ], + "id": "cell-5" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "j", + "s", + "o", + "n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "r", + "a", + "n", + "d", + "o", + "m", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "y", + "s", + ",", + " ", + "o", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "p", + "a", + "t", + "h", + "l", + "i", + "b", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "P", + "a", + "t", + "h", + "\n", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "m", + "a", + "t", + "p", + "l", + "o", + "t", + "l", + "i", + "b", + ".", + "p", + "y", + "p", + "l", + "o", + "t", + " ", + "a", + "s", + " ", + "p", + "l", + "t", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "u", + "m", + "p", + "y", + " ", + "a", + "s", + " ", + "n", + "p", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "t", + "o", + "r", + "c", + "h", + " ", + "a", + "s", + " ", + "t", + "h", + "\n", + "\n", + "#", + " ", + "W", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + " ", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "(", + "v", + "i", + "s", + "u", + "a", + "l", + "i", + "z", + "a", + "t", + "i", + "o", + "n", + " ", + "+", + " ", + "t", + "r", + "a", + "i", + "n", + "i", + "n", + "g", + " ", + "u", + "t", + "i", + "l", + "i", + "t", + "i", + "e", + "s", + ")", + "\n", + "i", + "f", + " ", + "\"", + "g", + "o", + "o", + "g", + "l", + "e", + ".", + "c", + "o", + "l", + "a", + "b", + "\"", + " ", + "i", + "n", + " ", + "s", + "y", + "s", + ".", + "m", + "o", + "d", + "u", + "l", + "e", + "s", + ":", + "\n", + " ", + " ", + " ", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + 
"e", + "s", + "s", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "\"", + "/", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "_", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "o", + "s", + ".", + "m", + "a", + "k", + "e", + "d", + "i", + "r", + "s", + "(", + "_", + "u", + "t", + "i", + "l", + "s", + ",", + " ", + "e", + "x", + "i", + "s", + "t", + "_", + "o", + "k", + "=", + "T", + "r", + "u", + "e", + ")", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + " ", + "=", + " ", + "\"", + "c", + "o", + "d", + "e", + "x", + "/", + "d", + "c", + "c", + "2", + "6", + "-", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "-", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "_", + "b", + "a", + "s", + "e", + " ", + "=", + " ", + "f", + "\"", + "h", + "t", + "t", + "p", + "s", + ":", + "/", + "/", + "r", + "a", + "w", + ".", + "g", + "i", + "t", + "h", + "u", + "b", + "u", + "s", + "e", + "r", + "c", + "o", + "n", + "t", + "e", + "n", + "t", + ".", + "c", + "o", + "m", + "/", + "I", + "D", + "E", + "A", + "L", + "L", + "a", + "b", + "/", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "/", + "{", + "_", + "b", + "r", + "a", + "n", + "c", + "h", + "}", + "/", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + " ", + " ", + " ", + " ", + "f", + "o", + "r", + " ", + "_", + "f", + " ", + "i", + "n", + " ", + "(", + "\"", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ".", + "p", + "y", + "\"", + ",", + " ", + "\"", + "_", + "_", + "i", + "n", + "i", + "t", + "_", + "_", + ".", + "p", + "y", + "\"", + ")", + ":", + "\n", + " ", + " ", + " 
", + " ", + " ", + " ", + " ", + " ", + "i", + "f", + " ", + "n", + "o", + "t", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "e", + "x", + "i", + "s", + "t", + "s", + "(", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ")", + ":", + "\n", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + " ", + "s", + "u", + "b", + "p", + "r", + "o", + "c", + "e", + "s", + "s", + ".", + "c", + "h", + "e", + "c", + "k", + "_", + "c", + "a", + "l", + "l", + "(", + "[", + "\"", + "w", + "g", + "e", + "t", + "\"", + ",", + " ", + "\"", + "-", + "q", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "b", + "a", + "s", + "e", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + ",", + " ", + "\"", + "-", + "O", + "\"", + ",", + " ", + "f", + "\"", + "{", + "_", + "u", + "t", + "i", + "l", + "s", + "}", + "/", + "{", + "_", + "f", + "}", + "\"", + "]", + ")", + "\n", + "e", + "l", + "s", + "e", + ":", + "\n", + " ", + " ", + " ", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + " ", + "=", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "a", + "b", + "s", + "p", + "a", + "t", + "h", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "i", + "f", + " ", + "o", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "s", + "d", + "i", + "r", + "(", + "\"", + ".", + ".", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + ")", + " ", + "e", + "l", + "s", + "e", + " ", + "\"", + "w", + "o", + "r", + "k", + "s", + "h", + "o", + "p", + "s", + "/", + "d", + "c", + "c", + "2", + "6", + "/", + "u", + "t", + "i", + "l", + "s", + "\"", + "\n", + "s", + "y", + "s", + ".", + "p", + "a", + "t", + "h", + ".", + "i", + "n", + "s", + "e", + "r", + "t", + "(", + "0", + ",", + " ", + "_", + "u", + "t", + "i", + "l", + "s", + ")", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "n", + "o", + "t", + "e", + "b", + "o", 
+ "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "E", + "4", + "0", + "2", + "\n", + "i", + "m", + "p", + "o", + "r", + "t", + "l", + "i", + "b", + ".", + "r", + "e", + "l", + "o", + "a", + "d", + "(", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + ")", + " ", + " ", + "#", + " ", + "a", + "l", + "w", + "a", + "y", + "s", + " ", + "p", + "i", + "c", + "k", + " ", + "u", + "p", + " ", + "l", + "a", + "t", + "e", + "s", + "t", + " ", + "e", + "d", + "i", + "t", + "s", + "\n", + "f", + "r", + "o", + "m", + " ", + "n", + "o", + "t", + "e", + "b", + "o", + "o", + "k", + "_", + "h", + "e", + "l", + "p", + "e", + "r", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "*", + " ", + " ", + "#", + " ", + "n", + "o", + "q", + "a", + ":", + " ", + "F", + "4", + "0", + "1", + ",", + "F", + "4", + "0", + "3", + "\n", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "b", + "e", + "n", + "c", + "h", + ".", + "u", + "t", + "i", + "l", + "s", + ".", + "a", + "l", + "l", + "_", + "p", + "r", + "o", + "b", + "l", + "e", + "m", + "s", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "B", + "U", + "I", + "L", + "T", + "I", + "N", + "_", + "P", + "R", + "O", + "B", + "L", + "E", + "M", + "S", + "\n", + "f", + "r", + "o", + "m", + " ", + "e", + "n", + "g", + "i", + "o", + "p", + "t", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + ".", + "c", + "g", + "a", + "n", + "_", + "c", + "n", + "n", + "_", + "2", + "d", + " ", + "i", + "m", + "p", + "o", + "r", + "t", + " ", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r", + " ", + "a", + "s", + " ", + "E", + "n", + "g", + "i", + "O", + "p", + "t", + "C", + "N", + "N", + "G", + "e", + "n", + "e", + "r", + "a", + "t", + "o", + "r" + ], + "id": "cell-6" + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "## 2. Configuration\n\nAll tuneable knobs in one place. **Experiment with these** — especially\n`EPOCHS` and `N_TRAIN` — to see how they affect the generated designs." + ], + "id": "cell-7" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ---------- Reproducibility ----------\n", + "SEED = 7\n", + "\n", + "# ---------- Problem ----------\n", + "PROBLEM_ID = \"beams2d\" # Change to try a different EngiBench problem\n", + "\n", + "# ---------- Training ----------\n", + "EPOCHS = 15 # Short for workshop; try 50+ for better results\n", + "BATCH_SIZE = 64\n", + "LR = 2e-4 # Adam learning rate\n", + "LATENT_DIM = 32 # Size of random noise vector fed to generator\n", + "# ---------- Generation ----------\n", + "N_SAMPLES = 24 # Designs to generate for Notebook 02\n", + "\n", + "# ---------- Device ----------\n", + "if th.cuda.is_available():\n", + " DEVICE = th.device(\"cuda\")\n", + "elif th.backends.mps.is_available():\n", + " DEVICE = th.device(\"mps\")\n", + "else:\n", + " DEVICE = th.device(\"cpu\")\n", + "print(\"Device:\", DEVICE)\n", + "\n", + "if \"google.colab\" in sys.modules and not th.cuda.is_available():\n", + " print(\"\\n⚠️ WARNING: No GPU detected! 
Training will be very slow (~1 min/epoch).\")\n", + " print(\" Go to: Runtime → Change runtime type → T4 GPU → Save\")\n", + " print(\" Then re-run from the top.\\n\")\n", + "\n", + "# ---------- Artifact paths ----------\n", + "ARTIFACT_DIR = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + "ARTIFACT_DIR.mkdir(parents=True, exist_ok=True)\n", + "\n", + "CKPT_PATH = ARTIFACT_DIR / \"engiopt_cgan2d_generator_supervised.pt\"\n", + "HISTORY_PATH = ARTIFACT_DIR / \"training_history.csv\"\n", + "TRAIN_CURVE_PATH = ARTIFACT_DIR / \"training_curve.png\"\n", + "\n", + "# ---------- Seed everything ----------\n", + "random.seed(SEED)\n", + "np.random.seed(SEED)\n", + "th.manual_seed(SEED)\n", + "if th.cuda.is_available():\n", + " th.cuda.manual_seed_all(SEED)\n", + "\n", + "print(\"Problem: \", PROBLEM_ID)\n", + "print(\"Artifact dir:\", ARTIFACT_DIR)" + ], + "id": "cell-8" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 3. Load the EngiBench problem\n\nSame API you used in Notebook 00 — every problem exposes `.dataset`,\n`.conditions_keys`, and `.design_space`." 
+ ], + "id": "cell-9" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=SEED)\ntrain_ds = problem.dataset[\"train\"]\ntest_ds = problem.dataset[\"test\"]\n\ncondition_keys = problem.conditions_keys\ndesign_shape = problem.design_space.shape\nn_conds = len(condition_keys)\n\nprint(f\"Problem : {type(problem).__name__}\")\nprint(f\"Design shape : {design_shape}\")\nprint(f\"Condition keys : {condition_keys}\")\nprint(f\"Train examples : {len(train_ds)}\")\nprint(f\"Test examples : {len(test_ds)}\")" + ], + "id": "cell-10" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Quick look at a few training designs\nshow_design_gallery(problem.dataset, problem, n=4, seed=SEED)" + ], + "id": "cell-11" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 4. FILL-IN 01-A: Prepare training data\n\nThe EngiBench dataset stores conditions and designs as separate columns.\nTo train a neural network we need to extract them into numeric arrays:\n\n1. **Conditions**: a `(N, n_conds)` array of floats — one row per sample, one column per condition key\n2. **Designs**: a `(N, H, W)` array of pixel values\n\nWe use the **full training set** so the model sees as many examples as possible.\nWe also rescale designs from `[0, 1]` to `[-1, 1]` because the generator uses a\n`tanh` output layer (which naturally outputs that range)." + ], + "id": "cell-12" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-A\n# Goal: extract conditions and designs from the full EngiBench training set.\n\nrng = np.random.default_rng(SEED)\n\n# START FILL ---------------------------------------------------------------\n\n# 1. 
Stack all condition columns into one (N, n_conds) array\nconds_np = np.stack(\n [np.array(train_ds[k]).astype(np.float32) for k in condition_keys],\n axis=1,\n)\n\n# 2. Extract the optimal designs\ndesigns_np = np.array(train_ds[\"optimal_design\"]).astype(np.float32)\n\n# 3. Rescale designs from [0, 1] to [-1, 1]\ntargets_np = designs_np * 2.0 - 1.0\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nn_train = len(train_ds)\nassert conds_np.shape == (n_train, n_conds), (\n f\"Expected conditions shape ({n_train}, {n_conds}), got {conds_np.shape}\"\n)\nassert targets_np.shape == (n_train, *design_shape), (\n f\"Expected targets shape ({n_train}, {', '.join(map(str, design_shape))}), got {targets_np.shape}\"\n)\nassert targets_np.min() >= -1.0 and targets_np.max() <= 1.0, (\n f\"Targets should be in [-1, 1], got [{targets_np.min():.2f}, {targets_np.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: {n_train} samples, conditions {conds_np.shape}, targets {targets_np.shape}\")" + ], + "id": "cell-13" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 5. The Generator model\n\nWe use a **convolutional conditional generator** (cDCGAN) from EngiOpt. Unlike a\nsimple fully-connected network that treats the design as a flat vector of pixels,\nthis model uses **transposed convolutions** that upsample a small feature map\ninto a full-resolution design image — preserving spatial structure at every step.\n\n```\nnoise (32, 1, 1) ──► ConvT ──┐\n ├─► concat (256, 7, 7)\nconditions (4, 1, 1) ► ConvT ┘ │\n ▼\n ConvT 7×7 → 13×13\n ConvT 13×13 → 25×25\n ConvT 25×25 → 50×50\n ConvT 50×50 → 100×100 → resize → design\n```\n\nThis **convolutional inductive bias** is why CNN generators produce much sharper\ndesigns than MLP generators: each layer reasons about local spatial\nneighbourhoods rather than treating every pixel independently." 
+ ], + "id": "cell-14" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Wrap the CNN generator so it accepts flat (B, dim) inputs\nfrom engiopt.workshops.dcc26.notebook_helpers import WorkshopGenerator\n\ncnn_gen = EngiOptCNNGenerator(\n latent_dim=LATENT_DIM,\n n_conds=n_conds,\n design_shape=design_shape,\n)\nmodel = WorkshopGenerator(cnn_gen).to(DEVICE)\n\nn_params = sum(p.numel() for p in model.parameters())\nprint(f\"Generator created: {n_params:,} parameters\")\nprint(f\"Input: noise ({LATENT_DIM}) + conditions ({n_conds}) = {LATENT_DIM + n_conds}\")\nprint(f\"Output: {' x '.join(map(str, design_shape))} design image\")" + ], + "id": "cell-15" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 6. FILL-IN 01-B: Train the model\n\nTraining is **supervised**: for each sample, the model sees random noise +\nconditions and tries to reproduce the optimal design. The loss measures\npixel-by-pixel error (MSE).\n\nWe provide a `train_supervised_generator()` helper that handles the training\nloop. Your job: **call it with the right arguments and experiment with\nsettings.**\n\n> **Try it:** After training with the default 15 epochs, change `EPOCHS` to 20\n> or 50 in the config cell above, re-run from there, and see how the loss and\n> designs change." + ], + "id": "cell-16" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-B\n", + "# Goal: train the generator. 
Experiment with EPOCHS and N_TRAIN.\n", + "\n", + "# Pick a few test conditions for snapshot visualization during training\n", + "snap_idx = rng.choice(len(test_ds), size=4, replace=False)\n", + "snap_conds = np.stack(\n", + " [np.array(test_ds[k])[snap_idx].astype(np.float32) for k in condition_keys],\n", + " axis=1,\n", + ")\n", + "snap_baselines = np.array(test_ds[\"optimal_design\"])[snap_idx].astype(np.float32)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "\n", + "# Call the training helper. It returns {\"losses\": [...], \"snapshots\": [...]}.\n", + "train_result = train_supervised_generator(\n", + " model,\n", + " conds_np,\n", + " targets_np,\n", + " TrainingConfig(\n", + " latent_dim=LATENT_DIM,\n", + " epochs=EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LR,\n", + " device=DEVICE,\n", + " snapshot_at_epochs=[1, max(1, EPOCHS // 2), EPOCHS],\n", + " ),\n", + " snapshot_conditions=snap_conds,\n", + ")\n", + "\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "train_losses = train_result[\"losses\"]\n", + "snapshots = train_result[\"snapshots\"]\n", + "\n", + "# Save checkpoint\n", + "th.save(model.state_dict(), CKPT_PATH)\n", + "\n", + "# CHECKPOINT\n", + "assert len(train_losses) == EPOCHS, f\"Expected {EPOCHS} loss values, got {len(train_losses)}\"\n", + "assert train_losses[-1] < train_losses[0], (\n", + " \"Loss did not decrease — check your training arguments.\"\n", + ")\n", + "print(f\"\\nCHECKPOINT passed: trained for {EPOCHS} epochs, final loss {train_losses[-1]:.6f}\")\n" + ], + "id": "cell-17" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training loss curve\n\nThe loss should decrease over epochs. A flat or increasing loss means something\nwent wrong. Note that even a decreasing loss does not guarantee good designs —\nMSE rewards blurry averages." 
+ ], + "id": "cell-18" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Save training history\nimport pandas as pd\npd.DataFrame({\"epoch\": range(1, len(train_losses) + 1), \"loss\": train_losses}).to_csv(\n HISTORY_PATH, index=False,\n)\n\nshow_training_curve(train_losses, save_path=str(TRAIN_CURVE_PATH))" + ], + "id": "cell-19" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### How the generator learns\n\nBelow you can see what the generator produces at different points during\ntraining. Early outputs are random noise; later outputs start to resemble beam\nstructures. The ground-truth row shows what the model is trying to match." + ], + "id": "cell-20" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_training_progression(snapshots, baseline_designs=snap_baselines, n_show=4)" + ], + "id": "cell-21" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 7. FILL-IN 01-C: Generate designs from test conditions\n\nNow for the payoff: use your trained model to produce designs for **conditions\nit has never seen** (from the held-out test set).\n\nIf the model generalises, it should produce reasonable designs for new\nconditions without running the optimizer. The `generate_designs()` helper\nhandles the inference — you just need to:\n\n1. Pick test conditions from the EngiBench dataset\n2. Call the generator\n3. Also extract the ground-truth baselines for comparison" + ], + "id": "cell-22" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# FILL-IN CELL 01-C\n# Goal: generate N_SAMPLES designs conditioned on test-set conditions.\n\n# START FILL ---------------------------------------------------------------\n\n# 1. Sample test indices\ntest_idx = rng.choice(len(test_ds), size=N_SAMPLES, replace=False)\n\n# 2. 
Extract test conditions and ground-truth baseline designs\ntest_conds_np = np.stack(\n [np.array(test_ds[k])[test_idx].astype(np.float32) for k in condition_keys],\n axis=1,\n)\nbaseline_designs = np.array(test_ds[\"optimal_design\"])[test_idx].astype(np.float32)\n\n# 3. Generate designs using the trained model\ngen_designs = generate_designs(\n model, test_conds_np, latent_dim=LATENT_DIM, device=DEVICE,\n)\n\n# 4. Build condition records (list of dicts) for export\nconditions_records = [\n {k: float(test_conds_np[i, j]) for j, k in enumerate(condition_keys)}\n for i in range(N_SAMPLES)\n]\n\n# END FILL -----------------------------------------------------------------\n\n# CHECKPOINT\nassert gen_designs.shape == baseline_designs.shape, (\n f\"Shape mismatch: generated {gen_designs.shape} vs baseline {baseline_designs.shape}\"\n)\nassert len(conditions_records) == N_SAMPLES\nassert 0.0 <= gen_designs.min() and gen_designs.max() <= 1.0, (\n f\"Generated designs should be in [0, 1], got [{gen_designs.min():.2f}, {gen_designs.max():.2f}]\"\n)\nprint(f\"CHECKPOINT passed: generated {N_SAMPLES} designs, shape {gen_designs.shape}\")" + ], + "id": "cell-23" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 8. Visual comparison: Generated vs Ground Truth\n\nEach column shows the same conditions. Top row = your model's output; bottom row\n= the optimizer's solution from the dataset.\n\n**What to look for:**\n- **Blurriness:** generated designs are often blurry because MSE loss averages\n over possible solutions\n- **Structure:** do the generated designs have recognisable beam topology (load\n paths, supports)?\n- **Condition sensitivity:** do different conditions produce visibly different\n designs, or does the model output the same thing regardless?" 
+ ], + "id": "cell-24" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_gen_vs_baseline(gen_designs, baseline_designs, conditions_records)" + ], + "id": "cell-25" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## 9. Export artifacts for Notebook 02\n\nNotebook 02 needs three files to run its evaluation pipeline:\n- `generated_designs.npy` — your model's output\n- `baseline_designs.npy` — ground-truth designs from the dataset\n- `conditions.json` — the conditions used for generation" + ], + "id": "cell-26" + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.save(ARTIFACT_DIR / \"generated_designs.npy\", gen_designs)\nnp.save(ARTIFACT_DIR / \"baseline_designs.npy\", baseline_designs)\nwith open(ARTIFACT_DIR / \"conditions.json\", \"w\") as f:\n json.dump(conditions_records, f, indent=2)\n\n# Verify\nrequired = [\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\"]\nmissing = [f for f in required if not (ARTIFACT_DIR / f).exists()]\nassert not missing, f\"Missing: {missing}\"\nprint(f\"Exported to {ARTIFACT_DIR}:\")\nfor f in required:\n print(f\" {f}\")" + ], + "id": "cell-27" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Discussion\n\n### What you have seen\n\nYou trained a neural network on a few hundred examples for a few epochs and used\nit to produce beam designs in milliseconds. The results are imperfect — and that\nis exactly the point.\n\n### Questions to think about\n\n1. **Why are the designs blurry?** MSE loss penalises pixel-wise error, which\n rewards the *average* of all plausible designs rather than any single sharp\n one. What alternative losses or model architectures might produce crisper\n output? (Think: adversarial loss, diffusion models, VAEs.)\n\n2. 
**Does the model respond to conditions?** Compare designs generated for very\n different volume fractions or load distributions. If they all look the same,\n the model may have learned the dataset mean rather than the\n condition → design relationship. What might help? (More training data? More\n epochs? A different architecture?)\n\n3. **From pixels to physics.** A design can *look* reasonable but fail under\n simulation — disconnected material, wrong volume fraction, stress\n concentrations. Notebook 02 will run the physics solver on your generated\n designs and quantify these failures.\n\n4. **The benchmarking motivation.** We do not know how bad these designs are\n until we *measure*. That is the role of a benchmark: providing standardised\n evaluation so we can compare methods, track progress, and avoid fooling\n ourselves with visual inspection alone.\n\n5. **What would you change?** If you had an hour instead of 30 minutes, what\n would you try? More data, more epochs, a different model, a different loss\n function? How would you decide whether it *actually* improved?" + ], + "id": "cell-28" + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n\n## Next\n\nProceed to **Notebook 02** to evaluate your generated designs with physics-based\nsimulation and compute benchmark metrics. Your exported artifacts are the input." 
+ ], + "id": "cell-29" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/workshops/dcc26/solutions/02_evaluate_metrics.ipynb b/workshops/dcc26/solutions/02_evaluate_metrics.ipynb new file mode 100644 index 00000000..6494e3b2 --- /dev/null +++ b/workshops/dcc26/solutions/02_evaluate_metrics.ipynb @@ -0,0 +1,1242 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 02 (Solution): Evaluate Your Generated Designs\n", + "\n", + "In Notebook 01 you trained a generative model and produced candidate beam designs.\n", + "Now comes the critical question: **are those designs actually any good?**\n", + "\n", + "In generative modeling for engineering, \"good\" is **not** a single number.\n", + "A design can look plausible yet fail simulation. It can perform well on one\n", + "objective yet violate a critical constraint. It can be high-quality but\n", + "identical to a training example -- memorised, not generalised.\n", + "\n", + "This notebook walks you through a **structured evaluation pipeline** that\n", + "diagnoses generative model quality from multiple complementary angles, each\n", + "revealing something the others miss." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A taxonomy of generative-model metrics\n", + "\n", + "```\n", + " How do we evaluate a generative model?\n", + " |\n", + " ┌───────────────┬───────────┼───────────────┬──────────────────┐\n", + " | | | | |\n", + " Visual Simulation Constraint Distributional Diversity &\n", + " Inspection Performance Satisfaction Similarity Coverage\n", + " | | | | |\n", + " \"Does it \"Does it \"Is it \"Does it \"Did we\n", + " look right?\" work?\" legal?\" match explore?\"\n", + " reality?\"\n", + " | | | | |\n", + " Residual Compliance Volfrac error MMD Pairwise L2\n", + " heatmaps histogram distribution (Gaussian DPP diversity\n", + " + scatter + feasibility kernel) NN novelty\n", + " + per-sample rate bars PCA embedding\n", + " gap bars\n", + "```\n", + "\n", + "No single metric tells the whole story. A model can ace one category and\n", + "fail another -- and *which failure matters most* depends on your application.\n", + "\n", + "| Category | Question | Beams2D metric | Why it matters |\n", + "|----------|----------|---------------|----------------|\n", + "| **Visual inspection** | Does it look like a real beam? | Residual heatmaps | Quick sanity check; catches gross failures |\n", + "| **Simulation performance** | Does the physics solver confirm it works? | Compliance gap vs baseline | The ground truth -- simulation is our oracle |\n", + "| **Constraint satisfaction** | Does it obey the engineering spec? | Volume fraction error | A stiff beam using too much material is invalid |\n", + "| **Distributional similarity** | Does the generator match the real data distribution? | MMD (Maximum Mean Discrepancy) | Detects mode collapse, unrealistic densities |\n", + "| **Diversity & coverage** | Did the model explore, or did it memorise? | Pairwise L2, DPP, NN novelty | A model outputting one beam 24 times is useless |\n", + "| **Optimization warmstarting** | Does it give the optimizer a head start? 
| IOG, COG, FOG | The ultimate downstream utility test |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### The evaluation pipeline at a glance\n", + "\n", + "```\n", + "Generated Designs Baseline Designs Training Designs\n", + " | | |\n", + " v v v\n", + " [ Visual Inspection ] [ Reference set ]\n", + " | | |\n", + " v v |\n", + " [ Simulate ] [ Simulate ] |\n", + " | | |\n", + " v v |\n", + " Objectives Objectives |\n", + " \\ / |\n", + " \\ / |\n", + " v v |\n", + " Simulation Metrics |\n", + " (gap, improvement rate) |\n", + " | |\n", + " v v\n", + " Constraint Metrics Distributional Metrics\n", + " (volfrac error, feasibility) (MMD, pixel distributions)\n", + " | |\n", + " v v\n", + " Diversity Metrics <──────────────── PCA Embedding\n", + " (pairwise L2, DPP, NN novelty)\n", + " |\n", + " v\n", + " Optimization Warmstarting\n", + " (IOG, COG, FOG trajectories)\n", + " |\n", + " v\n", + " Summary Dashboard\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Notebook map\n", + "\n", + "| Part | What you do | Key output |\n", + "|------|-------------|------------|\n", + "| Setup | Install deps, load artifacts | `gen_designs`, `baseline_designs`, `conditions` |\n", + "| Part 1 | Visual inspection (the eye test) | Residual heatmaps |\n", + "| Part 2 (Fill-in 02-A) | Per-sample simulation | `results` DataFrame |\n", + "| Part 3 | Constraint satisfaction analysis | Volfrac scatter + error distribution |\n", + "| Part 4 (Fill-in 02-B) | Distributional similarity (MMD) | `mmd_value` |\n", + "| Part 5 | Diversity & coverage | Pairwise heatmap + PCA embedding |\n", + "| Part 6 | Optimization warmstarting (demo) | Trajectory plots with IOG/COG/FOG |\n", + "| Part 7 (Fill-in 02-C) | Comprehensive summary dashboard | `summary_df` |\n", + "\n", + "### Legend\n", + "- `PUBLIC FILL-IN CELL` -- you write code here.\n", + "- `CHECKPOINT` -- run this assertion block to verify before moving on.\n", + "- `# START FILL` / `# END FILL` -- your edits go between these markers." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 0: Install dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess, sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def pip_install(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " pip_install([\"engibench[all]\", \"sqlitedict\", \"matplotlib\", \"tqdm\", \"tyro\", \"wandb\"])\n", + " pip_install([\"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"])\n", + " try:\n", + " import torch\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + " print(\"Install complete.\")\n", + "else:\n", + " print(\"Using current environment. Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Step 1: Load artifacts from Notebook 01\n", + "\n", + "We need three files that Notebook 01 exported:\n", + "- `generated_designs.npy` -- the designs your model produced\n", + "- `baseline_designs.npy` -- optimised reference designs from the dataset\n", + "- `conditions.json` -- the boundary-condition configs for each sample\n", + "\n", + "The next cell contains a recovery function that **automatically rebuilds** these\n", + "artifacts if they are missing (e.g., if you jumped straight to NB02). You do not\n", + "need to read or understand that function -- just run it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ── Artifact recovery (runs only if NB01 artifacts are missing) ──────────\n", + "# This cell auto-builds NB01 artifacts so NB02 works standalone.\n", + "# You do NOT need to read this code -- just run the cell.\n", + "\n", + "import importlib\n", + "import json, random, sys, os\n", + "from pathlib import Path\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch as th\n", + "import torch.nn as nn\n", + "from torch.utils.data import DataLoader, TensorDataset\n", + "\n", + "# Workshop helpers from the installed EngiOpt package\n", + "import engiopt.workshops.dcc26.notebook_helpers as notebook_helpers # noqa: E402\n", + "importlib.reload(notebook_helpers)\n", + "from engiopt.workshops.dcc26.notebook_helpers import * # noqa: F401,F403\n", + "\n", + "from engibench.utils.all_problems import BUILTIN_PROBLEMS\n", + "\n", + "PROBLEM_ID = \"beams2d\"\n", + "\n", + "try:\n", + " from engiopt.cgan_2d.cgan_2d import Generator as EngiOptCGAN2DGenerator\n", + "except ModuleNotFoundError as exc:\n", + " raise ModuleNotFoundError(\n", + " \"Could not import engiopt. 
Run the install cell first; on Colab, restart runtime after install.\"\n", + " ) from exc\n", + "\n", + "\n", + "def _resolve_artifact_dir(create=False):\n", + " p = Path(\"/content/dcc26_artifacts\") if \"google.colab\" in sys.modules else Path(\"workshops/dcc26/artifacts\")\n", + " if create:\n", + " p.mkdir(parents=True, exist_ok=True)\n", + " return p\n", + "\n", + "\n", + "def _build_artifacts_locally(artifact_dir, seed=7, n_train=512, n_samples=24, epochs=8, batch_size=64, latent_dim=32):\n", + " \"\"\"Replicate the NB01 train+generate pipeline to produce evaluation artifacts.\"\"\"\n", + " print(\"Auto-building NB01 artifacts (this takes ~1 min)...\")\n", + " random.seed(seed); np.random.seed(seed); th.manual_seed(seed)\n", + " if th.cuda.is_available(): th.cuda.manual_seed_all(seed)\n", + " device = th.device(\"cuda\" if th.cuda.is_available() else \"cpu\")\n", + " problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=seed)\n", + " train_ds, test_ds = problem.dataset[\"train\"], problem.dataset[\"test\"]\n", + " ckeys = problem.conditions_keys\n", + " rng = np.random.default_rng(seed)\n", + " idx = rng.choice(len(train_ds), size=min(n_train, len(train_ds)), replace=False)\n", + " conds = np.stack([np.array(train_ds[k])[idx].astype(np.float32) for k in ckeys], axis=1)\n", + " designs = np.array(train_ds[\"optimal_design\"])[idx].astype(np.float32)\n", + " targets = designs * 2.0 - 1.0\n", + " model = EngiOptCGAN2DGenerator(latent_dim=latent_dim, n_conds=conds.shape[1], design_shape=problem.design_space.shape).to(device)\n", + " opt = th.optim.Adam(model.parameters(), lr=1e-3)\n", + " crit = nn.MSELoss()\n", + " dl = DataLoader(TensorDataset(th.tensor(conds), th.tensor(targets)), batch_size=batch_size, shuffle=True)\n", + " losses = []\n", + " for ep in range(epochs):\n", + " model.train(); ep_loss = 0.0\n", + " for cb, tb in dl:\n", + " cb, tb = cb.to(device), tb.to(device)\n", + " pred = model(th.randn(cb.shape[0], latent_dim, device=device), cb)\n", + " loss = 
crit(pred, tb); opt.zero_grad(); loss.backward(); opt.step()\n", + " ep_loss += loss.item()\n", + " avg = ep_loss / len(dl); losses.append(avg)\n", + " print(f\" epoch {ep+1:02d}/{epochs} loss={avg:.4f}\")\n", + " sc = min(n_samples, len(test_ds))\n", + " sel = rng.choice(len(test_ds), size=sc, replace=False)\n", + " tc = np.stack([np.array(test_ds[k])[sel].astype(np.float32) for k in ckeys], axis=1)\n", + " bl = np.array(test_ds[\"optimal_design\"])[sel].astype(np.float32)\n", + " model.eval()\n", + " with th.no_grad():\n", + " out = model(th.randn(sc, latent_dim, device=device), th.tensor(tc, device=device))\n", + " gd = ((out.clamp(-1, 1) + 1) / 2).clamp(0, 1).cpu().numpy().astype(np.float32)\n", + " cond_recs = []\n", + " for i in range(sc):\n", + " rec = {}\n", + " for j, k in enumerate(ckeys):\n", + " rec[k] = bool(tc[i, j]) if k == \"overhang_constraint\" else float(tc[i, j])\n", + " cond_recs.append(rec)\n", + " artifact_dir.mkdir(parents=True, exist_ok=True)\n", + " np.save(artifact_dir / \"generated_designs.npy\", gd)\n", + " np.save(artifact_dir / \"baseline_designs.npy\", bl)\n", + " with open(artifact_dir / \"conditions.json\", \"w\") as f: json.dump(cond_recs, f, indent=2)\n", + " pd.DataFrame({\"epoch\": range(1, len(losses)+1), \"train_loss\": losses}).to_csv(artifact_dir / \"training_history.csv\", index=False)\n", + " th.save({\"model\": model.state_dict(), \"condition_keys\": ckeys, \"latent_dim\": latent_dim}, artifact_dir / \"engiopt_cgan2d_generator_supervised.pt\")\n", + " print(\"Artifacts ready at\", artifact_dir)\n", + "\n", + "\n", + "ARTIFACT_DIR = _resolve_artifact_dir(create=True)\n", + "_required = [ARTIFACT_DIR / f for f in (\"generated_designs.npy\", \"baseline_designs.npy\", \"conditions.json\")]\n", + "if not all(p.exists() for p in _required):\n", + " _build_artifacts_locally(ARTIFACT_DIR)\n", + "\n", + "print(\"Artifact directory:\", ARTIFACT_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, 
+ "outputs": [], + "source": [ + "# ── Load artifacts ───────────────────────────────────────────────────────\n", + "import json\n", + "import numpy as np\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "from scipy.spatial.distance import cdist\n", + "\n", + "gen_designs = np.load(ARTIFACT_DIR / \"generated_designs.npy\")\n", + "baseline_designs = np.load(ARTIFACT_DIR / \"baseline_designs.npy\")\n", + "with open(ARTIFACT_DIR / \"conditions.json\") as f:\n", + " conditions = json.load(f)\n", + "\n", + "print(f\"Generated designs : {gen_designs.shape} (values in [{gen_designs.min():.2f}, {gen_designs.max():.2f}])\")\n", + "print(f\"Baseline designs : {baseline_designs.shape}\")\n", + "print(f\"Condition records : {len(conditions)}\")\n", + "print(f\"Condition keys : {list(conditions[0].keys())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load a reference subset of training designs for distributional + novelty metrics\n", + "problem_ref = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "train_designs_full = np.array(problem_ref.dataset[\"train\"][\"optimal_design\"]).astype(np.float32)\n", + "ref_idx = np.random.default_rng(7).choice(\n", + " len(train_designs_full), size=min(1024, len(train_designs_full)), replace=False\n", + ")\n", + "train_reference = train_designs_full[ref_idx]\n", + "print(f\"Training reference set: {train_reference.shape[0]} designs\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 1: Visual Inspection -- The Eye Test\n", + "\n", + "Before computing any metric, **look at the designs**. Visual inspection catches\n", + "gross failures immediately: is the model producing solid blocks? random noise?\n", + "something that looks vaguely beam-like?\n", + "\n", + "We show two views:\n", + "1. **Side-by-side gallery** -- generated vs optimised baseline\n", + "2. 
**Pixel residual heatmaps** -- where exactly do the designs differ?\n", + "\n", + "Visual inspection is *necessary* but **not sufficient**. A design can look\n", + "plausible yet perform terribly in simulation, or violate constraints that\n", + "are invisible to the eye. The rest of this notebook quantifies what your\n", + "eyes cannot." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_residual_heatmaps(gen_designs, baseline_designs, n_show=6)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Takeaway:** The residual heatmaps reveal where the generator struggles most.\n", + "Bright regions = large pixel error. Notice how errors tend to cluster at\n", + "structural boundaries and fine features -- exactly the details that matter\n", + "most for physical performance.\n", + "\n", + "But pixels alone don't tell us about *compliance*, *constraint violations*, or\n", + "*diversity*. We need simulation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 2: Simulation Performance -- \"Does it work?\"\n", + "\n", + "The **physics simulator** is our oracle. For Beams2D, it computes the\n", + "*compliance* of each design under the given boundary conditions:\n", + "- **Lower compliance = stiffer beam = better design**\n", + "\n", + "We simulate both the generated design and its corresponding baseline\n", + "(the optimised design from the dataset) under **identical conditions**.\n", + "The difference tells us how far the generator is from optimal.\n", + "\n", + "> **Analogy:** Imagine you asked an architecture student to sketch a bridge.\n", + "> Visual inspection tells you the sketch looks bridge-like. But only a\n", + "> structural engineer (our simulator) can tell you whether it would actually\n", + "> stand up." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "problem = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "\n", + "# Feasibility tolerance: how close must volfrac be to the target?\n", + "VOLFRAC_TOL = 0.05\n", + "\n", + "# PUBLIC FILL-IN CELL 02-A\n", + "# Goal: build a list of dicts, one per sample, with objective + feasibility info.\n", + "#\n", + "# For each sample i, you have:\n", + "# g = gen_designs[i] -- generated design (2D numpy array)\n", + "# b = baseline_designs[i] -- baseline design (2D numpy array)\n", + "# cfg = conditions[i] -- dict with keys like 'volfrac', 'rmin', etc.\n", + "\n", + "rows = []\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "for i in range(len(gen_designs)):\n", + " g = gen_designs[i]\n", + " b = baseline_designs[i]\n", + " cfg = dict(conditions[i])\n", + "\n", + " # 1) Compute volume fractions (mean pixel value of each design)\n", + " g_vf = float(np.mean(g))\n", + " b_vf = float(np.mean(b))\n", + " target_vf = cfg[\"volfrac\"]\n", + "\n", + " # 2) Check feasibility: is |actual_vf - target_vf| <= VOLFRAC_TOL?\n", + " g_feasible = bool(abs(g_vf - target_vf) <= VOLFRAC_TOL)\n", + " b_feasible = bool(abs(b_vf - target_vf) <= VOLFRAC_TOL)\n", + "\n", + " # 3) Simulate both designs under identical conditions\n", + " problem.reset(seed=7 + i)\n", + " g_obj = float(problem.simulate(design=g, config=cfg)[0])\n", + " problem.reset(seed=7 + i)\n", + " b_obj = float(problem.simulate(design=b, config=cfg)[0])\n", + "\n", + " # 4) Record everything\n", + " rows.append({\n", + " \"sample\": i,\n", + " \"gen_obj\": g_obj,\n", + " \"base_obj\": b_obj,\n", + " \"gen_minus_base\": g_obj - b_obj,\n", + " \"gen_volfrac\": g_vf,\n", + " \"target_volfrac\": target_vf,\n", + " \"gen_feasible\": g_feasible,\n", + " \"base_feasible\": b_feasible,\n", + " })\n", + "# END FILL -----------------------------------------------------------------\n", 
+ "\n", + "results = pd.DataFrame(rows)\n", + "results.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-A\n", + "expected_cols = {\"sample\", \"gen_obj\", \"base_obj\", \"gen_minus_base\", \"gen_volfrac\",\n", + " \"target_volfrac\", \"gen_feasible\", \"base_feasible\"}\n", + "missing_cols = expected_cols - set(results.columns)\n", + "assert not missing_cols, f\"Missing columns: {missing_cols}\"\n", + "assert len(results) == len(gen_designs), f\"Expected {len(gen_designs)} rows, got {len(results)}\"\n", + "assert results[\"gen_obj\"].notna().all(), \"gen_obj contains NaN -- did you forget to simulate?\"\n", + "assert results[\"gen_feasible\"].dtype == bool, \"gen_feasible should be boolean\"\n", + "print(f\"Checkpoint 02-A passed: {len(results)} samples evaluated.\")\n", + "print(f\" Feasible generated: {results['gen_feasible'].sum()}/{len(results)}\")\n", + "print(f\" Feasible baseline: {results['base_feasible'].sum()}/{len(results)}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualising simulation results\n", + "\n", + "Three complementary views:\n", + "1. **Histogram** -- overall distribution of objectives (generated vs baseline)\n", + "2. **Scatter plot** -- per-sample pairing (points below diagonal = generated is better)\n", + "3. 
**Residual bar chart** -- per-sample gap, signed (green = generated outperforms)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_comparison(results)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_objective_residuals(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the simulation results\n", + "\n", + "- **Histogram overlap**: If the blue (generated) and orange (baseline) distributions\n", + " overlap heavily, the generator is competitive. If blue is shifted right (higher\n", + " compliance), the generator produces weaker designs.\n", + "\n", + "- **Scatter diagonal**: Points *below* the diagonal line mean the generated design\n", + " outperformed the optimised baseline for that sample -- a strong result.\n", + "\n", + "- **Residual bars**: The bar chart makes the per-sample gap immediately visible.\n", + " Consistent green bars = the model is competitive. Large red bars = specific\n", + " failure modes worth investigating (check the design images for those samples)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 3: Constraint Satisfaction -- \"Is it legal?\"\n", + "\n", + "A design that performs well but **violates constraints** is useless in practice.\n", + "For Beams2D, the key constraint is **volume fraction**: the design must use\n", + "a specific amount of material (neither too much nor too little).\n", + "\n", + "> **Analogy:** An architect who designs a beautiful building that exceeds the\n", + "> budget by 50% has not solved the problem -- they have created a new one.\n", + "\n", + "We already computed `gen_volfrac` and `target_volfrac` in the simulation loop.\n", + "Now let's visualise how well the generator satisfies this constraint." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_volfrac_analysis(results, volfrac_tol=VOLFRAC_TOL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_feasibility_bars(results)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the constraint results\n", + "\n", + "- **Left scatter**: Points near the diagonal are feasible; points far from it\n", + " are violating the volume fraction constraint. The green band shows the\n", + " tolerance window.\n", + "\n", + "- **Error histogram**: A narrow distribution centered at zero means the generator\n", + " has learned to control material usage. A wide or biased distribution suggests\n", + " the model ignores the volume fraction condition.\n", + "\n", + "- **Feasibility rate**: The bar chart gives the bottom line. If the baseline\n", + " achieves ~100% feasibility but the generator is at 50%, there is a clear\n", + " conditioning failure.\n", + "\n", + "**Why this matters beyond beams:** In real engineering, constraints can be\n", + "stress limits, manufacturing tolerances, thermal budgets, or regulatory\n", + "requirements. A generative model that ignores constraints generates\n", + "*interesting but unusable* designs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 4: Distributional Similarity -- \"Does it match reality?\"\n", + "\n", + "The previous metrics evaluated designs **individually** (per-sample objective,\n", + "per-sample feasibility). But we also need to ask: does the *distribution*\n", + "of generated designs match the distribution of the ground-truth optimal designs\n", + "**for the same conditions**?\n", + "\n", + "### What is MMD?\n", + "\n", + "**Maximum Mean Discrepancy (MMD)** is a kernel-based distance between two\n", + "distributions. Intuitively:\n", + "\n", + "1. 
Map each design into a high-dimensional feature space via a Gaussian kernel\n", + "2. Compare the *mean embeddings* of the two sets\n", + "3. If the means match, the distributions are similar; if they diverge, they are different\n", + "\n", + "$$\\text{MMD}^2 = \\underbrace{\\mathbb{E}[k(x, x')]}_{\\text{gen-gen similarity}} + \\underbrace{\\mathbb{E}[k(y, y')]}_{\\text{base-base similarity}} - 2\\,\\underbrace{\\mathbb{E}[k(x, y)]}_{\\text{cross similarity}}$$\n", + "\n", + "- **MMD = 0**: generated and baseline distributions are identical\n", + "- **MMD > 0**: they differ (larger = more different)\n", + "- The kernel bandwidth $\\sigma$ controls the scale of comparison\n", + "\n", + "### Why compare generated vs baseline?\n", + "\n", + "Our generator is **conditional** -- it takes test conditions and produces\n", + "designs. The baseline contains the ground-truth optima for those *same*\n", + "test conditions. Comparing generated vs baseline directly measures whether\n", + "the generator has learned to produce the right designs for the right conditions.\n", + "\n", + "### Choosing sigma without test-data leakage\n", + "\n", + "The Gaussian kernel bandwidth $\\sigma$ determines what scale of difference\n", + "the kernel is sensitive to. We set it using the **median heuristic** on the\n", + "*training data only* -- the median pairwise distance among training designs.\n", + "This avoids leaking test information into the metric while ensuring the\n", + "kernel operates in a meaningful range.\n", + "\n", + "### Why MMD and not just \"average quality\"?\n", + "\n", + "A model could produce 24 copies of the single best design. Per-sample metrics\n", + "would look great! But the *distribution* would be nothing like the diverse\n", + "baseline set. MMD catches this." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Visual intuition: where does each set place material?\n", + "show_spatial_distribution_comparison(gen_designs, baseline_designs, train_reference)\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-B\n", + "# Goal: compute MMD between generated designs and baseline designs (same conditions).\n", + "#\n", + "# MMD uses a Gaussian (RBF) kernel: k(x,y) = exp(-||x-y||^2 / (2*sigma^2))\n", + "#\n", + "# We set sigma from the TRAINING data (median heuristic) to avoid test-data leakage,\n", + "# then apply that fixed sigma to the gen-vs-baseline comparison.\n", + "#\n", + "# You have:\n", + "# gen_designs -- (N, H, W) numpy array of generated designs\n", + "# baseline_designs -- (N, H, W) numpy array of optimized designs (same conditions)\n", + "# train_reference -- (M, H, W) numpy array of training designs\n", + "# cdist -- from scipy.spatial.distance (already imported)\n", + "#\n", + "# Steps:\n", + "# 1. Flatten all design sets to 2D\n", + "# 2. Compute sigma from training pairwise distances (median heuristic)\n", + "# 3. Compute pairwise squared distances between generated and baseline\n", + "# 4. Apply Gaussian kernel: K = exp(-D / (2 * sigma^2))\n", + "# 5. MMD = mean(K_gg) + mean(K_bb) - 2 * mean(K_gb)\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# 1. Flatten\n", + "gen_flat = gen_designs.reshape(gen_designs.shape[0], -1)\n", + "base_flat = baseline_designs.reshape(baseline_designs.shape[0], -1)\n", + "ref_flat = train_reference.reshape(train_reference.shape[0], -1)\n", + "\n", + "# 2. 
Sigma from training data only (no test leakage)\n", + "D_ref = cdist(ref_flat, ref_flat, \"sqeuclidean\")\n", + "sigma = float(np.sqrt(np.median(D_ref)))\n", + "print(f\"Sigma (median heuristic on training data): {sigma:.2f}\")\n", + "\n", + "# 3. Pairwise squared distances for gen vs baseline\n", + "D_gg = cdist(gen_flat, gen_flat, \"sqeuclidean\")\n", + "D_bb = cdist(base_flat, base_flat, \"sqeuclidean\")\n", + "D_gb = cdist(gen_flat, base_flat, \"sqeuclidean\")\n", + "\n", + "# 4. Gaussian kernel\n", + "K_gg = np.exp(-D_gg / (2 * sigma**2))\n", + "K_bb = np.exp(-D_bb / (2 * sigma**2))\n", + "K_gb = np.exp(-D_gb / (2 * sigma**2))\n", + "\n", + "# 5. MMD\n", + "mmd_value = float(K_gg.mean() + K_bb.mean() - 2 * K_gb.mean())\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "print(f\"MMD(generated, baseline) = {mmd_value:.6f}\")\n", + "print(f\" K_gg mean (gen-gen similarity): {K_gg.mean():.6f}\")\n", + "print(f\" K_bb mean (base-base similarity): {K_bb.mean():.6f}\")\n", + "print(f\" K_gb mean (cross similarity): {K_gb.mean():.6f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-B\n", + "assert mmd_value is not None, \"mmd_value is None -- did you compute it?\"\n", + "assert isinstance(mmd_value, float), \"mmd_value should be a float\"\n", + "assert mmd_value >= 0, f\"MMD should be non-negative, got {mmd_value}\"\n", + "assert sigma is not None and sigma > 1, f\"sigma should be > 1 for 10k-dim data (got {sigma}); did you use the median heuristic?\"\n", + "assert K_gg is not None and K_gb is not None, \"Kernel matrices not computed\"\n", + "print(f\"Checkpoint 02-B passed: MMD = {mmd_value:.6f} (sigma = {sigma:.2f})\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Put MMD in context: compare against reference points (same training-derived sigma)\n", + "#\n", + "# 
1. Training sample vs baseline: what you'd get by grabbing random training\n", + "# designs instead of conditioning on the test conditions.\n", + "# 2. Random noise vs baseline: worst case (meaningless generator).\n", + "\n", + "rng_mmd = np.random.default_rng(42)\n", + "\n", + "# Training sample vs baseline (no conditioning)\n", + "train_sample_idx = rng_mmd.choice(len(train_reference), size=len(baseline_designs), replace=False)\n", + "train_sample_flat = train_reference[train_sample_idx].reshape(len(baseline_designs), -1)\n", + "D_tt = cdist(train_sample_flat, train_sample_flat, \"sqeuclidean\")\n", + "D_tb = cdist(train_sample_flat, base_flat, \"sqeuclidean\")\n", + "mmd_train_base = float(\n", + " np.exp(-D_tt / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_tb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "# Random noise vs baseline\n", + "random_designs = rng_mmd.random(gen_designs.shape).astype(np.float32)\n", + "rand_flat = random_designs.reshape(random_designs.shape[0], -1)\n", + "D_rr = cdist(rand_flat, rand_flat, \"sqeuclidean\")\n", + "D_rb = cdist(rand_flat, base_flat, \"sqeuclidean\")\n", + "mmd_random_base = float(\n", + " np.exp(-D_rr / (2*sigma**2)).mean()\n", + " + K_bb.mean()\n", + " - 2 * np.exp(-D_rb / (2*sigma**2)).mean()\n", + ")\n", + "\n", + "print(f\"MMD reference points (sigma={sigma:.2f}, from training data):\")\n", + "print(f\" Generated vs Baseline: {mmd_value:.6f} (our model)\")\n", + "print(f\" Train sample vs Baseline: {mmd_train_base:.6f} (no conditioning)\")\n", + "print(f\" Random noise vs Baseline: {mmd_random_base:.6f} (worst case)\")\n", + "\n", + "show_mmd_comparison_bar(mmd_value, mmd_train_base, mmd_random_base)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PCA embedding: where do generated designs live relative to training data?\n", + "show_embedding_scatter(gen_designs, baseline_designs, train_reference)" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the distributional similarity results\n", + "\n", + "- **Mean design images**: If the generated mean image looks similar to the\n", + " baseline mean image, the model has learned where material should go on\n", + " average for these conditions. Differences reveal spatial biases.\n", + "\n", + "- **Volume fraction distributions**: If the generated distribution is narrower\n", + " or shifted relative to baseline, the model isn't capturing the full range\n", + " of volume fractions needed for these test conditions.\n", + "\n", + "- **MMD in context**: The comparison bar chart places the generator's MMD on\n", + " a meaningful scale:\n", + " - **Train sample vs Baseline** (retrieval baseline): What you'd get by\n", + " grabbing random training designs instead of conditioning. If the\n", + " generator beats this, it has genuinely learned to condition.\n", + " - **Random vs Baseline** (worst case): Uniform noise -- the floor for\n", + " a non-functional generator.\n", + " A generator close to zero has matched the baseline distribution. A\n", + " generator near the train-sample bar is no better than memorising\n", + " training data without using the conditions.\n", + "\n", + "- **PCA embedding**: If generated designs (blue) cluster tightly in one\n", + " corner while training data (grey) spans a wide region, the model has\n", + " **mode collapse**. Ideally, blue points should overlap with the orange\n", + " baseline points (same conditions) while spanning a similar spread." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 5: Diversity & Coverage -- \"Did we explore?\"\n", + "\n", + "A generative model should produce **varied** designs, not 24 copies of the same\n", + "beam. 
We measure two complementary aspects:\n", + "\n", + "### Diversity (intra-set variation)\n", + "How different are the generated designs from *each other*?\n", + "- **Pairwise L2 distance**: Average Euclidean distance between all pairs of\n", + " generated designs. Higher = more diverse.\n", + "- **DPP diversity**: Determinantal Point Process log-determinant of the\n", + " similarity matrix. Captures both volume and spread of the set. (Part of the\n", + " full EngiBench metric suite; this notebook computes pairwise L2 only.)\n", + "\n", + "### Novelty (distance to training data)\n", + "How different are the generated designs from the *training set*?\n", + "- **Nearest-neighbour distance**: For each generated design, find the closest\n", + " training example. If NN distance is near zero, the model may be memorising.\n", + " Higher = more novel.\n", + "\n", + "> **The diversity-quality trade-off:** A model that generates random noise\n", + "> would score very high on diversity but terribly on quality. We want designs\n", + "> that are diverse *and* feasible *and* performant. This is the fundamental\n", + "> tension in generative model evaluation."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_pairwise_distance_heatmap(gen_designs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Compute diversity and novelty metrics\n", + "diversity_l2 = mean_pairwise_l2(gen_designs)\n", + "novelty_nn = mean_nn_distance_to_reference(gen_designs, train_reference)\n", + "\n", + "# Also compute for baseline as a reference point\n", + "baseline_diversity = mean_pairwise_l2(baseline_designs)\n", + "baseline_novelty = mean_nn_distance_to_reference(baseline_designs, train_reference)\n", + "\n", + "print(\"Diversity (mean pairwise L2):\")\n", + "print(f\" Generated: {diversity_l2:.2f}\")\n", + "print(f\" Baseline: {baseline_diversity:.2f}\")\n", + "print()\n", + "print(\"Novelty (mean NN distance to training):\")\n", + "print(f\" Generated: {novelty_nn:.2f}\")\n", + "print(f\" Baseline: {baseline_novelty:.2f}\")\n", + "print()\n", + "if diversity_l2 < baseline_diversity * 0.5:\n", + " print(\"Warning: Generated diversity is much lower than baseline -- possible mode collapse.\")\n", + "elif diversity_l2 > baseline_diversity * 1.5:\n", + " print(\"Note: Generated diversity exceeds baseline -- check if the extra variation is meaningful.\")\n", + "else:\n", + " print(\"Generated diversity is comparable to baseline diversity.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the diversity results\n", + "\n", + "- **Pairwise heatmap**: Uniform warm colours = good diversity (all designs differ\n", + " from each other). A block of cool/dark colours = a cluster of near-identical\n", + " designs (partial mode collapse).\n", + "\n", + "- **Diversity vs baseline**: The baseline designs come from an optimiser run on\n", + " diverse conditions, so they naturally vary. 
If the generator's diversity is\n", + " much lower, it is producing less variety than the problem demands.\n", + "\n", + "- **Novelty**: Very low NN distance means the generator is reproducing training\n", + " examples almost exactly. Some proximity is expected (it learned from them),\n", + " but near-zero distance suggests memorisation rather than generalisation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 6: Optimization Warmstarting -- \"Does it speed up search?\"\n", + "\n", + "The **ultimate downstream test** for a generative model in engineering design:\n", + "if we use its output as a *starting point* for topology optimisation, does the\n", + "optimiser converge faster or find better solutions than starting from scratch?\n", + "\n", + "### The optimality gap metrics\n", + "\n", + "Starting from a generated design, we run the problem's optimiser and track the\n", + "objective at each step:\n", + "\n", + "- **IOG (Initial Optimality Gap)** = objective at step 0 minus baseline optimum.\n", + " *How good is the starting point?*\n", + "\n", + "- **FOG (Final Optimality Gap)** = objective at final step minus baseline optimum.\n", + " *How good is the final result?*\n", + "\n", + "- **COG (Cumulative Optimality Gap)** = sum of all per-step gaps.\n", + " *How much total \"wasted effort\" occurred across the trajectory?*\n", + " The shaded area in the trajectory plot.\n", + "\n", + "```\n", + "Objective\n", + " ^\n", + " | * IOG = obj[0] - baseline\n", + " | \\ *\n", + " | \\ * * Shaded area = COG\n", + " | \\ * * *\n", + " | ─ ─ ─ ─ ─ ─ ─ ─ FOG = obj[-1] - baseline\n", + " | - - - - - - - - - - - ← baseline (optimised reference)\n", + " └────────────────────────> Step\n", + "```\n", + "\n", + "- IOG < 0 is ideal: the generated design is *already better* than the baseline\n", + "- FOG ≈ 0: the optimiser recovers to baseline quality regardless of start\n", + "- Small COG: the optimiser converges quickly from this 
warmstart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# DEMO: Optimization warmstarting on a small subset (3 samples)\n", + "# This runs the EngiBench optimiser from each generated design and tracks the trajectory.\n", + "# We use only 3 samples because optimization is slower than simulation.\n", + "\n", + "problem_opt = BUILTIN_PROBLEMS[PROBLEM_ID](seed=7)\n", + "n_opt_demo = min(3, len(gen_designs))\n", + "opt_data = []\n", + "\n", + "for i in range(n_opt_demo):\n", + " cfg = dict(conditions[i])\n", + "\n", + " # Run optimiser from generated design\n", + " problem_opt.reset(seed=7 + i)\n", + " _, opt_history = problem_opt.optimize(gen_designs[i], config=cfg)\n", + "\n", + " # Get baseline objective for reference\n", + " problem_opt.reset(seed=7 + i)\n", + " base_obj = float(problem_opt.simulate(baseline_designs[i], config=cfg)[0])\n", + "\n", + " # Extract objective trajectory\n", + " obj_trajectory = [float(step.obj_values) for step in opt_history]\n", + "\n", + " opt_data.append({\n", + " \"sample_idx\": i,\n", + " \"obj_trajectory\": obj_trajectory,\n", + " \"base_obj\": base_obj,\n", + " })\n", + " iog = obj_trajectory[0] - base_obj\n", + " fog = obj_trajectory[-1] - base_obj\n", + " cog = sum(o - base_obj for o in obj_trajectory)\n", + " print(f\"Sample {i}: IOG={iog:.1f} FOG={fog:.1f} COG={cog:.1f} ({len(opt_history)} steps)\")\n", + "\n", + "print(f\"\\nOptimization complete for {n_opt_demo} samples.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_optimization_trajectories(opt_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Reading the optimization trajectories\n", + "\n", + "- **Steep drop at step 0→1**: The generated design was far from optimal, but the\n", + " optimiser quickly improved it. 
This still counts as a useful warmstart if\n", + " the total trajectory (COG) is shorter than starting from scratch.\n", + "\n", + "- **Flat trajectory near baseline**: The generated design was already near-optimal\n", + " and the optimiser had little work to do. Best-case scenario.\n", + "\n", + "- **Trajectory above baseline throughout**: The generated design was so far from\n", + " optimal that even after optimisation it never reached baseline quality. This\n", + " suggests the model is producing designs in the wrong region of design space.\n", + "\n", + "**In practice**, you would run this on many more samples and average the IOG/COG/FOG\n", + "to get statistically robust estimates. For the workshop, 3 samples illustrate\n", + "the concept." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Part 7: Putting It All Together\n", + "\n", + "Now we aggregate all the metrics from Parts 2-6 into a single summary table.\n", + "This is the kind of table you would report in a paper or use to compare\n", + "different generative models." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# PUBLIC FILL-IN CELL 02-C\n", + "# Goal: build a comprehensive summary dict and wrap it in a DataFrame.\n", + "\n", + "# START FILL ---------------------------------------------------------------\n", + "# Compute average IOG/FOG/COG from the optimization demo\n", + "avg_iog = float(np.mean([d[\"obj_trajectory\"][0] - d[\"base_obj\"] for d in opt_data]))\n", + "avg_fog = float(np.mean([d[\"obj_trajectory\"][-1] - d[\"base_obj\"] for d in opt_data]))\n", + "avg_cog = float(np.mean([sum(o - d[\"base_obj\"] for o in d[\"obj_trajectory\"]) for d in opt_data]))\n", + "\n", + "summary = {\n", + " # Simulation performance\n", + " \"n_samples\": len(results),\n", + " \"gen_obj_mean\": float(results[\"gen_obj\"].mean()),\n", + " \"base_obj_mean\": float(results[\"base_obj\"].mean()),\n", + " \"objective_gap_mean\": float(results[\"gen_minus_base\"].mean()),\n", + " \"improvement_rate\": float((results[\"gen_obj\"] < results[\"base_obj\"]).mean()),\n", + " # Constraint satisfaction\n", + " \"gen_feasible_rate\": float(results[\"gen_feasible\"].mean()),\n", + " \"base_feasible_rate\": float(results[\"base_feasible\"].mean()),\n", + " \"gen_violation_ratio\": float((~results[\"gen_feasible\"]).mean()),\n", + " \"base_violation_ratio\": float((~results[\"base_feasible\"]).mean()),\n", + " # Distributional similarity\n", + " \"mmd\": mmd_value,\n", + " # Diversity & novelty\n", + " \"gen_diversity_l2\": diversity_l2,\n", + " \"gen_novelty_to_train_l2\": novelty_nn,\n", + " # Optimization warmstarting (from demo subset)\n", + " \"avg_iog\": avg_iog,\n", + " \"avg_fog\": avg_fog,\n", + " \"avg_cog\": avg_cog,\n", + "}\n", + "\n", + "summary_df = pd.DataFrame([summary])\n", + "# END FILL -----------------------------------------------------------------\n", + "\n", + "# Show the summary transposed for readability (one metric per row)\n", + 
"display(summary_df.T.rename(columns={0: \"value\"}))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT 02-C\n", + "assert \"summary_df\" in dir() and summary_df is not None, \"Define summary_df\"\n", + "assert len(summary_df) == 1, \"summary_df should have exactly one row\"\n", + "required_keys = {\n", + " \"n_samples\", \"gen_obj_mean\", \"base_obj_mean\", \"objective_gap_mean\",\n", + " \"improvement_rate\", \"gen_feasible_rate\", \"base_feasible_rate\",\n", + " \"gen_violation_ratio\", \"base_violation_ratio\",\n", + " \"mmd\", \"gen_diversity_l2\", \"gen_novelty_to_train_l2\",\n", + " \"avg_iog\", \"avg_fog\", \"avg_cog\",\n", + "}\n", + "missing = required_keys - set(summary_df.columns)\n", + "assert not missing, f\"Missing summary columns: {missing}\"\n", + "assert summary_df[\"gen_obj_mean\"].notna().all(), \"gen_obj_mean is NaN\"\n", + "print(\"Checkpoint 02-C passed: comprehensive summary table is complete.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "show_metric_summary_dashboard(summary)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Export artifacts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results_path = ARTIFACT_DIR / \"per_sample_metrics.csv\"\n", + "summary_path = ARTIFACT_DIR / \"metrics_summary.csv\"\n", + "\n", + "results.to_csv(results_path, index=False)\n", + "summary_df.to_csv(summary_path, index=False)\n", + "\n", + "# Save objective histogram\n", + "hist_path = ARTIFACT_DIR / \"objective_histogram.png\"\n", + "fig, ax = plt.subplots(figsize=(7, 4))\n", + "ax.hist(results[\"gen_obj\"], bins=10, alpha=0.7, label=\"Generated\", color=\"#4C72B0\")\n", + "ax.hist(results[\"base_obj\"], bins=10, alpha=0.7, label=\"Baseline\", color=\"#DD8452\")\n", + "ax.set_xlabel(\"Compliance 
(lower is better)\")\n", + "ax.set_ylabel(\"Count\")\n", + "ax.set_title(\"Generated vs baseline objective distribution\")\n", + "ax.legend()\n", + "fig.tight_layout()\n", + "fig.savefig(hist_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save scatter plot\n", + "scatter_path = ARTIFACT_DIR / \"objective_scatter.png\"\n", + "fig, ax = plt.subplots(figsize=(5, 5))\n", + "ax.scatter(results[\"base_obj\"], results[\"gen_obj\"], alpha=0.8)\n", + "lo = min(results[\"base_obj\"].min(), results[\"gen_obj\"].min()) * 0.9\n", + "hi = max(results[\"base_obj\"].max(), results[\"gen_obj\"].max()) * 1.1\n", + "ax.plot([lo, hi], [lo, hi], \"--\", color=\"gray\", linewidth=1)\n", + "ax.set_xlabel(\"Baseline compliance\")\n", + "ax.set_ylabel(\"Generated compliance\")\n", + "ax.set_title(\"Per-sample objective comparison\")\n", + "fig.tight_layout()\n", + "fig.savefig(scatter_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "# Save design grid\n", + "grid_path = ARTIFACT_DIR / \"design_grid.png\"\n", + "fig, axes_grid = plt.subplots(2, min(6, len(gen_designs)), figsize=(14, 5))\n", + "for i in range(min(6, len(gen_designs))):\n", + " axes_grid[0, i].imshow(gen_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[0, i].set_title(f\"gen {i}\", fontsize=9)\n", + " axes_grid[0, i].axis(\"off\")\n", + " axes_grid[1, i].imshow(baseline_designs[i], cmap=\"gray\", vmin=0, vmax=1)\n", + " axes_grid[1, i].set_title(f\"base {i}\", fontsize=9)\n", + " axes_grid[1, i].axis(\"off\")\n", + "fig.tight_layout()\n", + "fig.savefig(grid_path, dpi=150)\n", + "plt.close(fig)\n", + "\n", + "print(\"Exported:\")\n", + "for p in [results_path, summary_path, hist_path, scatter_path, grid_path]:\n", + " print(f\" {p}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Discussion prompts\n", + "\n", + "Use these questions to prepare for the workshop breakout discussion. 
There are no\n", + "\"right\" answers -- the goal is to develop your own informed perspective.\n", + "\n", + "1. **Which metric category matters most for your domain?** In safety-critical\n", + " applications (aerospace, medical devices), constraint satisfaction is a hard\n", + " requirement. In early-stage concept exploration, diversity might matter more.\n", + " What about your own research area?\n", + "\n", + "2. **When do metrics disagree?** A model might score well on MMD (distributional\n", + " match) but poorly on per-sample objective (simulation performance). What does\n", + " that disagreement tell you? Which metric would you trust more?\n", + "\n", + "3. **Is diversity always good?** A model that produces wildly different designs\n", + " scores high on diversity -- but some of those designs might be nonsensical.\n", + " When does high diversity indicate a problem rather than a strength?\n", + "\n", + "4. **The warmstarting test.** If a model's IOG is poor (bad starting points) but\n", + " FOG is near zero (optimiser recovers), is the model useful? What if IOG is\n", + " great but the optimiser diverges (FOG increases)?\n", + "\n", + "5. **When would you trust these results for a paper?** We evaluated 24 samples\n", + " with a model trained for 8 epochs on 512 examples. What would need to change\n", + " to make these numbers publication-ready? (Think: sample size, training budget,\n", + " statistical significance, multiple seeds.)\n", + "\n", + "6. **Objective vs feasibility trade-off.** If your model produces designs with\n", + " great compliance but poor volume-fraction adherence, is that progress or a\n", + " failure? How would you communicate this nuance in a benchmark table?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Reflection: what did you learn in NB02?\n", + "\n", + "Before closing, write down your answers to these prompts:\n", + "\n", + "1. 
**What do the metrics tell you about your model?** Look at your summary table.\n", + " Where does the generator excel, and where does it fall short? Which metric\n", + " surprised you most?\n", + "\n", + "2. **Which visualisation was most informative?** Was it the residual heatmaps,\n", + " the PCA embedding, the optimization trajectories, or something else? Why?\n", + "\n", + "3. **What would a full benchmark study add?** A complete EngiBench evaluation\n", + " would test across multiple problems, multiple seeds, larger sample sizes, and\n", + " the full metric suite (MMD, DPP, IOG/COG/FOG, violation ratio). How would\n", + " that change your confidence in the conclusions?\n", + "\n", + "4. **How would you improve the generator?** Based on the diagnostic pattern you\n", + " see (which categories are strong vs weak), what would you change about the\n", + " model architecture, training procedure, or data pipeline?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "## Troubleshooting\n", + "\n", + "If a section fails, do not continue downstream. Fix the failing cell first, then\n", + "rerun it and its checkpoint before moving on. The notebook is staged so that\n", + "failures are localised." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.10.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb b/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb new file mode 100644 index 00000000..86c1b18f --- /dev/null +++ b/workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb @@ -0,0 +1,790 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Notebook 03 (Solution): Add a New Problem to EngiBench\n", + "\n", + "**Time budget: ~25 minutes** | 3 fill-in exercises | Mostly guided walkthrough\n", + "\n", + "In this notebook you will see how to wrap a **new simulator** as an EngiBench `Problem`,\n", + "so that every model in EngiOpt can immediately train on it with zero code changes.\n", + "\n", + "We will build a **planar 2-link robot manipulator co-design problem**: choose link\n", + "lengths, motor strength, and control gains so the arm reaches a target with minimal\n", + "tracking error and energy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Edit-safe start:** this notebook opens from GitHub in read-only source mode. Use **File -> Save a copy in Drive** before running edits so your changes stay in your own workspace." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Notebook map\n", + "\n", + "This notebook is a **guided walkthrough** with 3 small fill-in exercises.\n", + "Most code is pre-written -- your job is to **read, run, and understand** the\n", + "EngiBench Problem contract, then fill in 3 targeted methods.\n", + "\n", + "### Public exercise legend\n", + "- `PUBLIC FILL-IN CELL`: implement this method (skeleton + hints provided).\n", + "- `CHECKPOINT`: run and verify before continuing.\n", + "- Pre-written cells: read and run -- these are fully working code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The problem: Planar manipulator co-design\n", + "\n", + "Imagine a simple robot arm bolted to a table. It has **two rigid links**\n", + "connected by revolute joints, and it needs to reach a target point in 2D space.\n", + "\n", + "```\n", + " target\n", + " X (target_x, target_y)\n", + " /\n", + " / link 2 (length l2)\n", + " /\n", + " joint 2\n", + " /\n", + " / link 1 (length l1)\n", + " /\n", + " joint 1\n", + " *------------ table / base\n", + "```\n", + "\n", + "**What we design** (the design vector, 6 variables):\n", + "\n", + "| Index | Variable | Range | Meaning |\n", + "|-------|----------|-------|---------|\n", + "| 0 | `link1_m` | 0.25 -- 1.00 | Length of link 1 (meters) |\n", + "| 1 | `link2_m` | 0.20 -- 0.95 | Length of link 2 (meters) |\n", + "| 2 | `motor_strength` | 2.0 -- 30.0 | Motor torque multiplier |\n", + "| 3 | `kp` | 5.0 -- 120.0 | Proportional control gain |\n", + "| 4 | `kd` | 0.2 -- 18.0 | Derivative control gain |\n", + "| 5 | `damping` | 0.0 -- 1.5 | Joint damping coefficient |\n", + "\n", + "**Conditions** (set by the environment, not the designer):\n", + "- `target_x`, `target_y`: where the arm must reach\n", + "- `payload_kg`: mass at the end-effector\n", + "- `disturbance_scale`: random torque noise during simulation\n", + "\n", + "**Objectives** (both minimized):\n", + "1. 
`final_tracking_error_m`: how far the end-effector is from the target at the end\n", + "2. `actuation_energy_j`: total energy spent by the motors\n", + "\n", + "**Why this is a co-design problem**: we are simultaneously choosing the *hardware*\n", + "(link lengths, motor) and the *controller* (gains, damping). This is exactly the\n", + "kind of coupled design problem where generative models can help explore the space." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The EngiBench Problem contract\n", + "\n", + "Every problem in EngiBench implements the same interface. This is what makes it\n", + "possible to train **any** EngiOpt model on **any** problem with zero model code changes.\n", + "\n", + "The key pieces:\n", + "\n", + "| Attribute / Method | Purpose |\n", + "|---|---|\n", + "| `design_space` | A `gymnasium.spaces.Box` defining valid designs |\n", + "| `objectives` | Tuple of `(name, direction)` pairs |\n", + "| `conditions` | Dataclass of environmental conditions |\n", + "| `design_constraints` | List of constraint functions |\n", + "| `check_constraints(design, config)` | Returns list of violations (empty = feasible) |\n", + "| `simulate(design, config)` | Runs the simulator, returns objective values |\n", + "| `optimize(start, config)` | Simple optimizer, returns `(best_design, history)` |\n", + "| `render(design)` | Visualization for human inspection |\n", + "| `random_design()` | Sample a random valid design |\n", + "\n", + "In this notebook, most of these are **pre-written**. You will fill in 3 methods\n", + "that test your understanding of the contract." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Colab/local dependency bootstrap\n", + "import subprocess\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # Set True to force install outside Colab\n", + "\n", + "\n", + "def pip_install(packages: list[str]):\n", + " cmd = [sys.executable, \"-m\", \"pip\", \"install\", *packages]\n", + " print(\"Running:\", \" \".join(cmd))\n", + " subprocess.check_call(cmd)\n", + "\n", + "\n", + "BASE_PACKAGES = [\"engibench[all]\", \"matplotlib\", \"gymnasium\", \"pybullet\"]\n", + "ENGIOPT_GIT = \"git+https://github.com/IDEALLab/EngiOpt.git@codex/dcc26-workshop-notebooks#egg=engiopt\"\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " print(\"Installing dependencies...\")\n", + " pip_install(BASE_PACKAGES)\n", + " pip_install([ENGIOPT_GIT])\n", + "\n", + " try:\n", + " import torch # noqa: F401\n", + " except Exception:\n", + " pip_install([\"torch\", \"torchvision\"])\n", + "\n", + " print(\"Dependency install complete.\")\n", + "else:\n", + " print(\"Skipping install (using current environment). Set FORCE_INSTALL=True to install here.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1 -- Imports\n", + "\n", + "These are the EngiBench building blocks we need to define a Problem." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from typing import Annotated\n", + "\n", + "import numpy as np\n", + "from gymnasium import spaces\n", + "\n", + "from engibench.constraint import bounded\n", + "from engibench.constraint import constraint\n", + "from engibench.core import ObjectiveDirection\n", + "from engibench.core import OptiStep\n", + "from engibench.core import Problem\n", + "\n", + "import pybullet as p" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2 -- Build the Problem class (guided walkthrough + 3 fill-ins)\n", + "\n", + "The cell below contains the **complete** `PlanarManipulatorCoDesignProblem` class.\n", + "Most methods are pre-written and working. **Three methods** are left for you to fill in.\n", + "\n", + "Read through the pre-written code to understand the structure, then complete:\n", + "\n", + "1. **Fill-in 03-A** (`simulate`): Merge config, clip design to bounds, call the rollout. A short wrapper method.\n", + "2. **Fill-in 03-B** (`random_design`): Sample a design from the design space. Essentially a one-liner.\n", + "3. **Fill-in 03-C** (`optimize`): Wire up a simple random-perturbation search loop using the hints provided.\n", + "\n", + "The pre-written methods handle all the PyBullet complexity -- you do NOT need to\n", + "understand robotics or physics simulation to complete the exercises." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Pre-written methods tour (read before filling in)\n", + "\n", + "Here is a quick guide to the pre-written methods you will see in the class:\n", + "\n", + "- **`__init__`**: Sets up the design space (6-dim Box), conditions, and constraints.\n", + "- **`_build_robot`**: Creates a 2-link arm in PyBullet with configurable link lengths and damping.\n", + "- **`_inverse_kinematics_2link`**: Given a target (x, y), computes the joint angles using the law of cosines. Standard closed-form 2-link IK.\n", + "- **`_forward_kinematics_2link`**: Given joint angles, computes end-effector (x, y). Simple trig.\n", + "- **`_rollout`**: Runs the full PyBullet simulation -- sets up PD control to track the target, applies disturbances, records tracking error and energy at each step.\n", + "- **`optimize`**: Random search over the design space -- tries perturbations, keeps the best.\n", + "- **`render`**: 4-panel matplotlib figure showing design variables, end-effector path, tracking error, and joint torques." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class PlanarManipulatorCoDesignProblem(Problem[np.ndarray]):\n", + " \"\"\"Robotics co-design problem: choose arm geometry + controller to reach a target.\n", + "\n", + " This wraps a PyBullet physics simulation as an EngiBench Problem so that\n", + " any EngiOpt generative model can train on it.\n", + " \"\"\"\n", + "\n", + " version = 0\n", + " objectives = (\n", + " (\"final_tracking_error_m\", ObjectiveDirection.MINIMIZE),\n", + " (\"actuation_energy_j\", ObjectiveDirection.MINIMIZE),\n", + " )\n", + "\n", + " @dataclass\n", + " class Conditions:\n", + " target_x: Annotated[float, bounded(lower=0.20, upper=1.35)] = 0.85\n", + " target_y: Annotated[float, bounded(lower=0.05, upper=1.20)] = 0.45\n", + " payload_kg: Annotated[float, bounded(lower=0.0, upper=2.0)] = 0.8\n", + " disturbance_scale: Annotated[float, bounded(lower=0.0, upper=0.30)] = 0.05\n", + "\n", + " @dataclass\n", + " class Config(Conditions):\n", + " sim_steps: Annotated[int, bounded(lower=60, upper=1200)] = 240\n", + " dt: Annotated[float, bounded(lower=1e-4, upper=0.05)] = 1.0 / 120.0\n", + " torque_limit: Annotated[float, bounded(lower=1.0, upper=50.0)] = 12.0\n", + " max_iter: Annotated[int, bounded(lower=1, upper=300)] = 60\n", + "\n", + " dataset_id = \"IDEALLab/planar_manipulator_codesign_v0\" # placeholder\n", + " container_id = None\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # __init__ (pre-written)\n", + " # ------------------------------------------------------------------ #\n", + " def __init__(self, seed: int = 0, **kwargs):\n", + " super().__init__(seed=seed)\n", + " self.config = self.Config(**kwargs)\n", + " self.conditions = self.Conditions(\n", + " target_x=self.config.target_x,\n", + " target_y=self.config.target_y,\n", + " payload_kg=self.config.payload_kg,\n", + " 
disturbance_scale=self.config.disturbance_scale,\n", + " )\n", + "\n", + " # Design vector = [link1_m, link2_m, motor_strength, kp, kd, damping]\n", + " self.design_space = spaces.Box(\n", + " low=np.array([0.25, 0.20, 2.0, 5.0, 0.2, 0.0], dtype=np.float32),\n", + " high=np.array([1.00, 0.95, 30.0, 120.0, 18.0, 1.5], dtype=np.float32),\n", + " dtype=np.float32,\n", + " )\n", + "\n", + " # --- Constraints ------------------------------------------------\n", + " # These use the @constraint decorator from EngiBench.\n", + " # A constraint function receives (design, **config_kwargs).\n", + " # It should ASSERT what must be true. If the assert fails,\n", + " # check_constraints() catches it and reports a violation.\n", + "\n", + " @constraint\n", + " def reachable_workspace(design: np.ndarray, target_x: float, target_y: float, **_) -> None:\n", + " l1, l2 = float(design[0]), float(design[1])\n", + " r = float(np.sqrt(target_x**2 + target_y**2))\n", + " assert l1 + l2 >= r + 0.03, f\"target radius {r:.3f} exceeds reach {l1 + l2:.3f}\"\n", + "\n", + " @constraint\n", + " def gain_consistency(design: np.ndarray, **_) -> None:\n", + " kp, kd = float(design[3]), float(design[4])\n", + " assert kd <= 2.2 * np.sqrt(max(kp, 1e-6)), f\"kd={kd:.3f} too high for kp={kp:.3f}\"\n", + "\n", + " self.design_constraints = [reachable_workspace, gain_consistency]\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _build_robot (pre-written -- PyBullet internals)\n", + " # ------------------------------------------------------------------ #\n", + " def _build_robot(self, l1: float, l2: float, payload_kg: float, damping: float) -> tuple[int, int]:\n", + " \"\"\"Create a 2-link planar arm in PyBullet. 
Returns (robot_id, ee_link_index).\"\"\"\n", + " p.resetSimulation()\n", + " p.setGravity(0, 0, -9.81)\n", + "\n", + " link_masses = [0.5 + 0.2 * payload_kg, 0.35 + 0.25 * payload_kg]\n", + " link_collision = [-1, -1]\n", + " link_visual = [\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.025, length=l1, rgbaColor=[0.2, 0.5, 0.9, 1.0]),\n", + " p.createVisualShape(p.GEOM_CAPSULE, radius=0.020, length=l2, rgbaColor=[0.9, 0.4, 0.2, 1.0]),\n", + " ]\n", + " qx = p.getQuaternionFromEuler([0.0, np.pi / 2.0, 0.0])\n", + "\n", + " robot = p.createMultiBody(\n", + " baseMass=0.0,\n", + " baseCollisionShapeIndex=-1,\n", + " baseVisualShapeIndex=-1,\n", + " basePosition=[0, 0, 0],\n", + " linkMasses=link_masses,\n", + " linkCollisionShapeIndices=link_collision,\n", + " linkVisualShapeIndices=link_visual,\n", + " linkPositions=[[0, 0, 0], [l1, 0, 0]],\n", + " linkOrientations=[qx, qx],\n", + " linkInertialFramePositions=[[l1 / 2.0, 0, 0], [l2 / 2.0, 0, 0]],\n", + " linkInertialFrameOrientations=[[0, 0, 0, 1], [0, 0, 0, 1]],\n", + " linkParentIndices=[0, 1],\n", + " linkJointTypes=[p.JOINT_REVOLUTE, p.JOINT_REVOLUTE],\n", + " linkJointAxis=[[0, 0, 1], [0, 0, 1]],\n", + " )\n", + "\n", + " for j in [0, 1]:\n", + " p.changeDynamics(robot, j, linearDamping=0.0, angularDamping=float(damping))\n", + "\n", + " return robot, 1\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _inverse_kinematics_2link (pre-written -- standard 2-link IK)\n", + " # ------------------------------------------------------------------ #\n", + " def _inverse_kinematics_2link(self, x: float, y: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Closed-form IK for a 2-link planar arm using the law of cosines.\"\"\"\n", + " r2 = x * x + y * y\n", + " c2 = (r2 - l1 * l1 - l2 * l2) / (2.0 * l1 * l2)\n", + " c2 = float(np.clip(c2, -1.0, 1.0))\n", + " s2 = float(np.sqrt(max(0.0, 1.0 - c2 * c2)))\n", + " q2 = float(np.arctan2(s2, c2))\n", + " q1 = 
float(np.arctan2(y, x) - np.arctan2(l2 * s2, l1 + l2 * c2))\n", + " return q1, q2\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _forward_kinematics_2link (pre-written -- simple trig)\n", + " # ------------------------------------------------------------------ #\n", + " def _forward_kinematics_2link(self, q1: float, q2: float, l1: float, l2: float) -> tuple[float, float]:\n", + " \"\"\"Compute end-effector (x, y) from joint angles and link lengths.\"\"\"\n", + " x = l1 * np.cos(q1) + l2 * np.cos(q1 + q2)\n", + " y = l1 * np.sin(q1) + l2 * np.sin(q1 + q2)\n", + " return float(x), float(y)\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # _rollout (pre-written -- runs the full PyBullet simulation)\n", + " # ------------------------------------------------------------------ #\n", + " def _rollout(self, design: np.ndarray, cfg: dict, return_trace: bool = False):\n", + " \"\"\"Run PyBullet simulation with PD control. 
Returns objective vector.\"\"\"\n", + " l1, l2, motor_strength, kp, kd, damping = [float(v) for v in design]\n", + "\n", + " cid = p.connect(p.DIRECT)\n", + " try:\n", + " robot, _ = self._build_robot(l1, l2, cfg[\"payload_kg\"], damping)\n", + " q1_t, q2_t = self._inverse_kinematics_2link(cfg[\"target_x\"], cfg[\"target_y\"], l1, l2)\n", + "\n", + " err_trace = []\n", + " tau_trace = []\n", + " ee_trace = []\n", + " energy = 0.0\n", + "\n", + " for _step in range(int(cfg[\"sim_steps\"])):\n", + " for j, q_t in enumerate([q1_t, q2_t]):\n", + " p.setJointMotorControl2(\n", + " bodyUniqueId=robot,\n", + " jointIndex=j,\n", + " controlMode=p.POSITION_CONTROL,\n", + " targetPosition=q_t,\n", + " positionGain=float(kp) / 120.0,\n", + " velocityGain=float(kd) / 50.0,\n", + " force=float(cfg[\"torque_limit\"]) * float(motor_strength),\n", + " )\n", + "\n", + " if cfg[\"disturbance_scale\"] > 0:\n", + " disturb = self.np_random.normal(0.0, cfg[\"disturbance_scale\"], size=2)\n", + " p.applyExternalTorque(robot, 0, [0, 0, float(disturb[0])], p.LINK_FRAME)\n", + " p.applyExternalTorque(robot, 1, [0, 0, float(disturb[1])], p.LINK_FRAME)\n", + "\n", + " p.stepSimulation()\n", + "\n", + " js0 = p.getJointState(robot, 0)\n", + " js1 = p.getJointState(robot, 1)\n", + " q1, q2 = float(js0[0]), float(js1[0])\n", + " dq1, dq2 = float(js0[1]), float(js1[1])\n", + " tau1, tau2 = float(js0[3]), float(js1[3])\n", + "\n", + " ee_x, ee_y = self._forward_kinematics_2link(q1, q2, l1, l2)\n", + " err = float(np.sqrt((ee_x - cfg[\"target_x\"]) ** 2 + (ee_y - cfg[\"target_y\"]) ** 2))\n", + "\n", + " err_trace.append(err)\n", + " tau_trace.append((tau1, tau2))\n", + " ee_trace.append((ee_x, ee_y))\n", + " energy += (abs(tau1 * dq1) + abs(tau2 * dq2)) * float(cfg[\"dt\"])\n", + "\n", + " final_error = float(err_trace[-1])\n", + " obj = np.array([final_error, float(energy)], dtype=np.float32)\n", + "\n", + " if return_trace:\n", + " trace = {\n", + " \"ee_trace\": np.array(ee_trace, 
dtype=np.float32),\n", + " \"err_trace\": np.array(err_trace, dtype=np.float32),\n", + " \"tau_trace\": np.array(tau_trace, dtype=np.float32),\n", + " \"target\": np.array([cfg[\"target_x\"], cfg[\"target_y\"]], dtype=np.float32),\n", + " \"design\": np.array(design, dtype=np.float32),\n", + " \"objectives\": obj,\n", + " }\n", + " return obj, trace\n", + "\n", + " return obj\n", + " finally:\n", + " p.disconnect(cid)\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-A: simulate\n", + " # ================================================================== #\n", + " def simulate(self, design: np.ndarray, config: dict | None = None) -> np.ndarray:\n", + " \"\"\"Run the simulator and return objective values.\n", + "\n", + " This is the main entry point that EngiOpt models call.\n", + " It should:\n", + " 1. Merge self.config defaults with any overrides from `config`\n", + " 2. Clip the design to the valid bounds\n", + " 3. Call self._rollout() and return the result\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " cfg = {**self.config.__dict__, **(config or {})}\n", + " clipped = np.clip(design, self.design_space.low, self.design_space.high)\n", + " return self._rollout(clipped, cfg, return_trace=False)\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-B: random_design\n", + " # ================================================================== #\n", + " # Note: check_constraints() is inherited from Problem. 
It calls each\n", + " # function in self.design_constraints and collects assertion failures.\n", + " # The constraints are defined in __init__ above -- look at them!\n", + " #\n", + " # This fill-in is about random_design(), which is used by the optimizer\n", + " # and by dataset generation to sample starting points.\n", + "\n", + " def random_design(self):\n", + " \"\"\"Return (design, reward) where design is sampled uniformly from bounds.\n", + "\n", + " Convention: reward = -1 (dummy value, since we have not simulated yet).\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " design = self.np_random.uniform(self.design_space.low, self.design_space.high).astype(np.float32)\n", + " return design, -1.0\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ================================================================== #\n", + " # PUBLIC FILL-IN CELL 03-C: optimize\n", + " # ================================================================== #\n", + " def optimize(self, starting_point: np.ndarray, config: dict | None = None):\n", + " \"\"\"Simple random-perturbation optimizer.\n", + "\n", + " Returns (best_design, history) where history is a list of OptiStep.\n", + " Each OptiStep records the best objective values seen so far at that step.\n", + "\n", + " Algorithm:\n", + " 1. Start from starting_point, evaluate it\n", + " 2. For each iteration: perturb the best design with Gaussian noise,\n", + " clip to bounds, check constraints, simulate, keep if better\n", + " 3. 
\"Better\" = lower score, where score = error + 0.02 * energy\n", + " \"\"\"\n", + " # START FILL -------------------------------------------------------\n", + " cfg = {**self.config.__dict__, **(config or {})}\n", + " x = np.clip(starting_point, self.design_space.low, self.design_space.high).astype(np.float32)\n", + " best = x.copy()\n", + " best_obj = self.simulate(best, cfg)\n", + " best_score = float(best_obj[0] + 0.02 * best_obj[1])\n", + " history = [OptiStep(obj_values=best_obj, step=0)]\n", + " step_scale = np.array([0.05, 0.05, 2.5, 8.0, 1.2, 0.08], dtype=np.float32)\n", + "\n", + " for i in range(int(cfg.get(\"max_iter\", 60))):\n", + " candidate = best + self.np_random.normal(size=best.shape).astype(np.float32) * step_scale\n", + " candidate = np.clip(candidate, self.design_space.low, self.design_space.high)\n", + " violations = self.check_constraints(candidate, config=cfg)\n", + " if violations:\n", + " history.append(OptiStep(obj_values=best_obj, step=i + 1))\n", + " continue\n", + " cand_obj = self.simulate(candidate, cfg)\n", + " cand_score = float(cand_obj[0] + 0.02 * cand_obj[1])\n", + " if cand_score < best_score:\n", + " best = candidate.copy()\n", + " best_obj = cand_obj\n", + " best_score = cand_score\n", + " history.append(OptiStep(obj_values=best_obj, step=i + 1))\n", + "\n", + " return best, history\n", + " # END FILL ---------------------------------------------------------\n", + "\n", + " # ------------------------------------------------------------------ #\n", + " # render (pre-written -- 4-panel visualization)\n", + " # ------------------------------------------------------------------ #\n", + " def render(self, design: np.ndarray, *, open_window: bool = False):\n", + " \"\"\"Create a 4-panel diagnostic figure for a given design.\"\"\"\n", + " import matplotlib.pyplot as plt\n", + "\n", + " cfg = self.config.__dict__\n", + " x = np.clip(design.astype(np.float32), self.design_space.low, self.design_space.high)\n", + " obj, trace = 
self._rollout(x, cfg, return_trace=True)\n", + "\n", + " ee = trace[\"ee_trace\"]\n", + " err = trace[\"err_trace\"]\n", + " target = trace[\"target\"]\n", + " tau = trace[\"tau_trace\"]\n", + "\n", + " fig, axes = plt.subplots(1, 4, figsize=(17, 4.2))\n", + "\n", + " labels = [\"link1\", \"link2\", \"motor\", \"kp\", \"kd\", \"damping\"]\n", + " axes[0].bar(labels, x, color=[\"#4c78a8\", \"#4c78a8\", \"#f58518\", \"#54a24b\", \"#e45756\", \"#72b7b2\"])\n", + " axes[0].set_title(\"Design variables\")\n", + " axes[0].tick_params(axis=\"x\", rotation=35)\n", + "\n", + " axes[1].plot(ee[:, 0], ee[:, 1], lw=2, label=\"end-effector path\")\n", + " axes[1].scatter([target[0]], [target[1]], c=\"red\", marker=\"x\", s=70, label=\"target\")\n", + " r = x[0] + x[1]\n", + " circle = plt.Circle((0, 0), r, color=\"gray\", fill=False, linestyle=\"--\", alpha=0.5)\n", + " axes[1].add_patch(circle)\n", + " axes[1].set_aspect(\"equal\", \"box\")\n", + " axes[1].set_title(\"Task-space trajectory\")\n", + " axes[1].set_xlabel(\"x [m]\")\n", + " axes[1].set_ylabel(\"y [m]\")\n", + " axes[1].legend(fontsize=8)\n", + "\n", + " axes[2].plot(err, color=\"#e45756\")\n", + " axes[2].set_title(\"Tracking error over time\")\n", + " axes[2].set_xlabel(\"step\")\n", + " axes[2].set_ylabel(\"error [m]\")\n", + " axes[2].grid(alpha=0.3)\n", + "\n", + " axes[3].plot(np.abs(tau[:, 0]), label=\"|tau1|\")\n", + " axes[3].plot(np.abs(tau[:, 1]), label=\"|tau2|\")\n", + " axes[3].set_title(\"Actuation effort\")\n", + " axes[3].set_xlabel(\"step\")\n", + " axes[3].set_ylabel(\"torque [Nm]\")\n", + " axes[3].legend(fontsize=8)\n", + " axes[3].grid(alpha=0.3)\n", + "\n", + " fig.suptitle(\n", + " f\"Objectives: final_error={obj[0]:.4f} m, energy={obj[1]:.3f} J\",\n", + " y=1.03,\n", + " )\n", + " fig.tight_layout()\n", + "\n", + " if open_window:\n", + " plt.show()\n", + " return fig, axes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CHECKPOINT: Quick sanity check before 
the smoke test\n", + "\n", + "Run this cell to verify the class can be instantiated and the pre-written\n", + "parts work. This does NOT require your fill-ins yet." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# CHECKPOINT -- class instantiation (does not call your fill-ins)\n", + "prob_test = PlanarManipulatorCoDesignProblem(seed=0)\n", + "print(\"design_space:\", prob_test.design_space)\n", + "print(\"objectives:\", prob_test.objectives)\n", + "print(\"num constraints:\", len(prob_test.design_constraints))\n", + "print(\"conditions:\", prob_test.conditions)\n", + "print()\n", + "print(\"Class instantiation OK.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 3 -- Smoke test\n", + "\n", + "Run this after completing **all 3 fill-ins** above.\n", + "\n", + "What success looks like:\n", + "- Non-empty optimization history\n", + "- Finite objective values (no NaN or Inf)\n", + "- A 4-panel figure renders without error\n", + "\n", + "**How to read the 4-panel figure**: Inspect the panels for (1) design parameter\n", + "values, (2) the end-effector path in task space with the target marked,\n", + "(3) tracking error decreasing over simulation steps, and (4) joint torque\n", + "profiles showing actuation effort." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Smoke test (run after implementing all 3 PUBLIC FILL-IN blocks)\n", + "problem = PlanarManipulatorCoDesignProblem(\n", + " seed=42,\n", + " target_x=0.9,\n", + " target_y=0.45,\n", + " payload_kg=0.8,\n", + " disturbance_scale=0.04,\n", + " sim_steps=220,\n", + " max_iter=40,\n", + ")\n", + "start, _ = problem.random_design()\n", + "\n", + "cfg = {\n", + " \"target_x\": 0.9,\n", + " \"target_y\": 0.45,\n", + " \"payload_kg\": 0.8,\n", + " \"disturbance_scale\": 0.04,\n", + " \"sim_steps\": 220,\n", + " \"dt\": 1.0 / 120.0,\n", + " \"torque_limit\": 12.0,\n", + " \"max_iter\": 40,\n", + "}\n", + "\n", + "print(\"design space:\", problem.design_space)\n", + "print(\"objectives:\", problem.objectives)\n", + "print(\"conditions:\", problem.conditions)\n", + "\n", + "viol = problem.check_constraints(start, config=cfg)\n", + "print(\"constraint violations:\", len(viol))\n", + "\n", + "obj0 = problem.simulate(start, config=cfg)\n", + "opt_design, history = problem.optimize(start, config=cfg)\n", + "objf = problem.simulate(opt_design, config=cfg)\n", + "\n", + "print(\"initial objectives [tracking_error_m, energy_J]:\", obj0.tolist())\n", + "print(\"final objectives [tracking_error_m, energy_J]:\", objf.tolist())\n", + "print(\"optimization steps:\", len(history))\n", + "\n", + "# CHECKPOINT\n", + "assert len(history) > 0, \"Optimization history should not be empty\"\n", + "assert np.all(np.isfinite(obj0)), \"Initial objective contains non-finite values\"\n", + "assert np.all(np.isfinite(objf)), \"Final objective contains non-finite values\"\n", + "print(\"All assertions passed.\")\n", + "\n", + "problem.render(opt_design)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The power of a standardized interface\n", + "\n", + "Notice what just happened: we wrapped a completely new simulator (PyBullet robotics)\n", + "as an EngiBench 
`Problem`, and it exposes the same interface as `beams2d`,\n", + "`heatconduction2d`, or any other problem in the benchmark.\n", + "\n", + "This means that **every generative model in EngiOpt** -- the CGAN you trained in\n", + "Notebook 01, the diffusion models, the VAEs -- could be trained on this manipulator\n", + "problem **with zero model code changes**. You would only need to point the training\n", + "script at the new problem ID.\n", + "\n", + "That is the core value proposition of EngiBench: **decouple the problem from the\n", + "method** so researchers can focus on one or the other without rewriting glue code." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Contributing to EngiBench: what you need\n", + "\n", + "If you have an engineering problem from your own domain that you would like to\n", + "contribute to the benchmark, here is the checklist:\n", + "\n", + "1. **Design space**: Define a `gymnasium.spaces.Box` (or `Dict`) for the design variables, with physically meaningful bounds.\n", + "\n", + "2. **Simulator**: Implement `simulate(design, config) -> objective_values`. This is the core -- it maps a design to measurable performance. Must be deterministic for a given seed.\n", + "\n", + "3. **Constraints**: Define constraint functions using the `@constraint` decorator. Each should `assert` what must be true for a design to be feasible.\n", + "\n", + "4. **Dataset**: Generate a dataset of (design, conditions, objectives) tuples and host it on HuggingFace. This is what generative models train on.\n", + "\n", + "5. **Render method**: A visualization that helps humans interpret designs. Not strictly required for training, but essential for debugging and papers.\n", + "\n", + "6. 
**Metadata**: Version number, objective names and directions, condition ranges, and a docstring explaining the problem physics.\n", + "\n", + "See the [EngiBench contribution guide](https://github.com/IDEALLab/EngiBench) for the full template and review process." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Takeaways\n", + "\n", + "Before closing, reflect on these questions:\n", + "\n", + "1. **What are the minimum requirements** for adding a new problem to EngiBench? Which methods and attributes are essential vs. nice-to-have?\n", + "\n", + "2. **Which part of the Problem interface** was most intuitive? Which was least intuitive? (For example: design_space, constraints, simulate, render, optimize...)\n", + "\n", + "3. **What engineering problem from YOUR domain** could you contribute as a benchmark? What would the design vector look like? What would you simulate?" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Optional extension -- Train an EngiOpt model on this problem\n", + "\n", + "The solutions notebook contains a full optional extension that:\n", + "\n", + "1. Generates a feasible dataset from simulator rollouts\n", + "2. Trains `engiopt.cgan_1d` (the same model architecture from Notebook 01) on the manipulator problem\n", + "3. Compares generated designs vs. a random baseline\n", + "\n", + "This demonstrates the key point: because our manipulator problem uses the standard\n", + "EngiBench interface, we can reuse EngiOpt model code directly.\n", + "\n", + "To try it yourself, see the **solutions notebook**:\n", + "`workshops/dcc26/solutions/03_add_new_problem_scaffold.ipynb`\n", + "\n", + "The essential idea in ~10 lines of pseudocode:\n", + "\n", + "```python\n", + "# 1. 
Generate dataset\n", + "for _ in range(N_SAMPLES):\n", + " design, _ = problem.random_design()\n", + " if problem.check_constraints(design, cfg) == []:\n", + " obj = problem.simulate(design, cfg)\n", + " dataset.append((design, conditions, obj))\n", + "\n", + "# 2. Train CGAN on top-performing designs\n", + "generator = cgan1d.Generator(latent_dim=8, n_conds=4, design_shape=(6,), ...)\n", + "# ... standard GAN training loop ...\n", + "\n", + "# 3. Generate + evaluate\n", + "new_design = generator(z, conditions)\n", + "obj = problem.simulate(new_design, cfg) # same interface!\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Troubleshooting\n", + "\n", + "- **`NotImplementedError`**: You have not yet filled in one of the 3 exercises. Check `simulate()`, `random_design()`, and `optimize()`.\n", + "- **`AssertionError` in smoke test**: Your fill-in runs but produces incorrect values. Re-read the hints in the `# START FILL` block.\n", + "- **PyBullet connection error**: Make sure `pybullet` is installed. On Colab, the bootstrap cell handles this.\n", + "- **If a section fails, do not continue downstream.** Fix locally first, then rerun." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + }, + "accelerator": "GPU", + "colab": { + "provenance": [], + "gpuType": "T4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/workshops/dcc26/solutions/04_heat_exchanger_design_problem.ipynb b/workshops/dcc26/solutions/04_heat_exchanger_design_problem.ipynb new file mode 100644 index 00000000..d2f411c6 --- /dev/null +++ b/workshops/dcc26/solutions/04_heat_exchanger_design_problem.ipynb @@ -0,0 +1,820 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "29dd7a28", + "metadata": {}, + "source": [ + "\n", + "\n", + "# Notebook 04 Solution - Wrapping a Heat Exchanger Design Problem\n", + "\n", + "**Capstone idea:** take a small, recognizable engineering simulator and wrap it in the same interface you used in the earlier notebooks.\n", + "\n", + "In Notebooks 00-02, `beams2d` was already packaged for you. Here we build a new benchmark-shaped problem from scratch: a compact counterflow heat exchanger.\n", + "\n", + "> Colab users: click **File -> Save a copy in Drive** before editing so your changes persist.\n" + ] + }, + { + "cell_type": "markdown", + "id": "f2dd9fcf", + "metadata": {}, + "source": [ + "## Where we are in the workshop\n", + "\n", + "The earlier notebooks used an existing EngiBench problem. This notebook flips the direction: instead of asking \"how do I train and evaluate a model on a benchmark?\", we ask:\n", + "\n", + "**What does a simulator need before it can become a reusable benchmark?**\n", + "\n", + "A heat exchanger is a good capstone because it is not another topology-optimization image. 
The design is a small vector of geometric choices, the physics is thermal-fluid system performance, and the constraints are things engineers actually care about: heat duty, pressure drop, size, and manufacturability.\n" + ] + }, + { + "cell_type": "markdown", + "id": "dfbaa22a", + "metadata": {}, + "source": [ + "## The problem: compact heat exchanger sizing\n", + "\n", + "Imagine a colleague says:\n", + "\n", + "> I have a hot stream and a cold stream. I need at least 5 kW of heat transfer, but I cannot allow more than 35 kPa pressure drop on the cold side. Can an optimizer or generative model propose useful exchanger geometries?\n", + "\n", + "We will use a deliberately small design vector:\n", + "\n", + "| Design variable | Meaning | Typical effect |\n", + "|---|---|---|\n", + "| `tube_diameter_m` | inner tube diameter | larger diameter lowers pressure drop but can lower velocity and heat transfer coefficient |\n", + "| `tube_length_m` | length of each tube | more area, more pressure drop |\n", + "| `n_tubes` | number of parallel tubes | more area and lower velocity, but larger/costlier exchanger |\n", + "\n", + "The simulator estimates heat transfer with the effectiveness-NTU method and pressure drop with a Darcy friction-factor model. When `ht` and `fluids` are installed, we use their implementations for those two standard engineering calculations. 
If not, the notebook falls back to the same textbook formulas so the story still runs.\n" + ] + }, + { + "cell_type": "markdown", + "id": "6dd4af6d", + "metadata": {}, + "source": [ + "## Install dependencies (Colab / fresh env only)\n", + "\n", + "The notebook is self-contained, but Colab should install `ht` and `fluids` so we demonstrate wrapping real Python engineering libraries.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a1199faf", + "metadata": {}, + "outputs": [], + "source": [ + "import subprocess\n", + "import sys\n", + "\n", + "IN_COLAB = \"google.colab\" in sys.modules\n", + "FORCE_INSTALL = False # set True to force install in a local notebook runtime\n", + "\n", + "if IN_COLAB or FORCE_INSTALL:\n", + " def _pip(pkgs):\n", + " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *pkgs])\n", + "\n", + " _pip([\"numpy\", \"pandas\", \"matplotlib\", \"scipy\", \"ht\", \"fluids\"])\n", + " print(\"Install complete. If imports fail, restart the runtime and rerun from the top.\")\n", + "else:\n", + " print(\"Using current environment. 
Set FORCE_INSTALL=True to install optional libraries here.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81e3012b", + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import annotations\n", + "\n", + "from dataclasses import dataclass\n", + "from types import SimpleNamespace\n", + "import math\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "try:\n", + " from ht.hx import NTU_from_UA, effectiveness_from_NTU\n", + " HT_AVAILABLE = True\n", + "except Exception:\n", + " HT_AVAILABLE = False\n", + "\n", + "try:\n", + " from fluids.friction import friction_factor\n", + " FLUIDS_AVAILABLE = True\n", + "except Exception:\n", + " FLUIDS_AVAILABLE = False\n", + "\n", + "print(\"ht available: \", HT_AVAILABLE)\n", + "print(\"fluids available: \", FLUIDS_AVAILABLE)\n" + ] + }, + { + "cell_type": "markdown", + "id": "3780e38c", + "metadata": {}, + "source": [ + "---\n", + "## 1 - What is fixed by the benchmark?\n", + "\n", + "A reusable design benchmark has to pin down a contract:\n", + "\n", + "1. **Design space:** what the algorithm is allowed to output.\n", + "2. **Conditions:** what scenario the design must work under.\n", + "3. **Simulator:** how a candidate design is scored.\n", + "4. **Constraints:** when a candidate is invalid or suspect.\n", + "5. **Baseline optimizer:** a simple reference method to compare against.\n", + "6. 
**Renderer:** a canonical way to inspect the design.\n", + "\n", + "The class below is intentionally small, but it has the same moving parts as an EngiBench `Problem`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7390f22e", + "metadata": {}, + "outputs": [], + "source": [ + "@dataclass\n", + "class Box:\n", + " \"\"\"Tiny stand-in for a Gymnasium Box design space.\"\"\"\n", + "\n", + " low: np.ndarray\n", + " high: np.ndarray\n", + " labels: tuple[str, ...]\n", + "\n", + " @property\n", + " def shape(self):\n", + " return self.low.shape\n", + "\n", + " def sample(self, rng):\n", + " return rng.uniform(self.low, self.high).astype(float)\n", + "\n", + " def clip(self, x):\n", + " return np.clip(np.asarray(x, dtype=float), self.low, self.high)\n", + "\n", + " def contains(self, x):\n", + " x = np.asarray(x, dtype=float)\n", + " return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\n", + "\n", + "\n", + "@dataclass\n", + "class OptiStep:\n", + " obj_values: np.ndarray\n", + " step: int\n", + " design: np.ndarray\n" + ] + }, + { + "cell_type": "markdown", + "id": "0b55f9f0", + "metadata": {}, + "source": [ + "---\n", + "## 2 - The thermal-fluid model\n", + "\n", + "This is the physics core. We use:\n", + "\n", + "- heat capacity rates: `C = mdot * cp`\n", + "- heat-transfer area: `A = pi * D * L * n_tubes`\n", + "- overall conductance: `UA = U * A`\n", + "- effectiveness-NTU method for counterflow heat exchange\n", + "- Darcy-Weisbach pressure drop on the cold side\n", + "\n", + "The design lesson is not that this is the world's most detailed exchanger model. 
The lesson is that a benchmark should make all assumptions explicit and executable.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91b5882c", + "metadata": {}, + "outputs": [], + "source": [ + "def _fallback_effectiveness_from_ntu(ntu: float, cr: float) -> float:\n", + " \"\"\"Counterflow heat-exchanger effectiveness from NTU and capacity ratio.\"\"\"\n", + " ntu = max(float(ntu), 0.0)\n", + " cr = min(max(float(cr), 1e-9), 0.999999)\n", + " if abs(1.0 - cr) < 1e-6:\n", + " return ntu / (1.0 + ntu)\n", + " numerator = 1.0 - math.exp(-ntu * (1.0 - cr))\n", + " denominator = 1.0 - cr * math.exp(-ntu * (1.0 - cr))\n", + " return numerator / denominator\n", + "\n", + "\n", + "def _fallback_friction_factor(re: float, relative_roughness: float) -> float:\n", + " \"\"\"Darcy friction factor: laminar exact, turbulent Haaland approximation.\"\"\"\n", + " re = max(float(re), 1e-9)\n", + " if re < 2300:\n", + " return 64.0 / re\n", + " term = (relative_roughness / 3.7) ** 1.11 + 6.9 / re\n", + " return 1.0 / (-1.8 * math.log10(term)) ** 2\n", + "\n", + "\n", + "def _ntu_from_ua(ua: float, c_min: float) -> float:\n", + " if HT_AVAILABLE:\n", + " return float(NTU_from_UA(UA=ua, Cmin=c_min))\n", + " return float(ua / max(c_min, 1e-12))\n", + "\n", + "\n", + "def _effectiveness(ntu: float, cr: float) -> float:\n", + " if HT_AVAILABLE:\n", + " return float(effectiveness_from_NTU(NTU=ntu, Cr=cr, subtype=\"counterflow\"))\n", + " return _fallback_effectiveness_from_ntu(ntu, cr)\n", + "\n", + "\n", + "def _friction_factor(re: float, eD: float) -> float:\n", + " if FLUIDS_AVAILABLE:\n", + " return float(friction_factor(Re=re, eD=eD, Darcy=True))\n", + " return _fallback_friction_factor(re, eD)\n" + ] + }, + { + "cell_type": "markdown", + "id": "d6d31f9b", + "metadata": {}, + "source": [ + "---\n", + "## 3 - Wrap the simulator as a problem\n", + "\n", + "Read this class the way you would read a real benchmark implementation. 
The important question is not \"do I like these exact constants?\" It is:\n", + "\n", + "**Can another lab run the same design under the same conditions and get the same metrics?**\n", + "\n", + "That is what a benchmark wrapper buys us.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "529cba00", + "metadata": {}, + "outputs": [], + "source": [ + "class HeatExchangerDesignProblem:\n", + " \"\"\"Small EngiBench-style heat-exchanger design problem.\"\"\"\n", + "\n", + " objectives = (\n", + " (\"heat_shortfall_W\", \"MINIMIZE\"),\n", + " (\"pumping_power_W\", \"MINIMIZE\"),\n", + " (\"area_m2\", \"MINIMIZE\"),\n", + " )\n", + "\n", + " design_space = Box(\n", + " low=np.array([0.006, 0.50, 2.0]),\n", + " high=np.array([0.030, 6.00, 40.0]),\n", + " labels=(\"tube_diameter_m\", \"tube_length_m\", \"n_tubes\"),\n", + " )\n", + "\n", + " default_conditions = {\n", + " \"hot_in_C\": 80.0,\n", + " \"cold_in_C\": 20.0,\n", + " \"hot_mdot_kg_s\": 0.32,\n", + " \"cold_mdot_kg_s\": 0.24,\n", + " \"required_heat_W\": 5000.0,\n", + " \"max_cold_dp_kPa\": 35.0,\n", + " \"hot_side_h_W_m2K\": 180.0,\n", + " }\n", + "\n", + " def __init__(self, seed: int = 7, **condition_overrides):\n", + " self.seed = seed\n", + " self.rng = np.random.default_rng(seed)\n", + " self.conditions = {**self.default_conditions, **condition_overrides}\n", + "\n", + " # Constant properties keep the notebook focused. 
A production problem\n", + " # could call CoolProp here for temperature-dependent properties.\n", + " self.cold = SimpleNamespace(rho=997.0, cp=4180.0, mu=1.0e-3, k=0.60, pr=7.0)\n", + " self.hot = SimpleNamespace(rho=850.0, cp=2200.0, mu=3.0e-3, k=0.13, pr=50.0)\n", + " self.wall_k_W_mK = 16.0\n", + " self.wall_thickness_m = 0.001\n", + " self.roughness_m = 1.5e-5\n", + " self.pump_efficiency = 0.65\n", + "\n", + " def reset(self, seed: int | None = None):\n", + " if seed is not None:\n", + " self.seed = seed\n", + " self.rng = np.random.default_rng(self.seed)\n", + "\n", + " def unpack_design(self, design):\n", + " d, length, n_tubes = self.design_space.clip(design)\n", + " return float(d), float(length), int(round(float(n_tubes)))\n", + "\n", + " def _inside_heat_transfer_coefficient(self, diameter_m, n_tubes, cold_mdot):\n", + " area_per_tube = math.pi * diameter_m**2 / 4.0\n", + " velocity = cold_mdot / (self.cold.rho * area_per_tube * max(n_tubes, 1))\n", + " reynolds = self.cold.rho * velocity * diameter_m / self.cold.mu\n", + " if reynolds < 2300:\n", + " nusselt = 3.66\n", + " else:\n", + " nusselt = 0.023 * reynolds**0.8 * self.cold.pr**0.4\n", + " h_inside = nusselt * self.cold.k / diameter_m\n", + " return h_inside, velocity, reynolds\n", + "\n", + " def simulate(self, design, config: dict | None = None) -> np.ndarray:\n", + " cfg = {**self.conditions, **(config or {})}\n", + " diameter_m, length_m, n_tubes = self.unpack_design(design)\n", + "\n", + " h_inside, velocity, reynolds = self._inside_heat_transfer_coefficient(\n", + " diameter_m, n_tubes, cfg[\"cold_mdot_kg_s\"]\n", + " )\n", + " h_outside = float(cfg[\"hot_side_h_W_m2K\"])\n", + " u_overall = 1.0 / (\n", + " 1.0 / h_inside\n", + " + self.wall_thickness_m / self.wall_k_W_mK\n", + " + 1.0 / h_outside\n", + " )\n", + "\n", + " area_m2 = math.pi * diameter_m * length_m * n_tubes\n", + " ua = u_overall * area_m2\n", + "\n", + " c_hot = cfg[\"hot_mdot_kg_s\"] * self.hot.cp\n", + " c_cold = 
cfg[\"cold_mdot_kg_s\"] * self.cold.cp\n", + " c_min = min(c_hot, c_cold)\n", + " c_max = max(c_hot, c_cold)\n", + " cr = c_min / c_max\n", + " ntu = _ntu_from_ua(ua, c_min)\n", + " eps = _effectiveness(ntu, cr)\n", + "\n", + " q_max = c_min * (cfg[\"hot_in_C\"] - cfg[\"cold_in_C\"])\n", + " q_W = eps * q_max\n", + " heat_shortfall_W = max(cfg[\"required_heat_W\"] - q_W, 0.0)\n", + "\n", + " eD = self.roughness_m / diameter_m\n", + " f_darcy = _friction_factor(reynolds, eD)\n", + " minor_loss_K = 1.5\n", + " cold_dp_Pa = (f_darcy * length_m / diameter_m + minor_loss_K) * 0.5 * self.cold.rho * velocity**2\n", + " pumping_power_W = cold_dp_Pa * (cfg[\"cold_mdot_kg_s\"] / self.cold.rho) / self.pump_efficiency\n", + "\n", + " cold_out_C = cfg[\"cold_in_C\"] + q_W / c_cold\n", + " hot_out_C = cfg[\"hot_in_C\"] - q_W / c_hot\n", + "\n", + " self.last_details = {\n", + " \"diameter_m\": diameter_m,\n", + " \"length_m\": length_m,\n", + " \"n_tubes\": n_tubes,\n", + " \"area_m2\": area_m2,\n", + " \"U_W_m2K\": u_overall,\n", + " \"UA_W_K\": ua,\n", + " \"NTU\": ntu,\n", + " \"effectiveness\": eps,\n", + " \"heat_transfer_W\": q_W,\n", + " \"heat_shortfall_W\": heat_shortfall_W,\n", + " \"cold_dp_kPa\": cold_dp_Pa / 1000.0,\n", + " \"pumping_power_W\": pumping_power_W,\n", + " \"cold_velocity_m_s\": velocity,\n", + " \"cold_reynolds\": reynolds,\n", + " \"cold_out_C\": cold_out_C,\n", + " \"hot_out_C\": hot_out_C,\n", + " }\n", + "\n", + " return np.array([heat_shortfall_W, pumping_power_W, area_m2], dtype=float)\n", + "\n", + " def check_constraints(self, design, config: dict | None = None) -> list[str]:\n", + " cfg = {**self.conditions, **(config or {})}\n", + " violations = []\n", + " x = np.asarray(design, dtype=float)\n", + " if not self.design_space.contains(x):\n", + " violations.append(\"design is outside geometry bounds\")\n", + "\n", + " self.simulate(x, cfg)\n", + " d = self.last_details\n", + " if d[\"heat_shortfall_W\"] > 1e-6:\n", + " 
violations.append(\"required heat duty is not met\")\n", + " if d[\"cold_dp_kPa\"] > cfg[\"max_cold_dp_kPa\"]:\n", + " violations.append(\"cold-side pressure drop exceeds limit\")\n", + " if d[\"cold_velocity_m_s\"] < 0.20:\n", + " violations.append(\"cold-side velocity is very low; fouling risk\")\n", + " if d[\"cold_velocity_m_s\"] > 3.00:\n", + " violations.append(\"cold-side velocity is high; erosion/noise risk\")\n", + " min_approach_C = 2.0\n", + " if cfg[\"hot_in_C\"] - d[\"cold_out_C\"] < min_approach_C:\n", + " violations.append(\"hot-in to cold-out terminal approach is too small\")\n", + " if d[\"hot_out_C\"] - cfg[\"cold_in_C\"] < min_approach_C:\n", + " violations.append(\"hot-out to cold-in terminal approach is too small\")\n", + " return violations\n", + "\n", + " def random_design(self):\n", + " return self.design_space.sample(self.rng), -1.0\n", + "\n", + " def score(self, design, config: dict | None = None) -> float:\n", + " cfg = {**self.conditions, **(config or {})}\n", + " obj = self.simulate(design, cfg)\n", + " d = self.last_details\n", + " pressure_penalty = max(d[\"cold_dp_kPa\"] - cfg[\"max_cold_dp_kPa\"], 0.0) / cfg[\"max_cold_dp_kPa\"]\n", + " velocity_penalty = max(0.20 - d[\"cold_velocity_m_s\"], 0.0) + max(d[\"cold_velocity_m_s\"] - 3.00, 0.0)\n", + " return (\n", + " obj[0] / cfg[\"required_heat_W\"]\n", + " + 0.02 * obj[1]\n", + " + 0.08 * obj[2]\n", + " + 10.0 * pressure_penalty\n", + " + 2.0 * velocity_penalty\n", + " )\n", + "\n", + " def optimize(self, starting_point=None, config: dict | None = None, n_candidates: int = 600):\n", + " if starting_point is None:\n", + " starting_point, _ = self.random_design()\n", + " best = self.design_space.clip(starting_point)\n", + " best_score = self.score(best, config)\n", + " best_obj = self.simulate(best, config)\n", + " history = [OptiStep(obj_values=best_obj, step=0, design=best.copy())]\n", + "\n", + " for step in range(1, n_candidates + 1):\n", + " candidate = 
self.design_space.sample(self.rng)\n", + " candidate_score = self.score(candidate, config)\n", + " if candidate_score < best_score:\n", + " best = candidate.copy()\n", + " best_score = candidate_score\n", + " best_obj = self.simulate(best, config)\n", + " if step % 20 == 0:\n", + " history.append(OptiStep(obj_values=best_obj.copy(), step=step, design=best.copy()))\n", + " return best, history\n", + "\n", + " def render(self, design, config: dict | None = None):\n", + " self.simulate(design, config)\n", + " d = self.last_details\n", + " labels = self.design_space.labels\n", + " values = self.design_space.clip(design)\n", + "\n", + " fig, axes = plt.subplots(1, 3, figsize=(14, 4))\n", + "\n", + " # Schematic panel\n", + " ax = axes[0]\n", + " ax.set_title(\"Counterflow tube bundle\")\n", + " ax.plot([0.08, 0.92], [0.65, 0.65], color=\"#b91c1c\", linewidth=6, solid_capstyle=\"round\")\n", + " ax.plot([0.92, 0.08], [0.35, 0.35], color=\"#2563eb\", linewidth=6, solid_capstyle=\"round\")\n", + " for y in np.linspace(0.40, 0.60, 5):\n", + " ax.plot([0.16, 0.84], [y, y], color=\"0.25\", linewidth=1.5, alpha=0.8)\n", + " ax.text(0.08, 0.74, f\"hot in {self.conditions['hot_in_C']:.0f} C\", color=\"#b91c1c\")\n", + " ax.text(0.70, 0.24, f\"cold in {self.conditions['cold_in_C']:.0f} C\", color=\"#2563eb\")\n", + " ax.text(0.08, 0.08, f\"D={d['diameter_m']*1000:.1f} mm, L={d['length_m']:.2f} m, tubes={d['n_tubes']}\")\n", + " ax.set_xlim(0, 1); ax.set_ylim(0, 1); ax.axis(\"off\")\n", + "\n", + " # Objective panel\n", + " ax = axes[1]\n", + " names = [\"Q delivered\", \"Q required\"]\n", + " vals = [d[\"heat_transfer_W\"] / 1000, self.conditions[\"required_heat_W\"] / 1000]\n", + " ax.bar(names, vals, color=[\"#0f766e\", \"#525252\"])\n", + " ax.set_ylabel(\"kW\")\n", + " ax.set_title(\"Heat duty\")\n", + " ax.grid(axis=\"y\", alpha=0.25)\n", + "\n", + " # Constraint / tradeoff panel\n", + " ax = axes[2]\n", + " names = [\"dp\", \"limit\", \"area\", \"pump\"]\n", + " vals = 
[d[\"cold_dp_kPa\"], self.conditions[\"max_cold_dp_kPa\"], d[\"area_m2\"], d[\"pumping_power_W\"]]\n", + " colors = [\"#7c3aed\", \"#525252\", \"#ea580c\", \"#0891b2\"]\n", + " ax.bar(names, vals, color=colors)\n", + " ax.set_title(\"Pressure, size, power\")\n", + " ax.set_ylabel(\"mixed units\")\n", + " ax.grid(axis=\"y\", alpha=0.25)\n", + "\n", + " fig.suptitle(\"HeatExchangerDesignProblem.render(design)\", fontsize=14)\n", + " fig.tight_layout()\n", + " return fig\n" + ] + }, + { + "cell_type": "markdown", + "id": "a81bb6b7", + "metadata": {}, + "source": [ + "---\n", + "## 4 - Instantiate and inspect the problem\n", + "\n", + "This is the moment where a simulator starts to feel like a benchmark: we can inspect the design space, the objectives, and the operating scenario before running any optimization.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b4af766", + "metadata": {}, + "outputs": [], + "source": [ + "problem = HeatExchangerDesignProblem(seed=4)\n", + "\n", + "print(\"Design variables:\")\n", + "for label, lo, hi in zip(problem.design_space.labels, problem.design_space.low, problem.design_space.high):\n", + " print(f\" {label:18s}: {lo:.4g} to {hi:.4g}\")\n", + "\n", + "print(\"\\nObjectives:\")\n", + "for name, direction in problem.objectives:\n", + " print(f\" {name:18s}: {direction}\")\n", + "\n", + "print(\"\\nConditions:\")\n", + "for k, v in problem.conditions.items():\n", + " print(f\" {k:18s}: {v}\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "0e2400c0", + "metadata": {}, + "source": [ + "---\n", + "## 5 - Simulate one candidate\n", + "\n", + "A benchmark is more than a dataset. It should let us ask: *if a model gives me this design, what happens under this scenario?*\n", + "\n", + "Try changing the three values below. 
Bigger is not always better: longer tubes and more tubes add area, but pressure drop and pumping power can fight back.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2c7df7ed", + "metadata": {}, + "outputs": [], + "source": [ + "candidate = np.array([\n", + " 0.014, # tube diameter [m]\n", + " 3.20, # tube length [m]\n", + " 14.0, # number of tubes, rounded inside simulator\n", + "])\n", + "\n", + "obj = problem.simulate(candidate)\n", + "violations = problem.check_constraints(candidate)\n", + "\n", + "print(\"Objective vector [heat_shortfall_W, pumping_power_W, area_m2]:\")\n", + "print(np.round(obj, 4))\n", + "\n", + "print(\"\\nDetails:\")\n", + "for key in [\"heat_transfer_W\", \"cold_dp_kPa\", \"pumping_power_W\", \"area_m2\", \"effectiveness\", \"cold_reynolds\", \"cold_velocity_m_s\", \"hot_out_C\", \"cold_out_C\"]:\n", + " print(f\" {key:18s}: {problem.last_details[key]:.4g}\")\n", + "\n", + "print(\"\\nConstraint violations:\")\n", + "print(violations if violations else \" none\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e445feca", + "metadata": {}, + "outputs": [], + "source": [ + "fig = problem.render(candidate)\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "1843b62d", + "metadata": {}, + "source": [ + "---\n", + "## 6 - Why constraints are separate from objectives\n", + "\n", + "A design can have a small area and low pumping power because it simply fails to transfer enough heat. 
That is why `simulate()` and `check_constraints()` answer different questions.\n", + "\n", + "- `simulate()` says how the design performs.\n", + "- `check_constraints()` says whether the performance is acceptable for this benchmark scenario.\n", + "\n", + "This separation is exactly what made Notebook 02 useful: a design can look plausible and still fail engineering checks.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8a2735f", + "metadata": {}, + "outputs": [], + "source": [ + "examples = {\n", + " \"too small\": np.array([0.010, 0.70, 4.0]),\n", + " \"pressure-heavy\": np.array([0.0065, 5.50, 3.0]),\n", + " \"reasonable\": candidate,\n", + "}\n", + "\n", + "rows = []\n", + "for name, x in examples.items():\n", + " obj = problem.simulate(x)\n", + " rows.append({\n", + " \"case\": name,\n", + " \"D_mm\": problem.last_details[\"diameter_m\"] * 1000,\n", + " \"L_m\": problem.last_details[\"length_m\"],\n", + " \"n_tubes\": problem.last_details[\"n_tubes\"],\n", + " \"Q_kW\": problem.last_details[\"heat_transfer_W\"] / 1000,\n", + " \"shortfall_W\": obj[0],\n", + " \"dp_kPa\": problem.last_details[\"cold_dp_kPa\"],\n", + " \"pump_W\": obj[1],\n", + " \"area_m2\": obj[2],\n", + " \"violations\": \"; \".join(problem.check_constraints(x)) or \"none\",\n", + " })\n", + "\n", + "pd.DataFrame(rows)\n" + ] + }, + { + "cell_type": "markdown", + "id": "3b153a4c", + "metadata": {}, + "source": [ + "---\n", + "## 7 - A tiny baseline optimizer\n", + "\n", + "For a real EngiBench contribution, the optimizer should be documented carefully: what it optimizes, how long it runs, and whether it is meant to be strong or just a baseline.\n", + "\n", + "Here we use random search because it is transparent. The point is not that random search is clever. 
The point is that every benchmark needs a reference method that everyone can rerun.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0f61c42", + "metadata": {}, + "outputs": [], + "source": [ + "problem.reset(seed=12)\n", + "start, _ = problem.random_design()\n", + "best_design, history = problem.optimize(starting_point=start, n_candidates=800)\n", + "\n", + "print(\"Start design:\", dict(zip(problem.design_space.labels, np.round(start, 4))))\n", + "problem.simulate(start)\n", + "print(\"Start details:\", {k: round(problem.last_details[k], 4) for k in [\"heat_transfer_W\", \"cold_dp_kPa\", \"pumping_power_W\", \"area_m2\"]})\n", + "print(\"Start violations:\", problem.check_constraints(start) or \"none\")\n", + "\n", + "print(\"\\nBest design:\", dict(zip(problem.design_space.labels, np.round(best_design, 4))))\n", + "problem.simulate(best_design)\n", + "print(\"Best details:\", {k: round(problem.last_details[k], 4) for k in [\"heat_transfer_W\", \"cold_dp_kPa\", \"pumping_power_W\", \"area_m2\"]})\n", + "print(\"Best violations:\", problem.check_constraints(best_design) or \"none\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "474be227", + "metadata": {}, + "outputs": [], + "source": [ + "fig, axes = plt.subplots(1, 2, figsize=(12, 4))\n", + "\n", + "steps = [h.step for h in history]\n", + "shortfall = [h.obj_values[0] for h in history]\n", + "pump = [h.obj_values[1] for h in history]\n", + "area = [h.obj_values[2] for h in history]\n", + "\n", + "axes[0].plot(steps, shortfall, marker=\"o\", label=\"heat shortfall [W]\")\n", + "axes[0].set_xlabel(\"candidate evaluations\")\n", + "axes[0].set_ylabel(\"W\")\n", + "axes[0].set_title(\"Best heat-duty shortfall so far\")\n", + "axes[0].grid(alpha=0.25)\n", + "\n", + "axes[1].plot(steps, pump, marker=\"o\", label=\"pumping power [W]\")\n", + "axes[1].plot(steps, area, marker=\"s\", label=\"area [m2]\")\n", + "axes[1].set_xlabel(\"candidate evaluations\")\n", + 
"axes[1].set_title(\"Competing costs of the best design\")\n", + "axes[1].legend()\n", + "axes[1].grid(alpha=0.25)\n", + "\n", + "fig.tight_layout()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7432cc93", + "metadata": {}, + "outputs": [], + "source": [ + "fig = problem.render(best_design)\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "66dc54ad", + "metadata": {}, + "source": [ + "---\n", + "## 8 - Change the operating scenario\n", + "\n", + "Conditions are the input side of the benchmark. The same design can be good for one scenario and bad for another.\n", + "\n", + "This is what makes conditional design interesting: an inverse model should not just produce \"a heat exchanger\". It should produce a heat exchanger for *this* duty, *these* flow rates, and *this* pressure-drop limit.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0f8175d", + "metadata": {}, + "outputs": [], + "source": [ + "scenarios = [\n", + " {\"name\": \"base\", \"required_heat_W\": 5000.0, \"cold_mdot_kg_s\": 0.24, \"max_cold_dp_kPa\": 35.0},\n", + " {\"name\": \"harder duty\", \"required_heat_W\": 7000.0, \"cold_mdot_kg_s\": 0.24, \"max_cold_dp_kPa\": 35.0},\n", + " {\"name\": \"tight dp\", \"required_heat_W\": 5000.0, \"cold_mdot_kg_s\": 0.24, \"max_cold_dp_kPa\": 15.0},\n", + " {\"name\": \"more flow\", \"required_heat_W\": 5000.0, \"cold_mdot_kg_s\": 0.40, \"max_cold_dp_kPa\": 35.0},\n", + "]\n", + "\n", + "rows = []\n", + "for scenario in scenarios:\n", + " cfg = {k: v for k, v in scenario.items() if k != \"name\"}\n", + " problem.reset(seed=30)\n", + " best, _ = problem.optimize(config=cfg, n_candidates=700)\n", + " obj = problem.simulate(best, cfg)\n", + " rows.append({\n", + " \"scenario\": scenario[\"name\"],\n", + " \"D_mm\": problem.last_details[\"diameter_m\"] * 1000,\n", + " \"L_m\": problem.last_details[\"length_m\"],\n", + " \"n_tubes\": problem.last_details[\"n_tubes\"],\n", + " 
\"Q_kW\": problem.last_details[\"heat_transfer_W\"] / 1000,\n", + " \"required_kW\": cfg[\"required_heat_W\"] / 1000,\n", + " \"dp_kPa\": problem.last_details[\"cold_dp_kPa\"],\n", + " \"dp_limit_kPa\": cfg[\"max_cold_dp_kPa\"],\n", + " \"area_m2\": obj[2],\n", + " \"violations\": \"; \".join(problem.check_constraints(best, cfg)) or \"none\",\n", + " })\n", + "\n", + "scenario_df = pd.DataFrame(rows)\n", + "scenario_df\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3e9965a9", + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(figsize=(8, 4))\n", + "x = np.arange(len(scenario_df))\n", + "ax.bar(x - 0.2, scenario_df[\"Q_kW\"], width=0.4, label=\"delivered\")\n", + "ax.bar(x + 0.2, scenario_df[\"required_kW\"], width=0.4, label=\"required\")\n", + "ax.set_xticks(x)\n", + "ax.set_xticklabels(scenario_df[\"scenario\"], rotation=15, ha=\"right\")\n", + "ax.set_ylabel(\"heat duty [kW]\")\n", + "ax.set_title(\"Different conditions lead to different designs\")\n", + "ax.legend()\n", + "ax.grid(axis=\"y\", alpha=0.25)\n", + "fig.tight_layout()\n", + "plt.show()\n" + ] + }, + { + "cell_type": "markdown", + "id": "4b59b4eb", + "metadata": {}, + "source": [ + "---\n", + "## 9 - What would make this a real EngiBench problem?\n", + "\n", + "This notebook is a workshop wrapper, not a polished repository contribution. To turn it into a real EngiBench problem, we would still need:\n", + "\n", + "1. A module under `engibench/problems/heat_exchanger/` with a `v0.py` implementation.\n", + "2. A documented dataset of optimized designs across sampled conditions.\n", + "3. Tests that every design in the dataset simulates and passes constraints.\n", + "4. A stronger baseline optimizer and fixed evaluation budget.\n", + "5. Clear citations for the heat-transfer and pressure-drop correlations.\n", + "6. Documentation and a canonical render image.\n", + "\n", + "The important thing is that the shape is now visible. 
Once the simulator is wrapped, any model in EngiOpt can treat this like another conditional design problem.\n" + ] + }, + { + "cell_type": "markdown", + "id": "639d215c", + "metadata": {}, + "source": [ + "## Reflection\n", + "\n", + "Before closing the capstone, discuss:\n", + "\n", + "1. Which parts of this problem are **conditions** and which are **design variables**?\n", + "2. Is pressure drop an objective, a constraint, or both? What changes if you move it?\n", + "3. What data would you generate before training a conditional design model?\n", + "4. Which simplification in this notebook would matter most for a publication-grade benchmark?\n", + "5. How is this different from the heat-conduction topology problems already in EngiBench?\n" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.x" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/workshops/dcc26/utils/__init__.py b/workshops/dcc26/utils/__init__.py new file mode 100644 index 00000000..e9c9c076 --- /dev/null +++ b/workshops/dcc26/utils/__init__.py @@ -0,0 +1,3 @@ +"""Utilities for DCC26 workshop notebooks.""" + +from .notebook_helpers import * # noqa: F401,F403 diff --git a/workshops/dcc26/utils/notebook_helpers.py b/workshops/dcc26/utils/notebook_helpers.py new file mode 100644 index 00000000..144786c1 --- /dev/null +++ b/workshops/dcc26/utils/notebook_helpers.py @@ -0,0 +1,3 @@ +"""Backward-compatible shim for the packaged DCC26 notebook helpers.""" + +from engiopt.workshops.dcc26.notebook_helpers import * # noqa: F401,F403