From 1bd56d52520041e24a17c5152787ce990f888269 Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 11:05:53 +0100
Subject: [PATCH 1/9] Improve benchmark runner printing (#5429)
* More sensible print and run functions.
* Avoid permanent modifications in _subprocess_runner.
---
benchmarks/bm_runner.py | 73 ++++++++++++++++++++++-------------------
1 file changed, 39 insertions(+), 34 deletions(-)
diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py
index f3efb0ea31..9b997bc87c 100644
--- a/benchmarks/bm_runner.py
+++ b/benchmarks/bm_runner.py
@@ -32,17 +32,26 @@
)
-def _subprocess_run_print(args, **kwargs):
+def echo(echo_string: str):
# Use subprocess for printing to reduce chance of printing out of sequence
# with the subsequent calls.
- subprocess.run(["echo", f"BM_RUNNER DEBUG: {' '.join(args)}"])
+ subprocess.run(["echo", f"BM_RUNNER DEBUG: {echo_string}"])
+
+
+def _subprocess_runner(args, asv=False, **kwargs):
+ # Avoid permanent modifications if the same arguments are used more than once.
+ args = args.copy()
+ kwargs = kwargs.copy()
+ if asv:
+ args.insert(0, "asv")
+ kwargs["cwd"] = BENCHMARKS_DIR
+ echo(" ".join(args))
return subprocess.run(args, **kwargs)
-def _subprocess_run_asv(args, **kwargs):
- args.insert(0, "asv")
- kwargs["cwd"] = BENCHMARKS_DIR
- return _subprocess_run_print(args, **kwargs)
+def _subprocess_runner_capture(args, **kwargs) -> str:
+ result = _subprocess_runner(args, capture_output=True, **kwargs)
+ return result.stdout.decode()
def _check_requirements(package: str) -> None:
@@ -65,12 +74,12 @@ def _prep_data_gen_env() -> None:
python_version = "3.11"
data_gen_var = "DATA_GEN_PYTHON"
if data_gen_var in environ:
- print("Using existing data generation environment.")
+ echo("Using existing data generation environment.")
else:
- print("Setting up the data generation environment ...")
+ echo("Setting up the data generation environment ...")
# Get Nox to build an environment for the `tests` session, but don't
# run the session. Will re-use a cached environment if appropriate.
- _subprocess_run_print(
+ _subprocess_runner(
[
"nox",
f"--noxfile={root_dir / 'noxfile.py'}",
@@ -86,10 +95,10 @@ def _prep_data_gen_env() -> None:
).resolve()
environ[data_gen_var] = str(data_gen_python)
- print("Installing Mule into data generation environment ...")
+ echo("Installing Mule into data generation environment ...")
mule_dir = data_gen_python.parents[1] / "resources" / "mule"
if not mule_dir.is_dir():
- _subprocess_run_print(
+ _subprocess_runner(
[
"git",
"clone",
@@ -97,7 +106,7 @@ def _prep_data_gen_env() -> None:
str(mule_dir),
]
)
- _subprocess_run_print(
+ _subprocess_runner(
[
str(data_gen_python),
"-m",
@@ -107,7 +116,7 @@ def _prep_data_gen_env() -> None:
]
)
- print("Data generation environment ready.")
+ echo("Data generation environment ready.")
def _setup_common() -> None:
@@ -116,10 +125,10 @@ def _setup_common() -> None:
_prep_data_gen_env()
- print("Setting up ASV ...")
- _subprocess_run_asv(["machine", "--yes"])
+ echo("Setting up ASV ...")
+ _subprocess_runner(["machine", "--yes"], asv=True)
- print("Setup complete.")
+ echo("Setup complete.")
def _asv_compare(*commits: str, overnight_mode: bool = False) -> None:
@@ -132,17 +141,15 @@ def _asv_compare(*commits: str, overnight_mode: bool = False) -> None:
asv_command = (
f"compare {before} {after} --factor={COMPARE_FACTOR} --split"
)
- _subprocess_run_asv(asv_command.split(" "))
+ _subprocess_runner(asv_command.split(" "), asv=True)
if overnight_mode:
# Record performance shifts.
# Run the command again but limited to only showing performance
# shifts.
- shifts = _subprocess_run_asv(
- [*asv_command.split(" "), "--only-changed"],
- capture_output=True,
- text=True,
- ).stdout
+ shifts = _subprocess_runner_capture(
+ [*asv_command.split(" "), "--only-changed"], asv=True
+ )
if shifts:
# Write the shifts report to a file.
# Dir is used by .github/workflows/benchmarks.yml,
@@ -221,13 +228,11 @@ def func(args: argparse.Namespace) -> None:
commit_range = f"{args.first_commit}^^.."
asv_command = ASV_HARNESS.format(posargs=commit_range)
- _subprocess_run_asv([*asv_command.split(" "), *args.asv_args])
+ _subprocess_runner([*asv_command.split(" "), *args.asv_args], asv=True)
# git rev-list --first-parent is the command ASV uses.
git_command = f"git rev-list --first-parent {commit_range}"
- commit_string = _subprocess_run_print(
- git_command.split(" "), capture_output=True, text=True
- ).stdout
+ commit_string = _subprocess_runner_capture(git_command.split(" "))
commit_list = commit_string.rstrip().split("\n")
_asv_compare(*reversed(commit_list), overnight_mode=True)
@@ -260,16 +265,16 @@ def func(args: argparse.Namespace) -> None:
_setup_common()
git_command = f"git merge-base HEAD {args.base_branch}"
- merge_base = _subprocess_run_print(
- git_command.split(" "), capture_output=True, text=True
- ).stdout[:8]
+ merge_base = _subprocess_runner_capture(git_command.split(" "))[:8]
with NamedTemporaryFile("w") as hashfile:
hashfile.writelines([merge_base, "\n", "HEAD"])
hashfile.flush()
commit_range = f"HASHFILE:{hashfile.name}"
asv_command = ASV_HARNESS.format(posargs=commit_range)
- _subprocess_run_asv([*asv_command.split(" "), *args.asv_args])
+ _subprocess_runner(
+ [*asv_command.split(" "), *args.asv_args], asv=True
+ )
_asv_compare(merge_base, "HEAD")
@@ -326,14 +331,14 @@ def csperf(
asv_command = asv_command.replace(" --strict", "")
# Only do a single round.
asv_command = re.sub(r"rounds=\d", "rounds=1", asv_command)
- _subprocess_run_asv([*asv_command.split(" "), *args.asv_args])
+ _subprocess_runner([*asv_command.split(" "), *args.asv_args], asv=True)
asv_command = f"publish {commit_range} --html-dir={publish_subdir}"
- _subprocess_run_asv(asv_command.split(" "))
+ _subprocess_runner(asv_command.split(" "), asv=True)
# Print completion message.
location = BENCHMARKS_DIR / ".asv"
- print(
+ echo(
f'New ASV results for "{run_type}".\n'
f'See "{publish_subdir}",'
f'\n or JSON files under "{location / "results"}".'
@@ -380,7 +385,7 @@ def add_arguments(self) -> None:
@staticmethod
def func(args: argparse.Namespace) -> None:
_setup_common()
- _subprocess_run_asv([args.asv_sub_command, *args.asv_args])
+ _subprocess_runner([args.asv_sub_command, *args.asv_args], asv=True)
def main():
From 58d195a176e8af656758667436ee4dc0bc43ebee Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 13:40:57 +0100
Subject: [PATCH 2/9] Post on demand benchmark results as comment (big
refactor) (#5430)
* On-demand benchmarking.
* Correct gh query.
* Correct assignee spacing.
* What's new entry.
---
.github/workflows/benchmarks_report.yml | 83 ++++++
.../{benchmark.yml => benchmarks_run.yml} | 68 ++---
.gitignore | 1 +
benchmarks/bm_runner.py | 260 +++++++++++++++---
docs/src/whatsnew/latest.rst | 4 +
5 files changed, 329 insertions(+), 87 deletions(-)
create mode 100644 .github/workflows/benchmarks_report.yml
rename .github/workflows/{benchmark.yml => benchmarks_run.yml} (59%)
diff --git a/.github/workflows/benchmarks_report.yml b/.github/workflows/benchmarks_report.yml
new file mode 100644
index 0000000000..cffa1b1ef4
--- /dev/null
+++ b/.github/workflows/benchmarks_report.yml
@@ -0,0 +1,83 @@
+# Post any reports generated by benchmarks_run.yml .
+# Separated for security:
+# https://securitylab.github.com/research/github-actions-preventing-pwn-requests/
+
+name: benchmarks-report
+run-name: Report benchmark results
+
+on:
+ workflow_run:
+ workflows: [benchmarks-run]
+ types:
+ - completed
+
+jobs:
+ download:
+ runs-on: ubuntu-latest
+ outputs:
+ reports_exist: ${{ steps.unzip.outputs.reports_exist }}
+ steps:
+ - name: Download artifact
+ id: download-artifact
+ # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#using-data-from-the-triggering-workflow
+ uses: actions/github-script@v6
+ with:
+ script: |
+ let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: context.payload.workflow_run.id,
+ });
+ let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
+ return artifact.name == "benchmark_reports"
+ })[0];
+ if (typeof matchArtifact != 'undefined') {
+ let download = await github.rest.actions.downloadArtifact({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ artifact_id: matchArtifact.id,
+ archive_format: 'zip',
+ });
+ let fs = require('fs');
+ fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/benchmark_reports.zip`, Buffer.from(download.data));
+ };
+
+ - name: Unzip artifact
+ id: unzip
+ run: |
+ if test -f "benchmark_reports.zip"; then
+ reports_exist=1
+ unzip benchmark_reports.zip -d benchmark_reports
+ else
+ reports_exist=0
+ fi
+ echo "reports_exist=$reports_exist" >> "$GITHUB_OUTPUT"
+
+ - name: Store artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: benchmark_reports
+ path: benchmark_reports
+
+ post_reports:
+ runs-on: ubuntu-latest
+ needs: download
+ if: needs.download.outputs.reports_exist == 1
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+
+ - name: Download artifact
+ uses: actions/download-artifact@v3
+ with:
+ name: benchmark_reports
+ path: .github/workflows/benchmark_reports
+
+ - name: Set up Python
+ # benchmarks/bm_runner.py only needs builtins to run.
+ uses: actions/setup-python@v3
+
+ - name: Post reports
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: python benchmarks/bm_runner.py _gh_post
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmarks_run.yml
similarity index 59%
rename from .github/workflows/benchmark.yml
rename to .github/workflows/benchmarks_run.yml
index 5be56c1d80..e48e8b54b4 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmarks_run.yml
@@ -1,6 +1,9 @@
-# Use ASV to check for performance regressions in the last 24 hours' commits.
+# Use ASV to check for performance regressions, either:
+# - In the last 24 hours' commits.
+# - Introduced by this pull request.
-name: benchmark-check
+name: benchmarks-run
+run-name: Run benchmarks
on:
schedule:
@@ -74,12 +77,17 @@ jobs:
- name: Benchmark this pull request
if: ${{ github.event.label.name == 'benchmark_this' }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ PR_NUMBER: ${{ github.event.number }}
run: |
git checkout ${{ github.head_ref }}
python benchmarks/bm_runner.py branch origin/${{ github.base_ref }}
- name: Run overnight benchmarks
if: ${{ github.event_name != 'pull_request' }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
first_commit=${{ inputs.first_commit }}
if [ "$first_commit" == "" ]
@@ -92,57 +100,15 @@ jobs:
python benchmarks/bm_runner.py overnight $first_commit
fi
- - name: Create issues for performance shifts
- if: ${{ github.event_name != 'pull_request' }}
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- run: |
- if [ -d benchmarks/.asv/performance-shifts ]
- then
- cd benchmarks/.asv/performance-shifts
- for commit_file in *
- do
- commit="${commit_file%.*}"
- pr_number=$(git log "$commit"^! --oneline | grep -o "#[0-9]*" | tail -1 | cut -c 2-)
- author=$(gh pr view $pr_number --json author -q '.["author"]["login"]' --repo $GITHUB_REPOSITORY)
- merger=$(gh pr view $pr_number --json mergedBy -q '.["mergedBy"]["login"]' --repo $GITHUB_REPOSITORY)
- # Find a valid assignee from author/merger/nothing.
- if curl -s https://api.github.com/users/$author | grep -q '"type": "User"'; then
- assignee=$author
- elif curl -s https://api.github.com/users/$merger | grep -q '"type": "User"'; then
- assignee=$merger
- else
- assignee=""
- fi
- title="Performance Shift(s): \`$commit\`"
- body="
- Benchmark comparison has identified performance shifts at
-
- * commit $commit (#$pr_number).
-
- Please review the report below and \
- take corrective/congratulatory action as appropriate \
- :slightly_smiling_face:
-
- <details>
- <summary>Performance shift report</summary>
-
- \`\`\`
- $(cat $commit_file)
- \`\`\`
-
- </details>
-
- Generated by GHA run [\`${{github.run_id}}\`](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
- "
- gh issue create --title "$title" --body "$body" --assignee $assignee --label "Bot" --label "Type: Performance" --repo $GITHUB_REPOSITORY
- done
- fi
+ - name: Upload any benchmark reports
+ uses: actions/upload-artifact@v3
+ with:
+ name: benchmark_reports
+ path: .github/workflows/benchmark_reports
- name: Archive asv results
if: ${{ always() }}
uses: actions/upload-artifact@v3
with:
- name: asv-report
- path: |
- benchmarks/.asv/results
+ name: asv-raw-results
+ path: benchmarks/.asv/results
diff --git a/.gitignore b/.gitignore
index 4d0b474e8a..42d02d8c71 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,6 +32,7 @@ pip-cache
# asv data, environments, results
.asv
benchmarks/.data
+.github/workflows/benchmark_reports
#Translations
*.mo
diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py
index 9b997bc87c..27bb7fd78b 100644
--- a/benchmarks/bm_runner.py
+++ b/benchmarks/bm_runner.py
@@ -15,8 +15,10 @@
from os import environ
from pathlib import Path
import re
+import shlex
import subprocess
from tempfile import NamedTemporaryFile
+from textwrap import dedent
from typing import Literal
# The threshold beyond which shifts are 'notable'. See `asv compare`` docs
@@ -24,6 +26,9 @@
COMPARE_FACTOR = 1.2
BENCHMARKS_DIR = Path(__file__).parent
+ROOT_DIR = BENCHMARKS_DIR.parent
+# Storage location for reports used in GitHub actions.
+GH_REPORT_DIR = ROOT_DIR.joinpath(".github", "workflows", "benchmark_reports")
# Common ASV arguments for all run_types except `custom`.
ASV_HARNESS = (
@@ -51,7 +56,7 @@ def _subprocess_runner(args, asv=False, **kwargs):
def _subprocess_runner_capture(args, **kwargs) -> str:
result = _subprocess_runner(args, capture_output=True, **kwargs)
- return result.stdout.decode()
+ return result.stdout.decode().rstrip()
def _check_requirements(package: str) -> None:
@@ -70,7 +75,6 @@ def _prep_data_gen_env() -> None:
Create/access a separate, unchanging environment for generating test data.
"""
- root_dir = BENCHMARKS_DIR.parent
python_version = "3.11"
data_gen_var = "DATA_GEN_PYTHON"
if data_gen_var in environ:
@@ -82,7 +86,7 @@ def _prep_data_gen_env() -> None:
_subprocess_runner(
[
"nox",
- f"--noxfile={root_dir / 'noxfile.py'}",
+ f"--noxfile={ROOT_DIR / 'noxfile.py'}",
"--session=tests",
"--install-only",
f"--python={python_version}",
@@ -91,7 +95,7 @@ def _prep_data_gen_env() -> None:
# Find the environment built above, set it to be the data generation
# environment.
data_gen_python = next(
- (root_dir / ".nox").rglob(f"tests*/bin/python{python_version}")
+ (ROOT_DIR / ".nox").rglob(f"tests*/bin/python{python_version}")
).resolve()
environ[data_gen_var] = str(data_gen_python)
@@ -132,32 +136,187 @@ def _setup_common() -> None:
def _asv_compare(*commits: str, overnight_mode: bool = False) -> None:
- """Run through a list of commits comparing each one to the next."""
+ """
+ Run through a list of commits comparing each one to the next.
+ """
commits = [commit[:8] for commit in commits]
- shifts_dir = BENCHMARKS_DIR / ".asv" / "performance-shifts"
for i in range(len(commits) - 1):
before = commits[i]
after = commits[i + 1]
asv_command = (
f"compare {before} {after} --factor={COMPARE_FACTOR} --split"
)
- _subprocess_runner(asv_command.split(" "), asv=True)
- if overnight_mode:
- # Record performance shifts.
- # Run the command again but limited to only showing performance
- # shifts.
- shifts = _subprocess_runner_capture(
- [*asv_command.split(" "), "--only-changed"], asv=True
+ comparison = _subprocess_runner_capture(
+ asv_command.split(" "), asv=True
+ )
+ echo(comparison)
+ shifts = _subprocess_runner_capture(
+ [*asv_command.split(" "), "--only-changed"], asv=True
+ )
+
+ if shifts or (not overnight_mode):
+ # For the overnight run: only post if there are shifts.
+ _gh_create_reports(after, comparison, shifts)
+
+
+def _gh_create_reports(
+ commit_sha: str, results_full: str, results_shifts: str
+) -> None:
+ """
+ If running under GitHub Actions: record the results in report(s).
+
+ Posting the reports is done by :func:`_gh_post_reports`, which must be run
+ within a separate action to comply with GHA's security limitations.
+ """
+ if "GITHUB_ACTIONS" not in environ:
+ # Only run when within GHA.
+ return
+
+ pr_number = environ.get("PR_NUMBER", None)
+ on_pull_request = pr_number is not None
+ run_id = environ["GITHUB_RUN_ID"]
+ repo = environ["GITHUB_REPOSITORY"]
+ gha_run_link = (
+ f"[`{run_id}`](https://github.com/{repo}/actions/runs/{run_id})"
+ )
+
+ GH_REPORT_DIR.mkdir(exist_ok=True)
+ commit_dir = GH_REPORT_DIR / commit_sha
+ commit_dir.mkdir()
+ command_path = commit_dir / "command.txt"
+ body_path = commit_dir / "body.txt"
+
+ performance_report = dedent(
+ (
+ """
+ ### Performance Benchmark Report: {commit_sha}
+
+ <details>
+ <summary>Performance shifts</summary>
+
+ ```
+ {results_shifts}
+ ```
+
+ </details>
+
+ <details>
+ <summary>Full benchmark results</summary>
+
+ ```
+ {results_full}
+ ```
+
+ </details>
+
+ Generated by GHA run {gha_run_link}
+ """
+ )
+ )
+ performance_report = performance_report.format(
+ commit_sha=commit_sha,
+ results_shifts=results_shifts,
+ results_full=results_full,
+ gha_run_link=gha_run_link,
+ )
+
+ if on_pull_request:
+ # Command to post the report as a comment on the active PR.
+ body_path.write_text(performance_report)
+ command = (
+ f"gh pr comment {pr_number} "
+ f"--body-file {body_path.absolute()} "
+ f"--repo {repo}"
+ )
+ command_path.write_text(command)
+
+ else:
+ # Command to post the report as new issue.
+ commit_msg = _subprocess_runner_capture(
+ f"git log {commit_sha}^! --oneline".split(" ")
+ )
+ # Intended for benchmarking commits on trunk - should include a PR
+ # number due to our squash policy.
+ pr_tag_match = re.search("#[0-9]*", commit_msg)
+
+ assignee = ""
+ pr_tag = "pull request number unavailable"
+ if pr_tag_match is not None:
+ pr_tag = pr_tag_match.group(0)
+
+ for login_type in ("author", "mergedBy"):
+ gh_query = f'.["{login_type}"]["login"]'
+ command = (
+ f"gh pr view {pr_tag[1:]} "
+ f"--json {login_type} -q '{gh_query}' "
+ f"--repo {repo}"
+ )
+ login = _subprocess_runner_capture(command.split(" "))
+
+ command = [
+ "curl",
+ "-s",
+ f"https://api.github.com/users/{login}",
+ ]
+ login_info = _subprocess_runner_capture(command)
+ is_user = '"type": "User"' in login_info
+ if is_user:
+ assignee = login
+ break
+
+ title = f"Performance Shift(s): `{commit_sha}`"
+ body = dedent(
+ (
+ f"""
+ Benchmark comparison has identified performance shifts at:
+
+ * commit {commit_sha} ({pr_tag}).
+
+
+ Please review the report below and
+ take corrective/congratulatory action as appropriate
+ :slightly_smiling_face:
+
+ """
)
- if shifts:
- # Write the shifts report to a file.
- # Dir is used by .github/workflows/benchmarks.yml,
- # but not cached - intended to be discarded after run.
- shifts_dir.mkdir(exist_ok=True, parents=True)
- shifts_path = (shifts_dir / after).with_suffix(".txt")
- with shifts_path.open("w") as shifts_file:
- shifts_file.write(shifts)
+ )
+ body += performance_report
+ body_path.write_text(body)
+
+ command = (
+ "gh issue create "
+ f'--title "{title}" '
+ f"--body-file {body_path.absolute()} "
+ '--label "Bot" '
+ '--label "Type: Performance" '
+ f"--repo {repo}"
+ )
+ if assignee:
+ command += f" --assignee {assignee}"
+ command_path.write_text(command)
+
+
+def _gh_post_reports() -> None:
+ """
+ If running under GitHub Actions: post pre-prepared benchmark reports.
+
+ Reports are prepared by :func:`_gh_create_reports`, which must be run
+ within a separate action to comply with GHA's security limitations.
+ """
+ if "GITHUB_ACTIONS" not in environ:
+ # Only run when within GHA.
+ return
+
+ commit_dirs = [x for x in GH_REPORT_DIR.iterdir() if x.is_dir()]
+ for commit_dir in commit_dirs:
+ command_path = commit_dir / "command.txt"
+ command = command_path.read_text()
+
+ # Security: only accept certain commands to run.
+ assert command.startswith(("gh issue create", "gh pr comment"))
+
+ _subprocess_runner(shlex.split(command))
class _SubParserGenerator(ABC):
@@ -175,18 +334,21 @@ def __init__(self, subparsers: ArgumentParser.add_subparsers) -> None:
formatter_class=argparse.RawTextHelpFormatter,
)
self.add_arguments()
- self.subparser.add_argument(
- "asv_args",
- nargs=argparse.REMAINDER,
- help="Any number of arguments to pass down to ASV.",
- )
+ self.add_asv_arguments()
self.subparser.set_defaults(func=self.func)
@abstractmethod
def add_arguments(self) -> None:
- """All self.subparser.add_argument() calls."""
+ """All custom self.subparser.add_argument() calls."""
_ = NotImplemented
+ def add_asv_arguments(self) -> None:
+ self.subparser.add_argument(
+ "asv_args",
+ nargs=argparse.REMAINDER,
+ help="Any number of arguments to pass down to ASV.",
+ )
+
@staticmethod
@abstractmethod
def func(args: argparse.Namespace):
@@ -204,9 +366,8 @@ class Overnight(_SubParserGenerator):
name = "overnight"
description = (
"Benchmarks all commits between the input **first_commit** to ``HEAD``, "
- "comparing each to its parent for performance shifts. If a commit causes "
- "shifts, the output is saved to a file:\n"
- "``.asv/performance-shifts/``\n\n"
+ "comparing each to its parent for performance shifts. If running on"
+ "GitHub Actions: performance shift(s) will be reported in a new issue.\n"
"Designed for checking the previous 24 hours' commits, typically in a "
"scheduled script."
)
@@ -233,7 +394,7 @@ def func(args: argparse.Namespace) -> None:
# git rev-list --first-parent is the command ASV uses.
git_command = f"git rev-list --first-parent {commit_range}"
commit_string = _subprocess_runner_capture(git_command.split(" "))
- commit_list = commit_string.rstrip().split("\n")
+ commit_list = commit_string.split("\n")
_asv_compare(*reversed(commit_list), overnight_mode=True)
@@ -242,7 +403,9 @@ class Branch(_SubParserGenerator):
description = (
"Performs the same operations as ``overnight``, but always on two commits "
"only - ``HEAD``, and ``HEAD``'s merge-base with the input "
- "**base_branch**. Output from this run is never saved to a file. Designed "
+ "**base_branch**. If running on GitHub Actions: performance "
+ "comparisons will be posted in a comment on the relevant pull request.\n"
+ "Designed "
"for testing if the active branch's changes cause performance shifts - "
"anticipating what would be caught by ``overnight`` once merged.\n\n"
"**For maximum accuracy, avoid using the machine that is running this "
@@ -264,11 +427,14 @@ def add_arguments(self) -> None:
def func(args: argparse.Namespace) -> None:
_setup_common()
- git_command = f"git merge-base HEAD {args.base_branch}"
+ git_command = "git rev-parse HEAD"
+ head_sha = _subprocess_runner_capture(git_command.split(" "))[:8]
+
+ git_command = f"git merge-base {head_sha} {args.base_branch}"
merge_base = _subprocess_runner_capture(git_command.split(" "))[:8]
with NamedTemporaryFile("w") as hashfile:
- hashfile.writelines([merge_base, "\n", "HEAD"])
+ hashfile.writelines([merge_base, "\n", head_sha])
hashfile.flush()
commit_range = f"HASHFILE:{hashfile.name}"
asv_command = ASV_HARNESS.format(posargs=commit_range)
@@ -276,7 +442,7 @@ def func(args: argparse.Namespace) -> None:
[*asv_command.split(" "), *args.asv_args], asv=True
)
- _asv_compare(merge_base, "HEAD")
+ _asv_compare(merge_base, head_sha)
class _CSPerf(_SubParserGenerator, ABC):
@@ -388,6 +554,28 @@ def func(args: argparse.Namespace) -> None:
_subprocess_runner([args.asv_sub_command, *args.asv_args], asv=True)
+class GhPost(_SubParserGenerator):
+ name = "_gh_post"
+ description = (
+ "Used by GitHub Actions to post benchmark reports that were prepared "
+ "during previous actions. Separated to comply with GitHub's security "
+ "requirements."
+ )
+ epilog = "Sole acceptable syntax: python bm_runner.py _gh_post"
+
+ @staticmethod
+ def func(args: argparse.Namespace) -> None:
+ _gh_post_reports()
+
+ # No arguments permitted for this subclass:
+
+ def add_arguments(self) -> None:
+ pass
+
+ def add_asv_arguments(self) -> None:
+ pass
+
+
def main():
parser = ArgumentParser(
description="Run the Iris performance benchmarks (using Airspeed Velocity).",
@@ -395,7 +583,7 @@ def main():
)
subparsers = parser.add_subparsers(required=True)
- for gen in (Overnight, Branch, CPerf, SPerf, Custom):
+ for gen in (Overnight, Branch, CPerf, SPerf, Custom, GhPost):
_ = gen(subparsers).subparser
parsed = parser.parse_args()
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index 651f25e95e..094fbb184b 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -79,6 +79,10 @@ This document explains the changes made to Iris for this release
`"Xarray bridge" `_ facility.
(:pull:`5214`)
+#. `@trexfeathers`_ refactored benchmarking scripts to better support on-demand
+ benchmarking of pull requests. Results are now posted as a new comment.
+ (feature branch: :pull:`5430`)
+
.. comment
Whatsnew author names (@github name) in alphabetical order. Note that,
From cdbec50b283965c38ec85bd04c5f296db41bd1df Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 14:08:27 +0100
Subject: [PATCH 3/9] Better comparison commits for PR benchmarking (#5431)
* Don't check out head_ref - benchmark the GH simulated merge commit instead.
* What's New entry.
---
.github/workflows/benchmarks_run.yml | 1 -
benchmarks/bm_runner.py | 3 ++-
docs/src/whatsnew/latest.rst | 6 ++++++
3 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/benchmarks_run.yml b/.github/workflows/benchmarks_run.yml
index e48e8b54b4..e995e5141b 100644
--- a/.github/workflows/benchmarks_run.yml
+++ b/.github/workflows/benchmarks_run.yml
@@ -81,7 +81,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.number }}
run: |
- git checkout ${{ github.head_ref }}
python benchmarks/bm_runner.py branch origin/${{ github.base_ref }}
- name: Run overnight benchmarks
diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py
index 27bb7fd78b..a4733582fd 100644
--- a/benchmarks/bm_runner.py
+++ b/benchmarks/bm_runner.py
@@ -403,7 +403,8 @@ class Branch(_SubParserGenerator):
description = (
"Performs the same operations as ``overnight``, but always on two commits "
"only - ``HEAD``, and ``HEAD``'s merge-base with the input "
- "**base_branch**. If running on GitHub Actions: performance "
+ "**base_branch**. If running on GitHub Actions: HEAD will be GitHub's "
+ "merge commit and merge-base will be the merge target. Performance "
"comparisons will be posted in a comment on the relevant pull request.\n"
"Designed "
"for testing if the active branch's changes cause performance shifts - "
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index 094fbb184b..1ef1ce59d7 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -83,6 +83,12 @@ This document explains the changes made to Iris for this release
benchmarking of pull requests. Results are now posted as a new comment.
(feature branch: :pull:`5430`)
+#. `@trexfeathers`_ changed pull request benchmarking to compare: GitHub's
+ simulated merge-commit (which it uses for all PR CI by default) versus the
+ merge target (e.g. `main`). This should provide the most 'atomic' account
+ of how the pull request changes affect performance. (feature branch:
+ :pull:`5431`)
+
.. comment
Whatsnew author names (@github name) in alphabetical order. Note that,
From e5a45c27ed98f811a5cd9f83e54329756f5c361e Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 14:25:31 +0100
Subject: [PATCH 4/9] Warn via issue if overnight benchmarks fail (#5432)
* Include a warning step for overnight benchmarking.
* Fix for failure warning script.
* Better formatting of warning issue title.
* What's new entry.
---
.github/workflows/benchmarks_run.yml | 12 ++++++++++++
docs/src/whatsnew/latest.rst | 4 ++++
2 files changed, 16 insertions(+)
diff --git a/.github/workflows/benchmarks_run.yml b/.github/workflows/benchmarks_run.yml
index e995e5141b..d29e251086 100644
--- a/.github/workflows/benchmarks_run.yml
+++ b/.github/workflows/benchmarks_run.yml
@@ -84,6 +84,7 @@ jobs:
python benchmarks/bm_runner.py branch origin/${{ github.base_ref }}
- name: Run overnight benchmarks
+ id: overnight
if: ${{ github.event_name != 'pull_request' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -99,6 +100,17 @@ jobs:
python benchmarks/bm_runner.py overnight $first_commit
fi
+ - name: Warn of failure
+ if: >
+ failure() &&
+ steps.overnight.outcome == 'failure'
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ title="Overnight benchmark workflow failed: \`${{ github.run_id }}\`"
+ body="Generated by GHA run [\`${{github.run_id}}\`](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})"
+ gh issue create --title "$title" --body "$body" --label "Bot" --label "Type: Performance" --repo $GITHUB_REPOSITORY
+
- name: Upload any benchmark reports
uses: actions/upload-artifact@v3
with:
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index 1ef1ce59d7..91d582e037 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -89,6 +89,10 @@ This document explains the changes made to Iris for this release
of how the pull request changes affect performance. (feature branch:
:pull:`5431`)
+#. `@trexfeathers`_ added a catch to the overnight benchmark workflow to raise
+ an issue if the overnight run fails - this was previously an 'invisible'
+ problem. (feature branch: :pull:`5432`)
+
.. comment
Whatsnew author names (@github name) in alphabetical order. Note that,
From 016fc37229023bed96f65b771d420d36222c9487 Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 14:41:05 +0100
Subject: [PATCH 5/9] Minor benchmark improvements (#5433)
* Use shlex.split() for bm_runner commands.
* Minor documentation clarifications.
---
.github/workflows/benchmarks_run.yml | 2 +-
benchmarks/bm_runner.py | 63 +++++++++++++++-------------
2 files changed, 36 insertions(+), 29 deletions(-)
diff --git a/.github/workflows/benchmarks_run.yml b/.github/workflows/benchmarks_run.yml
index d29e251086..0ea53a423c 100644
--- a/.github/workflows/benchmarks_run.yml
+++ b/.github/workflows/benchmarks_run.yml
@@ -12,7 +12,7 @@ on:
workflow_dispatch:
inputs:
first_commit:
- description: "Argument to be passed to the overnight benchmark script."
+ description: "First commit to benchmark (see bm_runner.py > Overnight)."
required: false
type: string
pull_request:
diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py
index a4733582fd..880f52961c 100644
--- a/benchmarks/bm_runner.py
+++ b/benchmarks/bm_runner.py
@@ -143,16 +143,14 @@ def _asv_compare(*commits: str, overnight_mode: bool = False) -> None:
for i in range(len(commits) - 1):
before = commits[i]
after = commits[i + 1]
- asv_command = (
+ asv_command = shlex.split(
f"compare {before} {after} --factor={COMPARE_FACTOR} --split"
)
- comparison = _subprocess_runner_capture(
- asv_command.split(" "), asv=True
- )
+ comparison = _subprocess_runner_capture(asv_command, asv=True)
echo(comparison)
shifts = _subprocess_runner_capture(
- [*asv_command.split(" "), "--only-changed"], asv=True
+ [*asv_command, "--only-changed"], asv=True
)
if shifts or (not overnight_mode):
@@ -247,12 +245,12 @@ def _gh_create_reports(
for login_type in ("author", "mergedBy"):
gh_query = f'.["{login_type}"]["login"]'
- command = (
+ command = shlex.split(
f"gh pr view {pr_tag[1:]} "
f"--json {login_type} -q '{gh_query}' "
f"--repo {repo}"
)
- login = _subprocess_runner_capture(command.split(" "))
+ login = _subprocess_runner_capture(command)
command = [
"curl",
@@ -346,7 +344,7 @@ def add_asv_arguments(self) -> None:
self.subparser.add_argument(
"asv_args",
nargs=argparse.REMAINDER,
- help="Any number of arguments to pass down to ASV.",
+ help="Any number of arguments to pass down to the ASV benchmark command.",
)
@staticmethod
@@ -366,10 +364,11 @@ class Overnight(_SubParserGenerator):
name = "overnight"
description = (
"Benchmarks all commits between the input **first_commit** to ``HEAD``, "
- "comparing each to its parent for performance shifts. If running on"
+ "comparing each to its parent for performance shifts. If running on "
"GitHub Actions: performance shift(s) will be reported in a new issue.\n"
"Designed for checking the previous 24 hours' commits, typically in a "
- "scheduled script."
+ "scheduled script.\n"
+ "Uses `asv run`."
)
epilog = (
"e.g. python bm_runner.py overnight a1b23d4\n"
@@ -388,12 +387,14 @@ def func(args: argparse.Namespace) -> None:
_setup_common()
commit_range = f"{args.first_commit}^^.."
- asv_command = ASV_HARNESS.format(posargs=commit_range)
- _subprocess_runner([*asv_command.split(" "), *args.asv_args], asv=True)
+ asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range))
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
# git rev-list --first-parent is the command ASV uses.
- git_command = f"git rev-list --first-parent {commit_range}"
- commit_string = _subprocess_runner_capture(git_command.split(" "))
+ git_command = shlex.split(
+ f"git rev-list --first-parent {commit_range}"
+ )
+ commit_string = _subprocess_runner_capture(git_command)
commit_list = commit_string.split("\n")
_asv_compare(*reversed(commit_list), overnight_mode=True)
@@ -410,7 +411,8 @@ class Branch(_SubParserGenerator):
"for testing if the active branch's changes cause performance shifts - "
"anticipating what would be caught by ``overnight`` once merged.\n\n"
"**For maximum accuracy, avoid using the machine that is running this "
- "session. Run time could be >1 hour for the full benchmark suite.**"
+ "session. Run time could be >1 hour for the full benchmark suite.**\n"
+ "Uses `asv run`."
)
epilog = (
"e.g. python bm_runner.py branch upstream/main\n"
@@ -428,20 +430,20 @@ def add_arguments(self) -> None:
def func(args: argparse.Namespace) -> None:
_setup_common()
- git_command = "git rev-parse HEAD"
- head_sha = _subprocess_runner_capture(git_command.split(" "))[:8]
+ git_command = shlex.split("git rev-parse HEAD")
+ head_sha = _subprocess_runner_capture(git_command)[:8]
- git_command = f"git merge-base {head_sha} {args.base_branch}"
- merge_base = _subprocess_runner_capture(git_command.split(" "))[:8]
+ git_command = shlex.split(
+ f"git merge-base {head_sha} {args.base_branch}"
+ )
+ merge_base = _subprocess_runner_capture(git_command)[:8]
with NamedTemporaryFile("w") as hashfile:
hashfile.writelines([merge_base, "\n", head_sha])
hashfile.flush()
commit_range = f"HASHFILE:{hashfile.name}"
- asv_command = ASV_HARNESS.format(posargs=commit_range)
- _subprocess_runner(
- [*asv_command.split(" "), *args.asv_args], asv=True
- )
+ asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range))
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
_asv_compare(merge_base, head_sha)
@@ -453,7 +455,8 @@ class _CSPerf(_SubParserGenerator, ABC):
"Run the on-demand {} suite of benchmarks (part of the UK Met "
"Office NG-VAT project) for the ``HEAD`` of ``upstream/main`` only, "
"and publish the results to the input **publish_dir**, within a "
- "unique subdirectory for this run."
+ "unique subdirectory for this run.\n"
+ "Uses `asv run`."
)
epilog = (
"e.g. python bm_runner.py {0} my_publish_dir\n"
@@ -497,11 +500,15 @@ def csperf(
# Don't fail the whole run if memory blows on 1 benchmark.
asv_command = asv_command.replace(" --strict", "")
# Only do a single round.
- asv_command = re.sub(r"rounds=\d", "rounds=1", asv_command)
- _subprocess_runner([*asv_command.split(" "), *args.asv_args], asv=True)
+ asv_command = shlex.split(
+ re.sub(r"rounds=\d", "rounds=1", asv_command)
+ )
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
- asv_command = f"publish {commit_range} --html-dir={publish_subdir}"
- _subprocess_runner(asv_command.split(" "), asv=True)
+ asv_command = shlex.split(
+ f"publish {commit_range} --html-dir={publish_subdir}"
+ )
+ _subprocess_runner(asv_command, asv=True)
# Print completion message.
location = BENCHMARKS_DIR / ".asv"
From 25451fd2aa4e27cd2e7d5e5f8e509f6b7131e78f Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 14:53:06 +0100
Subject: [PATCH 6/9] Set benchmark runs to error if the subprocess errors
(#5434)
* Set benchmark runs to error if the subprocess errors.
* Still compare results even from a broken run.
* Still upload reports if overnight run fails.
* What's New entry.
---
.github/workflows/benchmarks_run.yml | 1 +
benchmarks/bm_runner.py | 13 +++++++++----
docs/src/whatsnew/latest.rst | 3 +++
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/benchmarks_run.yml b/.github/workflows/benchmarks_run.yml
index 0ea53a423c..a39c531a77 100644
--- a/.github/workflows/benchmarks_run.yml
+++ b/.github/workflows/benchmarks_run.yml
@@ -112,6 +112,7 @@ jobs:
gh issue create --title "$title" --body "$body" --label "Bot" --label "Type: Performance" --repo $GITHUB_REPOSITORY
- name: Upload any benchmark reports
+ if: success() || steps.overnight.outcome == 'failure'
uses: actions/upload-artifact@v3
with:
name: benchmark_reports
diff --git a/benchmarks/bm_runner.py b/benchmarks/bm_runner.py
index 880f52961c..b3145fbdf1 100644
--- a/benchmarks/bm_runner.py
+++ b/benchmarks/bm_runner.py
@@ -51,6 +51,7 @@ def _subprocess_runner(args, asv=False, **kwargs):
args.insert(0, "asv")
kwargs["cwd"] = BENCHMARKS_DIR
echo(" ".join(args))
+ kwargs.setdefault("check", True)
return subprocess.run(args, **kwargs)
@@ -387,16 +388,20 @@ def func(args: argparse.Namespace) -> None:
_setup_common()
commit_range = f"{args.first_commit}^^.."
- asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range))
- _subprocess_runner([*asv_command, *args.asv_args], asv=True)
-
# git rev-list --first-parent is the command ASV uses.
git_command = shlex.split(
f"git rev-list --first-parent {commit_range}"
)
commit_string = _subprocess_runner_capture(git_command)
commit_list = commit_string.split("\n")
- _asv_compare(*reversed(commit_list), overnight_mode=True)
+
+ asv_command = shlex.split(ASV_HARNESS.format(posargs=commit_range))
+ try:
+ _subprocess_runner([*asv_command, *args.asv_args], asv=True)
+ finally:
+ # Designed for long running - want to compare/post any valid
+ # results even if some are broken.
+ _asv_compare(*reversed(commit_list), overnight_mode=True)
class Branch(_SubParserGenerator):
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index 91d582e037..482377ee16 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -93,6 +93,9 @@ This document explains the changes made to Iris for this release
an issue if the overnight run fails - this was previously an 'invisible'
problem. (feature branch: :pull:`5432`)
+#. `@trexfeathers`_ set `bm_runner.py` to error when the called processes
+ error. This fixes an oversight introduced in :pull:`5215`. (feature branch:
+ :pull:`5434`)
.. comment
Whatsnew author names (@github name) in alphabetical order. Note that,
From 7d801d9a1d8268c72cdbdf2114814c29edd6e8a1 Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 15:09:57 +0100
Subject: [PATCH 7/9] Hard-code conda channel into asv_delegated_conda.py
(#5435)
* What's new entry.
* What's New entry.
* Hard-code conda channel into asv_delegated_conda.py .
* Fix some rebase confusion in the What's New.
---
benchmarks/asv.conf.json | 1 -
benchmarks/asv_delegated_conda.py | 4 ++++
docs/src/whatsnew/latest.rst | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
index faa7f6daee..fab5bcb44e 100644
--- a/benchmarks/asv.conf.json
+++ b/benchmarks/asv.conf.json
@@ -4,7 +4,6 @@
"project_url": "https://github.com/SciTools/iris",
"repo": "..",
"environment_type": "conda-delegated",
- "conda_channels": ["conda-forge", "defaults"],
"show_commit_url": "http://github.com/scitools/iris/commit/",
"branches": ["upstream/main"],
diff --git a/benchmarks/asv_delegated_conda.py b/benchmarks/asv_delegated_conda.py
index 250a4e032d..22a3110075 100644
--- a/benchmarks/asv_delegated_conda.py
+++ b/benchmarks/asv_delegated_conda.py
@@ -66,6 +66,8 @@ def __init__(
ignored.append("`requirements`")
if tagged_env_vars:
ignored.append("`tagged_env_vars`")
+ if conf.conda_channels:
+ ignored.append("conda_channels")
if conf.conda_environment_file:
ignored.append("`conda_environment_file`")
message = (
@@ -75,6 +77,8 @@ def __init__(
log.warning(message)
requirements = {}
tagged_env_vars = {}
+ # All that is required to create ASV's bare-bones environment.
+ conf.conda_channels = ["defaults"]
conf.conda_environment_file = None
super().__init__(conf, python, requirements, tagged_env_vars)
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index 482377ee16..79f4273307 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -97,6 +97,7 @@ This document explains the changes made to Iris for this release
error. This fixes an oversight introduced in :pull:`5215`. (feature branch:
 :pull:`5434`)
+
.. comment
Whatsnew author names (@github name) in alphabetical order. Note that,
core dev names are automatically included by the common_links.inc:
From 3125c3e59e603e6f0ee3865c9645f78cf37277bd Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 15:27:44 +0100
Subject: [PATCH 8/9] Inflate benchmark data to ensure laziness (#5436)
* Inflate benchmark data to ensure laziness.
* What's New entry.
---
benchmarks/benchmarks/experimental/ugrid/regions_combine.py | 2 +-
benchmarks/benchmarks/load/__init__.py | 2 +-
benchmarks/benchmarks/load/ugrid.py | 2 +-
benchmarks/benchmarks/save.py | 2 +-
docs/src/whatsnew/latest.rst | 3 +++
5 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/benchmarks/benchmarks/experimental/ugrid/regions_combine.py b/benchmarks/benchmarks/experimental/ugrid/regions_combine.py
index c5f8fb564e..16044c663a 100644
--- a/benchmarks/benchmarks/experimental/ugrid/regions_combine.py
+++ b/benchmarks/benchmarks/experimental/ugrid/regions_combine.py
@@ -30,7 +30,7 @@
class MixinCombineRegions:
# Characterise time taken + memory-allocated, for various stages of combine
# operations on cubesphere-like test data.
- params = [4, 500]
+ params = [50, 500]
param_names = ["cubesphere-N"]
def _parametrised_cache_filename(self, n_cubesphere, content_name):
diff --git a/benchmarks/benchmarks/load/__init__.py b/benchmarks/benchmarks/load/__init__.py
index 1b0ea696f6..3b2a83b1b1 100644
--- a/benchmarks/benchmarks/load/__init__.py
+++ b/benchmarks/benchmarks/load/__init__.py
@@ -27,7 +27,7 @@ class LoadAndRealise:
# For data generation
timeout = 600.0
params = [
- [(2, 2, 2), (1280, 960, 5), (2, 2, 1000)],
+ [(50, 50, 2), (1280, 960, 5), (2, 2, 1000)],
[False, True],
["FF", "PP", "NetCDF"],
]
diff --git a/benchmarks/benchmarks/load/ugrid.py b/benchmarks/benchmarks/load/ugrid.py
index 350a78e128..35c8754171 100644
--- a/benchmarks/benchmarks/load/ugrid.py
+++ b/benchmarks/benchmarks/load/ugrid.py
@@ -77,7 +77,7 @@ class DataRealisation:
warmup_time = 0.0
timeout = 300.0
- params = [1, int(2e5)]
+ params = [int(1e4), int(2e5)]
param_names = ["number of faces"]
def setup_common(self, **kwargs):
diff --git a/benchmarks/benchmarks/save.py b/benchmarks/benchmarks/save.py
index 3551c72528..d00c66a0ca 100644
--- a/benchmarks/benchmarks/save.py
+++ b/benchmarks/benchmarks/save.py
@@ -21,7 +21,7 @@
class NetcdfSave:
- params = [[1, 600], [False, True]]
+ params = [[50, 600], [False, True]]
param_names = ["cubesphere-N", "is_unstructured"]
def setup(self, n_cubesphere, is_unstructured):
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index 79f4273307..3a8e62b34f 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -97,6 +97,9 @@ This document explains the changes made to Iris for this release
error. This fixes an oversight introduced in :pull:`5215`. (feature branch:
 :pull:`5434`)
+#. `@trexfeathers`_ inflated some benchmark data sizes to compensate for
+ :pull:`5229`. (feature branch: :pull:`5436`)
+
.. comment
Whatsnew author names (@github name) in alphabetical order. Note that,
From 5bda4e3be83d651a26d1ba353813f834646a0d2f Mon Sep 17 00:00:00 2001
From: Martin Yeo <40734014+trexfeathers@users.noreply.github.com>
Date: Fri, 18 Aug 2023 16:01:30 +0100
Subject: [PATCH 9/9] Benchmark feature branch what's new entry (#5438)
* What's new entry.
* Correct user name @ESadek-MO.
* Missing colon.
---
docs/src/whatsnew/latest.rst | 28 +++++-----------------------
1 file changed, 5 insertions(+), 23 deletions(-)
diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst
index d7e8b17901..35e2158141 100644
--- a/docs/src/whatsnew/latest.rst
+++ b/docs/src/whatsnew/latest.rst
@@ -72,29 +72,11 @@ This document explains the changes made to Iris for this release
💼 Internal
===========
-#. N/A
-
-
-#. `@trexfeathers`_ refactored benchmarking scripts to better support on-demand
- benchmarking of pull requests. Results are now posted as a new comment.
- (feature branch: :pull:`5430`)
-
-#. `@trexfeathers`_ changed pull request benchmarking to compare: GitHub's
- simulated merge-commit (which it uses for all PR CI by default) versus the
- merge target (e.g. `main`). This should provide the most 'atomic' account
- of how the pull request changes affect performance. (feature branch:
- :pull:`5431`)
-
-#. `@trexfeathers`_ added a catch to the overnight benchmark workflow to raise
- an issue if the overnight run fails - this was previously an 'invisible'
- problem. (feature branch: :pull:`5432`)
-
-#. `@trexfeathers`_ set `bm_runner.py` to error when the called processes
- error. This fixes an oversight introduced in :pull:`5215`. (feature branch:
- :pull:`5434`)
-
-#. `@trexfeathers`_ inflated some benchmark data sizes to compensate for
- :pull:`5229`. (feature branch: :pull:`5436`)
+#. `@trexfeathers`_ and `@ESadek-MO`_ (reviewer) performed a suite of fixes and
+ improvements for benchmarking, primarily to get
+ :ref:`on demand pull request benchmarking <on_demand_pr>`
+ working properly. (Main pull request: :pull:`5437`, more detail:
+ :pull:`5430`, :pull:`5431`, :pull:`5432`, :pull:`5434`, :pull:`5436`)
.. comment