Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 64 additions & 0 deletions .github/workflows/redo_regressions.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
name: Redo regressions

# Start the job only for PRs, and only when a label is added.
on:
  pull_request:
    types: [labeled]

# some general variables
env:
  # https://keras.io/getting_started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
  PYTHONHASHSEED: "0"

jobs:
  build:
    # Only run when the triggering label is the dedicated one.
    if: contains(github.event.pull_request.labels.*.name, 'redo-regressions')
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: ["3.11"]
        include:
          - os: ubuntu-latest
            CONDA_OS: linux-64
      fail-fast: false
    runs-on: ${{ matrix.os }}
    env:
      NETRC_FILE: ${{ secrets.NETRC_FILE }}
      NNPDF_SSH_KEY: ${{ secrets.NNPDF_SSH_KEY }}
    steps:
      # fetch-depth: 0 + head ref so the later push goes back to the PR branch.
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          ref: ${{ github.event.pull_request.head.ref }}

      - uses: conda-incubator/setup-miniconda@v3
        with:
          python-version: ${{ matrix.python-version }}
          use-mamba: true
          auto-update-conda: true
          activate-environment: test
      - name: Setup conda and install LHAPDF and pandoc
        shell: bash -l {0}
        run: |
          echo "$NETRC_FILE" | base64 --decode > ~/.netrc
          conda config --remove channels defaults
          conda config --append channels conda-forge
          conda config --prepend channels https://packages.nnpdf.science/public
          conda config --set show_channel_urls true
          conda install lhapdf pandoc
      - name: Install nnpdf with testing and qed extras
        shell: bash -l {0}
        run: |
          pip install .[qed,tests]
      - name: Regenerate regressions
        shell: bash -l {0}
        run: |
          pytest extra_tests/regression_checks.py --regenerate True
      - name: Commit
        shell: bash -l {0}
        run: |
          git config user.name "Redo regressions bot"
          # git refuses to commit without an identity email; runners have none set.
          git config user.email "redo-regressions-bot@users.noreply.github.com"
          git add extra_tests/regression_fits/*
          git status
          git commit -m "Automatically regenerated regressions from PR ${{ github.event.number }}, branch ${{ github.event.pull_request.head.ref }}."
          git push origin "${{ github.event.pull_request.head.ref }}"
14 changes: 14 additions & 0 deletions extra_tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
"""
This enables the use of a command line argument to set regenerate, which is used to automate the
regeneration of the regression data.
"""


def _as_bool(value):
    """Interpret a command-line string such as 'True'/'False' as a boolean.

    With a plain ``action="store"`` option, ``--regenerate False`` used to
    produce the truthy string ``"False"`` and regenerate the data anyway;
    this converter makes both spellings behave as intended.
    """
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "yes", "on")


def pytest_addoption(parser):
    """Register the ``--regenerate`` option used by the regression tests.

    Defaults to ``False``; when given on the command line the value is
    converted to a real boolean instead of being kept as a raw string.
    """
    parser.addoption(
        "--regenerate",
        action="store",
        default=False,
        type=_as_bool,
        help="Regenerate the regression data instead of comparing against it.",
    )


def pytest_generate_tests(metafunc):
    """Inject the ``--regenerate`` command-line value into tests that request it.

    Any test declaring a ``regenerate`` fixture is parametrized with the single
    value taken from the command line (or the registered default).
    """
    if "regenerate" not in metafunc.fixturenames:
        return
    value = metafunc.config.option.regenerate
    if value is None:
        return
    metafunc.parametrize("regenerate", [value])
2 changes: 1 addition & 1 deletion extra_tests/readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ In that case the following recipe shall be applied:
2. If the changes look due to numerics, run a full production-like fit (4.0 baseline, 100 replicas, etc)
3. Review and finish the PR normally, and then, before merge:
a. Rebase on top of master
b. Perform one commit with the update to the regression tests
b. Add the 'redo-regressions' label to the PR to automatically regenerate the regressions

If instead, the changes are supposed to change the numerical values of the result (e.g., a change in the treatment of seeds)
please document it in the release notes for the following tag.
10 changes: 6 additions & 4 deletions extra_tests/regression_checks.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,14 @@
Regression tests
"""

from n3fit.tests.test_fit import check_fit_results, EXE
import pathlib
from reportengine.compat import yaml
import shutil
import subprocess as sp

import pytest

from n3fit.tests.test_fit import EXE, check_fit_results
from reportengine.compat import yaml

REGRESSION_FOLDER = pathlib.Path(__file__).with_name("regression_fits")

Expand All @@ -29,7 +29,7 @@


@pytest.mark.parametrize("runcard,replica", runcard_and_replicas.items())
def test_regression_fit(tmp_path, runcard, replica):
def test_regression_fit(tmp_path, runcard, replica, regenerate):
runcard_name = f"{runcard}.yml"
runcard_file = REGRESSION_FOLDER / runcard_name
shutil.copy(runcard_file, tmp_path)
Expand All @@ -41,4 +41,6 @@ def test_regression_fit(tmp_path, runcard, replica):
sp.run(f"{EXE} {runcard_name} {replica}".split(), cwd=tmp_path, check=True)
old_json_file = REGRESSION_FOLDER / f"{runcard}_{replica}.json"

check_fit_results(tmp_path, runcard, replica, old_json_file, regenerate=False, rel_error=1e-2)
check_fit_results(
tmp_path, runcard, replica, old_json_file, regenerate=regenerate, rel_error=1e-2
)
26 changes: 13 additions & 13 deletions extra_tests/regression_fits/central_16.json
Original file line number Diff line number Diff line change
Expand Up @@ -63,32 +63,32 @@
1.1961095151498038
],
"integrability": [
0.0009203124718642486,
0.0009203124718646927,
0.0009203124718629718,
4.8332231357695044e-05,
0.4625758156180382,
0.006963429623282114
0.4625758156180373,
0.0069634296232816695
],
"timing": {
"walltime": {
"Total": 30.79029417037964,
"Total": 18.60609221458435,
"start": 0.0,
"replica_set": 0.430037260055542,
"replica_fitted": 30.790075302124023,
"replica_set_to_replica_fitted": 30.36003804206848
"replica_set": 0.3607151508331299,
"replica_fitted": 18.606019735336304,
"replica_set_to_replica_fitted": 18.245304584503174
},
"cputime": {
"Total": 58.881718801999995,
"Total": 20.6525949,
"start": 0.0,
"replica_set": 5.264737561999997,
"replica_fitted": 58.881475626,
"replica_set_to_replica_fitted": 53.616738064
"replica_set": 0.6791323,
"replica_fitted": 20.6525204,
"replica_set_to_replica_fitted": 19.9733881
}
},
"version": {
"tensorflow": "2.15.0, mkl=??",
"numpy": "1.24.4",
"nnpdf": "4.0.8.post113.dev0+e1b5f2cd5.dirty",
"validphys": "4.0.8.post113.dev0+e1b5f2cd5.dirty"
"nnpdf": "4.0.8.post168.dev0+5adb0639b",
"validphys": "4.0.8.post168.dev0+5adb0639b"
}
}
28 changes: 14 additions & 14 deletions extra_tests/regression_fits/diagonal_45.json
Original file line number Diff line number Diff line change
Expand Up @@ -58,37 +58,37 @@
"arc_lengths": [
1.3182267178222185,
1.54441465374414,
1.179107351893619,
1.1791073518936193,
1.0015316488813237,
1.2179106362142496
],
"integrability": [
0.0008988642075570819,
0.000898864207557748,
0.0008988642075570263,
4.724649568210726e-05,
0.4626152291893959,
0.0070505898911492615
0.462615229189395,
0.007050589891148817
],
"timing": {
"walltime": {
"Total": 28.264042139053345,
"Total": 17.62032151222229,
"start": 0.0,
"replica_set": 0.4277496337890625,
"replica_fitted": 28.26386070251465,
"replica_set_to_replica_fitted": 27.836111068725586
"replica_set": 0.3764665126800537,
"replica_fitted": 17.620244026184082,
"replica_set_to_replica_fitted": 17.24377751350403
},
"cputime": {
"Total": 56.531395771,
"Total": 19.6138915,
"start": 0.0,
"replica_set": 5.303023015999997,
"replica_fitted": 56.53119243,
"replica_set_to_replica_fitted": 51.228169414
"replica_set": 0.6932296000000004,
"replica_fitted": 19.6138119,
"replica_set_to_replica_fitted": 18.9205823
}
},
"version": {
"tensorflow": "2.15.0, mkl=??",
"numpy": "1.24.4",
"nnpdf": "4.0.8.post113.dev0+e1b5f2cd5.dirty",
"validphys": "4.0.8.post113.dev0+e1b5f2cd5.dirty"
"nnpdf": "4.0.8.post168.dev0+5adb0639b",
"validphys": "4.0.8.post168.dev0+5adb0639b"
}
}
24 changes: 12 additions & 12 deletions extra_tests/regression_fits/feature_scaling_81.json
Original file line number Diff line number Diff line change
Expand Up @@ -56,39 +56,39 @@
"chi2": 1.4108951091766357,
"pos_state": "POS_VETO",
"arc_lengths": [
1.9705579690885775,
1.9705579690885773,
1.512324164715868,
1.2608150353204404,
1.1147020004036432,
2.925581113420889
],
"integrability": [
0.0038688098720734143,
0.0038688098720738584,
0.003868809872073914,
2.2851325695821822e-05,
0.05472333077341307,
0.006996778654865254
],
"timing": {
"walltime": {
"Total": 28.482715845108032,
"Total": 17.96898365020752,
"start": 0.0,
"replica_set": 0.42058467864990234,
"replica_fitted": 28.482331037521362,
"replica_set_to_replica_fitted": 28.06174635887146
"replica_set": 0.3639094829559326,
"replica_fitted": 17.968825101852417,
"replica_set_to_replica_fitted": 17.604915618896484
},
"cputime": {
"Total": 53.414345739000005,
"Total": 19.9091685,
"start": 0.0,
"replica_set": 5.313881866999999,
"replica_fitted": 53.413937684,
"replica_set_to_replica_fitted": 48.100055817
"replica_set": 0.6803557000000007,
"replica_fitted": 19.9090083,
"replica_set_to_replica_fitted": 19.2286526
}
},
"version": {
"tensorflow": "2.15.0, mkl=??",
"numpy": "1.24.4",
"nnpdf": "4.0.8.post113.dev0+e1b5f2cd5.dirty",
"validphys": "4.0.8.post113.dev0+e1b5f2cd5.dirty"
"nnpdf": "4.0.8.post168.dev0+5adb0639b",
"validphys": "4.0.8.post168.dev0+5adb0639b"
}
}
24 changes: 12 additions & 12 deletions extra_tests/regression_fits/flavour_29.json
Original file line number Diff line number Diff line change
Expand Up @@ -63,32 +63,32 @@
0.9907081134354656
],
"integrability": [
0.46764282882213526,
0.4676428288221359,
0.46764282882213587,
0.08090896345675005,
0.1003280878067017,
0.10032808780670167,
0.5176757127046587
],
"timing": {
"walltime": {
"Total": 9.50145959854126,
"Total": 7.240033864974976,
"start": 0.0,
"replica_set": 0.4137749671936035,
"replica_fitted": 9.501240968704224,
"replica_set_to_replica_fitted": 9.08746600151062
"replica_set": 0.37552475929260254,
"replica_fitted": 7.239804267883301,
"replica_set_to_replica_fitted": 6.864279508590698
},
"cputime": {
"Total": 17.63484543,
"Total": 7.8813866,
"start": 0.0,
"replica_set": 5.07142004,
"replica_fitted": 17.634602875,
"replica_set_to_replica_fitted": 12.563182835
"replica_set": 0.6917157000000005,
"replica_fitted": 7.8811553000000005,
"replica_set_to_replica_fitted": 7.1894396
}
},
"version": {
"tensorflow": "2.15.0, mkl=??",
"numpy": "1.24.4",
"nnpdf": "4.0.8.post113.dev0+e1b5f2cd5.dirty",
"validphys": "4.0.8.post113.dev0+e1b5f2cd5.dirty"
"nnpdf": "4.0.8.post168.dev0+5adb0639b",
"validphys": "4.0.8.post168.dev0+5adb0639b"
}
}
28 changes: 14 additions & 14 deletions extra_tests/regression_fits/no_lagrange_27.json
Original file line number Diff line number Diff line change
Expand Up @@ -58,37 +58,37 @@
"arc_lengths": [
1.3221023191915289,
1.4870345587210063,
1.1735323019190387,
1.1735323019190385,
1.0005950030908517,
1.221984399084671
],
"integrability": [
0.0009426986289349792,
0.0009426986289356454,
0.0009426986289332584,
5.00080153632565e-05,
0.47462750226259187,
0.007130859827156
0.474627502262591,
0.007130859827155556
],
"timing": {
"walltime": {
"Total": 33.483086824417114,
"Total": 21.26565957069397,
"start": 0.0,
"replica_set": 0.40949511528015137,
"replica_fitted": 33.48285508155823,
"replica_set_to_replica_fitted": 33.073359966278076
"replica_set": 0.365405797958374,
"replica_fitted": 21.265546083450317,
"replica_set_to_replica_fitted": 20.900140285491943
},
"cputime": {
"Total": 57.29150730599999,
"Total": 23.1746251,
"start": 0.0,
"replica_set": 5.309074099,
"replica_fitted": 57.291249822999994,
"replica_set_to_replica_fitted": 51.982175723999994
"replica_set": 0.6833834000000003,
"replica_fitted": 23.1745104,
"replica_set_to_replica_fitted": 22.491127
}
},
"version": {
"tensorflow": "2.15.0, mkl=??",
"numpy": "1.24.4",
"nnpdf": "4.0.8.post113.dev0+e1b5f2cd5.dirty",
"validphys": "4.0.8.post113.dev0+e1b5f2cd5.dirty"
"nnpdf": "4.0.8.post168.dev0+5adb0639b",
"validphys": "4.0.8.post168.dev0+5adb0639b"
}
}
Loading