From da1b703a8743f7948cb510357885b25ed9b8d4ff Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Mon, 4 Oct 2021 16:29:51 +0200 Subject: [PATCH 01/39] Add key_values to grid interface --- pineappl_py/src/grid.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 9839c544..6f8cf779 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -7,6 +7,7 @@ use super::subgrid::{PySubgridEnum, PySubgridParams}; use ndarray::{Array, Ix5}; +use std::collections::HashMap; use std::fs::File; use std::io::BufReader; @@ -124,6 +125,19 @@ impl PyGrid { ); } + /// Get metadata values stored in the grid. + /// + /// + /// Parameters + /// ---------- + /// key : str + /// key + /// value : str + /// value + pub fn key_values(&self) -> HashMap { + self.grid.key_values().unwrap().clone() + } + /// Set a metadata key-value pair in the grid. /// /// **Usage:** `yadism` From c323ab2e550c56b33421227d8cb8403a706ace3a Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Mon, 4 Oct 2021 17:12:17 +0200 Subject: [PATCH 02/39] Add get subgrid and syntactic sugar --- pineappl_py/pineappl/grid.py | 61 ++++++++++++++++++++++++++++++++++++ pineappl_py/src/grid.rs | 9 ++++++ 2 files changed, 70 insertions(+) diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index b9756952..2dc077ce 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -61,6 +61,49 @@ def create(cls, lumi, orders, bin_limits, subgrid_params): orders = [o.raw for o in orders] return cls(PyGrid(lumi, orders, bin_limits, subgrid_params.raw)) + def subgrid(self, order, bin_, lumi): + """ + Retrieve the subgrid at the given position. + + Convenience wrapper for :meth:`pineappl.pineappl.PyGrid.set_subgrid()`. 
+ + Parameters + ---------- + order : int + index of order + bin_ : int + index of bin + lumi : int + index of luminosity + + Returns + ------- + subgrid : Subgrid + subgrid content + """ + return self.raw.subgrid(order, bin_, lumi) + + def __getitem__(self, key): + """ + Retrieve the subgrid at the given position. + + Syntactic sugar for :meth:`subgrid` + + Parameters + ---------- + key : (int, int, int) + a 3-element integers tuple, consisting in `(order, bin, lumi)` + + Returns + ------- + subgrid : Subgrid + subgrid content + """ + if len(key) != 3: + raise ValueError("A tuple with `(order, bin, lumi)` is required as key.") + + return self.subgrid(*key) + def set_subgrid(self, order, bin_, lumi, subgrid): """ Set the subgrid at the given position. @@ -80,6 +123,24 @@ def set_subgrid(self, order, bin_, lumi, subgrid): """ self.raw.set_subgrid(order, bin_, lumi, subgrid.into()) + def __setitem__(self, key, subgrid): + """ + Set the subgrid at the given position. + + Syntactic sugar for :meth:`set_subgrid` + + Parameters + ---------- + key : (int, int, int) + a 3-element integers tuple, consisting in `(order, bin, lumi)` + subgrid : ImportOnlySubgridV1 + subgrid content + """ + if len(key) != 3: + raise ValueError("A tuple with `(order, bin, lumi)` is required as key.") + + self.set_subgrid(*key, subgrid) + def set_remapper(self, remapper): """ Set the normalizations. diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 6f8cf779..695e83df 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -152,6 +152,15 @@ impl PyGrid { self.grid.set_key_value(key, value); } + /// Retrieve a subgrid. + /// + /// **Usage:** `yadism` + pub fn subgrid(&self, order: usize, bin: usize, lumi: usize) -> PySubgridEnum { + PySubgridEnum { + subgrid_enum: self.grid.subgrid(order, bin, lumi).clone(), + } + } + /// Set a subgrid. 
/// /// **Usage:** `yadism` From 982168e8d7b6e11ed10d904b457240594d2d8d65 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 5 Oct 2021 14:25:05 +0200 Subject: [PATCH 03/39] Add separe target pids for eko convolution --- pineappl/src/grid.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 6e49e442..a8644a9e 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1141,6 +1141,7 @@ impl Grid { alphas: &[f64], (xir, xif): (f64, f64), pids: &[i32], + target_pids: &[i32], x_grid: Vec, q2_grid: Vec, operator: Array5, @@ -1178,7 +1179,6 @@ impl Grid { _ => unimplemented!(), }; - // create target luminosities let pids1 = if has_pdf1 { pids.to_vec() } else { @@ -1189,9 +1189,20 @@ impl Grid { } else { vec![initial_state_2] }; - let lumi: Vec<_> = pids1 + // create target luminosities + let tgt_pids1 = if has_pdf1 { + target_pids.to_vec() + } else { + vec![initial_state_1] + }; + let tgt_pids2 = if has_pdf2 { + target_pids.to_vec() + } else { + vec![initial_state_2] + }; + let lumi: Vec<_> = tgt_pids1 .iter() - .cartesian_product(pids2.iter()) + .cartesian_product(tgt_pids2.iter()) .map(|(a, b)| lumi_entry![*a, *b, 1.0]) .collect(); From d0c6493f17576e02209f3f58ca14a74733d5fa52 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 5 Oct 2021 14:49:45 +0200 Subject: [PATCH 04/39] Sync python interface with pids update --- pineappl_py/pineappl/grid.py | 1 + pineappl_py/src/grid.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index 2dc077ce..ff443987 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -242,6 +242,7 @@ def convolute_eko(self, operators): operators["q2_ref"], alphas_values, operators["targetpids"], + operators["inputpids"], operators["interpolation_xgrid"], q2grid, operator_grid.flatten(), diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 
695e83df..8abf69b0 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -281,6 +281,7 @@ impl PyGrid { q2: f64, alphas: Vec, pids: Vec, + target_pids: Vec, x_grid: Vec, q2_grid: Vec, operator_flattened: Vec, @@ -294,6 +295,7 @@ impl PyGrid { &alphas, (1., 1.), &pids, + &target_pids, x_grid, q2_grid, operator.into_dimensionality::().unwrap(), From 18bee8f649461458aea3652c02c8602ce165d4fe Mon Sep 17 00:00:00 2001 From: Felix Hekhorn Date: Tue, 5 Oct 2021 17:17:05 +0200 Subject: [PATCH 05/39] Allow additional metadata in FkTable generation --- pineappl/src/grid.rs | 5 +++++ pineappl_py/pineappl/grid.py | 7 ++++++- pineappl_py/src/grid.rs | 3 +++ pineappl_py/tests/test_grid.py | 2 +- 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index a8644a9e..70196604 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1145,6 +1145,7 @@ impl Grid { x_grid: Vec, q2_grid: Vec, operator: Array5, + additional_metadata: HashMap, ) -> Option { // Check operator layout let dim = operator.shape(); @@ -1227,6 +1228,10 @@ impl Grid { subgrid_params: SubgridParams::default(), more_members: self.more_members.clone(), }; + // write additional metadata + for (key, value) in additional_metadata.iter() { + result.set_key_value(key, value); + } // collect source grid informations let eko_info = self.eko_info().unwrap(); diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index ff443987..30af7dc7 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -216,7 +216,7 @@ def convolute( xfx1, xfx2, alphas, order_mask, bin_indices, lumi_mask, xi ) - def convolute_eko(self, operators): + def convolute_eko(self, operators, additional_metadata=None): """ Create an FKTable with the EKO. 
@@ -226,12 +226,16 @@ def convolute_eko(self, operators): ---------- operators : dict EKO Output + additional_metadata : dict + additional metadata Returns ------ PyFkTable : raw grid as an FKTable """ + if additional_metadata is None: + additional_metadata = {} operator_grid = np.array( [op["operators"] for op in operators["Q2grid"].values()] ) @@ -247,6 +251,7 @@ def convolute_eko(self, operators): q2grid, operator_grid.flatten(), operator_grid.shape, + additional_metadata, ) ) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 8abf69b0..28111570 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -10,6 +10,7 @@ use ndarray::{Array, Ix5}; use std::collections::HashMap; use std::fs::File; use std::io::BufReader; +use std::collections::HashMap; use pyo3::prelude::*; @@ -286,6 +287,7 @@ impl PyGrid { q2_grid: Vec, operator_flattened: Vec, operator_shape: Vec, + additional_metadata: HashMap, ) -> PyFkTable { let operator = Array::from_shape_vec(operator_shape, operator_flattened).unwrap(); let evolved_grid = self @@ -299,6 +301,7 @@ impl PyGrid { x_grid, q2_grid, operator.into_dimensionality::().unwrap(), + additional_metadata ) .unwrap(); PyFkTable { diff --git a/pineappl_py/tests/test_grid.py b/pineappl_py/tests/test_grid.py index 2c79e5ad..5248fd62 100644 --- a/pineappl_py/tests/test_grid.py +++ b/pineappl_py/tests/test_grid.py @@ -145,6 +145,6 @@ def test_convolute_eko(self): } } } - g.set_key_value("lumi_id_types", "PDG") + g.set_key_value("lumi_id_types", "pdg_mc_ids") fk = g.convolute_eko(fake_eko) assert isinstance(fk.raw, pineappl.pineappl.PyFkTable) From 32d0b5120c3f57d3ec4f8bd068bb6a4570acc41d Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 12 Oct 2021 17:44:52 +0200 Subject: [PATCH 06/39] Introduce new Eko related structs --- pineappl/src/grid.rs | 135 ++++++++++++++++++++++----------- pineappl_py/pineappl/grid.py | 3 +- pineappl_py/src/grid.rs | 64 +++++++++++----- pineappl_py/tests/test_grid.py | 10 ++- 4 
files changed, 142 insertions(+), 70 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 70196604..b12c11d7 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -208,13 +208,35 @@ impl MoreMembers { /// Information required to compute a compatible EKO. /// Members spell out specific characteristic of a suitable EKO. -pub struct EkoInfo { +pub struct GridAxes { /// is the interpolation grid in x used at the process scale pub x_grid: Vec, + /// are the parton ids used in the process + pub pids: Vec, /// is the intepolation grid in q2, spanning the q2 range covered by the process data pub muf2_grid: Vec, } +/// Extra information required about an EKO to convolute to a Grid +pub struct EkoInfo { + /// scale for the FkTable + pub muf2_0: f64, + /// alpha_s vector + pub alphas: Vec, + /// renormalization scale variations ratio + pub xir: f64, + /// factorization scale variations ratio + pub xif: f64, + /// x_grid of the final FKTable + pub target_x_grid: Vec, + /// pids of the final FKTable + pub target_pids: Vec, + /// axes shared with the process grid + pub grid_axes: GridAxes, + /// further data to add + pub additional_metadata: HashMap, +} + /// Main data structure of `PineAPPL`. This structure contains a `Subgrid` for each `LumiEntry`, /// bin, and coupling order it was created with. #[derive(Deserialize, Serialize)] @@ -1066,9 +1088,10 @@ impl Grid { /// Provide information used to compute a suitable EKO for the current grid. /// More specific, the `x_grid` and `muf2_grid` are extracted and checked. 
#[must_use] - pub fn eko_info(&self) -> Option { + pub fn axes(&self) -> Option { let mut muf2_grid = Vec::::new(); let mut x_grid = Vec::::new(); + let pids = Vec::::new(); let mut has_pdf1 = true; let mut has_pdf2 = true; @@ -1123,7 +1146,11 @@ impl Grid { } } - Some(EkoInfo { x_grid, muf2_grid }) + Some(GridAxes { + x_grid, + pids, // TODO: for the time being they are just empty, but we might use them for slicing the eko + muf2_grid, + }) } /// Applies an evolution kernel operator (EKO) to the grids to evolve them from different @@ -1135,26 +1162,15 @@ impl Grid { /// /// Panics if the parameters do not match with the given grid. #[must_use] - pub fn convolute_eko( - &self, - q2: f64, - alphas: &[f64], - (xir, xif): (f64, f64), - pids: &[i32], - target_pids: &[i32], - x_grid: Vec, - q2_grid: Vec, - operator: Array5, - additional_metadata: HashMap, - ) -> Option { + pub fn convolute_eko(&self, operator: Array5, eko_info: EkoInfo) -> Option { // Check operator layout let dim = operator.shape(); - assert_eq!(dim[0], q2_grid.len()); - assert_eq!(dim[1], pids.len()); - assert_eq!(dim[2], x_grid.len()); - assert_eq!(dim[3], pids.len()); - assert_eq!(dim[4], x_grid.len()); + assert_eq!(dim[0], eko_info.grid_axes.muf2_grid.len()); + assert_eq!(dim[1], eko_info.target_pids.len()); + assert_eq!(dim[2], eko_info.target_x_grid.len()); + assert_eq!(dim[3], eko_info.grid_axes.pids.len()); + assert_eq!(dim[4], eko_info.grid_axes.x_grid.len()); // swap axes around to optimize convolution let operator = operator.permuted_axes([3, 1, 4, 0, 2]); @@ -1181,23 +1197,23 @@ impl Grid { }; let pids1 = if has_pdf1 { - pids.to_vec() + eko_info.grid_axes.pids.to_vec() } else { vec![initial_state_1] }; let pids2 = if has_pdf2 { - pids.to_vec() + eko_info.grid_axes.pids.to_vec() } else { vec![initial_state_2] }; // create target luminosities let tgt_pids1 = if has_pdf1 { - target_pids.to_vec() + eko_info.target_pids.to_vec() } else { vec![initial_state_1] }; let tgt_pids2 = if has_pdf2 { - 
target_pids.to_vec() + eko_info.target_pids.to_vec() } else { vec![initial_state_2] }; @@ -1208,9 +1224,17 @@ impl Grid { .collect(); // create target subgrid dimensions - let tgt_q2_grid = vec![q2]; - let tgt_x1_grid = if has_pdf1 { x_grid.clone() } else { vec![1.0] }; - let tgt_x2_grid = if has_pdf2 { x_grid.clone() } else { vec![1.0] }; + let tgt_q2_grid = vec![eko_info.muf2_0]; + let tgt_x1_grid = if has_pdf1 { + eko_info.target_x_grid.clone() + } else { + vec![1.0] + }; + let tgt_x2_grid = if has_pdf2 { + eko_info.target_x_grid.clone() + } else { + vec![1.0] + }; // create target grid let mut result = Self { @@ -1229,12 +1253,12 @@ impl Grid { more_members: self.more_members.clone(), }; // write additional metadata - for (key, value) in additional_metadata.iter() { + for (key, value) in eko_info.additional_metadata.iter() { result.set_key_value(key, value); } // collect source grid informations - let eko_info = self.eko_info().unwrap(); + let grid_axes = self.axes().unwrap(); // Setup progress bar let bar = ProgressBar::new( @@ -1264,22 +1288,26 @@ impl Grid { let mut src_array = SparseArray3::::new( src_array_q2_grid.len(), - if has_pdf1 { eko_info.x_grid.len() } else { 1 }, - if has_pdf2 { eko_info.x_grid.len() } else { 1 }, + if has_pdf1 { grid_axes.x_grid.len() } else { 1 }, + if has_pdf2 { grid_axes.x_grid.len() } else { 1 }, ); // iterate over the source grid orders and add all of them together into // `src_array`, using the right powers of alphas for (order, powers) in self.orders.iter().enumerate() { - let logs = if (xir, xif) == (1.0, 1.0) { + let logs = if (eko_info.xir, eko_info.xif) == (1.0, 1.0) { if (powers.logxir > 0) || (powers.logxif > 0) { continue; } 1.0 } else { - (xir * xir).ln().powi(powers.logxir.try_into().unwrap()) - * (xif * xif).ln().powi(powers.logxif.try_into().unwrap()) + (eko_info.xir * eko_info.xir) + .ln() + .powi(powers.logxir.try_into().unwrap()) + * (eko_info.xif * eko_info.xif) + .ln() + 
.powi(powers.logxif.try_into().unwrap()) }; let src_subgrid = &self.subgrids[[order, bin, src_lumi]]; @@ -1289,13 +1317,13 @@ impl Grid { && !src_subgrid .x1_grid() .iter() - .zip(x_grid.iter()) + .zip(eko_info.grid_axes.x_grid.iter()) .all(|(a, b)| approx_eq!(f64, *a, *b, ulps = 128))) || (has_pdf2 && !src_subgrid .x2_grid() .iter() - .zip(x_grid.iter()) + .zip(eko_info.grid_axes.x_grid.iter()) .all(|(a, b)| approx_eq!(f64, *a, *b, ulps = 128))); for ((iq2, ix1, ix2), &value) in src_subgrid.iter() { @@ -1304,24 +1332,28 @@ impl Grid { .iter() .position(|&q2| q2 == scale) .unwrap(); - let als_iq2 = q2_grid + let als_iq2 = eko_info + .grid_axes + .muf2_grid .iter() - .position(|&q2| q2 == xir * xir * scale) + .position(|&q2| q2 == eko_info.xir * eko_info.xir * scale) .unwrap(); let ix1 = if invert_x && has_pdf1 { - eko_info.x_grid.len() - ix1 - 1 + eko_info.grid_axes.x_grid.len() - ix1 - 1 } else { ix1 }; let ix2 = if invert_x && has_pdf2 { - eko_info.x_grid.len() - ix2 - 1 + eko_info.grid_axes.x_grid.len() - ix2 - 1 } else { ix2 }; - src_array[[src_iq2, ix1, ix2]] += - alphas[als_iq2].powi(powers.alphas.try_into().unwrap()) * logs * value; + src_array[[src_iq2, ix1, ix2]] += eko_info.alphas[als_iq2] + .powi(powers.alphas.try_into().unwrap()) + * logs + * value; } } @@ -1336,7 +1368,14 @@ impl Grid { // Next we need to apply the tensor let eko_src_q2_indices: Vec<_> = src_array_q2_grid .iter() - .map(|&src_q2| q2_grid.iter().position(|&q2| q2 == src_q2).unwrap()) + .map(|&src_q2| { + eko_info + .grid_axes + .muf2_grid + .iter() + .position(|&q2| q2 == src_q2) + .unwrap() + }) .collect(); // Iterate target lumis for (tgt_lumi, (tgt_pid1_idx, tgt_pid2_idx)) in (0..pids1.len()) @@ -1346,7 +1385,10 @@ impl Grid { for (src_pid1, src_pid2, factor) in src_entries.entry().iter() { // find source lumi position let src_pid1_idx = if has_pdf1 { - pids.iter() + eko_info + .grid_axes + .pids + .iter() .position(|x| { // if `pid == 0` the gluon is meant if *src_pid1 == 0 { @@ 
-1360,7 +1402,10 @@ impl Grid { 0 }; let src_pid2_idx = if has_pdf2 { - pids.iter() + eko_info + .grid_axes + .pids + .iter() .position(|x| { // `pid == 0` is the gluon exception, which might be 0 or 21 if *src_pid2 == 0 { diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index 30af7dc7..44b23387 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -246,8 +246,9 @@ def convolute_eko(self, operators, additional_metadata=None): operators["q2_ref"], alphas_values, operators["targetpids"], + operators["targetgrid"], operators["inputpids"], - operators["interpolation_xgrid"], + operators["inputgrid"], q2grid, operator_grid.flatten(), operator_grid.shape, diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 28111570..2dbd2fdc 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,4 +1,4 @@ -use pineappl::grid::{EkoInfo, Grid, Ntuple, Order}; +use pineappl::grid::{EkoInfo, Grid, GridAxes, Ntuple, Order}; use super::bin::PyBinRemapper; use super::fk_table::PyFkTable; @@ -10,7 +10,6 @@ use ndarray::{Array, Ix5}; use std::collections::HashMap; use std::fs::File; use std::io::BufReader; -use std::collections::HashMap; use pyo3::prelude::*; @@ -190,11 +189,17 @@ impl PyGrid { /// ------- /// x_grid: list(float) /// interpolation grid + /// pids: list(int) + /// particle ids /// muf2_grid : list(float) /// factorization scale list - pub fn eko_info(&self) -> (Vec, Vec) { - let EkoInfo { x_grid, muf2_grid } = self.grid.eko_info().unwrap(); - (x_grid, muf2_grid) + pub fn axes(&self) -> (Vec, Vec, Vec) { + let GridAxes { + x_grid, + pids, + muf2_grid, + } = self.grid.axes().unwrap(); + (x_grid, pids, muf2_grid) } /// Convolute grid with pdf. 
@@ -258,7 +263,7 @@ impl PyGrid { /// /// Parameters /// ---------- - /// q2 : float + /// muf2_0 : float /// reference scale /// alphas : list(float) /// list with :math:`\alpha_s(Q2)` for the process scales @@ -266,12 +271,18 @@ impl PyGrid { /// sorting of the particles in the tensor /// x_grid : list(float) /// interpolation grid - /// q2_grid : list(float) + /// target_pids : list(int) + /// sorting of the particles in the tensor for final FkTable + /// target_x_grid : list(float) + /// final FKTable interpolation grid + /// muf2_grid : list(float) /// list of process scales /// operator_flattened : list(float) /// evolution tensor as a flat list /// operator_shape : list(int) /// shape of the evolution tensor + /// additional_metadata : dict(str: str) + /// further metadata /// /// Returns /// ------- @@ -279,30 +290,36 @@ impl PyGrid { /// produced FK table pub fn convolute_eko( &self, - q2: f64, + muf2_0: f64, alphas: Vec, pids: Vec, - target_pids: Vec, x_grid: Vec, - q2_grid: Vec, + target_pids: Vec, + target_x_grid: Vec, + muf2_grid: Vec, operator_flattened: Vec, operator_shape: Vec, additional_metadata: HashMap, ) -> PyFkTable { let operator = Array::from_shape_vec(operator_shape, operator_flattened).unwrap(); + let eko_info = EkoInfo { + muf2_0, + alphas, + xir: 0., + xif: 0., + target_pids, + target_x_grid, + grid_axes: GridAxes { + x_grid, + pids, + muf2_grid, + }, + additional_metadata, + }; + let evolved_grid = self .grid - .convolute_eko( - q2, - &alphas, - (1., 1.), - &pids, - &target_pids, - x_grid, - q2_grid, - operator.into_dimensionality::().unwrap(), - additional_metadata - ) + .convolute_eko(operator.into_dimensionality::().unwrap(), eko_info) .unwrap(); PyFkTable { fk_table: evolved_grid, @@ -346,6 +363,11 @@ impl PyGrid { self.grid.optimize(); } + /// Optimize grid content. + // pub fn merge(&mut self, mut other: Self) { + // self.grid.merge(other.grid); + // } + /// Extract the number of dimensions for bins. 
/// /// **Usage:** `pineko` diff --git a/pineappl_py/tests/test_grid.py b/pineappl_py/tests/test_grid.py index 5248fd62..4c7cccb5 100644 --- a/pineappl_py/tests/test_grid.py +++ b/pineappl_py/tests/test_grid.py @@ -97,7 +97,7 @@ def test_convolute(self): np.testing.assert_allclose(g.convolute(lambda pid,x,q2: 1, lambda pid,x,q2: 1., lambda q2: 1.), [5e6/9999,0.]) np.testing.assert_allclose(g.convolute(lambda pid,x,q2: 1, lambda pid,x,q2: 1., lambda q2: 2.), [2**3 * 5e6/9999,0.]) - def test_eko_info(self): + def test_axes(self): g = self.fake_grid() # add 2 DIS grids @@ -119,10 +119,11 @@ def test_eko_info(self): ) g.set_subgrid(0, 1, 0, subgrid) # now get the thing - ei = g.eko_info() + ei = g.axes() np.testing.assert_allclose(ei[0], xs) - np.testing.assert_allclose(ei[1], [90., 100.]) + np.testing.assert_allclose(ei[1], []) + np.testing.assert_allclose(ei[2], [90., 100.]) def test_io(self, tmp_path): g = self.fake_grid() @@ -137,6 +138,9 @@ def test_convolute_eko(self): fake_eko = { "q2_ref": 1., "targetpids": [1], + "targetgrid": [.1,1.], + "inputpids": [1], + "inputgrid": [.1,1.], "interpolation_xgrid": [.1,1.], "Q2grid": { 90: { From 7b786703eced9885daa082df738fd4570a681687 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 12 Oct 2021 18:21:18 +0200 Subject: [PATCH 07/39] Improve documentation strings --- pineappl/src/grid.rs | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index b12c11d7..8768f8e0 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -206,34 +206,36 @@ impl MoreMembers { } } -/// Information required to compute a compatible EKO. -/// Members spell out specific characteristic of a suitable EKO. +/// Information required to calculate the evolution kernel operators (EKO) to perform a conversion +/// of a [`Grid`] using [`Grid::convolute_eko`] to an [`FkTable`]. 
pub struct GridAxes { - /// is the interpolation grid in x used at the process scale + /// Interpolation grid in x of the `Grid`. pub x_grid: Vec, - /// are the parton ids used in the process + /// Parton IDs used in the grid. pub pids: Vec, - /// is the intepolation grid in q2, spanning the q2 range covered by the process data + /// Interpolation grid for the factorization scale of the `Grid`. pub muf2_grid: Vec, } -/// Extra information required about an EKO to convolute to a Grid +/// Extra information required to perform the conversion of a [`Grid`] to an [`FkTable`] using +/// [`Grid::convolute_eko`]. pub struct EkoInfo { - /// scale for the FkTable + /// Scale of the FkTable. pub muf2_0: f64, - /// alpha_s vector + /// Strong coupling constants for the factorization scales in the same ordering as given in + /// [`GridAxes`]. pub alphas: Vec, - /// renormalization scale variations ratio + /// Renormalization scale variation. pub xir: f64, - /// factorization scale variations ratio + /// Factorization scale variation. pub xif: f64, - /// x_grid of the final FKTable + /// Interpolation grid in x of the `FkTable`. pub target_x_grid: Vec, - /// pids of the final FKTable + /// Parton IDs for the `FkTable`. 
pub target_pids: Vec, /// axes shared with the process grid pub grid_axes: GridAxes, - /// further data to add + // TODO: replace this member with the actual data pub additional_metadata: HashMap, } From 558db3edd951d8bdf20c8d81ed90739093799324 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Wed, 13 Oct 2021 17:01:28 +0200 Subject: [PATCH 08/39] Add temporary axes test --- pineappl/src/grid.rs | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 8768f8e0..ea3ab703 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1778,4 +1778,38 @@ mod tests { "-2212" ); } + + // TODO: properly test axes returned + + #[test] + fn grid_axes() { + let mut grid = Grid::new( + vec![lumi_entry![21, 21, 1.0], lumi_entry![1, 2, 1.0]], + vec![Order { + alphas: 0, + alpha: 0, + logxir: 0, + logxif: 0, + }], + vec![0.0, 1.0], + SubgridParams::default(), + ); + + grid.fill( + 0, + 0.5, + 0, + &Ntuple { + x1: 0.1, + x2: 0.1, + q2: 20., + weight: 1., + }, + ); + + let axes = grid.axes().unwrap(); + assert_eq!(axes.x_grid, vec![]); + assert_eq!(axes.muf2_grid, vec![]); + assert_eq!(axes.pids, vec![]); + } } From b527d9dfde4546592b9492fe14c60442fbd615c3 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Thu, 14 Oct 2021 02:23:51 +0200 Subject: [PATCH 09/39] Add broken test for convolute_eko --- pineappl/src/grid.rs | 66 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 59 insertions(+), 7 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index ea3ab703..19c8ff38 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -15,7 +15,7 @@ use git_version::git_version; use indicatif::{ProgressBar, ProgressStyle}; use itertools::Itertools; use lz_fear::{framed::DecompressionError::WrongMagic, LZ4FrameReader}; -use ndarray::{s, Array3, Array5, Dimension}; +use ndarray::{s, Array, Array2, Array3, Array5, Dimension}; use rustc_hash::FxHashMap; use 
serde::{Deserialize, Serialize}; use std::borrow::Cow; @@ -235,7 +235,7 @@ pub struct EkoInfo { pub target_pids: Vec, /// axes shared with the process grid pub grid_axes: GridAxes, - // TODO: replace this member with the actual data + /// TODO: replace this member with the actual data pub additional_metadata: HashMap, } @@ -1781,9 +1781,18 @@ mod tests { // TODO: properly test axes returned - #[test] - fn grid_axes() { - let mut grid = Grid::new( + fn simple_grid() -> Grid { + let mut subgrid_params = SubgridParams::default(); + subgrid_params.set_x_order(1); + subgrid_params.set_x_bins(1); + subgrid_params.set_q2_order(1); + subgrid_params.set_q2_bins(1); + + let mut extra = ExtraSubgridParams::default(); + extra.set_x2_order(1); + extra.set_x2_bins(1); + + let mut grid = Grid::with_subgrid_type( vec![lumi_entry![21, 21, 1.0], lumi_entry![1, 2, 1.0]], vec![Order { alphas: 0, @@ -1792,8 +1801,11 @@ mod tests { logxif: 0, }], vec![0.0, 1.0], - SubgridParams::default(), - ); + subgrid_params, + extra, + "LagrangeSubgrid", + ) + .unwrap(); grid.fill( 0, @@ -1807,9 +1819,49 @@ mod tests { }, ); + grid + } + + #[test] + fn grid_axes() { + let grid = simple_grid(); + let axes = grid.axes().unwrap(); assert_eq!(axes.x_grid, vec![]); assert_eq!(axes.muf2_grid, vec![]); assert_eq!(axes.pids, vec![]); } + + #[test] + fn grid_convolute_eko() { + let grid = simple_grid(); + + let eko_info = EkoInfo { + muf2_0: 1., + alphas: vec![1.], + xir: 1., + xif: 1., + target_x_grid: vec![1.], + target_pids: vec![21, 1, 2], + grid_axes: GridAxes { + x_grid: vec![1.], + pids: vec![21, 1, 2], + muf2_grid: vec![1.], + }, + additional_metadata: HashMap::new(), + }; + let id = Array2::::eye(3); + let operator = Array::from_shape_vec( + (1, 3, 1, 3, 1), + id.clone() + .into_iter() + // .cartesian_product(id.iter()) + // .map(|(i, j)| i * j) + .collect(), + ) + .unwrap(); + let fk = grid.convolute_eko(operator, eko_info).unwrap(); + + assert_eq!(fk.bins(), 1); + } } From 
36bf0c6bb7595358ef2fd29a9920d99b357bebc9 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Fri, 15 Oct 2021 09:24:20 +0200 Subject: [PATCH 10/39] Fix convolute_eko test --- pineappl/src/grid.rs | 87 ++++++++++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 19c8ff38..578212d2 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1781,18 +1781,29 @@ mod tests { // TODO: properly test axes returned - fn simple_grid() -> Grid { + fn simple_grid() -> (Grid, GridAxes) { + let muf2_grid = vec![20.]; + let x_grid = vec![0.1, 0.5, 1.]; + let mut subgrid_params = SubgridParams::default(); subgrid_params.set_x_order(1); subgrid_params.set_x_bins(1); subgrid_params.set_q2_order(1); subgrid_params.set_q2_bins(1); - let mut extra = ExtraSubgridParams::default(); - extra.set_x2_order(1); - extra.set_x2_bins(1); + let mut array = Array3::zeros((1, 3, 3)); + array[[0, 0, 0]] = 1.; + let sparse_array = SparseArray3::from_ndarray(&array, 0, 3); + let subgrid = ImportOnlySubgridV1::new( + sparse_array, + muf2_grid.clone(), + x_grid.clone(), + x_grid.clone(), + ) + .into(); - let mut grid = Grid::with_subgrid_type( + let pids = vec![21, 1, 2]; + let mut grid = Grid::new( vec![lumi_entry![21, 21, 1.0], lumi_entry![1, 2, 1.0]], vec![Order { alphas: 0, @@ -1802,61 +1813,59 @@ mod tests { }], vec![0.0, 1.0], subgrid_params, - extra, - "LagrangeSubgrid", - ) - .unwrap(); - - grid.fill( - 0, - 0.5, - 0, - &Ntuple { - x1: 0.1, - x2: 0.1, - q2: 20., - weight: 1., - }, ); - grid + grid.set_subgrid(0, 0, 0, subgrid); + + ( + grid, + GridAxes { + x_grid, + pids, + muf2_grid, + }, + ) } #[test] fn grid_axes() { - let grid = simple_grid(); + let (grid, axes) = simple_grid(); - let axes = grid.axes().unwrap(); - assert_eq!(axes.x_grid, vec![]); - assert_eq!(axes.muf2_grid, vec![]); - assert_eq!(axes.pids, vec![]); + let ret_axes = grid.axes().unwrap(); + assert_eq!(ret_axes.x_grid, 
axes.x_grid); + assert_eq!(ret_axes.muf2_grid, axes.muf2_grid); + assert_eq!(ret_axes.pids, vec![]); } #[test] fn grid_convolute_eko() { - let grid = simple_grid(); + let (grid, axes) = simple_grid(); + let target_x_grid = vec![1e-7, 1e-2, 1.]; + let target_pids = vec![21, 1, 2]; + + let mut additional_metadata = HashMap::new(); + additional_metadata.insert("lumi_id_types".to_owned(), "pdg_mc_ids".to_owned()); let eko_info = EkoInfo { muf2_0: 1., alphas: vec![1.], xir: 1., xif: 1., - target_x_grid: vec![1.], - target_pids: vec![21, 1, 2], + target_x_grid: target_x_grid.clone(), + target_pids: target_pids.clone(), grid_axes: GridAxes { - x_grid: vec![1.], - pids: vec![21, 1, 2], - muf2_grid: vec![1.], + x_grid: axes.x_grid.clone(), + pids: axes.pids.clone(), + muf2_grid: axes.muf2_grid.clone(), }, - additional_metadata: HashMap::new(), + additional_metadata, }; - let id = Array2::::eye(3); let operator = Array::from_shape_vec( - (1, 3, 1, 3, 1), - id.clone() - .into_iter() - // .cartesian_product(id.iter()) - // .map(|(i, j)| i * j) + (1, 3, 3, 3, 3), + (0..4) + .map(|_| (0..3)) + .multi_cartesian_product() + .map(|v| if v[0] == v[2] && v[1] == v[3] { 1. } else { 0. 
}) .collect(), ) .unwrap(); From 05336666cd0dffa69b5ef9a56255ca98df2e2cbc Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Fri, 15 Oct 2021 10:26:30 +0200 Subject: [PATCH 11/39] Remove the too flexible additional metadata from EkoInfo --- pineappl/src/grid.rs | 15 ++++++--------- pineappl_py/pineappl/grid.py | 6 ++---- pineappl_py/src/grid.rs | 5 ++--- 3 files changed, 10 insertions(+), 16 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 578212d2..140c9855 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -15,7 +15,7 @@ use git_version::git_version; use indicatif::{ProgressBar, ProgressStyle}; use itertools::Itertools; use lz_fear::{framed::DecompressionError::WrongMagic, LZ4FrameReader}; -use ndarray::{s, Array, Array2, Array3, Array5, Dimension}; +use ndarray::{s, Array3, Array5, Dimension}; use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; use std::borrow::Cow; @@ -236,7 +236,7 @@ pub struct EkoInfo { /// axes shared with the process grid pub grid_axes: GridAxes, /// TODO: replace this member with the actual data - pub additional_metadata: HashMap, + pub lumi_id_types: String, } /// Main data structure of `PineAPPL`. 
This structure contains a `Subgrid` for each `LumiEntry`, @@ -1255,9 +1255,7 @@ impl Grid { more_members: self.more_members.clone(), }; // write additional metadata - for (key, value) in eko_info.additional_metadata.iter() { - result.set_key_value(key, value); - } + result.set_key_value("lumi_id_types", &eko_info.lumi_id_types); // collect source grid informations let grid_axes = self.axes().unwrap(); @@ -1843,8 +1841,7 @@ mod tests { let target_x_grid = vec![1e-7, 1e-2, 1.]; let target_pids = vec![21, 1, 2]; - let mut additional_metadata = HashMap::new(); - additional_metadata.insert("lumi_id_types".to_owned(), "pdg_mc_ids".to_owned()); + let lumi_id_types = "pdg_mc_ids".to_owned(); let eko_info = EkoInfo { muf2_0: 1., @@ -1858,9 +1855,9 @@ mod tests { pids: axes.pids.clone(), muf2_grid: axes.muf2_grid.clone(), }, - additional_metadata, + lumi_id_types, }; - let operator = Array::from_shape_vec( + let operator = ndarray::Array::from_shape_vec( (1, 3, 3, 3, 3), (0..4) .map(|_| (0..3)) diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index 44b23387..ed7799cf 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -216,7 +216,7 @@ def convolute( xfx1, xfx2, alphas, order_mask, bin_indices, lumi_mask, xi ) - def convolute_eko(self, operators, additional_metadata=None): + def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids"): """ Create an FKTable with the EKO. 
@@ -234,8 +234,6 @@ def convolute_eko(self, operators, additional_metadata=None): PyFkTable : raw grid as an FKTable """ - if additional_metadata is None: - additional_metadata = {} operator_grid = np.array( [op["operators"] for op in operators["Q2grid"].values()] ) @@ -252,7 +250,7 @@ def convolute_eko(self, operators, additional_metadata=None): q2grid, operator_grid.flatten(), operator_grid.shape, - additional_metadata, + lumi_id_types, ) ) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 2dbd2fdc..c5561aba 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -7,7 +7,6 @@ use super::subgrid::{PySubgridEnum, PySubgridParams}; use ndarray::{Array, Ix5}; -use std::collections::HashMap; use std::fs::File; use std::io::BufReader; @@ -299,7 +298,7 @@ impl PyGrid { muf2_grid: Vec, operator_flattened: Vec, operator_shape: Vec, - additional_metadata: HashMap, + lumi_id_types: String, ) -> PyFkTable { let operator = Array::from_shape_vec(operator_shape, operator_flattened).unwrap(); let eko_info = EkoInfo { @@ -314,7 +313,7 @@ impl PyGrid { pids, muf2_grid, }, - additional_metadata, + lumi_id_types, }; let evolved_grid = self From 4c5f2e5f88de0d1a2be5a4cab0a4cfcdd33fae7e Mon Sep 17 00:00:00 2001 From: Felix Hekhorn Date: Mon, 18 Oct 2021 12:43:03 +0200 Subject: [PATCH 12/39] Fix bug in conv_eko, expose optimize --- pineappl/src/fk_table.rs | 9 +++++++++ pineappl/src/grid.rs | 5 ++++- pineappl_py/src/fk_table.rs | 8 ++++++++ pineappl_py/src/grid.rs | 4 ++-- 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 3783387e..42305e02 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -136,6 +136,15 @@ impl FkTable { self.grid.write(writer) } + /// Optimize grid + /// + /// # Errors + /// + /// TODO + pub fn optimize(&mut self) { + self.grid.optimize() + } + /// Propagate convolute to grid pub fn convolute( &self, diff --git a/pineappl/src/grid.rs 
b/pineappl/src/grid.rs index 140c9855..961c53f8 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1337,7 +1337,10 @@ impl Grid { .muf2_grid .iter() .position(|&q2| q2 == eko_info.xir * eko_info.xir * scale) - .unwrap(); + .expect(&format!( + "Couldn't find q2: {:?} with xir: {:?} and muf2_grid: {:?}", + scale, eko_info.xir, eko_info.grid_axes.muf2_grid + )); let ix1 = if invert_x && has_pdf1 { eko_info.grid_axes.x_grid.len() - ix1 - 1 diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 1577bdf6..82e21f41 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -89,6 +89,14 @@ impl PyFkTable { self.fk_table.write(File::create(path).unwrap()).unwrap(); } + /// Optimize grid. + /// + /// **Usage:** `pineko` + /// + pub fn optimize(&mut self) { + self.fk_table.optimize(); + } + /// Convolute grid with pdf. /// /// **Usage:** `pineko` diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index c5561aba..d1d9bfeb 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -304,8 +304,8 @@ impl PyGrid { let eko_info = EkoInfo { muf2_0, alphas, - xir: 0., - xif: 0., + xir: 1., + xif: 1., target_pids, target_x_grid, grid_axes: GridAxes { From 2cef20e395622de5ecb78ec5b6844add3be33fa9 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Mon, 18 Oct 2021 14:31:54 +0200 Subject: [PATCH 13/39] Update lumi_id_types docstring --- pineappl_py/pineappl/grid.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index ed7799cf..2247f82a 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -226,8 +226,9 @@ def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids"): ---------- operators : dict EKO Output - additional_metadata : dict - additional metadata + lumi_id_types : str + kind of lumi types (e.g. 
"pdg_mc_ids" for flavor basis, "evol" + for evolution basis) Returns ------ From 3030b7e95fa80f294cfae8632d49d0138b809824 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Mon, 18 Oct 2021 14:55:01 +0200 Subject: [PATCH 14/39] Always optimize FkTables --- pineappl/src/fk_table.rs | 9 --------- pineappl/src/grid.rs | 1 + 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 42305e02..3783387e 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -136,15 +136,6 @@ impl FkTable { self.grid.write(writer) } - /// Optimize grid - /// - /// # Errors - /// - /// TODO - pub fn optimize(&mut self) { - self.grid.optimize() - } - /// Propagate convolute to grid pub fn convolute( &self, diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 961c53f8..121b6f77 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1507,6 +1507,7 @@ impl Grid { bar.finish(); + result.optimize(); Some(FkTable::try_from(result).unwrap()) } } From d12b42c1b4ccb41a0cbdf79658a7dcfc0174c475 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Mon, 18 Oct 2021 15:08:11 +0200 Subject: [PATCH 15/39] Remove FkTable.optimize even from python interface --- pineappl_py/src/fk_table.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 82e21f41..1577bdf6 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -89,14 +89,6 @@ impl PyFkTable { self.fk_table.write(File::create(path).unwrap()).unwrap(); } - /// Optimize grid. - /// - /// **Usage:** `pineko` - /// - pub fn optimize(&mut self) { - self.fk_table.optimize(); - } - /// Convolute grid with pdf. 
/// /// **Usage:** `pineko` From 69b2a15e7414bb8ed2142750b77f05a2e98d467a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 7 Oct 2021 15:57:01 +0200 Subject: [PATCH 16/39] Remove use of `RefCell` This change asks the closure in the argument of `Subgrid::convolute` to be `&mut dyn FnMut` instead of `&dyn Fn`. The latter does not allow modifications to its captured environment, which is why a `RefCell` was needed to work around this limitation. Using an `FnMut` makes this much simpler as it doesn't have this limitation. --- pineappl/src/empty_subgrid.rs | 4 ++-- pineappl/src/grid.rs | 31 +++++++++++------------------ pineappl/src/import_only_subgrid.rs | 10 ++++++---- pineappl/src/lagrange_subgrid.rs | 24 ++++++++++++---------- pineappl/src/ntuple_subgrid.rs | 4 ++-- pineappl/src/subgrid.rs | 2 +- 6 files changed, 37 insertions(+), 38 deletions(-) diff --git a/pineappl/src/empty_subgrid.rs b/pineappl/src/empty_subgrid.rs index dfb70de0..29c0317e 100644 --- a/pineappl/src/empty_subgrid.rs +++ b/pineappl/src/empty_subgrid.rs @@ -16,7 +16,7 @@ impl Subgrid for EmptySubgridV1 { _: &[f64], _: &[f64], _: &[Mu2], - _: &dyn Fn(usize, usize, usize) -> f64, + _: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { 0.0 } @@ -65,7 +65,7 @@ mod tests { #[test] fn create_empty() { let mut subgrid = EmptySubgridV1::default(); - assert_eq!(subgrid.convolute(&[], &[], &[], &|_, _, _| 0.0), 0.0,); + assert_eq!(subgrid.convolute(&[], &[], &[], &mut |_, _, _| 0.0), 0.0,); assert!(subgrid.is_empty()); subgrid.merge(&mut EmptySubgridV1::default().into(), false); subgrid.scale(2.0); diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 121b6f77..60556fbe 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -19,7 +19,6 @@ use ndarray::{s, Array3, Array5, Dimension}; use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; use std::borrow::Cow; -use std::cell::RefCell; use std::cmp::Ordering; use std::collections::HashMap; use std::convert::{TryFrom, 
TryInto}; @@ -347,9 +346,9 @@ impl Grid { let bin_sizes = self.bin_info().normalizations(); // prepare pdf and alpha caches - let pdf_cache1 = RefCell::new(FxHashMap::default()); - let pdf_cache2 = RefCell::new(FxHashMap::default()); - let alphas_cache = RefCell::new(FxHashMap::default()); + let mut pdf_cache1 = FxHashMap::default(); + let mut pdf_cache2 = FxHashMap::default(); + let mut alphas_cache = FxHashMap::default(); let mut last_xif = 0.0; let (mut mu2_grid, mut x1_grid, mut x2_grid) = self @@ -381,8 +380,8 @@ impl Grid { { // whenever the value `xif` changes we can clear the PDF cache if xif != last_xif { - pdf_cache1.borrow_mut().clear(); - pdf_cache2.borrow_mut().clear(); + pdf_cache1.clear(); + pdf_cache2.clear(); last_xif = xif; } // iterate subgrids @@ -417,20 +416,18 @@ impl Grid { if mu2_grid_changed { mu2_grid = new_mu2_grid; - alphas_cache.borrow_mut().clear(); + alphas_cache.clear(); } if mu2_grid_changed || (new_x1_grid != x1_grid) || (new_x2_grid != x2_grid) { x1_grid = new_x1_grid; x2_grid = new_x2_grid; - pdf_cache1.borrow_mut().clear(); - pdf_cache2.borrow_mut().clear(); + pdf_cache1.clear(); + pdf_cache2.clear(); } // pass convolute down, using caching - subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &|ix1, ix2, imu2| { - let mut pdf_cache1 = pdf_cache1.borrow_mut(); - let mut pdf_cache2 = pdf_cache2.borrow_mut(); + subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { let x1 = x1_grid[ix1]; let x2 = x2_grid[ix2]; let mu2 = &mu2_grid[imu2]; @@ -454,7 +451,6 @@ impl Grid { lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); } - let mut alphas_cache = alphas_cache.borrow_mut(); let alphas = alphas_cache .entry(xir_values.len() * imu2 + xir_index) .or_insert_with(|| alphas(xir * xir * mu2.ren)); @@ -504,9 +500,9 @@ impl Grid { ) -> Array3 { let normalization = self.bin_info().normalizations()[bin]; - let pdf_cache1 = RefCell::new(FxHashMap::default()); - let pdf_cache2 = RefCell::new(FxHashMap::default()); - let alphas_cache = 
RefCell::new(FxHashMap::default()); + let mut pdf_cache1 = FxHashMap::default(); + let mut pdf_cache2 = FxHashMap::default(); + let mut alphas_cache = FxHashMap::default(); let subgrid = &self.subgrids[[order, bin, lumi]]; let order = &self.orders[order]; @@ -527,8 +523,6 @@ impl Grid { let mut array = Array3::zeros((mu2_grid.len(), x1_grid.len(), x2_grid.len())); for ((imu2, ix1, ix2), value) in subgrid.iter() { - let mut pdf_cache1 = pdf_cache1.borrow_mut(); - let mut pdf_cache2 = pdf_cache2.borrow_mut(); let x1 = x1_grid[ix1]; let x2 = x2_grid[ix2]; let mu2 = &mu2_grid[imu2]; @@ -552,7 +546,6 @@ impl Grid { lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); } - let mut alphas_cache = alphas_cache.borrow_mut(); let alphas = alphas_cache .entry(imu2) .or_insert_with(|| alphas(xir * xir * mu2.ren)); diff --git a/pineappl/src/import_only_subgrid.rs b/pineappl/src/import_only_subgrid.rs index 2cd5da17..171bc0ba 100644 --- a/pineappl/src/import_only_subgrid.rs +++ b/pineappl/src/import_only_subgrid.rs @@ -47,7 +47,7 @@ impl Subgrid for ImportOnlySubgridV1 { _: &[f64], _: &[f64], _: &[Mu2], - lumi: &dyn Fn(usize, usize, usize) -> f64, + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { self.array .indexed_iter() @@ -251,7 +251,7 @@ impl Subgrid for ImportOnlySubgridV2 { _: &[f64], _: &[f64], _: &[Mu2], - lumi: &dyn Fn(usize, usize, usize) -> f64, + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { self.array .indexed_iter() @@ -397,7 +397,8 @@ mod tests { assert_eq!(grid1.iter().nth(3), Some(((0, 7, 1), &8.0))); // symmetric luminosity function - let lumi = &(|ix1, ix2, _| x[ix1] * x[ix2]) as &dyn Fn(usize, usize, usize) -> f64; + let lumi = + &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 0.228515625); @@ -482,7 +483,8 @@ mod tests { assert_eq!(grid1.iter().nth(3), Some(((0, 7, 1), &8.0))); // symmetric luminosity function - let lumi = &(|ix1, ix2, _| x[ix1] * x[ix2]) as 
&dyn Fn(usize, usize, usize) -> f64; + let lumi = + &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; assert_eq!(grid1.convolute(&x, &x, &mu2, lumi), 0.228515625); diff --git a/pineappl/src/lagrange_subgrid.rs b/pineappl/src/lagrange_subgrid.rs index b8ec64e3..602ba189 100644 --- a/pineappl/src/lagrange_subgrid.rs +++ b/pineappl/src/lagrange_subgrid.rs @@ -134,7 +134,7 @@ impl Subgrid for LagrangeSubgridV1 { x1: &[f64], x2: &[f64], _: &[Mu2], - lumi: &dyn Fn(usize, usize, usize) -> f64, + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { self.grid.as_ref().map_or(0.0, |grid| { grid.indexed_iter() @@ -450,7 +450,7 @@ impl Subgrid for LagrangeSubgridV2 { x1: &[f64], x2: &[f64], _: &[Mu2], - lumi: &dyn Fn(usize, usize, usize) -> f64, + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { self.grid.as_ref().map_or(0.0, |grid| { grid.indexed_iter() @@ -745,7 +745,7 @@ impl Subgrid for LagrangeSparseSubgridV1 { x1: &[f64], x2: &[f64], _: &[Mu2], - lumi: &dyn Fn(usize, usize, usize) -> f64, + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { self.array .indexed_iter() @@ -958,7 +958,8 @@ mod tests { let x2 = grid.x2_grid(); let mu2 = grid.mu2_grid(); - let reference = grid.convolute(&x1, &x2, &mu2, &|ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let reference = + grid.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); let mut test = 0.0; @@ -1008,13 +1009,14 @@ mod tests { let x2 = grid1.x2_grid().into_owned(); let mu2 = grid1.mu2_grid().into_owned(); - let reference = grid1.convolute(&x1, &x2, &mu2, &|ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let reference = + grid1.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); // merge filled grid into empty one grid2.merge(&mut grid1.into(), false); assert!(!grid2.is_empty()); - let merged = grid2.convolute(&x1, &x2, &mu2, &|ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let merged = grid2.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, 
_| 1.0 / (x1[ix1] * x2[ix2])); assert!(approx_eq!(f64, reference, merged, ulps = 8)); @@ -1045,7 +1047,7 @@ mod tests { grid2.merge(&mut grid3.into(), false); - let merged = grid2.convolute(&x1, &x2, &mu2, &|ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let merged = grid2.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); assert!(approx_eq!(f64, 2.0 * reference, merged, ulps = 8)); } @@ -1100,7 +1102,7 @@ mod tests { let x2 = grid.x2_grid(); let mu2 = grid.mu2_grid(); - let result = grid.convolute(&x1, &x2, &mu2, &|_, _, _| 1.0); + let result = grid.convolute(&x1, &x2, &mu2, &mut |_, _, _| 1.0); assert_eq!(result, 0.0); } @@ -1158,8 +1160,10 @@ mod tests { let sparse = LagrangeSparseSubgridV1::from(&dense); assert!(!sparse.is_empty()); - let reference = dense.convolute(&x1, &x2, &mu2, &|ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - let converted = sparse.convolute(&x1, &x2, &mu2, &|ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let reference = + dense.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let converted = + sparse.convolute(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); assert!(approx_eq!(f64, reference, converted, ulps = 8)); } diff --git a/pineappl/src/ntuple_subgrid.rs b/pineappl/src/ntuple_subgrid.rs index f8870ce0..4743bb71 100644 --- a/pineappl/src/ntuple_subgrid.rs +++ b/pineappl/src/ntuple_subgrid.rs @@ -25,7 +25,7 @@ impl Subgrid for NtupleSubgridV1 { _: &[f64], _: &[f64], _: &[Mu2], - _: &dyn Fn(usize, usize, usize) -> f64, + _: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64 { todo!(); } @@ -82,7 +82,7 @@ mod tests { #[test] #[should_panic] fn convolute() { - let _ = NtupleSubgridV1::new().convolute(&[], &[], &[], &|_, _, _| 0.0); + let _ = NtupleSubgridV1::new().convolute(&[], &[], &[], &mut |_, _, _| 0.0); } #[test] diff --git a/pineappl/src/subgrid.rs b/pineappl/src/subgrid.rs index 9ac3aa10..c2109974 100644 --- a/pineappl/src/subgrid.rs +++ b/pineappl/src/subgrid.rs @@ -63,7 +63,7 
@@ pub trait Subgrid { x1: &[f64], x2: &[f64], mu2: &[Mu2], - lumi: &dyn Fn(usize, usize, usize) -> f64, + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, ) -> f64; /// Fills the subgrid with `weight` for the parton momentum fractions `x1` and `x2`, and the From 18ee483775516157d8a9787cfcc50740575a1362 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 13 Oct 2021 14:09:45 +0200 Subject: [PATCH 17/39] Add new struct `LumiCache` --- pineappl/src/grid.rs | 86 ++++++++++++++++++++++++++- pineappl/src/lumi.rs | 138 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 223 insertions(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 60556fbe..4fb203ee 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -5,7 +5,7 @@ use super::empty_subgrid::EmptySubgridV1; use super::fk_table::FkTable; use super::import_only_subgrid::ImportOnlySubgridV1; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; -use super::lumi::LumiEntry; +use super::lumi::{LumiCache, LumiEntry}; use super::lumi_entry; use super::ntuple_subgrid::NtupleSubgridV1; use super::sparse_array3::SparseArray3; @@ -479,6 +479,90 @@ impl Grid { bins } + /// TODO + /// + /// Panics + /// + /// TODO + pub fn convolute2( + &self, + lumi_cache: &mut LumiCache, + order_mask: &[bool], + bin_indices: &[usize], + lumi_mask: &[bool], + xi: &[(f64, f64)], + ) -> Vec { + let bin_indices = if bin_indices.is_empty() { + (0..self.bin_limits.bins()).collect() + } else { + bin_indices.to_vec() + }; + let mut bins: Vec = vec![0.0; bin_indices.len() * xi.len()]; + let normalizations = self.bin_info().normalizations(); + + for (xi_index, &(xir, xif)) in xi.iter().enumerate() { + for ((ord, bin, lumi), subgrid) in self.subgrids.indexed_iter() { + let order = &self.orders[ord]; + + if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) { + continue; + } + + if (!order_mask.is_empty() && !order_mask[ord]) + || 
(!lumi_mask.is_empty() && !lumi_mask[lumi]) + { + continue; + } + + let bin_index = match bin_indices.iter().position(|&index| index == bin) { + Some(i) => i, + None => continue, + }; + + if subgrid.is_empty() { + continue; + } + + let lumi_entry = &self.lumi[lumi]; + let mu2_grid = subgrid.mu2_grid(); + let x1_grid = subgrid.x1_grid(); + let x2_grid = subgrid.x2_grid(); + + let mut value = + subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { + let x1 = x1_grid[ix1]; + let x2 = x2_grid[ix2]; + let mur2 = xir * xir * mu2_grid[imu2].ren; + let muf2 = xif * xif * mu2_grid[imu2].fac; + let mut lumi = 0.0; + + for entry in lumi_entry.entry() { + let xfx1 = lumi_cache.xfx1(entry.0, x1, muf2); + let xfx2 = lumi_cache.xfx2(entry.1, x2, muf2); + lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); + } + + let alphas = lumi_cache.alphas(mur2); + + lumi *= alphas.powi(order.alphas.try_into().unwrap()); + lumi + }); + + if order.logxir > 0 { + value *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); + } + + if order.logxif > 0 { + value *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); + } + + bins[xi_index + xi.len() * bin_index] += value / normalizations[bin]; + } + } + + bins + } + /// Convolutes a single subgrid `(order, bin, lumi)` with the PDFs strong coupling given by /// `xfx1`, `xfx2` and `alphas`. The convolution result is fully differentially, such that the /// axes of the result correspond to the values given by the subgrid `q2`, `x1` and `x2` grid diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index e00bd029..e1eedf2a 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -1,5 +1,6 @@ //! Module for everything related to luminosity functions. +use super::grid::Grid; use serde::{Deserialize, Serialize}; /// This structure represens an entry of a luminosity function. Each entry consists of a tuple, @@ -93,3 +94,140 @@ macro_rules! 
lumi_entry { $crate::lumi::LumiEntry::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) }; } + +enum Pdfs<'a> { + Two { + xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, + xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, + }, + One { + xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, + }, +} + +/// A cache for evaluating PDFs. Methods like [`Grid::convolute2`] accept instances of this +/// `struct` instead of the PDFs themselves. +pub struct LumiCache<'a> { + pdfs: Pdfs<'a>, + alphas: &'a mut dyn FnMut(f64) -> f64, + cc1: i32, + cc2: i32, +} + +impl<'a> LumiCache<'a> { + fn new( + grid: &Grid, + pdg1: i32, + pdg2: i32, + pdfs: Pdfs<'a>, + alphas: &'a mut dyn FnMut(f64) -> f64, + ) -> Result { + // PDG identifiers of the initial states + let pdga = grid.key_values().map_or(2212, |kv| { + kv.get("initial_state_1") + .map_or(2212, |s| s.parse::().unwrap()) + }); + let pdgb = grid.key_values().map_or(2212, |kv| { + kv.get("initial_state_2") + .map_or(2212, |s| s.parse::().unwrap()) + }); + + // are the initial states hadrons? + let has_pdfa = grid + .lumi() + .iter() + .all(|entry| entry.entry().iter().all(|&(a, _, _)| a == pdga)); + let has_pdfb = grid + .lumi() + .iter() + .all(|entry| entry.entry().iter().all(|&(_, b, _)| b == pdgb)); + + // do we have to charge-conjugate the initial states? + let cc1 = if pdg1 == pdga { + 1 + } else if pdg1 == -pdga { + -1 + } else if has_pdfa { + return Err(()); + } else { + 0 + }; + let cc2 = if pdg2 == pdgb { + 1 + } else if pdg2 == -pdgb { + -1 + } else if has_pdfb { + return Err(()); + } else { + 0 + }; + + Ok(Self { + pdfs, + alphas, + cc1, + cc2, + }) + } + + /// Construct a luminosity cache with two PDFs, `xfx1` and `xfx2`. The types of hadrons the + /// PDFs correspond to must be given as `pdg1` and `pdg2`. The function to evaluate the + /// strong coupling must be given as `alphas`. 
The grid that the cache will be used with must + /// be given as `grid`; this parameter determines which of the initial states are hadronic, and + /// if an initial state is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some + /// of the PDFs must be charge-conjugated, this is automatically done in this function. + pub fn with_two( + grid: &Grid, + pdg1: i32, + xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, + pdg2: i32, + xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, + alphas: &'a mut dyn FnMut(f64) -> f64, + ) -> Result { + Self::new(grid, pdg1, pdg2, Pdfs::Two { xfx1, xfx2 }, alphas) + } + + /// Construct a luminosity cache with a single PDF `xfx`. The type of hadron the PDF + /// corresponds to must be given as `pdg`. The function to evaluate the strong coupling must be + /// given as `alphas`. The grid that the cache should be used with must be given as `grid`; + /// this parameter determines which of the initial states are hadronic, and if an initial + /// state is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some of the PDFs + /// must be charge-conjugated, this is automatically done in this function. + pub fn with_one( + grid: &Grid, + pdg: i32, + xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, + alphas: &'a mut dyn FnMut(f64) -> f64, + ) -> Result { + Self::new(grid, pdg, pdg, Pdfs::One { xfx }, alphas) + } + + /// Return the PDF (multiplied with `x`) for the first initial state. + pub fn xfx1(&mut self, pdg_id: i32, x: f64, q2: f64) -> f64 { + if self.cc1 == 0 { + x + } else { + match &mut self.pdfs { + Pdfs::One { xfx } => xfx(self.cc1 * pdg_id, x, q2), + Pdfs::Two { xfx1, .. } => xfx1(self.cc1 * pdg_id, x, q2), + } + } + } + + /// Return the PDF (multiplied with `x`) for the second initial state. + pub fn xfx2(&mut self, pdg_id: i32, x: f64, q2: f64) -> f64 { + if self.cc2 == 0 { + x + } else { + match &mut self.pdfs { + Pdfs::One { xfx } => xfx(self.cc2 * pdg_id, x, q2), + Pdfs::Two { xfx2, ..
} => xfx2(self.cc2 * pdg_id, x, q2), + } + } + } + + /// Return the strong coupling evaluated at the scale `q2`. + pub fn alphas(&mut self, q2: f64) -> f64 { + (self.alphas)(q2) + } +} From 2f9f6e3788236829d8faaa67a87bc7cb920b0be8 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 13 Oct 2021 21:58:57 +0200 Subject: [PATCH 18/39] Add methods `clear` and `set_grids` to `LumiCache` --- pineappl/src/lumi.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index e1eedf2a..e0a59c2d 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -1,6 +1,7 @@ //! Module for everything related to luminosity functions. use super::grid::Grid; +use super::subgrid::Mu2; use serde::{Deserialize, Serialize}; /// This structure represens an entry of a luminosity function. Each entry consists of a tuple, @@ -110,6 +111,9 @@ enum Pdfs<'a> { pub struct LumiCache<'a> { pdfs: Pdfs<'a>, alphas: &'a mut dyn FnMut(f64) -> f64, + mu2_grid: Vec, + x1_grid: Vec, + x2_grid: Vec, cc1: i32, cc2: i32, } @@ -165,6 +169,9 @@ impl<'a> LumiCache<'a> { Ok(Self { pdfs, alphas, + mu2_grid: vec![], + x1_grid: vec![], + x2_grid: vec![], cc1, cc2, }) @@ -230,4 +237,35 @@ impl<'a> LumiCache<'a> { pub fn alphas(&mut self, q2: f64) -> f64 { (self.alphas)(q2) } + + /// Clears the cache. + pub fn clear(&mut self) { + self.mu2_grid.clear(); + self.x1_grid.clear(); + self.x2_grid.clear(); + } + + /// Set the grids. 
+ pub fn set_grids( + &mut self, + mu2_grid: &[Mu2], + x1_grid: &[f64], + x2_grid: &[f64], + xir: f64, + xif: f64, + ) { + if (x1_grid != self.x1_grid) || (x2_grid != self.x2_grid) { + self.clear(); + self.x1_grid = x1_grid.to_vec(); + self.x2_grid = x2_grid.to_vec(); + } + + self.mu2_grid = mu2_grid + .iter() + .map(|Mu2 { ren, fac }| Mu2 { + ren: ren * xir * xir, + fac: fac * xif * xif, + }) + .collect(); + } } From 1439bf0af92fe95ce4b3574b33dcd1af91c7bb26 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 14 Oct 2021 14:59:00 +0200 Subject: [PATCH 19/39] Cache mu2/x1/x2 grids and use mu2 grid --- pineappl/src/grid.rs | 5 +++-- pineappl/src/lumi.rs | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 4fb203ee..fb0d5892 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -528,11 +528,12 @@ impl Grid { let x1_grid = subgrid.x1_grid(); let x2_grid = subgrid.x2_grid(); + lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); + let mut value = subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { let x1 = x1_grid[ix1]; let x2 = x2_grid[ix2]; - let mur2 = xir * xir * mu2_grid[imu2].ren; let muf2 = xif * xif * mu2_grid[imu2].fac; let mut lumi = 0.0; @@ -542,7 +543,7 @@ impl Grid { lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); } - let alphas = lumi_cache.alphas(mur2); + let alphas = lumi_cache.alphas(imu2); lumi *= alphas.powi(order.alphas.try_into().unwrap()); lumi diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index e0a59c2d..3e37ea7c 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -233,9 +233,10 @@ impl<'a> LumiCache<'a> { } } - /// Return the strong coupling evaluated at the scale `q2`. - pub fn alphas(&mut self, q2: f64) -> f64 { - (self.alphas)(q2) + /// Return the strong coupling for the renormalization scale set with [`LumiCache::set_grids`], + /// in the grid `mu2_grid` at the index `imu2`. 
+ pub fn alphas(&mut self, imu2: usize) -> f64 { + (self.alphas)(self.mu2_grid[imu2].ren) } /// Clears the cache. From a8a27b35fbd98bafe7e3baf776d7f3716c65e30c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 14 Oct 2021 15:08:51 +0200 Subject: [PATCH 20/39] Use imu2 grid also in `LumiCache::xfx` --- pineappl/src/grid.rs | 5 ++--- pineappl/src/lumi.rs | 14 ++++++++------ 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index fb0d5892..16b2d706 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -534,12 +534,11 @@ impl Grid { subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { let x1 = x1_grid[ix1]; let x2 = x2_grid[ix2]; - let muf2 = xif * xif * mu2_grid[imu2].fac; let mut lumi = 0.0; for entry in lumi_entry.entry() { - let xfx1 = lumi_cache.xfx1(entry.0, x1, muf2); - let xfx2 = lumi_cache.xfx2(entry.1, x2, muf2); + let xfx1 = lumi_cache.xfx1(entry.0, x1, imu2); + let xfx2 = lumi_cache.xfx2(entry.1, x2, imu2); lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); } diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 3e37ea7c..50861e88 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -210,25 +210,27 @@ impl<'a> LumiCache<'a> { } /// Return the PDF (multiplied with `x`) for the first initial state. - pub fn xfx1(&mut self, pdg_id: i32, x: f64, q2: f64) -> f64 { + pub fn xfx1(&mut self, pdg_id: i32, x: f64, imu2: usize) -> f64 { if self.cc1 == 0 { x } else { + let muf2 = self.mu2_grid[imu2].fac; match &mut self.pdfs { - Pdfs::One { xfx } => xfx(self.cc1 * pdg_id, x, q2), - Pdfs::Two { xfx1, .. } => xfx1(self.cc1 * pdg_id, x, q2), + Pdfs::One { xfx } => xfx(self.cc1 * pdg_id, x, muf2), + Pdfs::Two { xfx1, .. } => xfx1(self.cc1 * pdg_id, x, muf2), } } } /// Return the PDF (multiplied with `x`) for the second initial state. 
- pub fn xfx2(&mut self, pdg_id: i32, x: f64, q2: f64) -> f64 { + pub fn xfx2(&mut self, pdg_id: i32, x: f64, imu2: usize) -> f64 { if self.cc2 == 0 { x } else { + let muf2 = self.mu2_grid[imu2].fac; match &mut self.pdfs { - Pdfs::One { xfx } => xfx(self.cc2 * pdg_id, x, q2), - Pdfs::Two { xfx2, .. } => xfx2(self.cc2 * pdg_id, x, q2), + Pdfs::One { xfx } => xfx(self.cc2 * pdg_id, x, muf2), + Pdfs::Two { xfx2, .. } => xfx2(self.cc2 * pdg_id, x, muf2), } } } From 52111e777ec2a82f8ad6a05f2b92289cc6c7c069 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Thu, 14 Oct 2021 16:14:06 +0200 Subject: [PATCH 21/39] Use x1/x2 grid --- pineappl/src/grid.rs | 4 ++-- pineappl/src/lumi.rs | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 16b2d706..2f4863d2 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -537,8 +537,8 @@ impl Grid { let mut lumi = 0.0; for entry in lumi_entry.entry() { - let xfx1 = lumi_cache.xfx1(entry.0, x1, imu2); - let xfx2 = lumi_cache.xfx2(entry.1, x2, imu2); + let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); + let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); } diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 50861e88..4047c1f2 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -210,7 +210,8 @@ impl<'a> LumiCache<'a> { } /// Return the PDF (multiplied with `x`) for the first initial state. - pub fn xfx1(&mut self, pdg_id: i32, x: f64, imu2: usize) -> f64 { + pub fn xfx1(&mut self, pdg_id: i32, ix1: usize, imu2: usize) -> f64 { + let x = self.x1_grid[ix1]; if self.cc1 == 0 { x } else { @@ -223,7 +224,8 @@ impl<'a> LumiCache<'a> { } /// Return the PDF (multiplied with `x`) for the second initial state. 
- pub fn xfx2(&mut self, pdg_id: i32, x: f64, imu2: usize) -> f64 { + pub fn xfx2(&mut self, pdg_id: i32, ix2: usize, imu2: usize) -> f64 { + let x = self.x2_grid[ix2]; if self.cc2 == 0 { x } else { From 92b603d05aa2344413fbf3338bfea8977fa16a84 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 15 Oct 2021 15:28:51 +0200 Subject: [PATCH 22/39] Always optimize the luminosity function --- pineappl/src/grid.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 2f4863d2..cea30401 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1014,9 +1014,10 @@ impl Grid { .map_or(true, |map| map["initial_state_1"] == map["initial_state_2"]) { self.symmetrize_lumi(); - self.optimize_lumi(); } + self.optimize_lumi(); + for subgrid in self.subgrids.iter_mut() { if subgrid.is_empty() { *subgrid = EmptySubgridV1::default().into(); From e1dfa0d3147cf24a7c4f701c4b2840aec7bd3f86 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Fri, 15 Oct 2021 17:32:29 +0200 Subject: [PATCH 23/39] Add preliminary support for evolution basis --- pineappl/src/grid.rs | 27 +++++++++- pineappl/src/lib.rs | 1 + pineappl/src/lumi.rs | 29 ++++++++++ pineappl/src/pids.rs | 126 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 181 insertions(+), 2 deletions(-) create mode 100644 pineappl/src/pids.rs diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index cea30401..8cb4c989 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -8,6 +8,7 @@ use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, Lagran use super::lumi::{LumiCache, LumiEntry}; use super::lumi_entry; use super::ntuple_subgrid::NtupleSubgridV1; +use super::pids; use super::sparse_array3::SparseArray3; use super::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; use float_cmp::approx_eq; @@ -315,6 +316,26 @@ impl Grid { }) } + fn pdg_lumi(&self) -> Cow<[LumiEntry]> { + if let Some(key_values) = 
self.key_values() { + if let Some(lumi_id_types) = key_values.get("lumi_id_types") { + match lumi_id_types.as_str() { + "pdg_mc_ids" => {} + "evol" => { + return self + .lumi + .iter() + .map(|entry| LumiEntry::translate(entry, &pids::evol_to_pdg_mc_ids)) + .collect(); + } + _ => unimplemented!(), + } + } + } + + Cow::Borrowed(self.lumi()) + } + /// Performs a convolution of the contained subgrids with the given PDFs, `xfx1` for the first /// parton and `xfx2` for the second parton, `alphas` for the evaluation of the strong /// coupling. The parameters `order_mask` and `lumi_mask` can be used to selectively enable @@ -369,6 +390,7 @@ impl Grid { .iter() .map(|xi| xir_values.iter().position(|xir| xi.0 == *xir).unwrap()) .collect(); + let self_lumi = self.pdg_lumi(); // iterate over the elements of `xi` and a corresponding index, but sorted using the // factorisation value of `xi` @@ -403,7 +425,7 @@ impl Grid { None => continue, }; - let lumi_entry = &self.lumi[k]; + let lumi_entry = &self_lumi[k]; let mut value = if subgrid.is_empty() { 0.0 @@ -499,6 +521,7 @@ impl Grid { }; let mut bins: Vec = vec![0.0; bin_indices.len() * xi.len()]; let normalizations = self.bin_info().normalizations(); + let self_lumi = self.pdg_lumi(); for (xi_index, &(xir, xif)) in xi.iter().enumerate() { for ((ord, bin, lumi), subgrid) in self.subgrids.indexed_iter() { @@ -523,7 +546,7 @@ impl Grid { continue; } - let lumi_entry = &self.lumi[lumi]; + let lumi_entry = &self_lumi[lumi]; let mu2_grid = subgrid.mu2_grid(); let x1_grid = subgrid.x1_grid(); let x2_grid = subgrid.x2_grid(); diff --git a/pineappl/src/lib.rs b/pineappl/src/lib.rs index fd3d6b07..e9318122 100644 --- a/pineappl/src/lib.rs +++ b/pineappl/src/lib.rs @@ -15,5 +15,6 @@ pub mod import_only_subgrid; pub mod lagrange_subgrid; pub mod lumi; pub mod ntuple_subgrid; +pub mod pids; pub mod sparse_array3; pub mod subgrid; diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 4047c1f2..0dce2698 100644 --- 
a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -51,6 +51,35 @@ impl LumiEntry { Self { entry } } + /// Translates `entry` into a different basis using `translator`. + /// + /// # Examples + /// + /// ```rust + /// use pineappl::lumi::LumiEntry; + /// use pineappl::lumi_entry; + /// + /// let entry = LumiEntry::translate(&lumi_entry![103, 11, 1.0], &|evol_id| match evol_id { + /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], + /// _ => vec![(evol_id, 1.0)], + /// }); + /// + /// assert_eq!(entry, lumi_entry![2, 11, 1.0; -2, 11, -1.0; 1, 11, -1.0; -1, 11, 1.0]); + /// ``` + pub fn translate(entry: &Self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { + let mut tuples = Vec::new(); + + for &(a, b, factor) in &entry.entry { + for (aid, af) in translator(a) { + for (bid, bf) in translator(b) { + tuples.push((aid, bid, factor * af * bf)); + } + } + } + + Self::new(tuples) + } + /// Returns a tuple representation of this entry. /// /// # Examples diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs new file mode 100644 index 00000000..fc90146d --- /dev/null +++ b/pineappl/src/pids.rs @@ -0,0 +1,126 @@ +//! TODO + +/// Translates IDs from the evolution basis into IDs using PDG Monte Carlo IDs. 
+pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { + match id { + 100 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, 1.0), + (-5, 1.0), + (6, 1.0), + (-6, 1.0), + ], + 103 => vec![(2, 1.0), (-2, 1.0), (1, -1.0), (-1, -1.0)], + 108 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, -2.0), + (-3, -2.0), + ], + 115 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, -3.0), + (-4, -3.0), + ], + 124 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, -4.0), + (-5, -4.0), + ], + 135 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, 1.0), + (-5, 1.0), + (6, -5.0), + (-6, -5.0), + ], + 200 => vec![ + (1, 1.0), + (-1, -1.0), + (2, 1.0), + (-2, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, 1.0), + (-5, -1.0), + (6, 1.0), + (-6, -1.0), + ], + 203 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], + 208 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, -2.0), + (-3, 2.0), + ], + 215 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, -3.0), + (-4, 3.0), + ], + 224 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, -4.0), + (-5, 4.0), + ], + 235 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, 1.0), + (-5, -1.0), + (6, -5.0), + (-6, 5.0), + ], + _ => vec![(id, 1.0)], + } +} From 939eaad4e39da7ef97d546dcbb55baa7367251d9 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Mon, 18 Oct 2021 17:53:51 +0200 Subject: [PATCH 24/39] Add implementation of merge from file --- pineappl_py/src/grid.rs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pineappl_py/src/grid.rs 
b/pineappl_py/src/grid.rs index d1d9bfeb..3b2aed71 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -7,9 +7,11 @@ use super::subgrid::{PySubgridEnum, PySubgridParams}; use ndarray::{Array, Ix5}; +use std::collections::HashMap; use std::fs::File; use std::io::BufReader; +use pyo3::exceptions::PyValueError; use pyo3::prelude::*; /// PyO3 wrapper to :rustdoc:`pineappl::grid::Order ` @@ -363,9 +365,12 @@ impl PyGrid { } /// Optimize grid content. - // pub fn merge(&mut self, mut other: Self) { - // self.grid.merge(other.grid); - // } + pub fn merge_from_file(&mut self, path: &str) -> PyResult<()> { + match self.grid.merge(Self::read(path).grid) { + Ok(()) => Ok(()), + Err(x) => Err(PyValueError::new_err(format!("{:?}", x))), + } + } /// Extract the number of dimensions for bins. /// From 2a224328356d443c6392c50e4d1e71f4dcad28b4 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 25 Oct 2021 17:34:58 +0200 Subject: [PATCH 25/39] Migrate remaining `convolute` calls --- pineappl/src/fk_table.rs | 6 +- pineappl/src/grid.rs | 165 --------------------------------- pineappl/tests/drell_yan_lo.rs | 37 ++++---- pineappl_capi/src/lib.rs | 86 ++++++++++++++--- pineappl_py/src/fk_table.rs | 24 +++-- pineappl_py/src/grid.rs | 29 +++--- 6 files changed, 121 insertions(+), 226 deletions(-) diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index bb721a08..3f5266f6 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,6 +1,7 @@ //! Provides the [`FkTable`] type. 
use super::grid::{Grid, Order}; +use super::lumi::LumiCache; use super::subgrid::Subgrid; use ndarray::Array4; use std::convert::TryFrom; @@ -146,15 +147,14 @@ impl FkTable { /// Propagate convolute to grid pub fn convolute( &self, - xfx1: &dyn Fn(i32, f64, f64) -> f64, - xfx2: &dyn Fn(i32, f64, f64) -> f64, + lumi_cache: &mut LumiCache, order_mask: &[bool], bin_indices: &[usize], lumi_mask: &[bool], xi: &[(f64, f64)], ) -> Vec { self.grid - .convolute(xfx1, xfx2, &|_| 1.0, order_mask, bin_indices, lumi_mask, xi) + .convolute2(lumi_cache, order_mask, bin_indices, lumi_mask, xi) } } diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 665e5107..b4aafdd9 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -336,171 +336,6 @@ impl Grid { Cow::Borrowed(self.lumi()) } - /// Performs a convolution of the contained subgrids with the given PDFs, `xfx1` for the first - /// parton and `xfx2` for the second parton, `alphas` for the evaluation of the strong - /// coupling. The parameters `order_mask` and `lumi_mask` can be used to selectively enable - /// perturbative orders and luminosities; they must either be empty (everything enabled) or as - /// large as the orders and luminosity function, respectively. If the corresponding entry is - /// `true` the order/luminosity is enable, `false` disables the entry. The tuple `xi` can be - /// used to independently vary the renormalization (first element) and factorization scale - /// (second element) from their central value `(1.0, 1.0)`. 
- /// - /// # Panics - /// - /// TODO - pub fn convolute( - &self, - xfx1: &dyn Fn(i32, f64, f64) -> f64, - xfx2: &dyn Fn(i32, f64, f64) -> f64, - alphas: &dyn Fn(f64) -> f64, - order_mask: &[bool], - bin_indices: &[usize], - lumi_mask: &[bool], - xi: &[(f64, f64)], - ) -> Vec { - let bin_indices = if bin_indices.is_empty() { - (0..self.bin_limits.bins()).collect() - } else { - bin_indices.to_vec() - }; - let mut bins: Vec = vec![0.0; bin_indices.len() * xi.len()]; - let bin_sizes = self.bin_info().normalizations(); - - // prepare pdf and alpha caches - let mut pdf_cache1 = FxHashMap::default(); - let mut pdf_cache2 = FxHashMap::default(); - let mut alphas_cache = FxHashMap::default(); - let mut last_xif = 0.0; - - let (mut mu2_grid, mut x1_grid, mut x2_grid) = self - .subgrids - .iter() - .find(|subgrid| !subgrid.is_empty()) - .map_or_else( - || (Cow::default(), Cow::default(), Cow::default()), - |grid| (grid.mu2_grid(), grid.x1_grid(), grid.x2_grid()), - ); - let use_cache = !mu2_grid.is_empty() && !x1_grid.is_empty() && !x2_grid.is_empty(); - let two_caches = !ptr::eq(&xfx1, &xfx2); - - let mut xir_values: Vec<_> = xi.iter().map(|xi| xi.0).collect(); - xir_values.sort_by(|lhs, rhs| lhs.partial_cmp(rhs).unwrap()); - xir_values.dedup(); - let xir_indices: Vec<_> = xi - .iter() - .map(|xi| xir_values.iter().position(|xir| xi.0 == *xir).unwrap()) - .collect(); - let self_lumi = self.pdg_lumi(); - - // iterate over the elements of `xi` and a corresponding index, but sorted using the - // factorisation value of `xi` - for (l, (&(xir, xif), &xir_index)) in xi - .iter() - .zip(xir_indices.iter()) - .enumerate() - .sorted_by(|lhs, rhs| (lhs.1).1.partial_cmp((rhs.1).1).unwrap()) - { - // whenever the value `xif` changes we can clear the PDF cache - if xif != last_xif { - pdf_cache1.clear(); - pdf_cache2.clear(); - last_xif = xif; - } - // iterate subgrids - for ((i, j, k), subgrid) in self.subgrids.indexed_iter() { - let order = &self.orders[i]; - // can we skip? 
- if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) { - continue; - } - - if (!order_mask.is_empty() && !order_mask[i]) - || (!lumi_mask.is_empty() && !lumi_mask[k]) - { - continue; - } - - let bin_index = match bin_indices.iter().position(|&index| index == j) { - Some(i) => i, - None => continue, - }; - - let lumi_entry = &self_lumi[k]; - - let mut value = if subgrid.is_empty() { - 0.0 - } else if use_cache { - // Now we actually have to do something - let new_mu2_grid = subgrid.mu2_grid(); - let new_x1_grid = subgrid.x1_grid(); - let new_x2_grid = subgrid.x2_grid(); - let mu2_grid_changed = new_mu2_grid != mu2_grid; - - if mu2_grid_changed { - mu2_grid = new_mu2_grid; - alphas_cache.clear(); - } - - if mu2_grid_changed || (new_x1_grid != x1_grid) || (new_x2_grid != x2_grid) { - x1_grid = new_x1_grid; - x2_grid = new_x2_grid; - pdf_cache1.clear(); - pdf_cache2.clear(); - } - - // pass convolute down, using caching - subgrid.convolute(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { - let x1 = x1_grid[ix1]; - let x2 = x2_grid[ix2]; - let mu2 = &mu2_grid[imu2]; - let muf2 = xif * xif * mu2.fac; - - let mut lumi = 0.0; - - for entry in lumi_entry.entry() { - let xfx1 = *pdf_cache1 - .entry((entry.0, ix1, imu2)) - .or_insert_with(|| xfx1(entry.0, x1, muf2)); - let xfx2 = if two_caches { - *pdf_cache2 - .entry((entry.1, ix2, imu2)) - .or_insert_with(|| xfx2(entry.1, x2, muf2)) - } else { - *pdf_cache1 - .entry((entry.1, ix2, imu2)) - .or_insert_with(|| xfx2(entry.1, x2, muf2)) - }; - lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); - } - - let alphas = alphas_cache - .entry(xir_values.len() * imu2 + xir_index) - .or_insert_with(|| alphas(xir * xir * mu2.ren)); - - lumi *= alphas.powi(order.alphas.try_into().unwrap()); - lumi - }) - } else { - todo!(); - }; - - // enhance with logarithms - if order.logxir > 0 { - value *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); - } - - if order.logxif > 0 { - value *= (xif * 
xif).ln().powi(order.logxif.try_into().unwrap()); - } - - // fill in ... - bins[l + xi.len() * bin_index] += value / bin_sizes[j]; - } - } - - bins - } - /// TODO /// /// # Panics diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index 2277c759..b8e567d4 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -2,6 +2,7 @@ use float_cmp::approx_eq; use lhapdf::Pdf; use pineappl::bin::BinRemapper; use pineappl::grid::{Grid, Ntuple, Order}; +use pineappl::lumi::LumiCache; use pineappl::lumi_entry; use pineappl::subgrid::{ExtraSubgridParams, SubgridParams}; use rand::Rng; @@ -176,8 +177,8 @@ fn dy_aa_lagrange_subgrid_static() -> anyhow::Result<()> { assert!(lhapdf::available_pdf_sets().iter().any(|x| x == &pdf_set)); let pdf = Pdf::with_setname_and_member(&pdf_set, 0); - let xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); - let alphas = |_| 0.0; + let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut alphas = |_| 0.0; // check `read` and `write` let mut file = Cursor::new(Vec::new()); @@ -190,7 +191,8 @@ fn dy_aa_lagrange_subgrid_static() -> anyhow::Result<()> { grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.29438499470369e-1, 5.407794857747981e-1, @@ -236,7 +238,7 @@ fn dy_aa_lagrange_subgrid_static() -> anyhow::Result<()> { .collect::>(), )?)?; - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); // results are slightly different because of the static scale detection - the interpolation // error in the Q^2 dimension is removed @@ -295,8 +297,8 @@ fn dy_aa_lagrange_subgrid_dynamic() -> anyhow::Result<()> { 
assert!(lhapdf::available_pdf_sets().iter().any(|x| x == &pdf_set)); let pdf = Pdf::with_setname_and_member(&pdf_set, 0); - let xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); - let alphas = |_| 0.0; + let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut alphas = |_| 0.0; // check `read` and `write` let mut file = Cursor::new(Vec::new()); @@ -309,7 +311,8 @@ fn dy_aa_lagrange_subgrid_dynamic() -> anyhow::Result<()> { grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.093090431949207e-1, 5.191668797562395e-1, @@ -345,7 +348,7 @@ fn dy_aa_lagrange_subgrid_dynamic() -> anyhow::Result<()> { grid.optimize(); // check that the results are still the same - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { assert!(approx_eq!(f64, *result, *reference, ulps = 16)); @@ -375,8 +378,8 @@ fn dy_aa_lagrange_subgrid_v2_dynamic() -> anyhow::Result<()> { assert!(lhapdf::available_pdf_sets().iter().any(|x| x == &pdf_set)); let pdf = Pdf::with_setname_and_member(&pdf_set, 0); - let xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); - let alphas = |_| 0.0; + let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut alphas = |_| 0.0; // check `read` and `write` let mut file = Cursor::new(Vec::new()); @@ -389,7 +392,8 @@ fn dy_aa_lagrange_subgrid_v2_dynamic() -> anyhow::Result<()> { grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); + let 
bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.093090431949207e-1, 5.191668797562395e-1, @@ -425,7 +429,7 @@ fn dy_aa_lagrange_subgrid_v2_dynamic() -> anyhow::Result<()> { //grid.optimize(); // check that the results are still the same - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { assert!(approx_eq!(f64, *result, *reference, ulps = 16)); @@ -455,8 +459,8 @@ fn dy_aa_lagrange_sparse_subgrid_dynamic() -> anyhow::Result<()> { assert!(lhapdf::available_pdf_sets().iter().any(|x| x == &pdf_set)); let pdf = Pdf::with_setname_and_member(&pdf_set, 0); - let xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); - let alphas = |_| 0.0; + let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut alphas = |_| 0.0; // check `read` and `write` let mut file = Cursor::new(Vec::new()); @@ -469,7 +473,8 @@ fn dy_aa_lagrange_sparse_subgrid_dynamic() -> anyhow::Result<()> { grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.093090431949207e-1, 5.191668797562395e-1, @@ -505,7 +510,7 @@ fn dy_aa_lagrange_sparse_subgrid_dynamic() -> anyhow::Result<()> { grid.optimize(); // check that the results are still the same - let bins = grid.convolute(&xfx, &xfx, &alphas, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { assert!(approx_eq!(f64, *result, *reference, ulps = 16)); diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 8c09c697..8fe3bd9c 100644 --- 
a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -62,7 +62,7 @@ use pineappl::bin::BinRemapper; use pineappl::empty_subgrid::EmptySubgridV1; use pineappl::grid::{Grid, Ntuple, Order}; use pineappl::import_only_subgrid::ImportOnlySubgridV2; -use pineappl::lumi::LumiEntry; +use pineappl::lumi::{LumiCache, LumiEntry}; use pineappl::sparse_array3::SparseArray3; use pineappl::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridParams}; use std::collections::HashMap; @@ -248,10 +248,69 @@ pub unsafe extern "C" fn pineappl_grid_bin_limits_right( slice::from_raw_parts_mut(right, limits.len()).copy_from_slice(&limits); } -/// Convolutes the specified grid with the PDFs `xfx1` and `xfx2` and strong coupling `alphas`. -/// These functions must evaluate the PDFs for the given `x` and `q2` for the parton with the given -/// PDG id, `pdg_id`, and return the result. Note that the value must be the PDF multiplied with -/// its argument `x`. The value of the pointer `state` provided to these functions is the same one +/// Convolutes the specified grid with the PDF `xfx`, which is the PDF for a hadron with the PDG id +/// `pdg_id`, and strong coupling `alphas`. These functions must evaluate the PDFs for the given +/// `x` and `q2` for the parton with the given PDG id, `pdg_id`, and return the result. Note that +/// the value must be the PDF multiplied with its argument `x`. The value of the pointer `state` +/// provided to these functions is the same one given to this function. The parameter `order_mask` +/// must be as long as there are perturbative orders contained in `grid` and is used to selectively +/// disable (`false`) or enable (`true`) individual orders. If `order_mask` is set to `NULL`, all +/// orders are active. The parameter `lumi_mask` can be used similarly, but must be as long as the +/// luminosity function `grid` was created with has entries, or `NULL` to enable all luminosities. 
+/// The values `xi_ren` and `xi_fac` can be used to vary the renormalization and factorization from +/// its central value, which corresponds to `1.0`. After convolution of the grid with the PDFs the +/// differential cross section for each bin is written into `results`. +/// +/// # Safety +/// +/// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, +/// this function is not safe to call. The function pointers `xfx1`, `xfx2`, and `alphas` must be +/// null pointers and point to valid functions. The parameters `order_mask` and `lumi_mask` must +/// either be null pointers or point to arrays that are as long as `grid` has orders and lumi +/// entries, respectively. Finally, `results` must be as long as `grid` has bins. +#[no_mangle] +pub unsafe extern "C" fn pineappl_grid_convolute_with_one( + grid: *const Grid, + pdg_id: i32, + xfx: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, + alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, + state: *mut c_void, + order_mask: *const bool, + lumi_mask: *const bool, + xi_ren: f64, + xi_fac: f64, + results: *mut f64, +) { + let grid = &*grid; + let mut pdf = |id, x, q2| xfx(id, x, q2, state); + let mut als = |q2| alphas(q2, state); + let order_mask = if order_mask.is_null() { + vec![] + } else { + slice::from_raw_parts(order_mask, grid.orders().len()).to_vec() + }; + let lumi_mask = if lumi_mask.is_null() { + vec![] + } else { + slice::from_raw_parts(lumi_mask, grid.lumi().len()).to_vec() + }; + let results = slice::from_raw_parts_mut(results, grid.bin_info().bins()); + let mut lumi_cache = LumiCache::with_one(pdg_id, &mut pdf, &mut als); + + results.copy_from_slice(&grid.convolute2( + &mut lumi_cache, + &order_mask, + &[], + &lumi_mask, + &[(xi_ren, xi_fac)], + )); +} + +/// Convolutes the specified grid with the PDFs `xfx1` and `xfx2`, which are the PDFs of hadrons +/// with PDG ids `pdg_id1` and `pdg_id2`, respectively, and strong coupling 
`alphas`. These +/// functions must evaluate the PDFs for the given `x` and `q2` for the parton with the given PDG +/// id, `pdg_id`, and return the result. Note that the value must be the PDF multiplied with its +/// argument `x`. The value of the pointer `state` provided to these functions is the same one /// given to this function. The parameter `order_mask` must be as long as there are perturbative /// orders contained in `grid` and is used to selectively disable (`false`) or enable (`true`) /// individual orders. If `order_mask` is set to `NULL`, all orders are active. The parameter @@ -269,9 +328,11 @@ pub unsafe extern "C" fn pineappl_grid_bin_limits_right( /// either be null pointers or point to arrays that are as long as `grid` has orders and lumi /// entries, respectively. Finally, `results` must be as long as `grid` has bins. #[no_mangle] -pub unsafe extern "C" fn pineappl_grid_convolute( +pub unsafe extern "C" fn pineappl_grid_convolute_with_two( grid: *const Grid, + pdg_id1: i32, xfx1: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, + pdg_id2: i32, xfx2: extern "C" fn(pdg_id: i32, x: f64, q2: f64, state: *mut c_void) -> f64, alphas: extern "C" fn(q2: f64, state: *mut c_void) -> f64, state: *mut c_void, @@ -282,9 +343,9 @@ pub unsafe extern "C" fn pineappl_grid_convolute( results: *mut f64, ) { let grid = &*grid; - let pdf1 = |id, x, q2| xfx1(id, x, q2, state); - let pdf2 = |id, x, q2| xfx2(id, x, q2, state); - let als = |q2| alphas(q2, state); + let mut pdf1 = |id, x, q2| xfx1(id, x, q2, state); + let mut pdf2 = |id, x, q2| xfx2(id, x, q2, state); + let mut als = |q2| alphas(q2, state); let order_mask = if order_mask.is_null() { vec![] } else { @@ -296,11 +357,10 @@ pub unsafe extern "C" fn pineappl_grid_convolute( slice::from_raw_parts(lumi_mask, grid.lumi().len()).to_vec() }; let results = slice::from_raw_parts_mut(results, grid.bin_info().bins()); + let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut pdf1, pdg_id2, &mut 
pdf2, &mut als); - results.copy_from_slice(&grid.convolute( - &pdf1, - &pdf2, - &als, + results.copy_from_slice(&grid.convolute2( + &mut lumi_cache, &order_mask, &[], &lumi_mask, diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 9fe28a65..04729e0a 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -1,6 +1,7 @@ use numpy::{IntoPyArray, PyArray4}; use pineappl::fk_table::FkTable; use pineappl::grid::Grid; +use pineappl::lumi::LumiCache; use pyo3::prelude::*; use std::convert::TryFrom; use std::fs::File; @@ -95,23 +96,20 @@ impl PyFkTable { /// /// Parameters /// ---------- - /// xfx1 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x_1`-grid - /// xfx2 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x_2`-grid + /// pdg_id : integer + /// PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for + /// xfx : callable + /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid /// /// Returns /// ------- /// list(float) : /// cross sections for all bins - pub fn convolute(&self, xfx1: &PyAny, xfx2: &PyAny) -> Vec { - self.fk_table.convolute( - &|id, x, q2| f64::extract(xfx1.call1((id, x, q2)).unwrap()).unwrap(), - &|id, x, q2| f64::extract(xfx2.call1((id, x, q2)).unwrap()).unwrap(), - &[], - &[], - &[], - &[(1.0, 1.0)], - ) + pub fn convolute_with_one(&self, pdg_id: i32, xfx: &PyAny) -> Vec { + let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); + let mut alphas = |_| 1.0; + let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); + self.fk_table + .convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]) } } diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 3b2aed71..77883a6a 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -1,4 +1,5 @@ use pineappl::grid::{EkoInfo, Grid, GridAxes, Ntuple, Order}; +use 
pineappl::lumi::LumiCache; use super::bin::PyBinRemapper; use super::fk_table::PyFkTable; @@ -209,10 +210,10 @@ impl PyGrid { /// /// Parameters /// ---------- - /// xfx1 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x_1`-grid - /// xfx2 : callable - /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x_2`-grid + /// pdg_id : int + /// PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for + /// xfx : callable + /// lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid /// alphas : callable /// lhapdf like callable with arguments `Q2` returning :math:`\alpha_s` /// order_mask : list(bool) @@ -237,25 +238,21 @@ impl PyGrid { /// list(float) : /// cross sections for all bins, for each scale-variation tuple (first all bins, then /// the scale variation) - pub fn convolute( + pub fn convolute_with_one( &self, - xfx1: &PyAny, - xfx2: &PyAny, + pdg_id: i32, + xfx: &PyAny, alphas: &PyAny, order_mask: Vec, bin_indices: Vec, lumi_mask: Vec, xi: Vec<(f64, f64)>, ) -> Vec { - self.grid.convolute( - &|id, x, q2| f64::extract(xfx1.call1((id, x, q2)).unwrap()).unwrap(), - &|id, x, q2| f64::extract(xfx2.call1((id, x, q2)).unwrap()).unwrap(), - &|q2| f64::extract(alphas.call1((q2,)).unwrap()).unwrap(), - &order_mask, - &bin_indices, - &lumi_mask, - &xi, - ) + let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); + let mut alphas = |q2| f64::extract(alphas.call1((q2,)).unwrap()).unwrap(); + let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); + self.grid + .convolute2(&mut lumi_cache, &order_mask, &bin_indices, &lumi_mask, &xi) } /// Convolute with with an evolution operator. 
From 4d6ddcae06aa8c4a194f223499271aaa4feda826 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 25 Oct 2021 17:59:12 +0200 Subject: [PATCH 26/39] Remove scale variations from `FkTable` FK tables don't support scale variations, as they contain only one order and therefore the capability of varying the renormalization and factorization scales is lost. --- pineappl/src/fk_table.rs | 10 +++++++--- pineappl_py/src/fk_table.rs | 3 +-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 3f5266f6..1bde2576 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -151,10 +151,14 @@ impl FkTable { order_mask: &[bool], bin_indices: &[usize], lumi_mask: &[bool], - xi: &[(f64, f64)], ) -> Vec { - self.grid - .convolute2(lumi_cache, order_mask, bin_indices, lumi_mask, xi) + self.grid.convolute2( + lumi_cache, + order_mask, + bin_indices, + lumi_mask, + &[(1.0, 1.0)], + ) } } diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 04729e0a..613b8657 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -109,7 +109,6 @@ impl PyFkTable { let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); let mut alphas = |_| 1.0; let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); - self.fk_table - .convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]) + self.fk_table.convolute(&mut lumi_cache, &[], &[], &[]) } } From 6c4dbbaab4a1af6a996d1f849d5012ca7ff929b9 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 25 Oct 2021 18:09:32 +0200 Subject: [PATCH 27/39] Fix Python tests --- pineappl_py/pineappl/grid.py | 18 +++++++++--------- pineappl_py/tests/test_grid.py | 8 ++++---- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index 2247f82a..a636286f 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ 
-167,10 +167,10 @@ def orders(self): """ return [Order(*pyorder.as_tuple()) for pyorder in self.raw.orders()] - def convolute( + def convolute_with_one( self, - xfx1, - xfx2, + pdg_id, + xfx, alphas, order_mask=(), bin_indices=(), @@ -182,10 +182,10 @@ def convolute( Parameters ---------- - xfx1 : callable - lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x_1`-grid - xfx2 : callable - lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x_2`-grid + pdg_id : int + PDG Monte Carlo ID of the hadronic particle `xfx` is the PDF for + xfx : callable + lhapdf like callable with arguments `pid, x, Q2` returning x*pdf for :math:`x`-grid alphas : callable lhapdf like callable with arguments `Q2` returning :math:`\alpha_s` order_mask : list(bool) @@ -212,8 +212,8 @@ def convolute( the scale variation) """ - return self.raw.convolute( - xfx1, xfx2, alphas, order_mask, bin_indices, lumi_mask, xi + return self.raw.convolute_with_one( + pdg_id, xfx, alphas, order_mask, bin_indices, lumi_mask, xi ) def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids"): diff --git a/pineappl_py/tests/test_grid.py b/pineappl_py/tests/test_grid.py index 4c7cccb5..1c7c23b2 100644 --- a/pineappl_py/tests/test_grid.py +++ b/pineappl_py/tests/test_grid.py @@ -80,7 +80,7 @@ def test_bins(self): np.testing.assert_allclose(g.bin_left(1), [2,3]) np.testing.assert_allclose(g.bin_right(1), [3,5]) - def test_convolute(self): + def test_convolute_with_one(self): g = self.fake_grid() # DIS grid @@ -93,9 +93,9 @@ def test_convolute(self): [1.0], ) g.set_subgrid(0, 0, 0, subgrid) - np.testing.assert_allclose(g.convolute(lambda pid,x,q2: 0., lambda pid,x,q2: 0., lambda q2: 0.), [0.]*2) - np.testing.assert_allclose(g.convolute(lambda pid,x,q2: 1, lambda pid,x,q2: 1., lambda q2: 1.), [5e6/9999,0.]) - np.testing.assert_allclose(g.convolute(lambda pid,x,q2: 1, lambda pid,x,q2: 1., lambda q2: 2.), [2**3 * 5e6/9999,0.]) + 
np.testing.assert_allclose(g.convolute_with_one(2212, lambda pid,x,q2: 0., lambda q2: 0.), [0.]*2) + np.testing.assert_allclose(g.convolute_with_one(2212, lambda pid,x,q2: 1, lambda q2: 1.), [5e6/9999,0.]) + np.testing.assert_allclose(g.convolute_with_one(2212, lambda pid,x,q2: 1, lambda q2: 2.), [2**3 * 5e6/9999,0.]) def test_axes(self): g = self.fake_grid() From 13e01f2437a2af9518d4320c89a4e266a5c0ff06 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Mon, 25 Oct 2021 18:18:58 +0200 Subject: [PATCH 28/39] Fix examples --- examples/appl2pine/appl2pine.cpp | 4 ++-- examples/cpp/dyaa.cpp | 2 +- examples/fnlo2pine/fnlo2pine.cpp | 8 ++------ examples/python/dyaa.py | 3 ++- 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/examples/appl2pine/appl2pine.cpp b/examples/appl2pine/appl2pine.cpp index 6266b7e8..702e3016 100644 --- a/examples/appl2pine/appl2pine.cpp +++ b/examples/appl2pine/appl2pine.cpp @@ -444,9 +444,9 @@ pineappl_grid* convert_grid(appl::grid& grid, bool reweight) auto const& results = grid.vconvolute(evolvepdf, evolvepdf, alphaspdf, 1); std::vector other_results(results.size()); - pineappl_grid_convolute( + pineappl_grid_convolute_with_one( grids.at(0), - xfx, + 2212, xfx, alphas, pdf.get(), diff --git a/examples/cpp/dyaa.cpp b/examples/cpp/dyaa.cpp index 7597f4e3..2089d942 100644 --- a/examples/cpp/dyaa.cpp +++ b/examples/cpp/dyaa.cpp @@ -137,7 +137,7 @@ int main() { }; std::vector dxsec(24); - pineappl_grid_convolute(grid, xfx, xfx, alphas, pdf, nullptr, + pineappl_grid_convolute_with_one(grid, 2212, xfx, alphas, pdf, nullptr, nullptr, 1.0, 1.0, dxsec.data()); // print the results diff --git a/examples/fnlo2pine/fnlo2pine.cpp b/examples/fnlo2pine/fnlo2pine.cpp index 70dd1802..12d3601a 100644 --- a/examples/fnlo2pine/fnlo2pine.cpp +++ b/examples/fnlo2pine/fnlo2pine.cpp @@ -550,8 +550,6 @@ int main(int argc, char* argv[]) std::vector grids; - std::size_t pdfs = 0; - for (auto const id : ids) { auto coeff_table = file.GetCoeffTable(id); 
@@ -561,7 +559,6 @@ int main(int argc, char* argv[]) if (converted != nullptr) { - pdfs = converted->GetNPDF(); grids.push_back(convert_coeff_add_fix(converted, file, bins, alpha)); } else @@ -570,7 +567,6 @@ int main(int argc, char* argv[]) if (converted != nullptr) { - pdfs = converted->GetNPDF(); auto const mur_ff = file.GetMuRFunctionalForm(); auto const muf_ff = file.GetMuFFunctionalForm(); @@ -612,10 +608,10 @@ int main(int argc, char* argv[]) auto const& results = file.GetCrossSection(); std::vector other_results(results.size()); - pineappl_grid_convolute( + pineappl_grid_convolute_with_one( grids.at(0), + 2212, xfx, - pdfs == 2 ? xfx : xfx2, alphas, pdf.get(), nullptr, diff --git a/examples/python/dyaa.py b/examples/python/dyaa.py index 3eb82a7b..6811add4 100755 --- a/examples/python/dyaa.py +++ b/examples/python/dyaa.py @@ -157,8 +157,9 @@ def main(calls, pdfname, filename): import lhapdf # pylint: disable=import-outside-toplevel pdf = lhapdf.mkPDF(pdfname, 0) + pdg_id = int(pdf.set().get_entry('Particle')) # perform convolution - dxsec = grid.convolute(pdf.xfxQ2, pdf.xfxQ2, pdf.alphasQ2) + dxsec = grid.convolute_with_one(pdg_id, pdf.xfxQ2, pdf.alphasQ2) for i in range(len(dxsec)): print(f"{bins[i]:.1f} {bins[i + 1]:.1f} {dxsec[i]:.3e}") From cdefdbeeef5200b20c6356a61d15e78b4784554c Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 11:39:55 +0200 Subject: [PATCH 29/39] Fix documentation of CAPI convolute functions --- pineappl_capi/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 8fe3bd9c..6e5cf309 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -264,8 +264,8 @@ pub unsafe extern "C" fn pineappl_grid_bin_limits_right( /// # Safety /// /// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, -/// this function is not safe to call. 
The function pointers `xfx1`, `xfx2`, and `alphas` must be
-/// null pointers and point to valid functions. The parameters `order_mask` and `lumi_mask` must
+/// this function is not safe to call. The function pointers `xfx1`, `xfx2`, and `alphas` must not
+/// be null pointers and must point to valid functions. The parameters `order_mask` and `lumi_mask` must
 /// either be null pointers or point to arrays that are as long as `grid` has orders and lumi
 /// entries, respectively. Finally, `results` must be as long as `grid` has bins.
 #[no_mangle]
@@ -323,8 +323,8 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_one(
 /// # Safety
 ///
 /// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer,
-/// this function is not safe to call. The function pointers `xfx1`, `xfx2`, and `alphas` must be
-/// null pointers and point to valid functions. The parameters `order_mask` and `lumi_mask` must
+/// this function is not safe to call. The function pointers `xfx1`, `xfx2`, and `alphas` must not
+/// be null pointers and must point to valid functions. The parameters `order_mask` and `lumi_mask` must
 /// either be null pointers or point to arrays that are as long as `grid` has orders and lumi
 /// entries, respectively. Finally, `results` must be as long as `grid` has bins.
 #[no_mangle]

From 8352253117e19fccae79d0115356be05e14a5e95 Mon Sep 17 00:00:00 2001
From: Christopher Schwan
Date: Tue, 26 Oct 2021 11:47:22 +0200
Subject: [PATCH 30/39] Rename `Grid::convolute2` to `Grid::convolute`

---
 pineappl/src/grid.rs | 9 +++++++--
 pineappl_capi/src/lib.rs | 4 ++--
 pineappl_cli/src/helpers.rs | 2 +-
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs
index b4aafdd9..e2d53950 100644
--- a/pineappl/src/grid.rs
+++ b/pineappl/src/grid.rs
@@ -336,12 +336,17 @@ impl Grid {
         Cow::Borrowed(self.lumi())
     }
 
-    /// TODO
+    /// Perform a convolution using the PDFs and strong coupling in `lumi_cache`, while
+    /// selecting only the orders, bins and luminosities corresponding to `order_mask`,
+    /// `bin_indices` and `lumi_mask`. A variation of the scales
+    /// is performed using the factors in `xi`; the first factor varies the renormalization scale,
+    /// the second the factorization scale. Note that for the variation to be trusted all non-zero
+    /// log-grids must be contained.
/// /// # Panics /// /// TODO - pub fn convolute2( + pub fn convolute( &self, lumi_cache: &mut LumiCache, order_mask: &[bool], diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 6e5cf309..149d115b 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -297,7 +297,7 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_one( let results = slice::from_raw_parts_mut(results, grid.bin_info().bins()); let mut lumi_cache = LumiCache::with_one(pdg_id, &mut pdf, &mut als); - results.copy_from_slice(&grid.convolute2( + results.copy_from_slice(&grid.convolute( &mut lumi_cache, &order_mask, &[], @@ -359,7 +359,7 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_two( let results = slice::from_raw_parts_mut(results, grid.bin_info().bins()); let mut lumi_cache = LumiCache::with_two(pdg_id1, &mut pdf1, pdg_id2, &mut pdf2, &mut als); - results.copy_from_slice(&grid.convolute2( + results.copy_from_slice(&grid.convolute( &mut lumi_cache, &order_mask, &[], diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index 0ca2ae2d..d5a4c51a 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -101,7 +101,7 @@ pub fn convolute( let mut alphas = |q2| lhapdf.alphas_q2(q2); let mut cache = LumiCache::with_one(pdf_pdg_id, &mut pdf, &mut alphas); - grid.convolute2(&mut cache, &orders, bins, lumis, &SCALES_VECTOR[0..scales]) + grid.convolute(&mut cache, &orders, bins, lumis, &SCALES_VECTOR[0..scales]) } pub fn convolute_subgrid( From f06d6292d653c71e9eb9c7959a7c1b2901f11fd7 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 11:47:43 +0200 Subject: [PATCH 31/39] Remove `order_mask` parameter for `FkTable` --- pineappl/src/fk_table.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 1bde2576..26b9c1f4 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -148,17 +148,11 @@ impl FkTable { 
pub fn convolute( &self, lumi_cache: &mut LumiCache, - order_mask: &[bool], bin_indices: &[usize], lumi_mask: &[bool], ) -> Vec { - self.grid.convolute2( - lumi_cache, - order_mask, - bin_indices, - lumi_mask, - &[(1.0, 1.0)], - ) + self.grid + .convolute(lumi_cache, &[], bin_indices, lumi_mask, &[(1.0, 1.0)]) } } From f64cec55dfb45a27a81e3f446a746acd84293119 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 13:12:55 +0200 Subject: [PATCH 32/39] Fix broken test --- pineappl/tests/drell_yan_lo.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pineappl/tests/drell_yan_lo.rs b/pineappl/tests/drell_yan_lo.rs index b8e567d4..3af72044 100644 --- a/pineappl/tests/drell_yan_lo.rs +++ b/pineappl/tests/drell_yan_lo.rs @@ -192,7 +192,7 @@ fn dy_aa_lagrange_subgrid_static() -> anyhow::Result<()> { grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.29438499470369e-1, 5.407794857747981e-1, @@ -238,7 +238,7 @@ fn dy_aa_lagrange_subgrid_static() -> anyhow::Result<()> { .collect::>(), )?)?; - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); // results are slightly different because of the static scale detection - the interpolation // error in the Q^2 dimension is removed @@ -312,7 +312,7 @@ fn dy_aa_lagrange_subgrid_dynamic() -> anyhow::Result<()> { grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.093090431949207e-1, 5.191668797562395e-1, 
@@ -348,7 +348,7 @@ fn dy_aa_lagrange_subgrid_dynamic() -> anyhow::Result<()> { grid.optimize(); // check that the results are still the same - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { assert!(approx_eq!(f64, *result, *reference, ulps = 16)); @@ -393,7 +393,7 @@ fn dy_aa_lagrange_subgrid_v2_dynamic() -> anyhow::Result<()> { grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.093090431949207e-1, 5.191668797562395e-1, @@ -429,7 +429,7 @@ fn dy_aa_lagrange_subgrid_v2_dynamic() -> anyhow::Result<()> { //grid.optimize(); // check that the results are still the same - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, reference) in bins.iter().zip(reference.iter()) { assert!(approx_eq!(f64, *result, *reference, ulps = 16)); @@ -474,7 +474,7 @@ fn dy_aa_lagrange_sparse_subgrid_dynamic() -> anyhow::Result<()> { grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); let reference = vec![ 5.093090431949207e-1, 5.191668797562395e-1, @@ -510,7 +510,7 @@ fn dy_aa_lagrange_sparse_subgrid_dynamic() -> anyhow::Result<()> { grid.optimize(); // check that the results are still the same - let bins = grid.convolute2(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + let bins = grid.convolute(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); for (result, 
reference) in bins.iter().zip(reference.iter()) { assert!(approx_eq!(f64, *result, *reference, ulps = 16)); From dfdcdcc15ee5bfe50b774fa8e364b873d921ae1a Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 14:16:09 +0200 Subject: [PATCH 33/39] Migrate from lz-fear to lz4_flex This commit also removes the use of `anyhow`, for which instead `thiserror` is used. Furthermore, it adds the method `Grid::write_lz4`, which writes a compressed grid. This method is used in the CLI whenever a file is written that has an extension 'lz4'. --- pineappl/Cargo.toml | 3 +- pineappl/src/fk_table.rs | 13 +++++++-- pineappl/src/grid.rs | 58 ++++++++++++++++++++++++++++--------- pineappl_cli/src/helpers.rs | 27 +++++++++-------- 4 files changed, 72 insertions(+), 29 deletions(-) diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index 93130fef..78a502f9 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -11,14 +11,13 @@ categories = ["science"] description = "PineAPPL is not an extension of APPLgrid" [dependencies] -anyhow = "1.0" arrayvec = "0.5" bincode = "1.0" enum_dispatch = "0.3.5" float-cmp = "0.8" git-version = "0.3" itertools = "0.10" -lz-fear = "0.1" +lz4_flex = "0.8.2" ndarray = { features = ["serde"], version = "0.15.3" } rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0" } diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 26b9c1f4..6291d980 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -1,6 +1,6 @@ //! Provides the [`FkTable`] type. -use super::grid::{Grid, Order}; +use super::grid::{Grid, GridError, Order}; use super::lumi::LumiCache; use super::subgrid::Subgrid; use ndarray::Array4; @@ -140,10 +140,19 @@ impl FkTable { /// # Errors /// /// TODO - pub fn write(&self, writer: impl Write) -> anyhow::Result<()> { + pub fn write(&self, writer: impl Write) -> Result<(), GridError> { self.grid.write(writer) } + /// Propagate `write_lz4` to `Grid`. 
+ /// + /// # Errors + /// + /// See [`Grid::write_lz4`]. + pub fn write_lz4(&self, writer: impl Write) -> Result<(), GridError> { + self.grid.write_lz4(writer) + } + /// Propagate convolute to grid pub fn convolute( &self, diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index e2d53950..0d97ea48 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -15,7 +15,7 @@ use float_cmp::approx_eq; use git_version::git_version; use indicatif::{ProgressBar, ProgressStyle}; use itertools::Itertools; -use lz_fear::{framed::DecompressionError::WrongMagic, LZ4FrameReader}; +use lz4_flex::frame::{self, FrameDecoder, FrameEncoder}; use ndarray::{s, Array3, Array5, Dimension}; use rustc_hash::FxHashMap; use serde::{Deserialize, Serialize}; @@ -23,7 +23,7 @@ use std::borrow::Cow; use std::cmp::Ordering; use std::collections::HashMap; use std::convert::{TryFrom, TryInto}; -use std::io::{Read, Seek, SeekFrom, Write}; +use std::io::{self, BufReader, BufWriter, Read, Seek, Write}; use std::mem; use std::ops::Range; use std::ptr; @@ -119,6 +119,9 @@ pub enum GridError { /// Returned when failed to write a Grid. #[error(transparent)] WriteFailure(bincode::Error), + /// Returned while performing IO operations. + #[error(transparent)] + IoFailure(io::Error), } #[derive(Clone, Deserialize, Serialize)] @@ -541,29 +544,58 @@ impl Grid { } } - /// Constructs a `Grid` by deserializing it from `reader`. Reading is not buffered. + /// Construct a `Grid` by deserializing it from `reader`. Reading is buffered. /// /// # Errors /// /// If reading from the compressed or uncompressed stream fails an error is returned. - pub fn read(mut reader: impl Read + Seek) -> anyhow::Result { - match LZ4FrameReader::new(&mut reader) { - Ok(reader) => Ok(bincode::deserialize_from(reader.into_read())?), - Err(WrongMagic(_)) => { - reader.seek(SeekFrom::Start(0))?; - Ok(bincode::deserialize_from(reader)?) 
+ pub fn read(mut reader: impl Read + Seek) -> Result { + let mut result = bincode::deserialize_from(FrameDecoder::new(&mut reader)); + + // check if there was a `WrongMagicNumber` error, in which case `reader` isn't LZ4 + // compressed + let uncompressed = result.as_ref().err().map_or(false, |e| { + if let bincode::ErrorKind::Io(io_error) = e.as_ref() { + let wrong_magic_number = io_error + .get_ref() + .and_then(|error| error.downcast_ref::()) + .map_or(false, |frame_error| { + matches!(frame_error, frame::Error::WrongMagicNumber) + }); + + if wrong_magic_number { + return true; + } } - Err(e) => Err(anyhow::Error::new(e)), + + false + }); + + if uncompressed { + // go back to the start and try without compression + reader.rewind().map_err(GridError::IoFailure)?; + result = bincode::deserialize_from(BufReader::new(reader)); } + + result.map_err(GridError::ReadFailure) } - /// Serializes `self` into `writer`. Writing is not buffered. + /// Serializes `self` into `writer`. Writing is buffered. /// /// # Errors /// /// If writing fails an error is returned. - pub fn write(&self, writer: impl Write) -> anyhow::Result<()> { - Ok(bincode::serialize_into(writer, self)?) + pub fn write(&self, writer: impl Write) -> Result<(), GridError> { + bincode::serialize_into(BufWriter::new(writer), self).map_err(GridError::WriteFailure) + } + + /// Serializes `self` into `writer`, using LZ4 compression. Writing is buffered. + /// + /// # Errors + /// + /// If writing or compression fails an error is returned. 
+ pub fn write_lz4(&self, writer: impl Write) -> Result<(), GridError> { + bincode::serialize_into(FrameEncoder::new(writer), self).map_err(GridError::WriteFailure) } /// Fills the grid with events for the parton momentum fractions `x1` and `x2`, the scale `q2`, diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index d5a4c51a..4b62bd35 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -6,25 +6,28 @@ use pineappl::lumi::LumiCache; use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::Table; use std::fs::{File, OpenOptions}; -use std::io::{BufReader, BufWriter}; +use std::path::Path; pub const ONE_SIGMA: f64 = 68.268_949_213_708_58; pub fn read_grid(input: &str) -> Result { - Grid::read(BufReader::new( - File::open(input).context(format!("unable to open '{}'", input))?, - )) - .context(format!("unable to read '{}'", input)) + Grid::read(File::open(input).context(format!("unable to open '{}'", input))?) + .context(format!("unable to read '{}'", input)) } pub fn write_grid(output: &str, grid: &Grid) -> Result<()> { - grid.write(BufWriter::new( - OpenOptions::new() - .write(true) - .create_new(true) - .open(output) - .context(format!("unable to write '{}'", output))?, - )) + let path = Path::new(output); + let file = OpenOptions::new() + .write(true) + .create_new(true) + .open(output) + .context(format!("unable to write '{}'", output))?; + + if path.extension().map_or(false, |ext| ext == "lz4") { + Ok(grid.write_lz4(file)?) + } else { + Ok(grid.write(file)?) 
+ } } pub fn create_table() -> Table { From 2ad85716e7827a3abcfc3deb8a1df8009ac4b5ca Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 14:23:54 +0200 Subject: [PATCH 34/39] Fix a few clippy warnings --- pineappl/src/grid.rs | 18 ++++++++++-------- pineappl/src/lumi.rs | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 0d97ea48..c22f20d1 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1180,23 +1180,23 @@ impl Grid { }; let pids1 = if has_pdf1 { - eko_info.grid_axes.pids.to_vec() + eko_info.grid_axes.pids.clone() } else { vec![initial_state_1] }; let pids2 = if has_pdf2 { - eko_info.grid_axes.pids.to_vec() + eko_info.grid_axes.pids.clone() } else { vec![initial_state_2] }; // create target luminosities let tgt_pids1 = if has_pdf1 { - eko_info.target_pids.to_vec() + eko_info.target_pids.clone() } else { vec![initial_state_1] }; let tgt_pids2 = if has_pdf2 { - eko_info.target_pids.to_vec() + eko_info.target_pids.clone() } else { vec![initial_state_2] }; @@ -1318,10 +1318,12 @@ impl Grid { .muf2_grid .iter() .position(|&q2| q2 == eko_info.xir * eko_info.xir * scale) - .expect(&format!( - "Couldn't find q2: {:?} with xir: {:?} and muf2_grid: {:?}", - scale, eko_info.xir, eko_info.grid_axes.muf2_grid - )); + .unwrap_or_else(|| { + panic!( + "Couldn't find q2: {:?} with xir: {:?} and muf2_grid: {:?}", + scale, eko_info.xir, eko_info.grid_axes.muf2_grid + ) + }); let ix1 = if invert_x && has_pdf1 { eko_info.grid_axes.x_grid.len() - ix1 - 1 diff --git a/pineappl/src/lumi.rs b/pineappl/src/lumi.rs index 5507cb61..57a8a503 100644 --- a/pineappl/src/lumi.rs +++ b/pineappl/src/lumi.rs @@ -105,7 +105,7 @@ impl LumiEntry { } } -/// Helper macro to quickly generate a LumiEntry at compile time. +/// Helper macro to quickly generate a `LumiEntry` at compile time. 
/// /// # Examples /// From 9c7215e6abfc4cc8e618ba11ee976095c858169d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 14:24:11 +0200 Subject: [PATCH 35/39] Add TODO item --- pineappl/src/grid.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index c22f20d1..4e568ceb 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -431,6 +431,8 @@ impl Grid { bins } + // TODO: migrate to use a LumiCache + /// Convolutes a single subgrid `(order, bin, lumi)` with the PDFs strong coupling given by /// `xfx1`, `xfx2` and `alphas`. The convolution result is fully differentially, such that the /// axes of the result correspond to the values given by the subgrid `q2`, `x1` and `x2` grid From 944516632fa9a09cb4a3f7e7e3a72d717122981f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Tue, 26 Oct 2021 14:46:08 +0200 Subject: [PATCH 36/39] Add missing dev-dependency --- pineappl/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index 78a502f9..831a3b6c 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -25,6 +25,7 @@ thiserror = "1.0" indicatif = "0.15.0" [dev-dependencies] +anyhow = "1.0" lhapdf = "0.1.8" rand = { default-features = false, version = "0.7" } rand_pcg = { default-features = false, version = "0.2" } From b1bba7e9b488bef5b3b573d6d71028277c78d2be Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Tue, 26 Oct 2021 17:40:39 +0200 Subject: [PATCH 37/39] Fix leftovers of convolute migration --- pineappl_py/src/fk_table.rs | 2 +- pineappl_py/src/grid.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 613b8657..062f9510 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -109,6 +109,6 @@ impl PyFkTable { let mut xfx = |id, x, q2| f64::extract(xfx.call1((id, x, q2)).unwrap()).unwrap(); let mut alphas = |_| 1.0; let 
mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); - self.fk_table.convolute(&mut lumi_cache, &[], &[], &[]) + self.fk_table.convolute(&mut lumi_cache, &[], &[]) } } diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 77883a6a..3e10fda2 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -252,7 +252,7 @@ impl PyGrid { let mut alphas = |q2| f64::extract(alphas.call1((q2,)).unwrap()).unwrap(); let mut lumi_cache = LumiCache::with_one(pdg_id, &mut xfx, &mut alphas); self.grid - .convolute2(&mut lumi_cache, &order_mask, &bin_indices, &lumi_mask, &xi) + .convolute(&mut lumi_cache, &order_mask, &bin_indices, &lumi_mask, &xi) } /// Convolute with with an evolution operator. From a4bf1f24b9d6aff9104163699dd61965ed459580 Mon Sep 17 00:00:00 2001 From: Alessandro Candido Date: Wed, 27 Oct 2021 11:29:42 +0200 Subject: [PATCH 38/39] Expose write_lz4 within python API --- pineappl_py/src/grid.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 3e10fda2..390d15f5 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -354,6 +354,16 @@ impl PyGrid { self.grid.write(File::create(path).unwrap()).unwrap(); } + /// Write grid to compressed file. + /// + /// Parameters + /// ---------- + /// path : str + /// file path + pub fn write_lz4(&self, path: &str) { + self.grid.write_lz4(File::create(path).unwrap()).unwrap(); + } + /// Optimize grid content. 
/// /// **Usage:** `yadism` From 714b55922dc9be1a0ec298111b5aa5833624b934 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 27 Oct 2021 11:36:26 +0200 Subject: [PATCH 39/39] Add `order_mask` param to `Grid::convolute_eko` --- pineappl/src/grid.rs | 13 +++++++++++-- pineappl_py/pineappl/grid.py | 6 +++++- pineappl_py/src/grid.rs | 7 ++++++- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 4e568ceb..11e83d54 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -1147,7 +1147,12 @@ impl Grid { /// /// Panics if the parameters do not match with the given grid. #[must_use] - pub fn convolute_eko(&self, operator: Array5, eko_info: EkoInfo) -> Option { + pub fn convolute_eko( + &self, + operator: Array5, + eko_info: EkoInfo, + order_mask: &[bool], + ) -> Option { // Check operator layout let dim = operator.shape(); @@ -1278,6 +1283,10 @@ impl Grid { // iterate over the source grid orders and add all of them together into // `src_array`, using the right powers of alphas for (order, powers) in self.orders.iter().enumerate() { + if !order_mask.is_empty() && !order_mask[order] { + continue; + } + let logs = if (eko_info.xir, eko_info.xif) == (1.0, 1.0) { if (powers.logxir > 0) || (powers.logxif > 0) { continue; @@ -1855,7 +1864,7 @@ mod tests { .collect(), ) .unwrap(); - let fk = grid.convolute_eko(operator, eko_info).unwrap(); + let fk = grid.convolute_eko(operator, eko_info, &[]).unwrap(); assert_eq!(fk.bins(), 1); } diff --git a/pineappl_py/pineappl/grid.py b/pineappl_py/pineappl/grid.py index a636286f..7b0c499a 100644 --- a/pineappl_py/pineappl/grid.py +++ b/pineappl_py/pineappl/grid.py @@ -216,7 +216,7 @@ def convolute_with_one( pdg_id, xfx, alphas, order_mask, bin_indices, lumi_mask, xi ) - def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids"): + def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids", order_mask=()): """ Create an FKTable with the EKO. 
@@ -229,6 +229,9 @@ def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids"): lumi_id_types : str kind of lumi types (e.g. "pdg_mc_ids" for flavor basis, "evol" for evolution basis) + order_mask : list(bool) + Mask for selecting specific orders. The value `True` means the corresponding order + is included. An empty list corresponds to all orders being enabled. Returns ------ @@ -252,6 +255,7 @@ def convolute_eko(self, operators, lumi_id_types="pdg_mc_ids"): operator_grid.flatten(), operator_grid.shape, lumi_id_types, + order_mask, ) ) diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 390d15f5..5527b747 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -298,6 +298,7 @@ impl PyGrid { operator_flattened: Vec, operator_shape: Vec, lumi_id_types: String, + order_mask: Vec, ) -> PyFkTable { let operator = Array::from_shape_vec(operator_shape, operator_flattened).unwrap(); let eko_info = EkoInfo { @@ -317,7 +318,11 @@ impl PyGrid { let evolved_grid = self .grid - .convolute_eko(operator.into_dimensionality::().unwrap(), eko_info) + .convolute_eko( + operator.into_dimensionality::().unwrap(), + eko_info, + &order_mask, + ) .unwrap(); PyFkTable { fk_table: evolved_grid,