From 74294f1dae5899af6a026816c04532a7015ad809 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Tue, 31 Mar 2026 09:53:53 +0200 Subject: [PATCH 01/10] Start by addressing `boc` and `convolution` --- pineappl/src/boc.rs | 210 +++++++++++++++++++++++------------ pineappl/src/convolutions.rs | 38 +++++-- 2 files changed, 168 insertions(+), 80 deletions(-) diff --git a/pineappl/src/boc.rs b/pineappl/src/boc.rs index 39a53c04..17f10262 100644 --- a/pineappl/src/boc.rs +++ b/pineappl/src/boc.rs @@ -7,18 +7,20 @@ use super::convert; use super::error::{Error, Result}; use float_cmp::approx_eq; -use itertools::{izip, Itertools}; +use itertools::{Itertools, izip}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::cmp::Ordering; use std::ops::Range; use std::str::FromStr; -/// Defines kinematic variables stored in each subgrid of a [`Grid`]. A grid with two convolutions -/// will need exactly two `X`-type kinematic variables, specifically `X(0)` and `X(1)` for the -/// first and second convolutions, respectively. Furthermore, at least one `Scale`-type kinematics -/// is needed to denote factorization, renormalization and/or fragmentation scales. More scales -/// can be used to make the three scales have functionally different forms. +/// Defines the kinematic variables stored in each subgrid of a [`crate::grid::Grid`]. +/// +/// A grid with two convolutions for instance will need exactly two `X`-type kinematic +/// variables, specifically `X(0)` and `X(1)` for the first and second convolutions, +/// respectively. Furthermore, at least one `Scale`-type kinematics is needed to denote +/// factorization, renormalization and/or fragmentation scales. More scales can be used +/// to make the three scales have functionally different forms. 
#[repr(C)] #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum Kinematics { @@ -28,9 +30,10 @@ pub enum Kinematics { X(usize), } -/// Defines how the factorization, renormalization and fragmentation scale are calculated from the -/// available kinematic scales. A `ScaleFuncForm::Scale(0)` means that the corresponding scale will -/// be calculated from the kinematic variable given as `Kinematics::Scale(0)`. +/// Defines how the factorization, renormalization and fragmentation scale are calculated. +/// +/// A `ScaleFuncForm::Scale(0)` means that the corresponding scale will be calculated from +/// the kinematic variable given as `Kinematics::Scale(0)`. #[repr(C)] #[derive(Clone, Deserialize, Eq, PartialEq, Serialize)] pub enum ScaleFuncForm { @@ -38,36 +41,43 @@ pub enum ScaleFuncForm { NoScale, /// Calculates the corresponding scale as the numerical value given `Kinematics::Scale(0)`. Scale(usize), - /// TODO + /// Combine two scale nodes by taking the quadratic sum: `s = s_1 + s_2`. QuadraticSum(usize, usize), - /// TODO + /// Combine two scale nodes by quadratic mean: `s = frac{1}{2}(s_1 + s_2)`. QuadraticMean(usize, usize), - /// TODO + /// Combine two scale nodes by a scaled quadratic sum: `s = frac{1}{4}(s_1 + s_2)`. QuadraticSumOver4(usize, usize), - /// TODO + /// Combine two scale nodes by linear mean: `s = frac{1}{4}(sqrt{s_1}+sqrt{s_2})^2`. LinearMean(usize, usize), - /// TODO + /// Combine two scale nodes by linear sum: `s = (sqrt{s_1}+sqrt{s_2})^2`. LinearSum(usize, usize), - /// TODO + /// Combine two scale nodes by maximum: `s = max(s_1, s_2)`. ScaleMax(usize, usize), - /// TODO + /// Combine two scale nodes by minimum: `s = min(s_1, s_2)`. ScaleMin(usize, usize), - /// TODO + /// Combine two scale nodes by product: `s = s_1 s_2`. Prod(usize, usize), - /// TODO + /// Combine two scale nodes as `s = s_2 + frac{1}{2} s_1`. 
S2plusS1half(usize, usize), - /// TODO + /// Combine two scale nodes by fourth-power sum: `s = sqrt{s_1^2 + s_2^2}`. Pow4Sum(usize, usize), - /// TODO + /// Weighted average: `s = frac{s_1^2 + s_2^2}{s_1 + s_2}`. WgtAvg(usize, usize), - /// TODO + /// Combine two scale nodes as `s = s_2 + frac{1}{4} s_1`. S2plusS1fourth(usize, usize), - /// TODO + /// Exponential-product variant: `s = (sqrt{s_1} e^{0.3 sqrt{s_2}})^2`. ExpProd2(usize, usize), } impl ScaleFuncForm { - /// TODO + /// Calculate the scale nodes implied by this scale form. + /// + /// The returned slice contains the scale nodes constructed from the subgrid's + /// `node_values` and the grid's `kinematics` definition. For two-argument forms + /// the output corresponds to the Cartesian product of the two input scale-node + /// vectors. + /// + /// For [`ScaleFuncForm::NoScale`] an empty slice is returned. #[must_use] pub fn calc<'a>( &self, @@ -145,7 +155,13 @@ impl ScaleFuncForm { } } - /// TODO + /// Map a tuple of scale indices to a flattened index for this scale form. + /// + /// - `indices` are the per-dimension indices into the scale-node vectors. + /// - `scale_dims` are the lengths of the scale-node vectors; for two-argument forms + /// the second element is used as the stride. + /// + /// This method is used internally to index precomputed lists for each scale choice. #[must_use] pub fn idx(&self, indices: &[usize], scale_dims: &[usize]) -> usize { match self.clone() { @@ -187,7 +203,10 @@ impl<'a> From<&'a Scales> for [&'a ScaleFuncForm; 3] { } impl Scales { - /// TODO + /// Return `true` if these scale definitions are compatible with `kinematics`. + /// + /// A scale definition is compatible if every [`ScaleFuncForm`] refers only to scale-type + /// kinematic variables that are present in `kinematics`. 
pub fn compatible_with(&self, kinematics: &[Kinematics]) -> bool { for scale in [&self.ren, &self.fac, &self.frg].map(Clone::clone) { match scale { @@ -216,7 +235,11 @@ impl Scales { } } -/// TODO +/// Bin limits and normalization. +/// +/// A bin may be multi-dimensional; `limits` stores the per-dimension (left, right) bounds, while +/// `normalization` typically stores the product of bin widths (used when converting between +/// differential and integrated representations). #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Bin { limits: Vec<(f64, f64)>, @@ -224,11 +247,11 @@ pub struct Bin { } impl Bin { - /// TODO + /// Construct a new bin. /// /// # Panics /// - /// TODO + /// Panics if any limit interval has an upper bound smaller than its lower bound. #[must_use] pub fn new(limits: Vec<(f64, f64)>, normalization: f64) -> Self { for limits in &limits { @@ -241,25 +264,25 @@ impl Bin { } } - /// TODO + /// Return the number of dimensions of this bin. #[must_use] - pub fn dimensions(&self) -> usize { + pub const fn dimensions(&self) -> usize { self.limits.len() } - /// TODO + /// Return the bin normalization. #[must_use] pub const fn normalization(&self) -> f64 { self.normalization } - /// TODO + /// Return the per-dimension bin limits. #[must_use] pub fn limits(&self) -> &[(f64, f64)] { &self.limits } - /// TODO + /// Compare two bins approximately (ULP-based comparison for floating point values). #[must_use] pub fn partial_eq_with_ulps(&self, other: &Self, ulps: i64) -> bool { self.limits.iter().zip(other.limits()).all(|(&lhs, &rhs)| { @@ -268,7 +291,11 @@ impl Bin { } } -/// TODO +/// Bin collection and fill limits. +/// +/// This structure stores the bin limits/normalizations together with an auxiliary 1D list +/// of *fill limits* used by certain filling/remapping algorithms. For a grid with `n` bins, +/// the `fill_limits` vector has length `n + 1`. 
#[derive(Clone, Debug, Deserialize, Serialize)] pub struct BinsWithFillLimits { bins: Vec, @@ -276,11 +303,12 @@ pub struct BinsWithFillLimits { } impl BinsWithFillLimits { - /// TODO + /// Construct from explicit bins and fill limits. /// /// # Errors /// - /// TODO + /// Returns an error if the number of bins does not match the number of fill limits + /// minus one. pub fn new(bins: Vec, fill_limits: Vec) -> Result { // TODO: validate the bins @@ -301,11 +329,12 @@ impl BinsWithFillLimits { Ok(Self { bins, fill_limits }) } - /// TODO + /// Construct a 1D binning from fill limits. /// /// # Errors /// - /// TODO + /// Returns an error if the fill limits are inconsistent with the inferred number + /// of bins. pub fn from_fill_limits(fill_limits: Vec) -> Result { let bins = fill_limits .windows(2) @@ -315,15 +344,15 @@ impl BinsWithFillLimits { Self::new(bins, fill_limits) } - /// TODO + /// Construct from per-bin limits and normalizations. /// /// # Errors /// - /// TODO + /// Returns an error if `limits.len() != normalizations.len()`. /// /// # Panics /// - /// TODO + /// Panics if any per-bin limits are invalid (see [`Bin::new`]). pub fn from_limits_and_normalizations( limits: Vec>, normalizations: Vec, @@ -347,7 +376,9 @@ impl BinsWithFillLimits { Self::new(bins, fill_limits) } - /// TODO + /// Return slices that partition the bins into simply-connected blocks. + /// + /// For one-dimensional binning this returns a single slice spanning all bins. pub fn slices(&self) -> Vec> { if self.dimensions() == 1 { // TODO: check that bins are contiguous @@ -369,19 +400,19 @@ impl BinsWithFillLimits { } } - /// TODO + /// Return the underlying bins. #[must_use] pub fn bins(&self) -> &[Bin] { &self.bins } - /// TODO + /// Return the number of bins. #[must_use] pub fn len(&self) -> usize { self.bins.len() } - /// TODO + /// Return the number of dimensions (taken from the first bin). 
#[must_use] pub fn dimensions(&self) -> usize { self.bins @@ -391,7 +422,7 @@ impl BinsWithFillLimits { .dimensions() } - /// TODO + /// Return the bin index corresponding to `value` in fill-limit space. #[must_use] pub fn fill_index(&self, value: f64) -> Option { match self @@ -406,22 +437,22 @@ impl BinsWithFillLimits { } } - /// TODO + /// Return the fill limits. #[must_use] pub fn fill_limits(&self) -> &[f64] { &self.fill_limits } - /// TODO + /// Return the bin normalizations. pub fn normalizations(&self) -> Vec { self.bins.iter().map(Bin::normalization).collect() } - /// TODO + /// Merge a contiguous range of bins. /// /// # Errors /// - /// TODO + /// Returns an error if `range` does not form a simply-connected block under [`Self::slices`]. // TODO: change range to `RangeBounds` pub fn merge(&self, range: Range) -> Result { // TODO: allow more flexible merging @@ -458,11 +489,11 @@ impl BinsWithFillLimits { ) } - /// TODO + /// Remove a bin by index. /// /// # Panics /// - /// TODO + /// Panics if attempting to remove the last remaining bin. pub fn remove(&mut self, index: usize) -> Bin { assert!(self.len() > 1); @@ -470,7 +501,7 @@ impl BinsWithFillLimits { self.bins.remove(index) } - /// TODO + /// Compare two `BinsWithFillLimits` approximately (ULP-based comparison of bin contents). #[must_use] pub fn bins_partial_eq_with_ulps(&self, other: &Self, ulps: i64) -> bool { (self.bins.len() == other.bins.len()) @@ -894,10 +925,11 @@ impl Order { } } -/// This structure represents a channel. Each channel consists of a tuple containing in the -/// following order, the particle ID of the first incoming parton, then the particle ID of the -/// second parton, and finally a numerical factor that will multiply the result for this specific -/// combination. +/// This structure represents the channel object. 
+/// +/// Each channel consists of a tuple containing in the following order, the particle ID of the +/// first incoming parton, then the particle ID of the second parton, and finally a numerical +/// factor that will multiply the result for this specific combination. #[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize)] pub struct Channel { entry: Vec<(Vec, f64)>, @@ -1357,67 +1389,99 @@ mod tests { assert_eq!( Order::create_mask(&orders, 0, 0, false), - [false, false, false, false, false, false, false, false, false, false, false, false] + [ + false, false, false, false, false, false, false, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 0, 1, false), - [false, false, true, false, false, false, false, false, false, false, false, false] + [ + false, false, true, false, false, false, false, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 0, 2, false), - [false, false, true, false, false, false, true, false, false, false, false, false] + [ + false, false, true, false, false, false, true, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 0, 3, false), - [false, false, true, false, false, false, true, false, false, false, false, true] + [ + false, false, true, false, false, false, true, false, false, false, false, true + ] ); assert_eq!( Order::create_mask(&orders, 1, 0, false), - [true, false, false, false, false, false, false, false, false, false, false, false] + [ + true, false, false, false, false, false, false, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 1, 1, false), - [true, true, true, false, false, false, false, false, false, false, false, false] + [ + true, true, true, false, false, false, false, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 1, 2, false), - [true, true, true, false, false, false, true, false, false, false, false, false] + [ + true, true, true, false, false, 
false, true, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 1, 3, false), - [true, true, true, false, false, false, true, false, false, false, false, true] + [ + true, true, true, false, false, false, true, false, false, false, false, true + ] ); assert_eq!( Order::create_mask(&orders, 2, 0, false), - [true, false, false, true, false, false, false, false, false, false, false, false] + [ + true, false, false, true, false, false, false, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 2, 1, false), - [true, true, true, true, false, false, false, false, false, false, false, false] + [ + true, true, true, true, false, false, false, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 2, 2, false), - [true, true, true, true, true, true, true, false, false, false, false, false] + [ + true, true, true, true, true, true, true, false, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 2, 3, false), - [true, true, true, true, true, true, true, false, false, false, false, true] + [ + true, true, true, true, true, true, true, false, false, false, false, true + ] ); assert_eq!( Order::create_mask(&orders, 3, 0, false), - [true, false, false, true, false, false, false, true, false, false, false, false] + [ + true, false, false, true, false, false, false, true, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 3, 1, false), - [true, true, true, true, false, false, false, true, false, false, false, false] + [ + true, true, true, true, false, false, false, true, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 3, 2, false), - [true, true, true, true, true, true, true, true, false, false, false, false] + [ + true, true, true, true, true, true, true, true, false, false, false, false + ] ); assert_eq!( Order::create_mask(&orders, 3, 3, false), - [true, true, true, true, true, true, true, true, true, true, true, true] + [ 
+ true, true, true, true, true, true, true, true, true, true, true, true + ] ); } } diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs index 4031f0ad..c13b363b 100644 --- a/pineappl/src/convolutions.rs +++ b/pineappl/src/convolutions.rs @@ -31,7 +31,14 @@ pub struct ConvolutionCache<'a> { } impl<'a> ConvolutionCache<'a> { - /// TODO + /// Construct a new convolution cache. + /// + /// - `convolutions` describes each convolution function (PDF/FF type and hadron PID). + /// - `xfx` provides one callback per convolution, used to evaluate `x f(x, μ^2)` for + /// a given PID, `x`, and scale `μ^2`. + /// - `alphas` provides a callback to evaluate the strong coupling `αs` at a given scale `μ^2`. + /// + /// The cache is filled lazily as [`Grid`] convolution is performed. pub fn new( convolutions: Vec, xfx: Vec<&'a mut dyn FnMut(i32, f64, f64) -> f64>, @@ -168,7 +175,11 @@ impl<'a> ConvolutionCache<'a> { } } -/// TODO +/// A convolution cache configured for a specific [`Grid`]. +/// +/// This is a lightweight adaptor around [`ConvolutionCache`] that precomputes the bookkeeping +/// needed to evaluate PDF/FF factors and `αs` at the scales required by a particular grid and +/// subgrid. It is created internally by [`ConvolutionCache`] when convolving a grid. pub struct GridConvCache<'a, 'b> { cache: &'b mut ConvolutionCache<'a>, perm: Vec<(usize, bool)>, @@ -179,7 +190,20 @@ pub struct GridConvCache<'a, 'b> { } impl GridConvCache<'_, '_> { - /// TODO + /// Compute `αs` and convolution-function products for a given tuple of indices. + /// + /// The returned value is the product of all requested convolution functions evaluated at the + /// subgrid's `x` nodes and the appropriate scale(s), multiplied by `αs` raised to `as_order`. + /// + /// # Panics + /// + /// Panics if `indices` do not follow the current internal convention used by the cache. 
At the + /// moment the implementation assumes that: + /// - `indices[0..x_start]` correspond to scale indices (starting with a factorization-scale + /// dimension), and + /// - the remaining indices correspond to the \(x\) dimensions in the same order as `pdg_ids`. + /// + /// This restriction is tracked in the codebase (see the TODO in the implementation). pub fn as_fx_prod(&mut self, pdg_ids: &[i32], as_order: u8, indices: &[usize]) -> f64 { // TODO: here we assume that // - indices[0] is the (squared) factorization scale, @@ -288,7 +312,7 @@ impl GridConvCache<'_, '_> { } } -/// TODO +/// Convolution type: PDF vs FF and polarized vs unpolarized. #[repr(C)] #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum ConvType { @@ -303,7 +327,7 @@ pub enum ConvType { } impl ConvType { - /// TODO + /// Construct a [`ConvType`] from the two boolean flags. #[must_use] pub const fn new(polarized: bool, time_like: bool) -> Self { match (polarized, time_like) { @@ -314,13 +338,13 @@ impl ConvType { } } - /// TODO + /// Return `true` if this convolution type is a (polarized or unpolarized) PDF. #[must_use] pub const fn is_pdf(&self) -> bool { matches!(self, Self::UnpolPDF | Self::PolPDF) } - /// TODO + /// Return `true` if this convolution type is a (polarized or unpolarized) FF. 
#[must_use] pub const fn is_ff(&self) -> bool { matches!(self, Self::UnpolFF | Self::PolFF) From b5fa2914faa9801bfa8a7b255b2b5e982b0f0d04 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Tue, 31 Mar 2026 14:01:20 +0200 Subject: [PATCH 02/10] Run formatting across all files --- examples/cpp/advanced-filling.cpp | 2 +- pineappl/src/evolution.rs | 19 ++++--- pineappl/src/fk_table.rs | 21 ++++--- pineappl/src/grid.rs | 11 ++-- pineappl/src/interpolation.rs | 2 +- pineappl/src/v0.rs | 28 +++++---- pineappl_applgrid/src/lib.rs | 2 +- pineappl_capi/src/lib.rs | 88 ++++++++++++++++------------- pineappl_cli/src/analyze.rs | 4 +- pineappl_cli/src/channels.rs | 4 +- pineappl_cli/src/convolve.rs | 2 +- pineappl_cli/src/diff.rs | 4 +- pineappl_cli/src/evolve.rs | 8 +-- pineappl_cli/src/export.rs | 12 ++-- pineappl_cli/src/export/applgrid.rs | 6 +- pineappl_cli/src/helpers.rs | 4 +- pineappl_cli/src/import.rs | 8 +-- pineappl_cli/src/import/fastnlo.rs | 4 +- pineappl_cli/src/import/fktable.rs | 2 +- pineappl_cli/src/orders.rs | 2 +- pineappl_cli/src/plot.rs | 2 +- pineappl_cli/src/pull.rs | 4 +- pineappl_cli/src/read.rs | 2 +- pineappl_cli/src/uncert.rs | 4 +- pineappl_cli/src/write.rs | 4 +- pineappl_cli/tests/write.rs | 2 +- pineappl_py/tests/conftest.py | 2 +- 27 files changed, 137 insertions(+), 116 deletions(-) diff --git a/examples/cpp/advanced-filling.cpp b/examples/cpp/advanced-filling.cpp index 4e330355..9b9e2a60 100644 --- a/examples/cpp/advanced-filling.cpp +++ b/examples/cpp/advanced-filling.cpp @@ -174,7 +174,7 @@ int main() { pineappl_grid_write(grid, "advanced-filling.pineappl.lz4"); //-----------------------------------------------------------------------// - + // Remove the bins for which the convolution is zero. 
std::vector zero_indices; zero_indices.reserve(dxsec.size()); diff --git a/pineappl/src/evolution.rs b/pineappl/src/evolution.rs index f7d03dbe..1d5f579a 100644 --- a/pineappl/src/evolution.rs +++ b/pineappl/src/evolution.rs @@ -8,12 +8,12 @@ use super::packed_array::PackedArray; use super::pids::PidBasis; use super::subgrid::{self, ImportSubgridV1, Subgrid, SubgridEnum}; use float_cmp::approx_eq; -use itertools::izip; use itertools::Itertools; +use itertools::izip; use ndarray::linalg; use ndarray::{ - s, Array1, Array2, Array3, ArrayD, ArrayView1, ArrayView4, ArrayViewD, ArrayViewMutD, Axis, - Ix1, Ix2, + Array1, Array2, Array3, ArrayD, ArrayView1, ArrayView4, ArrayViewD, ArrayViewMutD, Axis, Ix1, + Ix2, s, }; use rayon::iter::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; use std::iter; @@ -33,8 +33,9 @@ pub struct EvolveInfo { pub ren1: Vec, } -/// Information about the evolution kernel operator slice (EKO) passed to [`Grid::evolve`] as -/// `operator`, which is used to convert a [`Grid`] into an [`FkTable`](super::fk_table::FkTable). +/// Information about the evolution kernel operator slice (EKO) passed to [`Grid::evolve`]. +/// +/// This is used to convert a [`Grid`] into an [`FkTable`](super::fk_table::FkTable). /// The dimensions of the EKO must correspond to the values given in [`fac1`](Self::fac1), /// [`pids0`](Self::pids0), [`x0`](Self::x0), [`pids1`](Self::pids1) and [`x1`](Self::x1), exactly /// in this order. 
Members with a `1` are defined at the squared factorization scale given as @@ -370,9 +371,11 @@ pub(crate) fn evolve_slice( // TODO: implement matching of different scales for different EKOs let mut fac1_scales: Vec<_> = infos.iter().map(|info| info.fac1).collect(); fac1_scales.sort_by(f64::total_cmp); - assert!(fac1_scales - .windows(2) - .all(|scales| subgrid::node_value_eq(scales[0], scales[1]))); + assert!( + fac1_scales + .windows(2) + .all(|scales| subgrid::node_value_eq(scales[0], scales[1])) + ); let fac1 = fac1_scales[0]; assert_eq!(operators.len(), infos.len()); diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index 5aab8ffc..aa42e0bc 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -6,7 +6,7 @@ use super::error::{Error, Result}; use super::grid::{Grid, GridOptFlags}; use super::pids::{OptRules, PidBasis}; use super::subgrid::{self, EmptySubgridV1, Subgrid}; -use ndarray::{s, ArrayD}; +use ndarray::{ArrayD, s}; use std::collections::BTreeMap; use std::fmt::{self, Display, Formatter}; use std::iter; @@ -27,10 +27,11 @@ pub struct FkTable { grid: Grid, } -/// The optimization assumptions for an [`FkTable`], needed for [`FkTable::optimize`]. Since FK -/// tables are typically stored at very small `Q2 = Q0`, the PDFs `f(x,Q0)` of heavy quarks are -/// typically set to zero at this scale or set to the same value as their anti-quark PDF. This is -/// used to optimize the size of FK tables. +/// The optimization assumptions for an [`FkTable`], needed for [`FkTable::optimize`]. +/// +/// Since FK tables are typically stored at very small `Q2 = Q0`, the PDFs `f(x,Q0)` of heavy +/// quarks are typically set to zero at this scale or set to the same value as their anti-quark +/// PDF. This is used to optimize the size of FK tables. #[repr(C)] #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum FkAssumptions { @@ -192,6 +193,10 @@ impl FkTable { } /// Return the squared factorization scale. 
+ /// + /// # Panics + /// + /// Every `FkTable` has either a single factorization scale or none, otherwise panic. #[must_use] pub fn fac0(&self) -> Option { let fac1 = self.grid.evolve_info(&[true]).fac1; @@ -199,7 +204,6 @@ impl FkTable { if let [fac0] = fac1[..] { Some(fac0) } else { - // every `FkTable` has either a single factorization scale or none assert!(fac1.is_empty()); None @@ -207,6 +211,10 @@ impl FkTable { } /// Return the squared fragmentation scale. + /// + /// # Panics + /// + /// Every `FkTable` has either a single fragmentation scale or none, otherwise panic. #[must_use] pub fn frg0(&self) -> Option { let frg1 = self.grid.evolve_info(&[true]).frg1; @@ -214,7 +222,6 @@ impl FkTable { if let [frg0] = frg1[..] { Some(frg0) } else { - // every `FkTable` has either a single fragmentation scale or none assert!(frg1.is_empty()); None diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index efd5bf30..21080e3c 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -18,7 +18,7 @@ use float_cmp::approx_eq; use git_version::git_version; use itertools::Itertools; use lz4_flex::frame::{FrameDecoder, FrameEncoder}; -use ndarray::{s, Array2, Array3, ArrayView3, ArrayViewMut3, Axis, CowArray, Dimension, Ix4, Zip}; +use ndarray::{Array2, Array3, ArrayView3, ArrayViewMut3, Axis, CowArray, Dimension, Ix4, Zip, s}; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::io::{BufRead, BufReader, BufWriter, Read, Write}; @@ -108,7 +108,10 @@ impl Grid { .find_map(|(pids, _)| (pids.len() != convolutions.len()).then_some(pids.len())); if let Some(pids_len) = offending_entry { - panic!("channel #{channel_idx} has wrong number of PIDs: expected {}, found {pids_len}", convolutions.len()); + panic!( + "channel #{channel_idx} has wrong number of PIDs: expected {}, found {pids_len}", + convolutions.len() + ); } } @@ -973,7 +976,7 @@ impl Grid { } /// Upgrades the internal data structures to their latest versions. 
- pub fn upgrade(&mut self) {} + pub const fn upgrade(&mut self) {} /// Return the metadata of this grid. #[must_use] @@ -987,7 +990,7 @@ impl Grid { /// /// TODO #[must_use] - pub fn metadata_mut(&mut self) -> &mut BTreeMap { + pub const fn metadata_mut(&mut self) -> &mut BTreeMap { &mut self.metadata } diff --git a/pineappl/src/interpolation.rs b/pineappl/src/interpolation.rs index aed1c761..093aa13b 100644 --- a/pineappl/src/interpolation.rs +++ b/pineappl/src/interpolation.rs @@ -361,8 +361,8 @@ pub fn interpolate( #[cfg(test)] mod tests { use super::*; - use float_cmp::assert_approx_eq; use float_cmp::Ulps; + use float_cmp::assert_approx_eq; #[test] fn interpolate_two_points() { diff --git a/pineappl/src/v0.rs b/pineappl/src/v0.rs index 5271cbd7..51b8675f 100644 --- a/pineappl/src/v0.rs +++ b/pineappl/src/v0.rs @@ -174,16 +174,18 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { fac.dedup_by(subgrid::node_value_eq_ref_mut); vec![ren, fac] } else { - vec![old_subgrid - .mu2_grid() - .iter() - .map(|mu2v0| { - // TODO: implement importing flexible-scale grids - assert!(subgrid::node_value_eq(mu2v0.ren, mu2v0.fac)); + vec![ + old_subgrid + .mu2_grid() + .iter() + .map(|mu2v0| { + // TODO: implement importing flexible-scale grids + assert!(subgrid::node_value_eq(mu2v0.ren, mu2v0.fac)); - mu2v0.fac - }) - .collect()] + mu2v0.fac + }) + .collect(), + ] }; let mut dim = if flexible_scale_grid { @@ -280,7 +282,9 @@ fn read_convolutions_from_metadata(grid: &GridV0) -> Vec> { .map(String::as_str), ) { (_, Some("None")) => None, - (Some(Ok(pid)), Some("UnpolPDF")) => Some(Conv::new(ConvType::UnpolPDF, pid)), + (Some(Ok(pid)), Some("UnpolPDF")) => { + Some(Conv::new(ConvType::UnpolPDF, pid)) + } (Some(Ok(pid)), Some("PolPDF")) => Some(Conv::new(ConvType::PolPDF, pid)), (Some(Ok(pid)), Some("UnpolFF")) => Some(Conv::new(ConvType::UnpolFF, pid)), (Some(Ok(pid)), Some("PolFF")) => Some(Conv::new(ConvType::PolFF, pid)), @@ -324,7 +328,9 @@ fn 
read_convolutions_from_metadata(grid: &GridV0) -> Vec> { panic!("metadata 'convolution_type_{index} = {type_}' is unknown") } (Some(Err(err)), Some(_)) => { - panic!("metadata 'convolution_particle_{index}' could not be parsed: {err}") + panic!( + "metadata 'convolution_particle_{index}' could not be parsed: {err}" + ) } } }) diff --git a/pineappl_applgrid/src/lib.rs b/pineappl_applgrid/src/lib.rs index a4dd9966..97ed0a90 100644 --- a/pineappl_applgrid/src/lib.rs +++ b/pineappl_applgrid/src/lib.rs @@ -121,7 +121,7 @@ pub mod ffi { _: bool, ) -> UniquePtr; fn make_empty_grid(_: &[f64], _: &str, _: i32, _: i32, _: &str, _: &str) - -> UniquePtr; + -> UniquePtr; fn make_lumi_pdf(_: &str, _: &[i32]) -> UniquePtr; fn grid_combine(_: &grid) -> Vec; diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index d195d294..5f5568db 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -55,7 +55,7 @@ //! //! [translation tables]: https://github.com/eqrion/cbindgen/blob/master/docs.md#std-types -use itertools::{izip, Itertools}; +use itertools::{Itertools, izip}; use ndarray::{Array4, CowArray}; use pineappl::boc::{Bin, BinsWithFillLimits, Channel, Kinematics, Order, ScaleFuncForm, Scales}; use pineappl::convolutions::{Conv, ConvType, ConvolutionCache}; @@ -445,18 +445,20 @@ pub unsafe extern "C" fn pineappl_grid_convolute_with_two( } } -/// Convolutes the specified grid with the PDF `xfx`, which is the PDF for a hadron with the PDG id -/// `pdg_id`, and strong coupling `alphas`. These functions must evaluate the PDFs for the given -/// `x` and `q2` for the parton with the given PDG id, `pdg_id`, and return the result. Note that -/// the value must be the PDF multiplied with its argument `x`. The value of the pointer `state` -/// provided to these functions is the same one given to this function. 
The parameter `order_mask` -/// must be as long as there are perturbative orders contained in `grid` and is used to selectively -/// disable (`false`) or enable (`true`) individual orders. If `order_mask` is set to `NULL`, all -/// orders are active. The parameter `channel_mask` can be used similarly, but must be as long as -/// the channels `grid` was created with has entries, or `NULL` to enable all channels. The values -/// `xi_ren` and `xi_fac` can be used to vary the renormalization and factorization from its -/// central value, which corresponds to `1.0`. After convolution of the grid with the PDFs the -/// differential cross section for each bin is written into `results`. +/// Convolutes the specified grid with the PDF `xfx`. +/// +/// `xfx` is the PDF for a hadron with the PDG id `pdg_id`, and strong coupling `alphas`. These +/// functions must evaluate the PDFs for the given `x` and `q2` for the parton with the given PDG +/// id, `pdg_id`, and return the result. Note that the value must be the PDF multiplied with its +/// argument `x`. The value of the pointer `state` provided to these functions is the same one given +/// to this function. The parameter `order_mask` must be as long as there are perturbative orders +/// contained in `grid` and is used to selectively disable (`false`) or enable (`true`) individual +/// orders. If `order_mask` is set to `NULL`, all orders are active. The parameter `channel_mask` +/// can be used similarly, but must be as long as the channels `grid` was created with has entries, +/// or `NULL` to enable all channels. The values `xi_ren` and `xi_fac` can be used to vary the +/// renormalization and factorization from its central value, which corresponds to `1.0`. After +/// convolution of the grid with the PDFs the differential cross section for each bin is written +/// into `results`. 
/// /// # Safety /// @@ -508,19 +510,20 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_one( )); } -/// Convolutes the specified grid with the PDFs `xfx1` and `xfx2`, which are the PDFs of hadrons -/// with PDG ids `pdg_id1` and `pdg_id2`, respectively, and strong coupling `alphas`. These -/// functions must evaluate the PDFs for the given `x` and `q2` for the parton with the given PDG -/// id, `pdg_id`, and return the result. Note that the value must be the PDF multiplied with its -/// argument `x`. The value of the pointer `state` provided to these functions is the same one -/// given to this function. The parameter `order_mask` must be as long as there are perturbative -/// orders contained in `grid` and is used to selectively disable (`false`) or enable (`true`) -/// individual orders. If `order_mask` is set to `NULL`, all orders are active. The parameter -/// `channel_mask` can be used similarly, but must be as long as the channels `grid` was created -/// with has entries, or `NULL` to enable all channels. The values `xi_ren` and `xi_fac` can be -/// used to vary the renormalization and factorization from its central value, which corresponds to -/// `1.0`. After convolution of the grid with the PDFs the differential cross section for each bin -/// is written into `results`. +/// Convolutes the specified grid with the PDFs `xfx1` and `xfx2`. +/// +/// `xfx1` and `xfx2` are the PDFs of hadrons with PDG ids `pdg_id1` and `pdg_id2`, respectively, +/// and strong coupling `alphas`. These functions must evaluate the PDFs for the given `x` and `q2` +/// for the parton with the given PDG id, `pdg_id`, and return the result. Note that the value must +/// be the PDF multiplied with its argument `x`. The value of the pointer `state` provided to these +/// functions is the same one given to this function. 
The parameter `order_mask` must be as long as +/// there are perturbative orders contained in `grid` and is used to selectively disable (`false`) +/// or enable (`true`) individual orders. If `order_mask` is set to `NULL`, all orders are active. +/// The parameter `channel_mask` can be used similarly, but must be as long as the channels `grid` +/// was created with has entries, or `NULL` to enable all channels. The values `xi_ren` and `xi_fac` +/// can be used to vary the renormalization and factorization from its central value, which +/// corresponds to `1.0`. After convolution of the grid with the PDFs the differential cross section +/// for each bin is written into `results`. /// /// # Safety /// @@ -578,9 +581,10 @@ pub unsafe extern "C" fn pineappl_grid_convolve_with_two( )); } -/// Try to deduplicate channels of `grid` by detecting pairs of them that contain the same -/// subgrids. The numerical equality is tested using a tolerance of `ulps`, given in [units of -/// least precision](https://docs.rs/float-cmp/latest/float_cmp/index.html#some-explanation). +/// Try to deduplicate channels of `grid` by detecting pairs of them that contain the same subgrids. +/// +/// The numerical equality is tested using a tolerance of `ulps`, given in +/// [units of least precision](https://docs.rs/float-cmp/latest/float_cmp/index.html#some-explanation). /// /// # Safety /// @@ -622,9 +626,10 @@ pub unsafe extern "C" fn pineappl_grid_fill( grid.fill(order, observable, lumi, &[q2, x1, x2], weight); } -/// Fill `grid` for the given momentum fractions `x1` and `x2`, at the scale `q2` for the given -/// value of the `order` and `observable` with `weights`. The parameter of weight must contain a -/// result for entry of the luminosity function the grid was created with. +/// Fill `grid` for the given momentum fractions `x1` and `x2`, at the scale `q2`. +/// +/// This is done for a given value of the `order` and `observable` with `weights`. 
The parameter +/// of weight must contain a result for entry of the luminosity function the grid was created with. /// /// # Safety /// @@ -984,9 +989,10 @@ pub unsafe extern "C" fn pineappl_grid_optimize_using(grid: *mut Grid, flags: Gr grid.optimize_using(flags); } -/// Scales each subgrid by a bin-dependent factor given in `factors`. If a bin does not have a -/// corresponding entry in `factors` it is not rescaled. If `factors` has more entries than there -/// are bins the superfluous entries do not have an effect. +/// Scales each subgrid by a bin-dependent factor given in `factors`. +/// +/// If a bin does not have a corresponding entry in `factors` it is not rescaled. If `factors` has +/// more entries than there are bins the superfluous entries do not have an effect. /// /// # Safety /// @@ -1107,9 +1113,11 @@ pub unsafe extern "C" fn pineappl_grid_set_key_value( grid.metadata_mut().insert(key, value); } -/// Sets a remapper for the grid. This can be used to 'upgrade' one-dimensional bin limits to -/// N-dimensional ones. The new bin limits must be given in the form of tuples giving the left and -/// right limits, and a tuple for each dimension. +/// Sets a remapper for the grid. +/// +/// This can be used to 'upgrade' one-dimensional bin limits to N-dimensional ones. The new bin +/// limits must be given in the form of tuples giving the left and right limits, and a tuple for +/// each dimension. /// /// # Safety /// @@ -1236,7 +1244,7 @@ pub unsafe extern "C" fn pineappl_lumi_combinations(lumi: *const Lumi, entry: us /// `pineappl_grid_lumi`. 
#[deprecated(since = "1.0.0", note = "use `pineappl_channels_count` instead")] #[unsafe(no_mangle)] -pub unsafe extern "C" fn pineappl_lumi_count(lumi: *const Lumi) -> usize { +pub const unsafe extern "C" fn pineappl_lumi_count(lumi: *const Lumi) -> usize { let lumi = unsafe { &*lumi }; lumi.0.len() @@ -1593,7 +1601,7 @@ pub unsafe extern "C" fn pineappl_grid_channels(grid: *const Grid) -> Box usize { +pub const unsafe extern "C" fn pineappl_channels_count(channels: *const Channels) -> usize { let Channels { channels, .. } = unsafe { &*channels }; channels.len() diff --git a/pineappl_cli/src/analyze.rs b/pineappl_cli/src/analyze.rs index 1abc2590..66dda726 100644 --- a/pineappl_cli/src/analyze.rs +++ b/pineappl_cli/src/analyze.rs @@ -2,8 +2,8 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::TypedValueParser; -use clap::{value_parser, Parser, ValueHint}; -use prettytable::{cell, Row}; +use clap::{Parser, ValueHint, value_parser}; +use prettytable::{Row, cell}; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/channels.rs b/pineappl_cli/src/channels.rs index fa64807f..991d5db4 100644 --- a/pineappl_cli/src/channels.rs +++ b/pineappl_cli/src/channels.rs @@ -2,8 +2,8 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::builder::TypedValueParser; -use clap::{value_parser, Parser, ValueHint}; -use prettytable::{cell, Row}; +use clap::{Parser, ValueHint, value_parser}; +use prettytable::{Row, cell}; use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/convolve.rs b/pineappl_cli/src/convolve.rs index facee52f..f6ceae11 100644 --- a/pineappl_cli/src/convolve.rs +++ b/pineappl_cli/src/convolve.rs @@ -2,7 +2,7 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use 
anyhow::Result; use clap::{Parser, ValueHint}; -use prettytable::{cell, Row}; +use prettytable::{Row, cell}; use std::ops::RangeInclusive; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/diff.rs b/pineappl_cli/src/diff.rs index 10d3dc52..b64e3945 100644 --- a/pineappl_cli/src/diff.rs +++ b/pineappl_cli/src/diff.rs @@ -1,8 +1,8 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; -use anyhow::{bail, Result}; +use anyhow::{Result, bail}; use clap::{Parser, ValueHint}; -use prettytable::{cell, Row}; +use prettytable::{Row, cell}; use std::collections::HashSet; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/evolve.rs b/pineappl_cli/src/evolve.rs index 0ebc3986..454c79bf 100644 --- a/pineappl_cli/src/evolve.rs +++ b/pineappl_cli/src/evolve.rs @@ -1,6 +1,6 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; -use anyhow::{anyhow, Result}; +use anyhow::{Result, anyhow}; use clap::{Parser, ValueHint}; use lhapdf::Pdf; use pineappl::fk_table::FkTable; @@ -10,11 +10,11 @@ use std::process::ExitCode; #[cfg(feature = "evolve")] mod eko { - use anyhow::{anyhow, Result}; + use anyhow::{Result, anyhow}; + use base64::Engine; use base64::alphabet::URL_SAFE; - use base64::engine::general_purpose::PAD; use base64::engine::GeneralPurpose; - use base64::Engine; + use base64::engine::general_purpose::PAD; use either::Either; use lz4_flex::frame::FrameDecoder; use ndarray::iter::AxisIter; diff --git a/pineappl_cli/src/export.rs b/pineappl_cli/src/export.rs index 5fc1c956..0acc0ab4 100644 --- a/pineappl_cli/src/export.rs +++ b/pineappl_cli/src/export.rs @@ -1,6 +1,6 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; -use anyhow::{anyhow, Result}; +use anyhow::{Result, anyhow}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; use 
lhapdf::Pdf; @@ -125,7 +125,9 @@ impl Subcommand for Opts { .zip(order_mask.iter()) .filter_map(|(order, keep)| (!keep).then_some(order.clone())) { - println!("WARNING: the order O(as^{alphas} a^{alpha} lr^{logxir} lf^{logxif} la^{logxia}) isn't supported by {grid_type} and will be skipped."); + println!( + "WARNING: the order O(as^{alphas} a^{alpha} lr^{logxir} lf^{logxif} la^{logxia}) isn't supported by {grid_type} and will be skipped." + ); } let orders: Vec<_> = grid @@ -191,11 +193,7 @@ impl Subcommand for Opts { // ALLOW: here we really need an exact comparison // TODO: change allow to `expect` if MSRV >= 1.81.0 #[allow(clippy::float_cmp)] - if a == b { - 0.0 - } else { - b / a - 1.0 - } + if a == b { 0.0 } else { b / a - 1.0 } }) .collect(); diff --git a/pineappl_cli/src/export/applgrid.rs b/pineappl_cli/src/export/applgrid.rs index 214e277e..a1ac190a 100644 --- a/pineappl_cli/src/export/applgrid.rs +++ b/pineappl_cli/src/export/applgrid.rs @@ -1,8 +1,8 @@ -use anyhow::{bail, Result}; -use cxx::{let_cxx_string, UniquePtr}; +use anyhow::{Result, bail}; +use cxx::{UniquePtr, let_cxx_string}; use float_cmp::approx_eq; use lhapdf::Pdf; -use ndarray::{s, Axis}; +use ndarray::{Axis, s}; use pineappl::boc::{Channel, Kinematics, Order}; use pineappl::grid::Grid; use pineappl::interpolation::{Interp, InterpMeth, Map, ReweightMeth}; diff --git a/pineappl_cli/src/helpers.rs b/pineappl_cli/src/helpers.rs index adb6cc1f..e3899e45 100644 --- a/pineappl_cli/src/helpers.rs +++ b/pineappl_cli/src/helpers.rs @@ -1,12 +1,12 @@ use super::GlobalConfiguration; -use anyhow::{anyhow, bail, Context, Error, Result}; +use anyhow::{Context, Error, Result, anyhow, bail}; use itertools::Itertools; use lhapdf::{Pdf, PdfSet}; use pineappl::boc::{ScaleFuncForm, Scales}; use pineappl::convolutions::{Conv, ConvType, ConvolutionCache}; use pineappl::grid::Grid; -use prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use prettytable::Table; +use 
prettytable::format::{FormatBuilder, LinePosition, LineSeparator}; use std::fs::{File, OpenOptions}; use std::iter; use std::ops::RangeInclusive; diff --git a/pineappl_cli/src/import.rs b/pineappl_cli/src/import.rs index 0cece221..0fae9832 100644 --- a/pineappl_cli/src/import.rs +++ b/pineappl_cli/src/import.rs @@ -1,6 +1,6 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; -use anyhow::{anyhow, Result}; +use anyhow::{Result, anyhow}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Parser, ValueHint}; use lhapdf::Pdf; @@ -306,11 +306,7 @@ impl Subcommand for Opts { // ALLOW: here we really need an exact comparison // TODO: change allow to `expect` if MSRV >= 1.81.0 #[allow(clippy::float_cmp)] - if a == b { - 0.0 - } else { - b / a - 1.0 - } + if a == b { 0.0 } else { b / a - 1.0 } }) .collect(); diff --git a/pineappl_cli/src/import/fastnlo.rs b/pineappl_cli/src/import/fastnlo.rs index b368d7da..aa9cf8c8 100644 --- a/pineappl_cli/src/import/fastnlo.rs +++ b/pineappl_cli/src/import/fastnlo.rs @@ -10,8 +10,8 @@ use pineappl::packed_array::PackedArray; use pineappl::pids::PidBasis; use pineappl::subgrid::ImportSubgridV1; use pineappl_fastnlo::ffi::{ - self, fastNLOCoeffAddBase, fastNLOCoeffAddFix, fastNLOCoeffAddFlex, fastNLOLHAPDF, - fastNLOPDFLinearCombinations, EScaleFunctionalForm, + self, EScaleFunctionalForm, fastNLOCoeffAddBase, fastNLOCoeffAddFix, fastNLOCoeffAddFlex, + fastNLOLHAPDF, fastNLOPDFLinearCombinations, }; use std::f64::consts::TAU; use std::iter; diff --git a/pineappl_cli/src/import/fktable.rs b/pineappl_cli/src/import/fktable.rs index ddf08287..d1084c54 100644 --- a/pineappl_cli/src/import/fktable.rs +++ b/pineappl_cli/src/import/fktable.rs @@ -1,4 +1,4 @@ -use anyhow::{anyhow, Context, Result}; +use anyhow::{Context, Result, anyhow}; use flate2::read::GzDecoder; use ndarray::s; use pineappl::boc::{BinsWithFillLimits, Kinematics, Order, ScaleFuncForm, Scales}; diff --git 
a/pineappl_cli/src/orders.rs b/pineappl_cli/src/orders.rs index 543f268c..7000d738 100644 --- a/pineappl_cli/src/orders.rs +++ b/pineappl_cli/src/orders.rs @@ -2,7 +2,7 @@ use super::helpers::{self, ConvFuns, ConvoluteMode}; use super::{GlobalConfiguration, Subcommand}; use anyhow::Result; use clap::{Parser, ValueHint}; -use prettytable::{cell, Row}; +use prettytable::{Row, cell}; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/plot.rs b/pineappl_cli/src/plot.rs index d75bce3d..ba5745fd 100644 --- a/pineappl_cli/src/plot.rs +++ b/pineappl_cli/src/plot.rs @@ -6,7 +6,7 @@ use clap::{Parser, ValueHint}; use itertools::Itertools; use pineappl::boc::Channel; use pineappl::grid::Grid; -use rayon::{prelude::*, ThreadPoolBuilder}; +use rayon::{ThreadPoolBuilder, prelude::*}; use std::fmt::Write; use std::num::NonZeroUsize; use std::ops::Range; diff --git a/pineappl_cli/src/pull.rs b/pineappl_cli/src/pull.rs index 2b270199..0b40586d 100644 --- a/pineappl_cli/src/pull.rs +++ b/pineappl_cli/src/pull.rs @@ -3,8 +3,8 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{Error, Result}; use clap::{Parser, ValueHint}; use lhapdf::{Pdf, PdfSet}; -use prettytable::{cell, Row}; -use rayon::{prelude::*, ThreadPoolBuilder}; +use prettytable::{Row, cell}; +use rayon::{ThreadPoolBuilder, prelude::*}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/read.rs b/pineappl_cli/src/read.rs index b9dd216d..e3f35e7b 100644 --- a/pineappl_cli/src/read.rs +++ b/pineappl_cli/src/read.rs @@ -5,7 +5,7 @@ use clap::{Args, Parser, ValueHint}; use itertools::Itertools; use pineappl::boc::Order; use pineappl::fk_table::FkTable; -use prettytable::{cell, row, Row}; +use prettytable::{Row, cell, row}; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/uncert.rs b/pineappl_cli/src/uncert.rs index 33b795a4..1a21127c 100644 --- a/pineappl_cli/src/uncert.rs +++ 
b/pineappl_cli/src/uncert.rs @@ -3,8 +3,8 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{Error, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{Args, Parser, ValueHint}; -use prettytable::{cell, Row}; -use rayon::{prelude::*, ThreadPoolBuilder}; +use prettytable::{Row, cell}; +use rayon::{ThreadPoolBuilder, prelude::*}; use std::num::NonZeroUsize; use std::path::PathBuf; use std::process::ExitCode; diff --git a/pineappl_cli/src/write.rs b/pineappl_cli/src/write.rs index 5da5d6ce..6590da0a 100644 --- a/pineappl_cli/src/write.rs +++ b/pineappl_cli/src/write.rs @@ -3,8 +3,8 @@ use super::{GlobalConfiguration, Subcommand}; use anyhow::{Context, Result}; use clap::builder::{PossibleValuesParser, TypedValueParser}; use clap::{ - value_parser, Arg, ArgAction, ArgMatches, Args, Command, Error, FromArgMatches, Parser, - ValueHint, + Arg, ArgAction, ArgMatches, Args, Command, Error, FromArgMatches, Parser, ValueHint, + value_parser, }; use pineappl::boc::{Bin, BinsWithFillLimits, Channel, Order}; use pineappl::fk_table::{FkAssumptions, FkTable}; diff --git a/pineappl_cli/tests/write.rs b/pineappl_cli/tests/write.rs index 3da52717..d1d641f3 100644 --- a/pineappl_cli/tests/write.rs +++ b/pineappl_cli/tests/write.rs @@ -1,7 +1,7 @@ #![allow(missing_docs)] use assert_cmd::Command; -use assert_fs::{fixture::FileWriteStr, NamedTempFile}; +use assert_fs::{NamedTempFile, fixture::FileWriteStr}; const HELP_STR: &str = "Write a grid modified by various operations diff --git a/pineappl_py/tests/conftest.py b/pineappl_py/tests/conftest.py index c6e7d621..22bebdf7 100644 --- a/pineappl_py/tests/conftest.py +++ b/pineappl_py/tests/conftest.py @@ -1,6 +1,5 @@ import numpy as np import pytest -import subprocess from typing import List from pineappl.boc import ( @@ -199,6 +198,7 @@ def fake_grids(): def download_objects(tmp_path_factory): def _download_fk(objname: str) -> None: import os + path = os.path.abspath(f"../test-data/{objname}") 
return path From 988189efa489df876c1520596b09b1a50a83a808 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Tue, 31 Mar 2026 14:32:45 +0200 Subject: [PATCH 03/10] Add more documentation --- pineappl/src/boc.rs | 30 ++++++------ pineappl/src/convolutions.rs | 18 +++---- pineappl/src/error.rs | 4 +- pineappl/src/fk_table.rs | 3 +- pineappl/src/grid.rs | 27 +++++------ pineappl/src/interpolation.rs | 51 +++++++++++--------- pineappl/src/packed_array.rs | 26 +++++----- pineappl/src/pids.rs | 13 ++--- pineappl/src/reference.rs | 28 +++++------ pineappl/src/subgrid.rs | 14 +++--- pineappl_capi/src/lib.rs | 72 +++++++++++++++------------- pineappl_py/src/boc.rs | 90 ++++++++++++++++++----------------- pineappl_py/src/fk_table.rs | 36 +++++++------- pineappl_py/src/grid.rs | 89 +++++++++++++++++----------------- pineappl_py/src/subgrid.rs | 10 ++-- 15 files changed, 265 insertions(+), 246 deletions(-) diff --git a/pineappl/src/boc.rs b/pineappl/src/boc.rs index 17f10262..73572cc0 100644 --- a/pineappl/src/boc.rs +++ b/pineappl/src/boc.rs @@ -786,7 +786,7 @@ impl Order { /// /// # Example /// - /// In the case of Drell—Yan, there are the following orders: + /// In the case of Drell-Yan, there are the following orders: /// /// - exactly one leading order (LO), /// - two next-to-leading orders (NLO), which are @@ -795,7 +795,7 @@ /// - three next-to-next-to-leading orders (NNLO), /// - the NNLO QCD, /// - the NNLO EW, and finally - /// - the mixed NNLO QCD—EW. + /// - the mixed NNLO QCD-EW.
/// /// ```rust /// use pineappl::boc::Order; @@ -805,7 +805,7 @@ impl Order { /// Order::new(1, 2, 0, 0, 0), // NLO QCD : alphas alpha^2 /// Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 /// Order::new(2, 2, 0, 0, 0), // NNLO QCD : alphas^2 alpha^2 - /// Order::new(1, 3, 0, 0, 0), // NNLO QCD—EW : alphas alpha^3 + /// Order::new(1, 3, 0, 0, 0), // NNLO QCD-EW : alphas alpha^3 /// Order::new(0, 4, 0, 0, 0), // NNLO EW : alpha^4 /// ]; /// @@ -850,11 +850,11 @@ impl Order { /// /// let orders = [ /// Order::new(2, 0, 0, 0, 0), // LO QCD : alphas^2 - /// Order::new(1, 1, 0, 0, 0), // LO QCD—EW : alphas alpha + /// Order::new(1, 1, 0, 0, 0), // LO QCD-EW : alphas alpha /// Order::new(0, 2, 0, 0, 0), // LO EW : alpha^2 /// Order::new(3, 0, 0, 0, 0), // NLO QCD : alphas^3 - /// Order::new(2, 1, 0, 0, 0), // NLO QCD—EW : alphas^2 alpha - /// Order::new(1, 2, 0, 0, 0), // NLO QCD—EW : alphas alpha^2 + /// Order::new(2, 1, 0, 0, 0), // NLO QCD-EW : alphas^2 alpha + /// Order::new(1, 2, 0, 0, 0), // NLO QCD-EW : alphas alpha^2 /// Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 /// ]; /// @@ -1137,7 +1137,8 @@ impl Channel { /// /// # Panics /// - /// TODO + /// Panics if any luminosity coefficient is not comparable (for example NaN), or if the + /// channel has no entries (should not occur for a well-formed grid). 
#[must_use] pub fn factor(&self) -> (f64, Self) { let factor = self @@ -1296,13 +1297,12 @@ mod tests { #[test] fn order_create_mask() { - // Drell—Yan orders let orders = [ Order::new(0, 2, 0, 0, 0), // LO : alpha^2 Order::new(1, 2, 0, 0, 0), // NLO QCD : alphas alpha^2 Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 Order::new(2, 2, 0, 0, 0), // NNLO QCD : alphas^2 alpha^2 - Order::new(1, 3, 0, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(1, 3, 0, 0, 0), // NNLO QCD-EW : alphas alpha^3 Order::new(0, 4, 0, 0, 0), // NNLO EW : alpha^4 ]; @@ -1374,16 +1374,16 @@ mod tests { // Top-pair production orders let orders = [ Order::new(2, 0, 0, 0, 0), // LO QCD : alphas^2 - Order::new(1, 1, 0, 0, 0), // LO QCD—EW : alphas alpha + Order::new(1, 1, 0, 0, 0), // LO QCD-EW : alphas alpha Order::new(0, 2, 0, 0, 0), // LO EW : alpha^2 Order::new(3, 0, 0, 0, 0), // NLO QCD : alphas^3 - Order::new(2, 1, 0, 0, 0), // NLO QCD—EW : alphas^2 alpha - Order::new(1, 2, 0, 0, 0), // NLO QCD—EW : alphas alpha^2 + Order::new(2, 1, 0, 0, 0), // NLO QCD-EW : alphas^2 alpha + Order::new(1, 2, 0, 0, 0), // NLO QCD-EW : alphas alpha^2 Order::new(0, 3, 0, 0, 0), // NLO EW : alpha^3 Order::new(4, 0, 0, 0, 0), // NNLO QCD : alphas^4 - Order::new(3, 1, 0, 0, 0), // NNLO QCD—EW : alphas^3 alpha - Order::new(2, 2, 0, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 - Order::new(1, 3, 0, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(3, 1, 0, 0, 0), // NNLO QCD-EW : alphas^3 alpha + Order::new(2, 2, 0, 0, 0), // NNLO QCD-EW : alphas^2 alpha^2 + Order::new(1, 3, 0, 0, 0), // NNLO QCD-EW : alphas alpha^3 Order::new(0, 4, 0, 0, 0), // NNLO EW : alpha^4 ]; diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs index c13b363b..f6ca752a 100644 --- a/pineappl/src/convolutions.rs +++ b/pineappl/src/convolutions.rs @@ -34,9 +34,9 @@ impl<'a> ConvolutionCache<'a> { /// Construct a new convolution cache. 
/// /// - `convolutions` describes each convolution function (PDF/FF type and hadron PID). - /// - `xfx` provides one callback per convolution, used to evaluate `x f(x, μ^2)` for - /// a given PID, `x`, and scale `μ^2`. - /// - `alphas` provides a callback to evaluate `μ^2`. + /// - `xfx` provides one callback per convolution, used to evaluate `x * f(x, Q2)` for + /// a given PID, `x`, and squared scale `Q2`. + /// - `alphas` provides a callback given `Q2` (squared renormalization scale). /// /// The cache is filled lazily as [`Grid`] convolution is performed. pub fn new( @@ -178,8 +178,9 @@ impl<'a> ConvolutionCache<'a> { /// A convolution cache configured for a specific [`Grid`]. /// /// This is a lightweight adaptor around [`ConvolutionCache`] that precomputes the bookkeeping -/// needed to evaluate PDF/FF factors and `α` at the scales required by a particular grid and -/// subgrid. It is created internally by [`ConvolutionCache`] when convolving a grid. +/// needed to evaluate PDF/FF factors and the strong coupling at the scales required by a +/// particular grid and subgrid. It is created internally by [`ConvolutionCache`] when convolving +/// a grid. pub struct GridConvCache<'a, 'b> { cache: &'b mut ConvolutionCache<'a>, perm: Vec<(usize, bool)>, @@ -190,10 +191,11 @@ pub struct GridConvCache<'a, 'b> { } impl GridConvCache<'_, '_> { - /// Compute `αs` and convolution-function products for a given tuple of indices. + /// Compute `alpha_s` and convolution-function products for a given tuple of indices. /// /// The returned value is the product of all requested convolution functions evaluated at the - /// subgrid's `x` nodes and the appropriate scale(s), multiplied by `αs` raised to `as_order`. + /// subgrid's `x` nodes and the appropriate scale(s), multiplied by `alpha_s` raised to + /// `as_order`. 
/// /// # Panics /// @@ -201,7 +203,7 @@ impl GridConvCache<'_, '_> { /// moment the implementation assumes that: /// - `indices[0..x_start]` correspond to scale indices (starting with a factorization-scale /// dimension), and - /// - the remaining indices correspond to the \(x\) dimensions in the same order as `pdg_ids`. + /// - the remaining indices correspond to the `x` dimensions in the same order as `pdg_ids`. /// /// This restriction is tracked in the codebase (see the TODO in the implementation). pub fn as_fx_prod(&mut self, pdg_ids: &[i32], as_order: u8, indices: &[usize]) -> f64 { diff --git a/pineappl/src/error.rs b/pineappl/src/error.rs index 33270456..b6d7f1f4 100644 --- a/pineappl/src/error.rs +++ b/pineappl/src/error.rs @@ -1,4 +1,4 @@ -//! TODO +//! Error types for the `pineappl` library. use thiserror::Error; @@ -13,5 +13,5 @@ pub enum Error { Other(#[from] anyhow::Error), } -/// TODO +/// Convenient result type using [`Error`]. pub type Result = std::result::Result; diff --git a/pineappl/src/fk_table.rs b/pineappl/src/fk_table.rs index aa42e0bc..73e644ec 100644 --- a/pineappl/src/fk_table.rs +++ b/pineappl/src/fk_table.rs @@ -124,7 +124,8 @@ impl FkTable { /// /// # Panics /// - /// TODO + /// Panics if a subgrid node value is missing from the global `x` grid or if internal index + /// layout assumptions are violated (indicates a corrupted or inconsistent FK table). #[must_use] pub fn table(&self) -> ArrayD { let x_grid = self.x_grid(); diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 21080e3c..b8c540ef 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -156,13 +156,13 @@ impl Grid { } } - /// TODO + /// Optional reference cross sections and uncertainties carried with this grid. #[must_use] pub const fn reference(&self) -> &Reference { &self.reference } - /// TODO + /// Replace the attached [`Reference`] data (caller should keep layout consistent with bins). 
pub fn set_reference(&mut self, reference: Reference) { // TODO: check that the number of bins and channels is consistent between the grid and // `reference` @@ -201,7 +201,9 @@ impl Grid { /// /// # Panics /// - /// TODO + /// Panics if [`ConvolutionCache::new_grid_conv_cache`] cannot match the grid convolutions to + /// the cache, if non-empty `order_mask` or `channel_mask` slices are shorter than the number of + /// orders or channels respectively, or if PDF lookup inside the cache panics. pub fn convolve( &self, cache: &mut ConvolutionCache, @@ -288,7 +290,8 @@ impl Grid { /// /// # Panics /// - /// TODO + /// In debug builds, panics if `ntuple.len()` differs from the number of interpolations. Filling + /// an [`ImportSubgridV1`](crate::subgrid::ImportSubgridV1) always panics because it is read-only. pub fn fill( &mut self, order: usize, @@ -655,10 +658,6 @@ impl Grid { /// Scales each subgrid by a factor which is the product of the given values `alphas`, `alpha`, /// `logxir`, and `logxif`, each raised to the corresponding powers for each subgrid. In /// addition, every subgrid is scaled by a factor `global` independently of its order. - /// - /// # Panics - /// - /// TODO pub fn scale_by_order( &mut self, alphas: f64, @@ -721,11 +720,11 @@ impl Grid { self.subgrids.view_mut() } - /// TODO + /// Replace bin definitions while keeping the same number of bins as the existing grid. /// /// # Errors /// - /// TODO + /// Returns [`Error::General`] if `bwfl` has a different number of bins than this grid. pub fn set_bwfl(&mut self, bwfl: BinsWithFillLimits) -> Result<()> { let bins = bwfl.len(); let grid_bins = self.bwfl().len(); @@ -741,7 +740,7 @@ impl Grid { Ok(()) } - /// TODO + /// Bin limits, fill limits, and per-bin normalizations. #[must_use] pub const fn bwfl(&self) -> &BinsWithFillLimits { &self.bwfl @@ -984,11 +983,7 @@ impl Grid { &self.metadata } - /// Return the metadata of this grid. 
- /// - /// # Panics - /// - /// TODO + /// Mutable access to string metadata key-value pairs stored in the grid file header. #[must_use] pub const fn metadata_mut(&mut self) -> &mut BTreeMap { &mut self.metadata diff --git a/pineappl/src/interpolation.rs b/pineappl/src/interpolation.rs index 093aa13b..8ba75ab9 100644 --- a/pineappl/src/interpolation.rs +++ b/pineappl/src/interpolation.rs @@ -61,35 +61,36 @@ fn lagrange_weights(i: usize, n: usize, u: f64) -> f64 { product / convert::f64_from_usize(factorials) } -/// TODO +/// How node weights are adjusted before accumulating into the sparse grid. #[repr(C)] #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum ReweightMeth { - /// TODO + /// APPLgrid-style reweighting in `x` (see internal `applgrid::reweight_x`). ApplGridX, - /// TODO + /// No multiplicative reweighting (factor 1). NoReweight, } -/// TODO +/// Map between physical variable `x` and internal interpolation coordinate `y`. #[repr(C)] #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum Map { - /// TODO + /// Momentum fraction map used by APPLgrid (`fy2` / `fx2`). ApplGridF2, - /// TODO + /// Scale map using `ln ln(Q2/0.0625)` and its inverse. ApplGridH0, } -/// TODO +/// Interpolation method along one dimension. #[repr(C)] #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum InterpMeth { - /// TODO + /// Lagrange interpolation through `order + 1` consecutive nodes. Lagrange, } -/// TODO +/// One-dimensional interpolation specification: mapped range, node count, polynomial order, +/// and method. #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Interp { min: f64, @@ -116,7 +117,7 @@ impl PartialEq for Interp { impl Eq for Interp {} impl Interp { - /// TODO + /// Build an interpolator from physical `min`..=`max`, node count, polynomial `order`, and options. 
/// /// # Panics /// @@ -172,7 +173,7 @@ impl Interp { convert::f64_from_usize(index).mul_add(self.deltay(), self.min) } - /// TODO + /// Multiplicative weight applied at abscissa `x` before adding to the grid. #[must_use] pub fn reweight(&self, x: f64) -> f64 { match self.reweight { @@ -181,7 +182,9 @@ impl Interp { } } - /// TODO + /// Map `x` to the index of the first contributing node and a fractional offset in `y` space. + /// + /// Returns `None` if `x` maps outside the interpolated interval. #[must_use] pub fn interpolate(&self, x: f64) -> Option<(usize, f64)> { let y = self.map_x_to_y(x); @@ -206,7 +209,7 @@ impl Interp { } } - /// TODO + /// Lagrange weights for each of the `order + 1` nodes in the current stencil, given `fraction` in `y`. #[must_use] pub fn node_weights(&self, fraction: f64) -> ArrayVec { (0..=self.order) @@ -216,13 +219,13 @@ impl Interp { .collect() } - /// TODO + /// Polynomial interpolation order (number of intervals spanned is `order + 1` nodes). #[must_use] pub const fn order(&self) -> usize { self.order } - /// TODO + /// Node positions in physical `x` for this dimension. #[must_use] pub fn node_values(&self) -> Vec { if self.nodes == 1 { @@ -248,43 +251,43 @@ impl Interp { } } - /// TODO + /// Number of support nodes along this dimension. #[must_use] pub const fn nodes(&self) -> usize { self.nodes } - /// TODO + /// Smallest physical `x` covered by the node set (after mapping and ordering). #[must_use] pub fn min(&self) -> f64 { self.map_y_to_x(self.min).min(self.map_y_to_x(self.max)) } - /// TODO + /// Largest physical `x` covered by the node set (after mapping and ordering). #[must_use] pub fn max(&self) -> f64 { self.map_y_to_x(self.min).max(self.map_y_to_x(self.max)) } - /// TODO + /// Coordinate map between `x` and internal `y`. #[must_use] pub const fn map(&self) -> Map { self.map } - /// TODO + /// Interpolation kernel (currently only Lagrange). 
#[must_use] pub const fn interp_meth(&self) -> InterpMeth { self.interp_meth } - /// TODO + /// Reweighting mode in `x`. #[must_use] pub const fn reweight_meth(&self) -> ReweightMeth { self.reweight } - /// TODO + /// Restrict to the node indices in `range` (inclusive start, exclusive end in node index space). #[must_use] pub fn sub_interp(&self, range: Range) -> Self { Self { @@ -299,7 +302,9 @@ impl Interp { } } -/// TODO +/// Add `weight` into `array` at the stencil given by `interps` and `ntuple`. +/// +/// Returns `false` if `weight` is zero or if any dimension maps outside its range. pub fn interpolate( interps: &[Interp], ntuple: &[f64], diff --git a/pineappl/src/packed_array.rs b/pineappl/src/packed_array.rs index 52b14184..6376dc1e 100644 --- a/pineappl/src/packed_array.rs +++ b/pineappl/src/packed_array.rs @@ -7,8 +7,10 @@ use std::mem; use std::ops::{Index, IndexMut, MulAssign}; /// `D`-dimensional array similar to [`ndarray::ArrayBase`], except that `T::default()` is not -/// stored to save space. Instead, adjacent non-default elements are grouped together and the index -/// of their first element (`start_index`) and the length of the group (`lengths`) is stored. +/// stored to save space. +/// +/// Instead, adjacent non-default elements are grouped together and the index of their first +/// element (`start_index`) and the length of the group (`lengths`) is stored. #[derive(Clone, Debug, Deserialize, Serialize)] pub struct PackedArray { /// The actual values stored in the array. The length of `entries` is always the sum of the @@ -37,7 +39,7 @@ impl PackedArray { /// Returns `true` if the array contains no element. #[must_use] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.entries.is_empty() } @@ -57,7 +59,7 @@ impl PackedArray { /// Returns the overhead of storing the `start_indices` and the `lengths` of the groups, in /// units of `f64`. 
#[must_use] - pub fn overhead(&self) -> usize { + pub const fn overhead(&self) -> usize { ((self.start_indices.len() + self.lengths.len()) * mem::size_of::()) / mem::size_of::() } @@ -77,7 +79,7 @@ impl PackedArray { self.entries.iter().filter(|x| **x != T::default()).count() } - /// TODO + /// Iterator over non-default stored elements as `(multi_index, value)`. pub fn indexed_iter(&self) -> impl Iterator, T)> + '_ { self.start_indices .iter() @@ -90,11 +92,13 @@ impl PackedArray { .map(|(indices, entry)| (indices, *entry)) } - /// TODO + /// Flat index into `self` for a sub-block starting at `start_index` with shape `fill_shape`, + /// where `i` is the linear index inside the sub-block (row-major). /// /// # Panics /// - /// TODO + /// Panics if `start_index.len() != fill_shape.len()`, if `i` is out of range for `fill_shape`, + /// or if the computed multi-index is out of bounds for `self.shape()`. // TODO: rewrite this method into `sub_block_iter_mut() -> impl Iterator` #[must_use] pub fn sub_block_idx( @@ -170,11 +174,11 @@ impl From> for PackedArray { } } -/// Converts a `multi_index` into a flat index. +/// Converts a `multi_index` into a flat index (row-major). /// /// # Panics /// -/// TODO +/// Panics if `multi_index` and `shape` have different lengths. #[must_use] pub fn ravel_multi_index(multi_index: &[usize], shape: &[usize]) -> usize { assert_eq!(multi_index.len(), shape.len()); @@ -185,11 +189,11 @@ pub fn ravel_multi_index(multi_index: &[usize], shape: &[usize]) -> usize { .fold(0, |acc, (i, d)| acc * d + i) } -/// TODO +/// Converts a flat `index` into a multi-index for `shape` (row-major). /// /// # Panics /// -/// TODO +/// Panics if `index` is not strictly less than the product of `shape`. 
#[must_use] pub fn unravel_index(mut index: usize, shape: &[usize]) -> Vec { assert!(index < shape.iter().product()); diff --git a/pineappl/src/pids.rs b/pineappl/src/pids.rs index db64a8a7..33eebf25 100644 --- a/pineappl/src/pids.rs +++ b/pineappl/src/pids.rs @@ -1,4 +1,4 @@ -//! TODO +//! Particle ID basis handling, charge conjugation, and channel translation for FK evolution. use super::boc::Channel; use super::error::{Error, Result}; @@ -104,7 +104,7 @@ impl PidBasis { } } - /// TODO + /// Express `channel` in the target basis `to`, mapping PDG and evolution IDs as needed. #[must_use] pub fn translate(&self, to: Self, channel: Channel) -> Channel { match (self, to) { @@ -114,7 +114,7 @@ impl PidBasis { } } - /// TODO + /// Return symmetry rules used when optimizing FK tables for the given `assumptions`. #[must_use] pub fn opt_rules(&self, assumptions: FkAssumptions) -> OptRules { match (*self, assumptions) { @@ -444,9 +444,10 @@ pub const fn charge_conjugate_pdg_pid(pid: i32) -> i32 { } } -/// Given `tuples` representing a linear combination of PDG MC IDs, return a PID for the -/// [`PidBasis::Evol`] basis. The order of each tuple in `tuples` is not relevant. If the inversion -/// is not possible, `None` is returned. +/// Given `tuples` representing a linear combination of PDG MC IDs. +/// +/// This return a PID for the [`PidBasis::Evol`] basis. The order of each tuple in `tuples` is not +/// relevant. If the inversion is not possible, `None` is returned. #[must_use] pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { let mut tuples = tuples.to_vec(); diff --git a/pineappl/src/reference.rs b/pineappl/src/reference.rs index b479cf2b..73fe7c6d 100644 --- a/pineappl/src/reference.rs +++ b/pineappl/src/reference.rs @@ -1,43 +1,43 @@ -//! TODO +//! Optional reference cross sections and uncertainties stored alongside a [`crate::grid::Grid`]. use serde::{Deserialize, Serialize}; -/// Absolute reference result for a single bin. 
+/// Absolute reference cross section for a single bin. #[derive(Clone, Deserialize, Serialize)] pub enum AbsRefRes { - /// TODO + /// One value per bin. ByBin(f64), - /// TODO + /// One value per bin and perturbative order. ByBinOrder(Vec), - /// TODO + /// One value per bin and luminosity channel. ByBinChannel(Vec), - /// TODO + /// Values for each bin, order, and channel. ByBinOrderChannel(Vec>), } /// Relative reference uncertainty for a single bin. #[derive(Clone, Deserialize, Serialize)] pub enum RelRefUnc { - /// TODO + /// Relative uncertainty per bin. ByBin(f64), - /// TODO + /// Per-order uncertainties combined with `CombOp`. ByBinOrder(Vec, CombOp), - /// TODO + /// Per-channel uncertainties combined with `CombOp`. ByBinChannel(Vec, CombOp), - /// TODO + /// Per-order and channel uncertainties combined with `CombOp`. ByBinOrderChannel(Vec>, CombOp), } -/// TODO +/// How to combine several relative uncertainties into one. #[derive(Clone, Deserialize, Serialize)] pub enum CombOp { - /// TODO + /// Linear sum of relative terms. Sum, - /// TODO + /// Square root of the sum of squares (in quadrature). Quadrature, } -/// TODO +/// Reference results and convolution-function labels for validation or plotting. #[derive(Clone, Default, Deserialize, Serialize)] pub struct Reference { ref_res_unc: Vec<(AbsRefRes, RelRefUnc)>, diff --git a/pineappl/src/subgrid.rs b/pineappl/src/subgrid.rs index c09968de..9ebc7437 100644 --- a/pineappl/src/subgrid.rs +++ b/pineappl/src/subgrid.rs @@ -8,13 +8,13 @@ use itertools::izip; use serde::{Deserialize, Serialize}; use std::{iter, mem}; -/// TODO +/// Compare two node values for equality up to a fixed float tolerance. #[must_use] pub fn node_value_eq(lhs: f64, rhs: f64) -> bool { approx_eq!(f64, lhs, rhs, ulps = 4096) } -/// TODO +/// Like [`node_value_eq`], taking mutable references for use with `dedup_by`. 
#[must_use] pub fn node_value_eq_ref_mut(lhs: &mut f64, rhs: &mut f64) -> bool { node_value_eq(*lhs, *rhs) @@ -69,7 +69,7 @@ impl Subgrid for EmptySubgridV1 { fn optimize_nodes(&mut self) {} } -/// TODO +/// Dense imported subgrid backed by a [`PackedArray`] and explicit node coordinates per dimension. #[derive(Clone, Deserialize, Serialize)] pub struct ImportSubgridV1 { array: PackedArray, @@ -408,12 +408,12 @@ pub enum SubgridEnum { InterpSubgridV1, /// Empty subgrid. EmptySubgridV1, - /// TODO + /// Imported sparse layout with arbitrary node grids per dimension. ImportSubgridV1, } impl SubgridEnum { - /// TODO + /// Merge `other` into `self`, optionally swapping two convolution dimensions. pub fn merge(&mut self, other: &Self, transpose: Option<(usize, usize)>) { if other.is_empty() { return; @@ -458,7 +458,7 @@ pub struct Stats { /// Trait each subgrid must implement. #[enum_dispatch] pub trait Subgrid { - /// TODO + /// Node coordinates for each kinematic dimension (same order as the grid kinematics). fn node_values(&self) -> Vec>; /// Fill the subgrid with `weight` that is being interpolated with `interps` using the @@ -489,7 +489,7 @@ pub trait Subgrid { /// Return statistics for this subgrid. fn stats(&self) -> Stats; - /// TODO + /// Try to collapse a static scale dimension into fewer nodes where possible. fn optimize_nodes(&mut self); } diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 5f5568db..7979187e 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -75,22 +75,22 @@ use std::os::raw::{c_char, c_void}; use std::path::Path; use std::slice; -/// TODO +/// Select subgrid type optimization when passed to grid optimization (see `GridOptFlags::OPTIMIZE_SUBGRID_TYPE`). pub const PINEAPPL_GOF_OPTIMIZE_SUBGRID_TYPE: GridOptFlags = GridOptFlags::OPTIMIZE_SUBGRID_TYPE; -/// TODO +/// Select static-scale / node optimization (see `GridOptFlags::OPTIMIZE_NODES`). 
pub const PINEAPPL_GOF_OPTIMIZE_NODES: GridOptFlags = GridOptFlags::OPTIMIZE_NODES; -/// TODO +/// Merge transposed channels when convolutions coincide (see `GridOptFlags::SYMMETRIZE_CHANNELS`). pub const PINEAPPL_GOF_SYMMETRIZE_CHANNELS: GridOptFlags = GridOptFlags::SYMMETRIZE_CHANNELS; -/// TODO +/// Drop orders whose subgrids are all empty (see `GridOptFlags::STRIP_EMPTY_ORDERS`). pub const PINEAPPL_GOF_STRIP_EMPTY_ORDERS: GridOptFlags = GridOptFlags::STRIP_EMPTY_ORDERS; -/// TODO +/// Merge channels with identical parton content (see `GridOptFlags::MERGE_SAME_CHANNELS`). pub const PINEAPPL_GOF_MERGE_SAME_CHANNELS: GridOptFlags = GridOptFlags::MERGE_SAME_CHANNELS; -/// TODO +/// Remove channels with only empty subgrids (see `GridOptFlags::STRIP_EMPTY_CHANNELS`). pub const PINEAPPL_GOF_STRIP_EMPTY_CHANNELS: GridOptFlags = GridOptFlags::STRIP_EMPTY_CHANNELS; // TODO: make sure no `panic` calls leave functions marked as `extern "C"` @@ -763,7 +763,8 @@ pub unsafe extern "C" fn pineappl_grid_order_count(grid: *const Grid) -> usize { /// /// # Panics /// -/// TODO +/// Panics if bin limits, order parameters, or metadata cannot be converted into a valid `Grid` +/// (invalid strings, parse errors, or inconsistent state); the C API aborts on these errors. #[deprecated(since = "1.0.0", note = "use `pineappl_grid_new2` instead")] #[unsafe(no_mangle)] #[must_use] @@ -837,7 +838,7 @@ pub unsafe extern "C" fn pineappl_grid_new( /// /// # Panics /// -/// TODO +/// Panics if the file cannot be opened or the grid cannot be deserialized. 
#[unsafe(no_mangle)] #[must_use] pub unsafe extern "C" fn pineappl_grid_read(filename: *const c_char) -> Box { @@ -858,7 +859,7 @@ pub unsafe extern "C" fn pineappl_grid_read(filename: *const c_char) -> Box>) { if let Some(other) = other { @@ -1012,8 +1013,9 @@ pub unsafe extern "C" fn pineappl_grid_scale_by_bin( } /// Scales each subgrid by a factor which is the product of the given values `alphas`, `alpha`, -/// `logxir`, and `logxif`, each raised to the corresponding powers for each subgrid. In addition, -/// every subgrid is scaled by a factor `global` independently of its order. +/// `logxir`, and `logxif`, each raised to the corresponding powers for each subgrid. +/// +/// In addition, every subgrid is scaled by a factor `global` independently of its order. /// /// # Safety /// @@ -1044,7 +1046,7 @@ pub unsafe extern "C" fn pineappl_grid_scale_by_order( /// /// # Panics /// -/// TODO +/// Panics if constructing the returned C string fails (for example embedded null bytes in metadata). #[deprecated(since = "1.0.0", note = "use `pineappl_grid_metadata` instead")] #[unsafe(no_mangle)] pub unsafe extern "C" fn pineappl_grid_key_value( @@ -1083,7 +1085,7 @@ pub unsafe extern "C" fn pineappl_grid_key_value( /// /// # Panics /// -/// TODO +/// Panics if initial-state metadata strings cannot be parsed as integers when updating convolutions. #[deprecated(since = "1.0.0", note = "use `pineappl_grid_set_metadata` instead")] #[unsafe(no_mangle)] pub unsafe extern "C" fn pineappl_grid_set_key_value( @@ -1127,7 +1129,7 @@ pub unsafe extern "C" fn pineappl_grid_set_key_value( /// /// # Panics /// -/// TODO +/// Panics if the constructed bins are inconsistent with the grid or `set_bwfl` rejects them. #[unsafe(no_mangle)] pub unsafe extern "C" fn pineappl_grid_set_remapper( grid: *mut Grid, @@ -1174,7 +1176,7 @@ pub unsafe extern "C" fn pineappl_grid_set_remapper( /// /// # Panics /// -/// TODO +/// Panics if the output file cannot be created or writing the grid fails. 
#[unsafe(no_mangle)] pub unsafe extern "C" fn pineappl_grid_write(grid: *const Grid, filename: *const c_char) { let grid = unsafe { &*grid }; @@ -1504,19 +1506,19 @@ pub struct Channels { /// Type for defining the interpolation object #[repr(C)] pub struct Interp { - /// TODO + /// Physical lower edge of the interpolation range (before internal mapping). pub min: f64, - /// TODO + /// Physical upper edge of the interpolation range (before internal mapping). pub max: f64, - /// TODO + /// Number of support nodes. pub nodes: usize, - /// TODO + /// Polynomial interpolation order. pub order: usize, - /// TODO + /// Reweighting mode in the physical variable. pub reweight: ReweightMeth, - /// TODO + /// Map between physical variable and internal coordinate. pub map: Map, - /// TODO + /// Interpolation kernel (Lagrange, ...). pub interp_meth: InterpMeth, } @@ -1656,11 +1658,15 @@ pub extern "C" fn pineappl_channels_delete(channels: Option>) {} /// /// # Safety /// -/// TODO +/// `bin_limits` must point to at least `bins + 1` floats. `order_params` must point to `5 * orders` +/// bytes. `channels` must be a valid `Channels` pointer. `convolutions` must point to as many +/// `Conv` values as recorded in `channels`. `interps` must point to `interpolations` elements. +/// `kinematics` must point to one entry per interpolation. `scales` must point to three +/// `ScaleFuncForm` values (ren, fac, frg). Any mismatch is undefined behavior. /// /// # Panics /// -/// TODO +/// Panics if bin limits, orders, or internal `Grid::new` validation fail (unwraps in the Rust layer). #[unsafe(no_mangle)] #[must_use] pub unsafe extern "C" fn pineappl_grid_new2( @@ -2023,11 +2029,12 @@ pub unsafe extern "C" fn pineappl_grid_convolve( /// /// # Safety /// -/// TODO +/// `grid` must be a valid grid pointer. `xfx` must be a valid function pointer obeying the same +/// calling convention; `state` is passed through to `xfx` and may be null if unused. 
/// /// # Panics /// -/// TODO +/// Panics if `fix_convolution` fails in the Rust API (for example invalid `conv_idx`). #[unsafe(no_mangle)] pub unsafe extern "C" fn pineappl_grid_fix_convolution( grid: *const Grid, @@ -2049,7 +2056,8 @@ pub unsafe extern "C" fn pineappl_grid_fix_convolution( /// /// # Safety /// -/// TODO +/// `grid` must be valid. `conv_types` must point to writable memory for `grid.convolutions().len()` +/// elements. #[unsafe(no_mangle)] pub unsafe extern "C" fn pineappl_grid_conv_types(grid: *const Grid, conv_types: *mut ConvType) { let grid = unsafe { &*grid }; @@ -2227,7 +2235,7 @@ pub unsafe extern "C" fn pineappl_grid_set_subgrid( /// /// # Panics /// -/// TODO +/// Panics if the new bin layout cannot be built or `set_bwfl` rejects it (for example bin count mismatch). /// /// # Safety /// @@ -2361,7 +2369,7 @@ pub type OperatorCallback = unsafe extern "C" fn( /// * `grid` - A `Grid` object /// * `op_info` - An array of `OperatorInfo` objects containing the information about the evolution. /// * `operator` - A callack that returns the evolution operator. -/// * `max_orders` - The maximum QCD and EW orders `(αs, α)`. +/// * `max_orders` - The maximum QCD and EW orders `(alpha_s, alpha)`. /// * `params_state` - Parameters that get passed to `operator`. /// * `x_in` - The x-grid that defines the Grid. /// * `x_out` - The x-grid that will define the evolved Grid. @@ -2370,7 +2378,7 @@ pub type OperatorCallback = unsafe extern "C" fn( /// * `eko_shape` - The shape of the evolution operator. /// * `xi` - The values that defines that scale variations. /// * `ren` - An array containing the values of the renormalization scale variation. -/// * `alphas` - An array containing the values of `αs`. It must have the same size as `ren1`. +/// * `alphas` - An array containing the values of `alpha_s`. It must have the same size as `ren1`. 
/// /// # Safety /// diff --git a/pineappl_py/src/boc.rs b/pineappl_py/src/boc.rs index 924f69bb..58c37a36 100644 --- a/pineappl_py/src/boc.rs +++ b/pineappl_py/src/boc.rs @@ -4,7 +4,7 @@ use numpy::{IntoPyArray, PyArray1}; use pineappl::boc::{Bin, BinsWithFillLimits, Channel, Kinematics, Order, ScaleFuncForm, Scales}; use pyo3::prelude::*; -/// PyO3 wrapper to :rustdoc:`pineappl::boc::Bin `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::Bin `. #[pyclass(name = "Bin")] #[repr(transparent)] #[derive(Clone)] @@ -18,7 +18,7 @@ impl PyBin { /// /// Parameters /// ---------- - /// bin_limits: list(tuple(float, float)) + /// `bin_limits`: list(tuple(float, float)) /// edges of the bins /// normalization: float /// normalization factor @@ -38,7 +38,7 @@ impl PyBin { /// dimension on which the observable is defined #[getter] #[must_use] - pub fn dimensions(&self) -> usize { + pub const fn dimensions(&self) -> usize { self.bin.dimensions() } @@ -67,7 +67,7 @@ impl PyBin { } } -/// PyO3 wrapper to :rustdoc:`pineappl::boc::Bin `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::Bin `. #[pyclass(name = "BinsWithFillLimits")] #[repr(transparent)] #[derive(Clone)] @@ -80,16 +80,14 @@ impl PyBinsWithFillLimits { /// Constructor for `BinsWithFillLimits`. /// /// # Panics - /// TODO /// - /// # Errors - /// TODO + /// Panics if `bins` and `fill_limits` are inconsistent (same rules as the Rust `BinsWithFillLimits::new`). /// /// Parameters /// ---------- /// bins: Bin /// a list containing the bin specifications - /// fill_limits: list(float) + /// `fill_limits`: list(float) /// edges of the bins #[new] #[must_use] @@ -106,11 +104,12 @@ impl PyBinsWithFillLimits { /// Construct the bin specifications using the bin edges. /// /// # Panics - /// TODO + /// + /// Panics if `fill_limits` cannot be converted into valid bins. 
/// /// Parameters /// ---------- - /// fill_limits: list(float) + /// `fill_limits`: list(float) /// edges of the bins #[must_use] #[staticmethod] @@ -123,7 +122,8 @@ impl PyBinsWithFillLimits { /// Construct the bin specifications using the edges and the normalizations. /// /// # Panics - /// TODO + /// + /// Panics if `limits` and `normalizations` are inconsistent with the Rust constructors. /// /// Parameters /// ---------- @@ -220,7 +220,7 @@ impl PyBinsWithFillLimits { /// Returns /// ------- /// list(list(tuple(float, float))): - /// the bin edges with shape (n_bins, n_dimension, 2) + /// the bin edges with shape (`n_bins`, `n_dimension`, 2) #[must_use] pub fn bin_limits(&self) -> Vec> { self.bins_fill_limits @@ -246,7 +246,7 @@ impl PyBinsWithFillLimits { } } -/// PyO3 wrapper to :rustdoc:`pineappl::boc::Channel `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::Channel `. /// /// Each entry consists of a tuple, which contains, in the following order: /// @@ -286,13 +286,13 @@ impl PyChannel { } } -/// PyO3 wrapper to :rustdoc:`pineappl::boc::Kinematics `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::Kinematics `. #[pyclass(eq, name = "Kinematics")] #[derive(Clone, PartialEq, Eq)] pub enum PyKinematics { - /// map to Kinematics::Scale + /// map to `Kinematics::Scale` Scale(usize), - /// map to Kinematics::X + /// map to `Kinematics::X` X(usize), } @@ -305,40 +305,40 @@ impl From for Kinematics { } } -/// PyO3 wrapper to :rustdoc:`pineappl::boc::ScaleFuncForm `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::ScaleFuncForm `. 
#[pyclass(eq, name = "ScaleFuncForm")] #[derive(Clone, PartialEq, Eq)] pub enum PyScaleFuncForm { - /// map to ScaleFuncForm::NoScale + /// map to `ScaleFuncForm::NoScale` /// NOTE No variant is not supported in complex enums NoScale(usize), - /// map to ScaleFuncForm::Scale + /// map to `ScaleFuncForm::Scale` Scale(usize), - /// map to ScaleFuncForm::QuadraticSum + /// map to `ScaleFuncForm::QuadraticSum` QuadraticSum(usize, usize), - /// map to ScaleFuncForm::QuadraticMean + /// map to `ScaleFuncForm::QuadraticMean` QuadraticMean(usize, usize), - /// map to ScaleFuncForm::QuadraticSumOver4 + /// map to `ScaleFuncForm::QuadraticSumOver4` QuadraticSumOver4(usize, usize), - /// map to ScaleFuncForm::LinearMean + /// map to `ScaleFuncForm::LinearMean` LinearMean(usize, usize), - /// map to ScaleFuncForm::LinearSum + /// map to `ScaleFuncForm::LinearSum` LinearSum(usize, usize), - /// map to ScaleFuncForm::ScaleMax + /// map to `ScaleFuncForm::ScaleMax` ScaleMax(usize, usize), - /// map to ScaleFuncForm::ScaleMin + /// map to `ScaleFuncForm::ScaleMin` ScaleMin(usize, usize), - /// map to ScaleFuncForm::Prod + /// map to `ScaleFuncForm::Prod` Prod(usize, usize), - /// map to ScaleFuncForm::S2plusS1half + /// map to `ScaleFuncForm::S2plusS1half` S2plusS1half(usize, usize), - /// map to ScaleFuncForm::Pow4Sum + /// map to `ScaleFuncForm::Pow4Sum` Pow4Sum(usize, usize), - /// map to ScaleFuncForm::WgtAvg + /// map to `ScaleFuncForm::WgtAvg` WgtAvg(usize, usize), - /// map to ScaleFuncForm::S2plusS1fourth + /// map to `ScaleFuncForm::S2plusS1fourth` S2plusS1fourth(usize, usize), - /// map to ScaleFuncForm::ExpProd2 + /// map to `ScaleFuncForm::ExpProd2` ExpProd2(usize, usize), } @@ -364,7 +364,7 @@ impl From for ScaleFuncForm { } } -/// PyO3 wrapper to :rustdoc:`pineappl::boc::Scales `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::Scales `. 
#[pyclass(name = "Scales")] pub struct PyScales { pub(crate) scales: Scales, @@ -381,6 +381,7 @@ impl PyScales { /// Constructor for `Scales` #[new] #[must_use] + #[allow(clippy::needless_pass_by_value)] pub fn news_scales( ren: PyRef, fac: PyRef, @@ -393,7 +394,7 @@ impl PyScales { } } -/// PyO3 wrapper to :rustdoc:`pineappl::boc::Order `. +/// `PyO3` wrapper to :rustdoc:`pineappl::boc::Order `. #[pyclass(name = "Order")] #[repr(transparent)] pub struct PyOrder { @@ -413,15 +414,15 @@ impl PyOrder { /// Parameters /// ---------- /// alphas : int - /// power of :math:`\alpha_s` + /// power of `alpha_s` /// alpha : int - /// power of :math:`\alpha` + /// power of alpha (electroweak coupling) /// logxir : int - /// power of :math:`\ln(\xi_r)` + /// power of `ln(xi_r)` (renormalization scale log) /// logxif : int - /// power of :math:`\ln(\xi_f)` + /// power of `ln(xi_f)` (factorization scale log) /// logxia : int - /// power of :math:`\ln(\xi_a)` + /// power of `ln(xi_a)` (fragmentation scale log) #[new] #[must_use] pub const fn new_order(alphas: u8, alpha: u8, logxir: u8, logxif: u8, logxia: u8) -> Self { @@ -433,15 +434,15 @@ impl PyOrder { /// Returns /// ------- /// alphas : int - /// power of :math:`\alpha_s` + /// power of `alpha_s` /// alpha : int - /// power of :math:`\alpha` + /// power of alpha (electroweak coupling) /// logxir : int - /// power of :math:`\ln(\xi_r)` + /// power of `ln(xi_r)` /// logxif : int - /// power of :math:`\ln(\xi_f)` + /// power of `ln(xi_f)` /// logxia : int - /// power of :math:`\ln(\xi_a)` + /// power of `ln(xi_a)` #[must_use] pub const fn as_tuple(&self) -> (u8, u8, u8, u8, u8) { ( @@ -468,6 +469,7 @@ impl PyOrder { /// boolean array, to be used as orders' mask #[staticmethod] #[must_use] + #[allow(clippy::needless_pass_by_value)] pub fn create_mask<'py>( orders: Vec>, max_as: u8, diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index d4f85f85..f464f719 100644 --- a/pineappl_py/src/fk_table.rs +++ 
b/pineappl_py/src/fk_table.rs @@ -15,7 +15,7 @@ use std::io::BufReader; use std::path::PathBuf; use std::str::FromStr; -/// PyO3 wrapper to :rustdoc:`pineappl::fk_table::FkAssumptions `. +/// `PyO3` wrapper to :rustdoc:`pineappl::fk_table::FkAssumptions `. #[pyclass(name = "FkAssumptions")] #[repr(transparent)] pub struct PyFkAssumptions { @@ -38,7 +38,7 @@ impl PyFkAssumptions { } } -/// PyO3 wrapper to :rustdoc:`pineappl::fk_table::FkTable `. +/// `PyO3` wrapper to :rustdoc:`pineappl::fk_table::FkTable `. #[pyclass(name = "FkTable")] #[repr(transparent)] pub struct PyFkTable { @@ -50,7 +50,8 @@ impl PyFkTable { /// Constructor from an existing grid. /// /// # Panics - /// TODO + /// + /// Panics if `grid` is not a valid FK-table-backed grid (`FkTable::try_from` fails). #[new] #[must_use] pub fn new(grid: PyGrid) -> Self { @@ -62,9 +63,10 @@ impl PyFkTable { /// Read an FK Table from given path. /// /// # Panics - /// TODO /// - /// Parameteters + /// Panics if the file cannot be opened, the grid cannot be read, or it cannot be converted to an FK table. + /// + /// Parameters /// ------------ /// path : str /// path to the FK table @@ -81,9 +83,6 @@ impl PyFkTable { /// Get cross section tensor. 
/// - /// # Errors - /// TODO - /// /// Returns /// ------- /// numpy.ndarray : @@ -113,7 +112,7 @@ impl PyFkTable { /// Returns /// ------- /// list(list(float)): - /// limits/edges of the bins with shape (n_bins, n_dimension, 2) + /// limits/edges of the bins with shape (`n_bins`, `n_dimension`, 2) #[must_use] pub fn bin_limits(&self) -> Vec> { self.fk_table @@ -201,7 +200,7 @@ impl PyFkTable { /// /// Returns /// ------- - /// x_grid : numpy.ndarray(float) + /// `x_grid` : numpy.ndarray(float) /// interpolation grid #[must_use] pub fn x_grid<'py>(&self, py: Python<'py>) -> Bound<'py, PyArray1> { @@ -222,7 +221,7 @@ impl PyFkTable { /// /// Parameters /// ---------- - /// pid_basis: PyPidBasis + /// `pid_basis`: `PyPidBasis` /// PID basis of the resulting FK Table pub fn rotate_pid_basis(&mut self, pid_basis: PyPidBasis) { self.fk_table.rotate_pid_basis(pid_basis.into()); @@ -292,18 +291,19 @@ impl PyFkTable { /// Convolve the FK table with as many distributions. /// /// # Panics - /// TODO + /// + /// Panics if Python PDF callbacks fail or if the underlying Rust convolution panics. /// /// Parameters /// ---------- - /// pdg_convs : list(PyConv) + /// `pdg_convs` : list(PyConv) /// list containing the types of convolutions and PID /// xfxs : list(callable) /// list of lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf - /// bin_indices : numpy.ndarray(int) + /// `bin_indices` : numpy.ndarray(int) /// A list with the indices of the corresponding bins that should be calculated. An /// empty list means that all bins should be calculated. - /// channel_mask : numpy.ndarray(bool) + /// `channel_mask` : numpy.ndarray(bool) /// Mask for selecting specific channels. The value `True` means the /// corresponding channel is included. An empty list corresponds to all channels being /// enabled. 
@@ -313,6 +313,7 @@ impl PyFkTable { /// numpy.ndarray(float) : /// cross sections for all bins #[must_use] + #[allow(clippy::needless_pass_by_value)] #[pyo3(signature = (pdg_convs, xfxs, bin_indices = None, channel_mask= None))] pub fn convolve<'py>( &self, @@ -357,9 +358,10 @@ impl PyFkTable { /// /// Parameters /// ---------- - /// assumptions : PyFkAssumptions - /// assumptions about the FkTable properties, declared by the user, deciding which + /// assumptions : `PyFkAssumptions` + /// assumptions about the `FkTable` properties, declared by the user, deciding which /// optimizations are possible + #[allow(clippy::needless_pass_by_value)] pub fn optimize(&mut self, assumptions: PyRef) { self.fk_table.optimize(assumptions.fk_assumptions); } diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 34941203..0b5cef9d 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -23,7 +23,7 @@ use std::fs::File; use std::io::BufReader; use std::path::PathBuf; -/// PyO3 wrapper to :rustdoc:`pineappl::grid::Grid `. +/// `PyO3` wrapper to :rustdoc:`pineappl::grid::Grid `. 
#[pyclass(name = "Grid", subclass)] #[repr(transparent)] #[derive(Clone)] @@ -43,13 +43,13 @@ impl PyGrid { /// /// Parameters /// ---------- - /// pid_basis : PidBasis + /// `pid_basis` : `PidBasis` /// choice of basis which can be `Evol` or `Pdg` /// channels : list(PyChannel) /// channels /// orders : list(PyOrder) /// orders - /// bins : PyBinsWithFillLimits + /// bins : `PyBinsWithFillLimits` /// bin configurations /// convolutions : list(PyConv) /// contains the types of convolution @@ -57,10 +57,11 @@ impl PyGrid { /// types of interpolations required by each kinematic /// kinematics : list(PyKinematics) /// list of kinematics - /// scale_funcs : PyScales + /// `scale_funcs` : `PyScales` /// `Scales` object #[new] #[must_use] + #[allow(clippy::needless_pass_by_value)] pub fn new_grid( pid_basis: PyPidBasis, channels: Vec>, @@ -108,6 +109,7 @@ impl PyGrid { /// list containing information on kinematics /// weight : float /// cross section weight + #[allow(clippy::needless_pass_by_value)] pub fn fill( &mut self, order: usize, @@ -143,6 +145,7 @@ impl PyGrid { /// list of `ntuple` kinematics /// weights : np.array(float) /// cross section weight for all events + #[allow(clippy::needless_pass_by_value)] pub fn fill_array( &mut self, order: usize, @@ -170,6 +173,7 @@ impl PyGrid { /// list containing information on kinematics /// weights : np.array(float) /// cross section weights, one for each channels + #[allow(clippy::needless_pass_by_value)] pub fn fill_all_channels( &mut self, order: usize, @@ -192,8 +196,9 @@ impl PyGrid { /// bin index /// channel : int /// channel index - /// subgrid : PySubgridEnum + /// subgrid : `PySubgridEnum` /// subgrid object + #[allow(clippy::needless_pass_by_value)] pub fn set_subgrid( &mut self, order: usize, @@ -208,7 +213,7 @@ impl PyGrid { /// /// Returns /// ------- - /// PyBinsWithFillLimits: + /// `PyBinsWithFillLimits`: /// a `PyBinsWithFillLimits` object with containing the bin specifications #[must_use] pub fn 
bwfl(&self) -> PyBinsWithFillLimits { @@ -220,11 +225,12 @@ impl PyGrid { /// Set the bin specifications for this grid. /// /// # Errors - /// TODO + /// + /// Raises `ValueError` if the new bin layout is incompatible with this grid (for example bin count mismatch). /// /// Parameters /// ---------- - /// specs: PyBinsWithFillLimits + /// specs: `PyBinsWithFillLimits` /// the object to define the bin specs pub fn set_bwfl(&mut self, specs: PyBinsWithFillLimits) -> PyResult<()> { match self.grid.set_bwfl(specs.bins_fill_limits) { @@ -269,7 +275,7 @@ impl PyGrid { /// Returns /// ------- /// list(list(float)): - /// limits/edges of the bins with shape (n_bins, n_dimension, 2) + /// limits/edges of the bins with shape (`n_bins`, `n_dimension`, 2) #[must_use] pub fn bin_limits(&self) -> Vec> { self.grid @@ -324,9 +330,6 @@ impl PyGrid { /// Set a metadata key-value pair in the grid. /// - /// # Panics - /// TODO - /// /// Parameters /// ---------- /// key : str @@ -355,23 +358,25 @@ impl PyGrid { /// Convolve the grid with as many distributions. /// /// # Panics - /// TODO + /// + /// Panics if Python callbacks fail, if the convolution cache cannot be matched to the grid, or + /// if mask lengths are inconsistent with the grid when non-empty. /// /// Parameters /// ---------- - /// pdg_convs : list(PyConv) + /// `pdg_convs` : list(PyConv) /// list containing the types of convolutions and PID /// xfxs : list(callable) /// list of lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf /// alphas : callable - /// lhapdf like callable with arguments `Q2` returning :math:`\alpha_s` - /// order_mask : numpy.ndarray(bool) + /// LHAPDF-like callable with argument `Q2` returning `alpha_s(Q2)` + /// `order_mask` : numpy.ndarray(bool) /// Mask for selecting specific orders. The value `True` means the corresponding order /// is included. An empty list corresponds to all orders being enabled. 
- /// bin_indices : numpy.ndarray(int) + /// `bin_indices` : numpy.ndarray(int) /// A list with the indices of the corresponding bins that should be calculated. An /// empty list means that all bins should be calculated. - /// channel_mask : numpy.ndarray(bool) + /// `channel_mask` : numpy.ndarray(bool) /// Mask for selecting specific channels. The value `True` means the /// corresponding channel is included. An empty list corresponds to all channels being /// enabled. @@ -388,6 +393,7 @@ impl PyGrid { /// cross sections for all bins, for each scale-variation tuple (first all bins, then /// the scale variation) #[must_use] + #[allow(clippy::needless_pass_by_value)] #[pyo3(signature = (pdg_convs, xfxs, alphas, order_mask = None, bin_indices = None, channel_mask = None, xi = None))] pub fn convolve<'py>( &self, @@ -438,7 +444,8 @@ impl PyGrid { /// /// # Panics /// - /// TODO + /// Panics if the wrapped Rust `fix_convolution` fails (for example invalid convolution index) + /// or if the PDF callback raises or returns a non-float. /// /// Parameters /// ---------- @@ -467,17 +474,14 @@ impl PyGrid { /// Collect information for convolution with an evolution operator. 
/// - /// # Panics - /// TODO - /// /// Parameters /// ---------- - /// order_mask : numpy.ndarray(bool) + /// `order_mask` : numpy.ndarray(bool) /// boolean mask to activate orders /// /// Returns /// ------- - /// PyEvolveInfo : + /// `PyEvolveInfo` : /// evolution informations #[must_use] pub fn evolve_info(&self, order_mask: Vec) -> PyEvolveInfo { @@ -500,21 +504,22 @@ impl PyGrid { /// /// Parameters /// ---------- - /// slices : list(Generator(tuple(PyOperatorSliceInfo, PyReadOnlyArray4))) - /// list of EKOs where each element is in turn a list of (PyOperatorSliceInfo, 4D array) - /// order_mask : numpy.ndarray(bool) + /// slices : list(Generator(tuple(PyOperatorSliceInfo, `PyReadOnlyArray4`))) + /// list of EKOs where each element is in turn a list of (`PyOperatorSliceInfo`, 4D array) + /// `order_mask` : numpy.ndarray(bool) /// boolean mask to activate orders /// xi : (float, float) /// factorization and renormalization variation /// ren1 : numpy.ndarray(float) /// list of renormalization scales /// alphas : numpy.ndarray(float) - /// list with :math:`\alpha_s(Q2)` for the process scales + /// list with `alpha_s(Q2)` at the corresponding entries of `ren1` /// /// Returns /// ------- - /// PyFkTable : + /// `PyFkTable` : /// produced FK table + #[allow(clippy::needless_pass_by_value)] pub fn evolve( &self, slices: Vec>, @@ -566,7 +571,7 @@ impl PyGrid { /// /// Returns /// ------- - /// PyGrid : + /// `PyGrid` : /// grid #[must_use] #[staticmethod] @@ -644,9 +649,6 @@ impl PyGrid { /// Merge with another grid. 
/// - /// # Panics - /// TODO - /// /// # Errors /// /// If the bin limits of `self` and `other` are different and if the bin limits of `other` can @@ -737,7 +739,7 @@ impl PyGrid { /// /// Parameters /// ---------- - /// pid_basis: PyPidBasis + /// `pid_basis`: `PyPidBasis` /// PID basis of the resulting Grid pub fn rotate_pid_basis(&mut self, pid_basis: PyPidBasis) { self.grid.rotate_pid_basis(pid_basis.into()); @@ -760,22 +762,17 @@ impl PyGrid { /// Scale subgrids bin by bin. /// - /// # Panics - /// TODO - /// /// Parameters /// ---------- /// factors : list[float] /// bin-dependent factors by which to scale + #[allow(clippy::needless_pass_by_value)] pub fn scale_by_bin(&mut self, factors: Vec) { self.grid.scale_by_bin(&factors); } /// Scale subgrids by order. /// - /// # Panics - /// TODO - /// /// Parameters /// ---------- /// alphas : float @@ -806,23 +803,22 @@ impl PyGrid { /// /// Parameters /// ---------- - /// order_indices : list[int] + /// `order_indices` : list[int] /// list of indices of orders to be removed + #[allow(clippy::needless_pass_by_value)] pub fn delete_orders(&mut self, order_indices: Vec) { self.grid.delete_orders(&order_indices); } /// Delete bins. /// - /// # Panics - /// TODO - /// /// Repeated bins and those exceeding the length are ignored. 
/// /// Parameters /// ---------- - /// bin_indices : list[int] + /// `bin_indices` : list[int] /// list of indices of bins to be removed + #[allow(clippy::needless_pass_by_value)] pub fn delete_bins(&mut self, bin_indices: Vec) { self.grid.delete_bins(&bin_indices); } @@ -832,8 +828,9 @@ impl PyGrid { /// /// Parameters /// ---------- - /// bin_indices : list[int] + /// `bin_indices` : list[int] /// list of indices of bins to be removed + #[allow(clippy::needless_pass_by_value)] pub fn delete_channels(&mut self, channel_indices: Vec) { self.grid.delete_channels(&channel_indices); } diff --git a/pineappl_py/src/subgrid.rs b/pineappl_py/src/subgrid.rs index bd8ca740..d7b89a04 100644 --- a/pineappl_py/src/subgrid.rs +++ b/pineappl_py/src/subgrid.rs @@ -6,7 +6,7 @@ use pineappl::packed_array::PackedArray; use pineappl::subgrid::{ImportSubgridV1, Subgrid, SubgridEnum}; use pyo3::prelude::*; -/// PyO3 wrapper to :rustdoc:`pineappl::subgrid::ImportSubgridV1 `. +/// `PyO3` wrapper to :rustdoc:`pineappl::subgrid::ImportSubgridV1 `. #[pyclass(name = "ImportSubgridV1")] #[derive(Clone)] #[repr(transparent)] @@ -19,17 +19,19 @@ impl PyImportSubgridV1 { /// Constructor. /// /// # Panics - /// TODO + /// + /// Panics if the array shape does not match `node_values` or indexing the packed array fails. /// /// Parameters /// ---------- /// array : numpy.ndarray(float) /// `N`-dimensional array with all weights - /// node_values: list(list(float)) + /// `node_values`: list(list(float)) /// list containing the arrays of energy scales {q1, ..., qn} and momentum fractions /// {x1, ..., xn}. 
#[new] #[must_use] + #[allow(clippy::needless_pass_by_value)] pub fn new(array: PyReadonlyArrayDyn, node_values: Vec>) -> Self { let mut sparse_array: PackedArray = PackedArray::new(node_values.iter().map(Vec::len).collect()); @@ -56,7 +58,7 @@ impl PyImportSubgridV1 { } } -/// PyO3 wrapper to :rustdoc:`pineappl::subgrid::SubgridEnum ` +/// `PyO3` wrapper to :rustdoc:`pineappl::subgrid::SubgridEnum ` #[pyclass(name = "SubgridEnum")] #[derive(Clone)] #[repr(transparent)] From 4e3f09c39371c2cb2e933191310ea32a0004744a Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Tue, 31 Mar 2026 14:59:16 +0200 Subject: [PATCH 04/10] Addresses #388 --- pineappl/src/convolutions.rs | 26 ++++++++++++++++++++++++-- pineappl/src/grid.rs | 15 +++++++++++---- pineappl/src/subgrid.rs | 16 ++++++++++++++-- pineappl_capi/src/lib.rs | 4 ++++ pineappl_py/src/fk_table.rs | 6 +++++- pineappl_py/src/grid.rs | 15 ++++++++++++--- pineappl_py/src/subgrid.rs | 36 +++++++++++++++++++++++++++++------- 7 files changed, 99 insertions(+), 19 deletions(-) diff --git a/pineappl/src/convolutions.rs b/pineappl/src/convolutions.rs index f6ca752a..40dd206b 100644 --- a/pineappl/src/convolutions.rs +++ b/pineappl/src/convolutions.rs @@ -1,4 +1,20 @@ //! Module for everything related to convolution functions. +//! +//! # Convention for callbacks (`x * f`) versus imported subgrids (`f`) +//! +//! Convolution callbacks (the `xfx` closures passed to [`ConvolutionCache::new`]) follow the +//! LHAPDF convention: for each parton PID, momentum fraction `x`, and squared scale `Q2`, they +//! must return **`x * f(x, Q2)`** (PDF or FF as used by LHAPDF). +//! +//! When [`Grid::convolve`](crate::grid::Grid::convolve) evaluates a luminosity, it recovers the +//! parton-level factor **`f`** by using `xfx(pid, x, Q2) / x` before multiplying stored subgrid +//! coefficients (see [`GridConvCache::as_fx_prod`]). That matches grids built by filling +//! 
([`InterpSubgridV1`](crate::subgrid::InterpSubgridV1)) and grids built by importing +//! ([`ImportSubgridV1`](crate::subgrid::ImportSubgridV1)). The import path is mainly for **coefficient +//! functions** dumped from outside PineAPPL that are defined to be convolved with **`f`**; such data +//! must already use the **`f` (not `x * f`)** convention, as in equation (2.8) of the PineAPPL paper. +//! If you put `x * f` into an imported subgrid and also pass standard LHAPDF `xfx`, the `x` factor +//! is applied twice. See [issue #388](https://github.com/NNPDF/pineappl/issues/388). use super::boc::Kinematics; use super::boc::Scales; @@ -22,6 +38,8 @@ struct ConvCache1d<'a> { /// A cache for evaluating PDFs. Methods like [`Grid::convolve`] accept instances of this `struct` /// instead of the PDFs themselves. +/// +/// Callbacks must return **`x * f`** as documented in the [module-level description](crate::convolutions). pub struct ConvolutionCache<'a> { caches: Vec>, alphas: &'a mut dyn FnMut(f64) -> f64, @@ -34,8 +52,9 @@ impl<'a> ConvolutionCache<'a> { /// Construct a new convolution cache. /// /// - `convolutions` describes each convolution function (PDF/FF type and hadron PID). - /// - `xfx` provides one callback per convolution, used to evaluate `x * f(x, Q2)` for - /// a given PID, `x`, and squared scale `Q2`. + /// - `xfx` provides one callback per convolution, used to evaluate **`x * f(x, Q2)`** (LHAPDF + /// style) for a given PID, `x`, and squared scale `Q2`. Internally, convolution uses `f` via + /// `xfx(...) / x`, consistent with stored grid coefficients (see module docs). /// - `alphas` provides a callback given `Q2` (squared renormalization scale). /// /// The cache is filled lazily as [`Grid`] convolution is performed. @@ -206,6 +225,9 @@ impl GridConvCache<'_, '_> { /// - the remaining indices correspond to the `x` dimensions in the same order as `pdg_ids`. /// /// This restriction is tracked in the codebase (see the TODO in the implementation). 
+ /// + /// Each parton factor is `xfx(pid, x, Q2) / x`, i.e. the PDF/FF **`f`** matching imported + /// subgrid data. Callbacks `xfx` still follow the **`x * f`** LHAPDF convention. pub fn as_fx_prod(&mut self, pdg_ids: &[i32], as_order: u8, indices: &[usize]) -> f64 { // TODO: here we assume that // - indices[0] is the (squared) factorization scale, diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index b8c540ef..7847c818 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -199,6 +199,12 @@ impl Grid { /// first factor varies the renormalization scale, the second the factorization scale. Note /// that for the variation to be trusted all non-zero log-grids must be contained. /// + /// Convolution callbacks in `lumi_cache` must return **`x * f`** (LHAPDF). Internally, factors + /// **`f`** are used so that they match subgrid data from [`InterpSubgridV1`](crate::subgrid::InterpSubgridV1) + /// and from [`ImportSubgridV1`](crate::subgrid::ImportSubgridV1). The latter is mainly for + /// **coefficient functions** dumped from outside PineAPPL that are defined to be convolved with + /// **`f`**; those imported values must already use the **`f`** convention. See [`crate::convolutions`]. + /// /// # Panics /// /// Panics if [`ConvolutionCache::new_grid_conv_cache`] cannot match the grid convolutions to @@ -1442,10 +1448,11 @@ impl Grid { /// This function integrates out one of the convolution dimensions of the grid by convolving it /// with the provided function `xfx`. /// - /// The `conv_idx` parameter specifies which convolution to fix. The `xfx` function provides - /// the values of the parton distribution function or fragmentation function for a given parton - /// ID, `x` value, and scale `mu2`. The `xi` parameter is a scale factor for the factorization - /// or fragmentation scale. + /// The `conv_idx` parameter specifies which convolution to fix. 
The `xfx` function must follow + /// the LHAPDF convention and return **`x * f(x, mu2)`** for the given parton ID, `x`, and squared + /// scale `mu2`. The implementation uses **`f`** when folding into subgrid coefficients (see + /// [`crate::convolutions`]). The `xi` parameter is a scale factor for the factorization or + /// fragmentation scale. /// /// # Special handling of fragmentation functions /// diff --git a/pineappl/src/subgrid.rs b/pineappl/src/subgrid.rs index 9ebc7437..66af5801 100644 --- a/pineappl/src/subgrid.rs +++ b/pineappl/src/subgrid.rs @@ -70,6 +70,15 @@ impl Subgrid for EmptySubgridV1 { } /// Dense imported subgrid backed by a [`PackedArray`] and explicit node coordinates per dimension. +/// +/// This type exists mainly so you can **inject coefficient functions** produced from DIS-like codes. +/// These are defined to be convolved with the parton density **`f`**. +/// +/// # Parton factors: `f` versus `x * f` +/// +/// Coefficients you store here (and the node values you attach) must match the PineAPPL definition +/// where the hard cross section is combined with parton distributions as **`f(x)`**, **not** with +/// **`x * f(x)`**. #[derive(Clone, Deserialize, Serialize)] pub struct ImportSubgridV1 { array: PackedArray, @@ -184,7 +193,10 @@ impl Subgrid for ImportSubgridV1 { } impl ImportSubgridV1 { - /// Constructor. + /// Build an imported subgrid from packed coefficients and per-dimension node values. + /// + /// Intended for **external coefficient dumps** that must be folded with **`f`**; see [`ImportSubgridV1`]. + /// Values in `array` must use the **`f` (not `x * f`)** convention. #[must_use] pub const fn new(array: PackedArray, node_values: Vec>) -> Self { Self { array, node_values } @@ -408,7 +420,7 @@ pub enum SubgridEnum { InterpSubgridV1, /// Empty subgrid. EmptySubgridV1, - /// Imported sparse layout with arbitrary node grids per dimension. 
+ /// Imported sparse layout for **injected coefficient tables** convolved with **`f`** (see [`ImportSubgridV1`]). ImportSubgridV1, } diff --git a/pineappl_capi/src/lib.rs b/pineappl_capi/src/lib.rs index 7979187e..4e290d52 100644 --- a/pineappl_capi/src/lib.rs +++ b/pineappl_capi/src/lib.rs @@ -2181,6 +2181,10 @@ pub unsafe extern "C" fn pineappl_grid_subgrid_array( /// Set the subgrid of a Grid for a given bin, order, and channel. /// +/// This installs an [`ImportSubgridV1`], mainly for **tabulated coefficients from another program** +/// that must be folded with parton **`f`**. Non-zero `subgrid_array` entries must use the **`f`** +/// convention (density or fragmentation as a function of momentum fraction), **not** **`x * f`**. +/// /// # Safety /// /// If `grid` does not point to a valid `Grid` object, for example when `grid` is the null pointer, diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index f464f719..6fa2f216 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -290,6 +290,10 @@ impl PyFkTable { /// Convolve the FK table with as many distributions. /// + /// ``xfx`` callables must return ``x * f`` (LHAPDF). If the underlying filling grid used + /// :class:`pineappl.subgrid.ImportSubgridV1` (**external coefficient dumps** meant to fold + /// with ``f``), those values must have been stored as ``f``, not ``x * f``. + /// /// # Panics /// /// Panics if Python PDF callbacks fail or if the underlying Rust convolution panics. @@ -299,7 +303,7 @@ impl PyFkTable { /// `pdg_convs` : list(PyConv) /// list containing the types of convolutions and PID /// xfxs : list(callable) - /// list of lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf + /// list of LHAPDF-like callables ``(pid, x, Q2) -> x * f`` /// `bin_indices` : numpy.ndarray(int) /// A list with the indices of the corresponding bins that should be calculated. An /// empty list means that all bins should be calculated. 
diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index 0b5cef9d..e630c817 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -188,6 +188,10 @@ impl PyGrid { /// Set a subgrid. /// + /// Typical use with :class:`pineappl.subgrid.ImportSubgridV1`: load **coefficient tables dumped + /// from outside PineAPPL** that are **meant to be convolved with ``f``**. Those coefficients must + /// use the parton ``f(x)`` convention, not ``x * f(x)``. + /// /// Parameters /// ---------- /// order : int @@ -357,6 +361,11 @@ impl PyGrid { /// Convolve the grid with as many distributions. /// + /// Each ``xfx`` callable must return ``x * f(x, Q2)`` (LHAPDF style). The library uses ``f`` when + /// combining with stored subgrid coefficients. Coefficients injected via + /// :class:`pineappl.subgrid.ImportSubgridV1` are mainly **external dumps** defined to fold with + /// ``f``; they must use ``f``, not ``x * f``. + /// /// # Panics /// /// Panics if Python callbacks fail, if the convolution cache cannot be matched to the grid, or @@ -367,7 +376,7 @@ impl PyGrid { /// `pdg_convs` : list(PyConv) /// list containing the types of convolutions and PID /// xfxs : list(callable) - /// list of lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf + /// list of LHAPDF-like callables ``(pid, x, Q2) -> x * f`` /// alphas : callable /// LHAPDF-like callable with argument `Q2` returning `alpha_s(Q2)` /// `order_mask` : numpy.ndarray(bool) @@ -451,8 +460,8 @@ impl PyGrid { /// ---------- /// ``conv_idx``: usize /// index of the convolution (zero-based) - /// ``xfxs`` : callable - /// lhapdf-like callable with arguments `pid, x, Q2` returning x*pdf + /// xfx : callable + /// LHAPDF-like ``(pid, x, Q2) -> x * f`` /// ``xi``: float #[must_use] #[pyo3(signature = (conv_idx, xfx, xi = 1.0))] diff --git a/pineappl_py/src/subgrid.rs b/pineappl_py/src/subgrid.rs index d7b89a04..9c896c41 100644 --- a/pineappl_py/src/subgrid.rs +++ b/pineappl_py/src/subgrid.rs @@ 
-1,4 +1,12 @@ //! Subgrid interface. +//! +//! [`ImportSubgridV1`] is mainly for **injecting coefficient tables from other programs** (dumped +//! grids, external MC output, etc.) that are **meant to be convolved with parton `f(x)`**, not for the +//! usual event-by-event filling of a grid. Those coefficients use the **`f(x)`** convention (and the +//! analogous fragmentation variable), **not** **`x * f(x)`**. When you convolve with LHAPDF-style +//! callables that return **`x * f`**, the Rust library divides by `x` so the result matches. Do not +//! store **`x * f`** in an imported subgrid or you double-count `x`. See the Rust `pineappl::convolutions` +//! and `pineappl::subgrid::ImportSubgridV1` docs and . use ndarray::{ArrayD, Dimension}; use numpy::{IntoPyArray, PyArrayDyn, PyReadonlyArrayDyn}; @@ -6,7 +14,13 @@ use pineappl::packed_array::PackedArray; use pineappl::subgrid::{ImportSubgridV1, Subgrid, SubgridEnum}; use pyo3::prelude::*; -/// `PyO3` wrapper to :rustdoc:`pineappl::subgrid::ImportSubgridV1 `. +/// Sparse subgrid for **coefficient functions dumped from outside PineAPPL**, to be convolved with **f**. +/// +/// Typical use: load tabulated coefficients from another code; they must be defined to fold with +/// parton **f(x)**, not **x * f(x)**. Non-zero entries in ``array`` follow that **f** convention. +/// :class:`~pineappl.grid.Grid` ``.convolve`` still expects LHAPDF-style callables that return +/// ``x * f``; the core library divides by ``x`` when building the luminosity. See the submodule +/// docstring and `issue 388 `__. #[pyclass(name = "ImportSubgridV1")] #[derive(Clone)] #[repr(transparent)] @@ -16,7 +30,10 @@ pub struct PyImportSubgridV1 { #[pymethods] impl PyImportSubgridV1 { - /// Constructor. + /// Constructor for **imported coefficient tables** (external dumps), convolved with ``f`` later. 
+ /// + /// Stored weights must be ``f(x)``, not ``x * f(x)``, so they match :meth:`pineappl.grid.Grid.convolve` + /// with standard LHAPDF ``xfx`` callbacks (returning ``x * f``). /// /// # Panics /// @@ -25,10 +42,9 @@ impl PyImportSubgridV1 { /// Parameters /// ---------- /// array : numpy.ndarray(float) - /// `N`-dimensional array with all weights - /// `node_values`: list(list(float)) - /// list containing the arrays of energy scales {q1, ..., qn} and momentum fractions - /// {x1, ..., xn}. + /// N-dimensional array of non-zero coefficients (``f`` convention: fold with ``f``, not ``x * f``). + /// node_values : list(list(float)) + /// Per-dimension node coordinates (scales, x values, etc.). #[new] #[must_use] #[allow(clippy::needless_pass_by_value)] @@ -119,7 +135,13 @@ impl PySubgridEnum { /// Raises Errors if (sub-)module is not found. pub fn register(parent_module: &Bound<'_, PyModule>) -> PyResult<()> { let m = PyModule::new(parent_module.py(), "subgrid")?; - m.setattr(pyo3::intern!(m.py(), "__doc__"), "Subgrid interface.")?; + m.setattr( + pyo3::intern!(m.py(), "__doc__"), + "Subgrid interface.\n\n\ + ImportSubgridV1 is for coefficient tables dumped from outside PineAPPL, to be convolved with f; \ + array values use the f(x) convention, not x*f(x). 
Grid.convolve uses LHAPDF x*f callbacks \ + (see pineappl issue 388).", + )?; pyo3::py_run!( parent_module.py(), m, From 81fe462d06280b0e07729530232cf2a39d049678 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Wed, 1 Apr 2026 09:20:52 +0200 Subject: [PATCH 05/10] Re-organize Python modules documentation --- pineappl_py/docs/source/modules/pineappl.rst | 33 +++++++++++++++----- pineappl_py/src/fk_table.rs | 5 +-- pineappl_py/src/grid.rs | 10 +++--- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/pineappl_py/docs/source/modules/pineappl.rst b/pineappl_py/docs/source/modules/pineappl.rst index 3bbe4b8e..fa56d710 100644 --- a/pineappl_py/docs/source/modules/pineappl.rst +++ b/pineappl_py/docs/source/modules/pineappl.rst @@ -2,41 +2,60 @@ PineAPPL's Python API ===================== .. automodule:: pineappl - :members: - :undoc-members: - :show-inheritance: + :no-index: -.. automodule:: pineappl.bin +Bins, orders, channels (`boc`) +------------------------------ +.. automodule:: pineappl.boc :members: :undoc-members: :show-inheritance: -.. automodule:: pineappl.channel +Convolutions +------------ +.. automodule:: pineappl.convolutions :members: :undoc-members: :show-inheritance: +Evolution +--------- .. automodule:: pineappl.evolution :members: :undoc-members: :show-inheritance: +FK tables +--------- .. automodule:: pineappl.fk_table :members: :undoc-members: :show-inheritance: +Grids +----- .. automodule:: pineappl.grid :members: :undoc-members: :show-inheritance: -.. automodule:: pineappl.subgrid +Interpolation +------------- +.. automodule:: pineappl.interpolation :members: :undoc-members: :show-inheritance: -.. automodule:: pineappl.import_only_subgrid +PID Basis +--------- +.. automodule:: pineappl.pids + :members: + :undoc-members: + :show-inheritance: + +Subgrids +-------- +.. 
automodule:: pineappl.subgrid :members: :undoc-members: :show-inheritance: diff --git a/pineappl_py/src/fk_table.rs b/pineappl_py/src/fk_table.rs index 6fa2f216..8748dcaa 100644 --- a/pineappl_py/src/fk_table.rs +++ b/pineappl_py/src/fk_table.rs @@ -94,8 +94,9 @@ impl PyFkTable { /// Get the type(s) of convolution(s) for the current FK table. /// /// Returns - /// list(PyConv): - /// list of convolution type with the corresponding PIDs + /// ------- + /// list of `PyConv` + /// One entry per convolution (type and PID). #[getter] #[must_use] pub fn convolutions(&self) -> Vec { diff --git a/pineappl_py/src/grid.rs b/pineappl_py/src/grid.rs index e630c817..6839bb3d 100644 --- a/pineappl_py/src/grid.rs +++ b/pineappl_py/src/grid.rs @@ -689,8 +689,9 @@ impl PyGrid { /// Get the type(s) of convolution(s) for the current Grid. /// /// Returns - /// list(PyConv): - /// list of convolution type with the corresponding PIDs + /// ------- + /// list of `PyConv` + /// One entry per convolution (type and PID). #[getter] #[must_use] pub fn convolutions(&self) -> Vec { @@ -704,8 +705,9 @@ impl PyGrid { /// Get the interpolation specifications for the current grid. /// /// Returns - /// list(PyInterp): - /// list of interpolation specifications + /// ------- + /// list of `PyInterp` + /// One interpolation spec per kinematic dimension. 
#[getter] #[must_use] pub fn interpolations(&mut self) -> Vec { From 578486f8429108770dc229d62f9cbe8188dd5a40 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Wed, 1 Apr 2026 12:23:11 +0200 Subject: [PATCH 06/10] =?UTF-8?q?Properly=20fix=20Drell=E2=80=94Yan=20spel?= =?UTF-8?q?ling?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pineappl/src/boc.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pineappl/src/boc.rs b/pineappl/src/boc.rs index 73572cc0..8159c03e 100644 --- a/pineappl/src/boc.rs +++ b/pineappl/src/boc.rs @@ -786,7 +786,7 @@ impl Order { /// /// # Example /// - /// In the case of Drell-Yan, there are the following orders: + /// In the case of Drell—Yan, there are the following orders: /// /// - exactly one leading order (LO), /// - two next-to-leading orders (NLO), which are @@ -1297,6 +1297,7 @@ mod tests { #[test] fn order_create_mask() { + // Drell—Yan orders let orders = [ Order::new(0, 2, 0, 0, 0), // LO : alpha^2 Order::new(1, 2, 0, 0, 0), // NLO QCD : alphas alpha^2 From 29ba63f48a52de30f8439a00c181161d739e8bad Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Wed, 29 Apr 2026 18:18:55 +0200 Subject: [PATCH 07/10] Undo added `const` in the main crate --- pineappl/src/grid.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/grid.rs b/pineappl/src/grid.rs index 3b95d0cf..62dec766 100644 --- a/pineappl/src/grid.rs +++ b/pineappl/src/grid.rs @@ -981,7 +981,7 @@ impl Grid { } /// Upgrades the internal data structures to their latest versions. - pub const fn upgrade(&mut self) {} + pub fn upgrade(&mut self) {} /// Return the metadata of this grid. 
#[must_use] From 037af2ac333251648d91e118dd843f31cca5a369 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Wed, 29 Apr 2026 18:24:10 +0200 Subject: [PATCH 08/10] Use en-dash instead of em-dash --- pineappl/src/boc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pineappl/src/boc.rs b/pineappl/src/boc.rs index 9f8906cd..1b481cd5 100644 --- a/pineappl/src/boc.rs +++ b/pineappl/src/boc.rs @@ -784,7 +784,7 @@ impl Order { /// /// # Example /// - /// In the case of Drell—Yan, there are the following orders: + /// In the case of Drell–Yan, there are the following orders: /// /// - exactly one leading order (LO), /// - two next-to-leading orders (NLO), which are From b35684a68ab0fd63eab03405af236d121e5e9157 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Sat, 9 May 2026 14:31:48 +0200 Subject: [PATCH 09/10] Add list of programs using PineAPPL as table --- README.md | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/README.md b/README.md index 9c763551..e556840f 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,75 @@ one: - [Installation instructions](docs/installation.md) - [Changelog](CHANGELOG.md) +# Tools using PineAPPL + +Below are some samples of tools in high-energy physics that uses PineAPPL. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Tool — Description
Parton-Level Monte Carlo Generators
NNLOJet — Parton-level event generator for jet cross sections at NNLO QCD accuracy.
MATRIX — Parton-level event generator for fully differential cross sections at NNLO.
General-Purpose Monte Carlo Generators
MadGraph5_aMC@NLO | Multi-purpose event generator for LO and NLO calculations.
Fixed-order Codes
PineAPFEL | APFEL++ interface that produces PineAPPL grids with coefficient functions for unpolarized and longitudinally polarized DIS, SIA, and SIDIS.
Yadism | Computes DIS coefficient functions up to N3LO for unpolarized and longitudinally polarized processes.
Evolution Codes
PineAPFEL | Evolves theory predictions stored as PineAPPL grids using APFEL++.
Pineko | Evolves theory predictions stored as PineAPPL grids using EKO evolution operators.
PDF Fitting Codes
nCTEQ | Framework for extracting nuclear parton distribution functions.
NNPDF | Machine learning framework for global PDF analyses.
xFitter | QCD fit framework designed to extract PDFs.
Interfaces to Other Grid Formats
APPLgrid | Fast interpolation grid library for PDF-independent storage of perturbative QCD calculations.
fastNLO | Framework for fast pQCD calculations for hadron-induced processes.
+ # Citation If you use PineAPPL, please cite From 6efabb1b5b2cbcad2c41793d7eabde0c0bd7b9e8 Mon Sep 17 00:00:00 2001 From: Radonirinaunimi Date: Sun, 10 May 2026 18:19:21 +0200 Subject: [PATCH 10/10] Redesign "Tools using PineAPPL" section --- README.md | 76 +++++++++---------------------------------------------- 1 file changed, 12 insertions(+), 64 deletions(-) diff --git a/README.md b/README.md index e556840f..c2887b41 100644 --- a/README.md +++ b/README.md @@ -39,70 +39,18 @@ one: Below are some samples of tools in high-energy physics that uses PineAPPL. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Tool | Description
Parton-Level Monte Carlo Generators
NNLOJet | Parton-level event generator for jet cross sections at NNLO QCD accuracy.
MATRIX | Parton-level event generator for fully differential cross sections at NNLO.
General-Purpose Monte Carlo Generators
MadGraph5_aMC@NLO | Multi-purpose event generator for LO and NLO calculations.
Fixed-order Codes
PineAPFEL | APFEL++ interface that produces PineAPPL grids with coefficient functions for unpolarized and longitudinally polarized DIS, SIA, and SIDIS.
Yadism | Computes DIS coefficient functions up to N3LO for unpolarized and longitudinally polarized processes.
Evolution Codes
PineAPFEL | Evolves theory predictions stored as PineAPPL grids using APFEL++.
Pineko | Evolves theory predictions stored as PineAPPL grids using EKO evolution operators.
PDF Fitting Codes
nCTEQ | Framework for extracting nuclear parton distribution functions.
NNPDF | Machine learning framework for global PDF analyses.
xFitter | QCD fit framework designed to extract PDFs.
Interfaces to Other Grid Formats
APPLgrid | Fast interpolation grid library for PDF-independent storage of perturbative QCD calculations.
fastNLO | Framework for fast pQCD calculations for hadron-induced processes.
+| Tool | Description | +|------|-------------| +| [**MadGraph5_aMC@NLO**](https://github.com/mg5amcnlo/mg5amcnlo) | Multi-purpose event generator for LO and NLO calculations. | +| [**MATRIX**](https://matrix.hepforge.org/) | Parton-level event generator for fully differential cross sections at NNLO. | +| [**nCTEQ**](https://ncteq.hepforge.org/) | Framework for extracting nuclear parton distribution functions. | +| [**NNLOJet**](https://nnlojet.hepforge.org/index.html) | Parton-level event generator for jet cross sections at NNLO QCD accuracy. | +| [**NNPDF**](https://github.com/NNPDF/nnpdf) | Machine learning framework for global PDF analyses. | +| [**NNSFν**](https://github.com/NNPDF/nnusf) | Machine learning framework for all-energy neutrino structure functions. | +| [**PineAPFEL**](https://github.com/QCDLab/PineAPFEL) | [APFEL++](https://github.com/vbertone/apfelxx) interface that produces and evolve PineAPPL grids with coefficient functions for unpolarized and longitudinally polarized DIS, SIA, and SIDIS. | +| [**Pineko**](https://github.com/NNPDF/pineko) | Evolves theory predictions stored as PineAPPL grids using [EKO](https://github.com/NNPDF/eko) evolution operators. | +| [**xFitter**](https://gitlab.cern.ch/fitters/xfitter) | QCD fit framework designed to extract PDFs. | +| [**Yadism**](https://github.com/NNPDF/yadism) | Computes DIS coefficient functions up to N3LO for unpolarized and longitudinally polarized processes. | # Citation