diff --git a/benchmarks/apfel_bench.py b/benchmarks/apfel_bench.py index 1d44f3f16..882b982ac 100644 --- a/benchmarks/apfel_bench.py +++ b/benchmarks/apfel_bench.py @@ -94,8 +94,9 @@ def benchmark_msbar(self, pto): MSbar heavy quark mass scheme when passing kthr != 1 both apfel and eko use ``kThr * msbar``, as thr scale, where ``msbar`` is the usual ms_bar solution. - However apfel and eko mange the alpha_s thr differently - (apfel uses the given mass parameters as thr), so the + However apfel and eko manage the alpha_s thr differently: + apfel uses the given mass parameters as thr multiplied by the kthr, + while in eko only the already computed thr matters, so the benchmark is not a proper comparison with this option allowed. """ th = self.vfns_theory.copy() @@ -179,4 +180,4 @@ def benchmark_sv(self, pto): # obj.benchmark_plain(1) # obj.benchmark_sv(1) # obj.benchmark_kthr(2) - obj.benchmark_msbar(0) + obj.benchmark_msbar(2) diff --git a/doc/source/refs.bib b/doc/source/refs.bib index 347b62de3..30bebb630 100644 --- a/doc/source/refs.bib +++ b/doc/source/refs.bib @@ -419,3 +419,30 @@ @article{Bertone:2013vaa pages = {1647--1668}, year = {2014} } + +@article{Vermaseren:1997fq, + author = "Vermaseren, J. A. M. and Larin, S. A. and van Ritbergen, T.", + title = "{The four loop quark mass anomalous dimension and the invariant quark mass}", + eprint = "hep-ph/9703284", + archivePrefix = "arXiv", + reportNumber = "UM-TH-97-03, NIKHEF-97-012", + doi = "10.1016/S0370-2693(97)00660-6", + journal = "Phys. Lett. B", + volume = "405", + pages = "327--333", + year = "1997" +} + +@article{Liu:2015fxa, + author = "Liu, Tao and Steinhauser, Matthias", + title = "{Decoupling of heavy quarks at four loops and effective Higgs-fermion coupling}", + eprint = "1502.04719", + archivePrefix = "arXiv", + primaryClass = "hep-ph", + reportNumber = "TTP15-05", + doi = "10.1016/j.physletb.2015.05.023", + journal = "Phys. Lett. 
B", + volume = "746", + pages = "330--334", + year = "2015" +} diff --git a/doc/source/theory/pQCD.rst b/doc/source/theory/pQCD.rst index 3a9aba25e..b0c4078fb 100644 --- a/doc/source/theory/pQCD.rst +++ b/doc/source/theory/pQCD.rst @@ -23,13 +23,29 @@ We implement two different strategies to solve the |RGE|: - ``method="expanded"``: using approximate solutions: .. math :: - a^{\text{LO}}_s(\mu_R^2) &= \frac{a_s(\mu_0^2)}{1 + a_s(\mu_0^2) \beta_0 \ln(\mu_R^2/\mu_0^2)} \\ - a^{\text{NLO}}_{s,\text{exp}}(\mu_R^2) &= a^{\text{LO}}_s(\mu_R^2)-b_1 \left[a^{\text{LO}}_s(\mu_R^2)\right]^2 \ln\left(1+a_s(\mu_0^2) \beta_0 \ln(\mu_R^2/\mu_0^2)\right) \\ + a^{\text{LO}}_s(\mu_R^2) &= \frac{a_s(\mu_0^2)}{1 + a_s(\mu_0^2) \beta_0 L_{\mu}} \\ + a^{\text{NLO}}_{s,\text{exp}}(\mu_R^2) &= a^{\text{LO}}_s(\mu_R^2)-b_1 \left[a^{\text{LO}}_s(\mu_R^2)\right]^2 \ln\left(1+a_s(\mu_0^2) \beta_0 L_{\mu}\right) \\ a^{\text{NNLO}}_{s,\text{exp}}(\mu_R^2) &= a^{\text{LO}}_s(\mu_R^2)\left[1 + a^{\text{LO}}_s(\mu_R^2)\left(a^{\text{LO}}_s(\mu_R^2) - a_s(\mu_0^2)\right)(b_2 - b_1^2) \right.\\ - & \hspace{60pt} \left. + a^{\text{NLO}}_{s,\text{exp}}(\mu_R^2) b_1 \ln\left(a^{\text{NLO}}_{s,\text{exp}}(\mu_R^2)/a_s(\mu_0^2)\right)\right] + & \hspace{60pt} \left. + a^{\text{NLO}}_{s,\text{exp}}(\mu_R^2) b_1 \ln\left(a^{\text{NLO}}_{s,\text{exp}}(\mu_R^2)/a_s(\mu_0^2)\right)\right] \\ + a^{\text{N3LO}}_{s,\text{exp}}(\mu_R^2) &= a^{\text{NNLO}}_s(\mu_R^2) + \frac{a^{\text{LO}}_s(\mu_R^2)^4}{2 b_0^3} \left\{ \right. 
\\ + & -2 b_1^3 L_{0}^3 + 5 b_1^3 L_{\text{LO}}^2 + 2 b_1^3 L_{\text{LO}}^3 + b_1^3 L_{0}^2 \left(5 + 6 L_{\text{LO}} \right) \\ + & + 2 b_0 b_1 L_{\text{LO}} \left[ b_2 + 2 \left(b_1^2 - b_0 b_2 \right) L_{\mu} a_s(\mu_0^2) \right] \\ + & - b_0^2 L_{\mu} a_s(\mu_0^2) \left[ -2 b_1 b_2 + 2 b_0 b_3 + \left( b_1^3 - 2 b_0 b_1 b_2 + b_0^2 b_3 \right) L_{\mu} a_s(\mu_0^2) \right] \\ + & - 2 b_1 L_{0} \left[ 5 b_1^2 L_{\text{LO}} + 3 b_1^2 L_{\text{LO}}^2 + b_0 \left[b_2 + 2 \left(b_1^2 - b_0 b_2\right) L_{\mu} a_s(\mu_0^2)\right] \right ] \\ + & \left. \right\} + +being: + +.. math :: + L_{\mu} &= \ln(\mu_R^2/\mu_0^2) \\ + L_{0} &= \ln(a_s(\mu_0^2)) \\ + L_{\text{LO}} &= \ln(a^{\text{LO}}_s(\mu_R^2)) \\ When the renormalization scale crosses a flavor threshold matching conditions have to be applied :cite:`Schroder:2005hy,Chetyrkin:2005ia`. +In particular, the matching involved in the change from :math:`n_f` to :math:`n_f-1` schemes +is presented in equation 3.1 of :cite:`Schroder:2005hy` for |MSbar| masses, while the +same expression for POLE masses is reported in Appendix A. Splitting Functions ------------------- @@ -97,7 +113,7 @@ For each heavy quark :math:`h` we solve for :math:`m_h`: where the evolved |MSbar| mass is given by: .. math :: - m_{\overline{MS},h}(\mu^2) = m_{h,0} \int_{a_s(\mu_{h,0}^2)}^{a_s(\mu^2)} \frac{\gamma(a_s)}{\beta(a_s)} d a_s + m_{\overline{MS},h}(\mu^2) = m_{h,0} \exp \left[ - \int_{a_s(\mu_{h,0}^2)}^{a_s(\mu^2)} \frac{\gamma_m(a_s)}{\beta(a_s)} d a_s \right ] and :math:`m_{h,0}` is the given initial condition at the scale :math:`\mu_{h,0}`. Here there is a subtle complication since the solution @@ -127,24 +143,28 @@ In doing so EKO takes advantage of the monotony of the |RGE| solution Now, being able to evaluate :math:`a_s(\mu_{h,0}^2)`, there are two ways of solving the previous integral and finally compute the evolved -:math:`m_{\overline{MS},h}`. In fact, the function :math:`\gamma(a_s)` is the +:math:`m_{\overline{MS},h}`. 
In fact, the function :math:`\gamma_m(a_s)` is the anomalous QCD mass dimension and, as the :math:`\beta` function, it can be evaluated -perturbatively in :math:`a_s` up to :math:`\mathcal{O}(a_s^3)`: +perturbatively in :math:`a_s` up to :math:`\mathcal{O}(a_s^4)`: .. math :: - \gamma(a_s) &= - \sum\limits_{n=0} \gamma_n a_s^{n+1} \\ + \gamma_m(a_s) &= \sum\limits_{n=0} \gamma_{m,n} a_s^{n+1} \\ -Even here it is useful to define :math:`c_k = \gamma_k/\beta_0, k>0`. +Even here it is useful to define :math:`c_k = \gamma_{m,k}/\beta_0, k \ge 0`. Therefore the two solution strategies are: - ``method = "exact"``: the integral is solved exactly using the expression of - :math:`\beta,\gamma` up to the specified perturbative order + :math:`\beta,\gamma_m` up to the specified perturbative order - ``method = "expanded"``: the integral is approximate by the following expansion: .. math :: m_{\overline{MS},h}(\mu^2) & = m_{h,0} \left ( \frac{a_s(\mu^2)}{a_s(\mu_{h,0}^2)} \right )^{c_0} \frac{j_{exp}(a_s(\mu^2))}{j_{exp}(a_s(\mu_{h,0}^2))} \\ - j_{exp}(a_s) &= 1 + a_s \left [ c_1 - b_1 c_0 \right ] + \frac{a_s^2}{2} \left [c_2 - c_1 b_1 - b_2 c_0 + b_1^2 c_0 + (c_1 - b_1 c_0)^2 \right] + j_{exp}(a_s) &= 1 + a_s \left [ c_1 - b_1 c_0 \right ] \\ + & + \frac{a_s^2}{2} \left [c_2 - c_1 b_1 - b_2 c_0 + b_1^2 c_0 + (c_1 - b_1 c_0)^2 \right] \\ + & + \frac{a_s^3}{6} [ -2 b_3 c_0 - b_1^3 c_0 (1 + c_0) (2 + c_0) - 2 b_2 c_1 \\ + & - 3 b_2 c_0 c_1 + b_1^2 (2 + 3 c_0 (2 + c_0)) c_1 + c_1^3 + 3 c_1 c_2 \\ + & + b_1 (b_2 c_0 (4 + 3 c_0) - 3 (1 + c_0) c_1^2 - (2 + 3 c_0) c_2) + 2 c_3 ] The procedure is iterated on all the heavy quarks, updating the temporary instance @@ -154,10 +174,11 @@ To find coherent solutions and perform the mass running in the correct patches i is necessary to always start computing the mass scales closer to :math:`\mu_{ref}`. 
Eventually, to ensure that the threshold values are properly set, we add a -consistency check, asserting: +consistency check, asserting that the :math:`m_{\overline{MS},h}` are properly sorted. -.. math :: - m_{\overline{MS},h} (m_h) \leq m_{\overline{MS},h+1} (m_h) +Note that also for |MSbar| mass running when the heavy threshold scales are +crossed we need to apply non trivial matching from order +:math:`\mathcal{O}(a_s^2)` as described here :cite:`Liu:2015fxa`. We provide the following as an illustrative example of how this procedure works: when the strong coupling is given with boundary condition :math:`\alpha_s(\mu_{ref}=91, n_{f_{ref}}=5)` diff --git a/src/eko/anomalous_dimensions/harmonics.py b/src/eko/anomalous_dimensions/harmonics.py index 4aae320f1..75163b781 100644 --- a/src/eko/anomalous_dimensions/harmonics.py +++ b/src/eko/anomalous_dimensions/harmonics.py @@ -13,10 +13,11 @@ zeta2 = scipy.special.zeta(2) zeta3 = scipy.special.zeta(3) zeta4 = scipy.special.zeta(4) +zeta5 = scipy.special.zeta(5) @nb.njit("c16(c16,u1)", cache=True) -def cern_polygamma(Z, K: int): # pylint: disable=all +def cern_polygamma(Z, K): # pylint: disable=all """ Computes the polygamma functions :math:`\\psi_k(z)`. 
diff --git a/src/eko/anomalous_dimensions/lo.py b/src/eko/anomalous_dimensions/lo.py index 0c5e23c50..39b4398af 100644 --- a/src/eko/anomalous_dimensions/lo.py +++ b/src/eko/anomalous_dimensions/lo.py @@ -32,7 +32,7 @@ def gamma_ns_0(N, s1): @nb.njit("c16(c16,u1)", cache=True) -def gamma_qg_0(N, nf: int): +def gamma_qg_0(N, nf): """ Computes the leading-order quark-gluon anomalous dimension @@ -78,7 +78,7 @@ def gamma_gq_0(N): @nb.njit("c16(c16,c16,u1)", cache=True) -def gamma_gg_0(N, s1, nf: int): +def gamma_gg_0(N, s1, nf): """ Computes the leading-order gluon-gluon anomalous dimension @@ -104,7 +104,7 @@ def gamma_gg_0(N, s1, nf: int): @nb.njit("c16[:,:](c16,c16,u1)", cache=True) -def gamma_singlet_0(N, s1, nf: int): +def gamma_singlet_0(N, s1, nf): r""" Computes the leading-order singlet anomalous dimension matrix diff --git a/src/eko/anomalous_dimensions/nlo.py b/src/eko/anomalous_dimensions/nlo.py index e4ec996c9..3466be105 100644 --- a/src/eko/anomalous_dimensions/nlo.py +++ b/src/eko/anomalous_dimensions/nlo.py @@ -15,7 +15,7 @@ @nb.njit("c16(c16,u1)", cache=True) -def gamma_nsm_1(n, nf: int): +def gamma_nsm_1(n, nf): """ Computes the |NLO| valence-like non-singlet anomalous dimension. @@ -58,7 +58,7 @@ def gamma_nsm_1(n, nf: int): @nb.njit("c16(c16,u1)", cache=True) -def gamma_nsp_1(n, nf: int): +def gamma_nsp_1(n, nf): """ Computes the |NLO| singlet-like non-singlet anomalous dimension. @@ -99,7 +99,7 @@ def gamma_nsp_1(n, nf: int): @nb.njit("c16(c16,u1)", cache=True) -def gamma_ps_1(n, nf: int): +def gamma_ps_1(n, nf): """ Computes the |NLO| pure-singlet quark-quark anomalous dimension. @@ -126,7 +126,7 @@ def gamma_ps_1(n, nf: int): @nb.njit("c16(c16,u1)", cache=True) -def gamma_qg_1(n, nf: int): +def gamma_qg_1(n, nf): """ Computes the |NLO| quark-gluon singlet anomalous dimension. 
@@ -159,7 +159,7 @@ def gamma_qg_1(n, nf: int): @nb.njit("c16(c16,u1)", cache=True) -def gamma_gq_1(n, nf: int): +def gamma_gq_1(n, nf): """ Computes the |NLO| gluon-quark singlet anomalous dimension. @@ -195,7 +195,7 @@ def gamma_gq_1(n, nf: int): @nb.njit("c16(c16,u1)", cache=True) -def gamma_gg_1(n, nf: int): +def gamma_gg_1(n, nf): """ Computes the |NLO| gluon-gluon singlet anomalous dimension. @@ -233,7 +233,7 @@ def gamma_gg_1(n, nf: int): @nb.njit("c16[:,:](c16,u1)", cache=True) -def gamma_singlet_1(N, nf: int): +def gamma_singlet_1(N, nf): r""" Computes the next-leading-order singlet anomalous dimension matrix diff --git a/src/eko/anomalous_dimensions/nnlo.py b/src/eko/anomalous_dimensions/nnlo.py index f4154375b..46a63eb5c 100644 --- a/src/eko/anomalous_dimensions/nnlo.py +++ b/src/eko/anomalous_dimensions/nnlo.py @@ -17,7 +17,7 @@ @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_nsm_2(n, nf: int, sx): +def gamma_nsm_2(n, nf, sx): """ Computes the |NNLO| valence-like non-singlet anomalous dimension. @@ -94,7 +94,7 @@ def gamma_nsm_2(n, nf: int, sx): @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_nsp_2(n, nf: int, sx): +def gamma_nsp_2(n, nf, sx): """ Computes the |NNLO| singlet-like non-singlet anomalous dimension. @@ -171,7 +171,7 @@ def gamma_nsp_2(n, nf: int, sx): @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_nsv_2(n, nf: int, sx): +def gamma_nsv_2(n, nf, sx): """ Computes the |NNLO| valence non-singlet anomalous dimension. @@ -226,7 +226,7 @@ def gamma_nsv_2(n, nf: int, sx): @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_ps_2(n, nf: int, sx): +def gamma_ps_2(n, nf, sx): """ Computes the |NNLO| pure-singlet quark-quark anomalous dimension. @@ -298,7 +298,7 @@ def gamma_ps_2(n, nf: int, sx): @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_qg_2(n, nf: int, sx): +def gamma_qg_2(n, nf, sx): """ Computes the |NNLO| quark-gluon singlet anomalous dimension. 
@@ -372,7 +372,7 @@ def gamma_qg_2(n, nf: int, sx): @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_gq_2(n, nf: int, sx): +def gamma_gq_2(n, nf, sx): """ Computes the |NNLO| gluon-quark singlet anomalous dimension. @@ -462,7 +462,7 @@ def gamma_gq_2(n, nf: int, sx): @nb.njit("c16(c16,u1,c16[:])", cache=True) -def gamma_gg_2(n, nf: int, sx): +def gamma_gg_2(n, nf, sx): """ Computes the |NNLO| gluon-gluon singlet anomalous dimension. @@ -550,7 +550,7 @@ def gamma_gg_2(n, nf: int, sx): @nb.njit("c16[:,:](c16,u1,c16[:])", cache=True) -def gamma_singlet_2(N, nf: int, sx): +def gamma_singlet_2(N, nf, sx): r""" Computes the |NNLO| singlet anomalous dimension matrix diff --git a/src/eko/beta.py b/src/eko/beta.py index 8dba687be..dcdeda6de 100644 --- a/src/eko/beta.py +++ b/src/eko/beta.py @@ -8,10 +8,11 @@ import numba as nb from eko import constants +from eko.anomalous_dimensions.harmonics import zeta3 @nb.njit("f8(u1)", cache=True) -def beta_0(nf: int): +def beta_0(nf): """ Computes the first coefficient of the QCD beta function. @@ -32,7 +33,7 @@ def beta_0(nf: int): @nb.njit("f8(u1)", cache=True) -def beta_1(nf: int): +def beta_1(nf): """ Computes the second coefficient of the QCD beta function. @@ -57,7 +58,7 @@ def beta_1(nf: int): @nb.njit("f8(u1)", cache=True) -def beta_2(nf: int): +def beta_2(nf): """ Computes the third coefficient of the QCD beta function @@ -85,6 +86,33 @@ def beta_2(nf: int): return beta_2 +@nb.njit("f8(u1)", cache=True) +def beta_3(nf): + """ + Computes the fourth coefficient of the QCD beta function + + Implements Eq. (3.6) of :cite:`Herzog:2017ohr`. 
+ + Parameters + ---------- + nf : int + number of active flavors + + Returns + ------- + beta_3 : float + fourth coefficient of the QCD beta function :math:`\\beta_3^{n_f}` + """ + beta_3 = ( + 149753.0 / 6.0 + + 3564.0 * zeta3 + + nf * (-1078361.0 / 162.0 - 6508.0 / 27.0 * zeta3) + + nf**2 * (50065.0 / 162.0 + 6472.0 / 81.0 * zeta3) + + 1093.0 / 729.0 * nf**3 + ) + return beta_3 + + @nb.njit("f8(u1,u1)", cache=True) def beta(k, nf): """ @@ -109,8 +137,10 @@ def beta(k, nf): beta_ = beta_1(nf) elif k == 2: beta_ = beta_2(nf) + elif k == 3: + beta_ = beta_3(nf) else: - raise ValueError("Beta coefficients beyond NNLO are not implemented!") + raise ValueError("Beta coefficients beyond N3LO are not implemented!") return beta_ diff --git a/src/eko/gamma.py b/src/eko/gamma.py index 14d9d247a..15fc1b53b 100644 --- a/src/eko/gamma.py +++ b/src/eko/gamma.py @@ -4,26 +4,83 @@ See :doc:`pQCD ingredients `. """ +import numba as nb -import scipy.special - -from .anomalous_dimensions.harmonics import zeta3, zeta4 +from .anomalous_dimensions.harmonics import zeta3, zeta4, zeta5 +@nb.njit("f8()", cache=True) def gamma_0(): + """ + Computes the first coefficient of the QCD gamma function. + + Implements Eq. (15) of :cite:`Vermaseren:1997fq`. + + Returns + ------- + gamma_0 : float + first coefficient of the QCD gamma function :math:`\\gamma_{m,0}^{n_f}` + """ return 4.0 +@nb.njit("f8(u1)", cache=True) def gamma_1(nf): + """ + Computes the second coefficient of the QCD gamma function. + + Implements Eq. (15) of :cite:`Vermaseren:1997fq`. + + Parameters + ---------- + nf : int + number of active flavors + + Returns + ------- + gamma_1 : float + second coefficient of the QCD gamma function :math:`\\gamma_{m,1}^{n_f}` + """ return 202.0 / 3.0 - 20.0 / 9.0 * nf +@nb.njit("f8(u1)", cache=True) def gamma_2(nf): + """ + Computes the third coefficient of the QCD gamma function. + + Implements Eq. (15) of :cite:`Vermaseren:1997fq`. 
+ + Parameters + ---------- + nf : int + number of active flavors + + Returns + ------- + gamma_2 : float + third coefficient of the QCD gamma function :math:`\\gamma_{m,2}^{n_f}` + """ return 1249.0 - (2216.0 / 27.0 + 160.0 / 3.0 * zeta3) * nf - 140.0 / 81.0 * nf**2 +@nb.njit("f8(u1)", cache=True) def gamma_3(nf): - zeta5 = scipy.special.zeta(5) + """ + Computes the fourth coefficient of the QCD gamma function. + + Implements Eq. (15) of :cite:`Vermaseren:1997fq`. + + Parameters + ---------- + nf : int + number of active flavors + + Returns + ------- + gamma_3 : float + fourth coefficient of the QCD gamma function :math:`\\gamma_{m,3}^{n_f}` + """ return ( 4603055.0 / 162.0 + 135680.0 * zeta3 / 28.0 @@ -40,6 +97,7 @@ def gamma_3(nf): ) +@nb.njit("f8(u1,u1)", cache=True) def gamma(order, nf): """ Compute the value of a gamma coefficient @@ -67,5 +125,5 @@ def gamma(order, nf): elif order == 3: _gamma = gamma_3(nf) else: - raise ValueError("Gamma coefficients beyond N3LO are not implemented!") + raise ValueError("QCD Gamma coefficients beyond N3LO are not implemented!") return _gamma diff --git a/src/eko/msbar_masses.py b/src/eko/msbar_masses.py index d9ef3203e..69732b6e3 100644 --- a/src/eko/msbar_masses.py +++ b/src/eko/msbar_masses.py @@ -2,16 +2,17 @@ r""" This module contains the |RGE| for the |MSbar| masses """ +import numba as nb import numpy as np from scipy import integrate, optimize from .beta import b, beta from .evolution_operator.flavors import quark_names from .gamma import gamma -from .strong_coupling import StrongCoupling, strong_coupling_mod_ev +from .strong_coupling import StrongCoupling, invert_matching_coeffs -def msbar_ker_exact(a0, a1, order, nf): +def ker_exact(a0, a1, order, nf): r""" Exact |MSbar| |RGE| kernel @@ -28,11 +29,11 @@ def msbar_ker_exact(a0, a1, order, nf): Returns ------- - ker: float + ker_exact: float Exact |MSbar| kernel: - ..math: - k_{exact} = e^{\int_{a_s(\mu_{h,0}^2)}^{a_s(\mu^2)} \gamma(a_s) / \beta(a_s) da_s} + .. 
math:: + k_{exact} = e^{-\int_{a_s(\mu_{h,0}^2)}^{a_s(\mu^2)}\gamma_m(a_s)/ \beta(a_s)da_s} """ b_vec = [beta(0, nf)] g_vec = [gamma(0, nf)] @@ -42,6 +43,9 @@ def msbar_ker_exact(a0, a1, order, nf): if order >= 2: b_vec.append(beta(2, nf)) g_vec.append(gamma(2, nf)) + if order >= 3: + b_vec.append(beta(3, nf)) + g_vec.append(gamma(3, nf)) # quad ker def integrand(a, b_vec, g_vec): @@ -64,7 +68,8 @@ def integrand(a, b_vec, g_vec): return np.exp(val) -def msbar_ker_expanded(a0, a1, order, nf): +@nb.njit("f8(f8,f8,u1,u1)", cache=True) +def ker_expanded(a0, a1, order, nf): r""" Expanded |MSbar| |RGE| kernel @@ -81,15 +86,18 @@ def msbar_ker_expanded(a0, a1, order, nf): Returns ------- - ker: float + ker_expanded: float Expanded |MSbar| kernel: - ..math: + .. math:: k_{expanded} &= \left (\frac{a_s(\mu^2)}{a_s(\mu_{h,0}^2)} \right )^{c_0} \frac{j_{exp}(a_s(\mu^2))}{j_{exp}(a_s(\mu_{h,0}^2))} \\ - j_{exp}(a_s) &= 1 + a_s \left [ c_1 - b_1 c_0 \right ] - + \frac{a_s^2}{2} - \left [c_2 - c_1 b_1 - b_2 c_0 + b_1^2 c_0 + (c_1 - b_1 c_0)^2 right] + j_{exp}(a_s) &= 1 + a_s \left [ c_1 - b_1 c_0 \right ] \\ + & + \frac{a_s^2}{2} + \left [c_2 - c_1 b_1 - b_2 c_0 + b_1^2 c_0 + (c_1 - b_1 c_0)^2 \right ] \\ + & + \frac{a_s^3}{6} [ -2 b_3 c_0 - b_1^3 c_0 (1 + c_0) (2 + c_0) - 2 b_2 c_1 \\ + & - 3 b_2 c_0 c_1 + b_1^2 (2 + 3 c_0 (2 + c_0)) c_1 + c_1^3 + 3 c_1 c_2 \\ + & + b_1 (b_2 c_0 (4 + 3 c_0) - 3 (1 + c_0) c_1^2 - (2 + 3 c_0) c_2) + 2 c_3 ] """ b0 = beta(0, nf) c0 = gamma(0, nf) / b0 @@ -99,19 +107,40 @@ def msbar_ker_expanded(a0, a1, order, nf): if order >= 1: b1 = b(1, nf) c1 = gamma(1, nf) / b0 - r = c1 - b1 * c0 - num += a1 * r - den += a0 * r + u = c1 - b1 * c0 + num += a1 * u + den += a0 * u if order >= 2: b2 = b(2, nf) c2 = gamma(2, nf) / b0 - r = (c2 - c1 * b1 - b2 * c0 + b1**2 * c0 + (c1 - b1 * c0) ** 2) / 2.0 - num += a1**2 * r - den += a0**2 * r + u = (c2 - c1 * b1 - b2 * c0 + b1**2 * c0 + (c1 - b1 * c0) ** 2) / 2.0 + num += a1**2 * u + den += a0**2 * u + if order >= 
3: + b3 = b(3, nf) + c3 = gamma(3, nf) / b0 + u = ( + 1 + / 6 + * ( + -2 * b3 * c0 + - b1**3 * c0 * (1 + c0) * (2 + c0) + - 2 * b2 * c1 + - 3 * b2 * c0 * c1 + + b1**2 * (2 + 3 * c0 * (2 + c0)) * c1 + + c1**3 + + 3 * c1 * c2 + + b1 + * (b2 * c0 * (4 + 3 * c0) - 3 * (1 + c0) * c1**2 - (2 + 3 * c0) * c2) + + 2 * c3 + ) + ) + num += a1**3 * u + den += a0**3 * u return ev_mass * num / den -def msbar_ker_dispatcher(q2_to, q2m_ref, strong_coupling, fact_to_ren, nf): +def ker_dispatcher(q2_to, q2m_ref, strong_coupling, fact_to_ren, nf): r""" Select the |MSbar| kernel and compute the strong coupling values @@ -139,23 +168,70 @@ method = strong_coupling.method order = strong_coupling.order if method == "expanded": - return msbar_ker_expanded(a0, a1, order, nf) - return msbar_ker_exact(a0, a1, order, nf) + return ker_expanded(a0, a1, order, nf) + return ker_exact(a0, a1, order, nf) + + +def compute_matching_coeffs_up(nf): + """ + |MSbar| matching coefficients :cite:`Liu:2015fxa` at threshold + when moving to a regime with *more* flavors. + + Note :cite:`Liu:2015fxa` (eq 5.) reports the backward relation, + multiplied by a factor of 4 (and 4^2 ...) + + Parameters + ---------- + nf: + number of active flavors in the lower patch + Returns + ------- + matching_coeffs_up: + upward matching coefficient matrix + """ + matching_coeffs_up = np.zeros((4, 4)) + matching_coeffs_up[2, 0] = -89.0 / 27.0 + matching_coeffs_up[2, 1] = 20.0 / 9.0 + matching_coeffs_up[2, 2] = -4.0 / 3.0 + + matching_coeffs_up[3, 0] = -118.248 - 1.58257 * nf + matching_coeffs_up[3, 1] = 71.7887 + 7.85185 * nf + matching_coeffs_up[3, 2] = -700.0 / 27.0 + matching_coeffs_up[3, 3] = -232.0 / 27.0 + 16.0 / 27.0 * nf + + return matching_coeffs_up -def evolve_msbar_mass( + +def compute_matching_coeffs_down(nf): + """ + |MSbar| matching coefficients :cite:`Liu:2015fxa` (eq 5) at threshold + when moving to a regime with *less* flavors. 
+ + Parameters + ---------- + nf: + number of active flavors in the lower patch + + Returns + ------- + matching_coeffs_down: + downward matching coefficient matrix + """ + c_up = compute_matching_coeffs_up(nf) + return invert_matching_coeffs(c_up) + + +def solve( m2_ref, q2m_ref, strong_coupling, - nf_ref=None, - fact_to_ren=1.0, - q2_to=None, + nf_ref, + fact_to_ren, ): r""" - Compute the |MSbar| mass. - If the final scale is not given it solves the equation :math:`m_{\overline{MS}}(m) = m` + Compute the |MSbar| mass solving the equation :math:`m_{\overline{MS}}(m) = m` for a fixed number of nf - Else perform the mass evolution. Parameters ---------- @@ -163,13 +239,55 @@ def evolve_msbar_mass( squared initial mass reference q2m_ref: float squared initial scale - nf_ref: int, optional (not used when q2_to is given) + strong_coupling: eko.strong_coupling.StrongCoupling + Instance of :class:`~eko.strong_coupling.StrongCoupling` able to generate a_s for + any q + nf_ref: int number of active flavours at the scale q2m_ref, where the solution is searched fact_to_ren: float - :math:`\mu_F^2/\muR^2` + :math:`\mu_F^2/\mu_R^2` + + Returns + ------- + m2 : float + :math:`m_{\overline{MS}}(\mu_2)^2` + """ + + def rge(m2, q2m_ref, strong_coupling, fact_to_ren, nf_ref): + return ( + m2_ref + * ker_dispatcher(m2, q2m_ref, strong_coupling, fact_to_ren, nf_ref) ** 2 + - m2 + ) + + msbar_mass = optimize.fsolve( + rge, q2m_ref, args=(q2m_ref, strong_coupling, fact_to_ren, nf_ref) + ) + return float(msbar_mass) + + +def evolve( + m2_ref, + q2m_ref, + strong_coupling, + fact_to_ren, + q2_to, +): + r""" + Perform the |MSbar| mass evolution up to given scale. + It allows for different number of active flavors. 
+ + Parameters + ---------- + m2_ref: float + squared initial mass reference + q2m_ref: float + squared initial scale strong_coupling: eko.strong_coupling.StrongCoupling Instance of :class:`~eko.strong_coupling.StrongCoupling` able to generate a_s for any q + fact_to_ren: float + :math:`\mu_F^2/\mu_R^2` q2_to: float, optional scale at which the mass is computed @@ -178,58 +296,54 @@ def evolve_msbar_mass( m2 : float :math:`m_{\overline{MS}}(\mu_2)^2` """ - - # TODO: split function: 1 function, 1 job - # TODO: this should resolve the argument priority - if q2_to is None: - - def rge(m2, q2m_ref, strong_coupling, fact_to_ren, nf_ref): - return ( - m2_ref - * msbar_ker_dispatcher( - m2, q2m_ref, strong_coupling, fact_to_ren, nf_ref - ) - ** 2 - - m2 - ) - - msbar_mass = optimize.fsolve( - rge, q2m_ref, args=(q2m_ref, strong_coupling, fact_to_ren, nf_ref) + # evolution might involve different number of active flavors. + # Find out the evolution path (always sorted) + q2_low, q2_high = sorted([q2m_ref, q2_to]) + + area_walls = np.array(strong_coupling.thresholds.area_walls) + path = np.concatenate( + ( + [q2_low], + area_walls[(q2_low < area_walls) & (area_walls < q2_high)], + [q2_high], ) - return float(msbar_mass) - else: - - # evolution might involve different number of active flavors. 
- # Find out the evolution path (always sorted) - q2_low, q2_high = sorted([q2m_ref, q2_to]) - area_walls = np.array(strong_coupling.thresholds.area_walls) - path = np.concatenate( - ( - [q2_low], - area_walls[(q2_low < area_walls) & (area_walls < q2_high)], - [q2_high], - ) + ) + nf_init = len(area_walls[q2_low >= area_walls]) + 2 + nf_final = len(area_walls[q2_high > area_walls]) + 2 + + ev_mass = 1.0 + is_downward_path = bool(q2m_ref > q2_to) + for i, nf in enumerate(np.arange(nf_init, nf_final + 1)): + q2_init = path[i] + q2_final = path[i + 1] + # if you are going backward + # need to reverse the evolution in each path segment + if is_downward_path: + m_coeffs = compute_matching_coeffs_down(nf - 1) + q2_init, q2_final = q2_final, q2_init + shift = 4 + else: + m_coeffs = compute_matching_coeffs_up(nf) + shift = 3 + fact = 1.0 + # shift + for pto in range(1, strong_coupling.order + 1): + for l in range(pto + 1): + as_thr = strong_coupling.a_s(q2_final / fact_to_ren, q2_final) + # TODO: do we need to add np.log(fac_to_ren) here ??? 
+ L = np.log(strong_coupling.thresholds.thresholds_ratios[pto - shift]) + fact += as_thr**pto * L**l * m_coeffs[pto, l] + ev_mass *= ( + fact + * ker_dispatcher(q2_final, q2_init, strong_coupling, fact_to_ren, nf) ** 2 ) - nf_init = len(area_walls[q2_low >= area_walls]) + 2 - nf_final = len(area_walls[q2_high > area_walls]) + 2 - - ev_mass = 1.0 - for i, n in enumerate(np.arange(nf_init, nf_final + 1)): - q2_init = path[i] - q2_final = path[i + 1] - # if you are going backward - # need to reverse the evolution in each path segment - if q2m_ref > q2_to: - q2_init, q2_final = q2_final, q2_init - ev_mass *= msbar_ker_dispatcher( - q2_final, q2_init, strong_coupling, fact_to_ren, n - ) - return m2_ref * ev_mass**2 + return m2_ref * ev_mass -def compute_msbar_mass(theory_card): +def compute(theory_card): r""" Compute the |MSbar| masses solving the equation :math:`m_{\bar{MS}}(\mu) = \mu` + for each heavy quark and consistent boundary conditions. Parameters ---------- @@ -248,18 +362,8 @@ masses = np.concatenate((np.zeros(nfa_ref - 3), np.full(6 - nfa_ref, np.inf))) fact_to_ren = theory_card["fact_to_ren_scale_ratio"] ** 2 - # TODO: why is this different than calling - # `StrongCoupling.from_dict(theory_card, masses=thr_masses)` def sc(thr_masses): - return StrongCoupling( - theory_card["alphas"], - q2_ref, - thr_masses, - thresholds_ratios=[1, 1, 1], - order=theory_card["PTO"], - method=strong_coupling_mod_ev(theory_card["ModEv"]), - nf_ref=nfa_ref, - ) + return StrongCoupling.from_dict(theory_card, masses=thr_masses) # First you need to look for the thr around the given as_ref heavy_quarks = quark_names[3:] @@ -319,39 +423,25 @@ def sc(thr_masses): # len(masses[q2m_ref > masses]) + 3 is the nf at the given reference scale if nf_target != len(masses[q2m_ref > masses]) + 3: q2_to = masses[q_idx + shift] - m2_ref = evolve_msbar_mass( + m2_ref = evolve( m2_ref, q2m_ref, - strong_coupling=sc(masses), - fact_to_ren=fact_to_ren, - 
q2_to=q2_to, + sc(masses), + fact_to_ren, + q2_to, ) q2m_ref = q2_to # now solve the RGE - masses[q_idx] = evolve_msbar_mass( + masses[q_idx] = solve( m2_ref, q2m_ref, - strong_coupling=sc(masses), - nf_ref=nf_target, - fact_to_ren=fact_to_ren, + sc(masses), + nf_target, + fact_to_ren, ) # Check the msbar ordering - for m2_msbar, hq in zip(masses[:-1], quark_names[4:]): - q2m_ref = np.power(theory_card[f"Qm{hq}"], 2) - m2_ref = np.power(theory_card[f"m{hq}"], 2) - # check that m_msbar_hq < msbar_hq+1 (m_msbar_hq) - m2_test = evolve_msbar_mass( - m2_ref, - q2m_ref, - strong_coupling=sc(masses), - fact_to_ren=fact_to_ren, - q2_to=m2_msbar, - ) - if m2_msbar > m2_test: - raise ValueError( - "The MSBAR masses do not preserve the correct ordering,\ - check the initial reference values" - ) + if not (masses == np.sort(masses)).all(): + raise ValueError("MSbar masses are not correctly sorted, check the initial reference values") return masses diff --git a/src/eko/runner.py b/src/eko/runner.py index 2ee4ed554..1e4f59601 100644 --- a/src/eko/runner.py +++ b/src/eko/runner.py @@ -8,9 +8,8 @@ import numpy as np from . import basis_rotation as br -from . import interpolation +from . import interpolation, msbar_masses from .evolution_operator.grid import OperatorGrid -from .msbar_masses import compute_msbar_mass from .output import Output from .strong_coupling import StrongCoupling from .thresholds import ThresholdsAtlas @@ -49,7 +48,7 @@ def __init__(self, theory_card, operators_card): # setup the Threshold path, compute masses if necessary masses = None if theory_card["HQ"] == "MSBAR": - masses = compute_msbar_mass(theory_card) + masses = msbar_masses.compute(theory_card) tc = ThresholdsAtlas.from_dict(theory_card, masses=masses) self.out["q2_ref"] = float(tc.q2_ref) diff --git a/src/eko/strong_coupling.py b/src/eko/strong_coupling.py index 4676f50d5..3e977da7a 100644 --- a/src/eko/strong_coupling.py +++ b/src/eko/strong_coupling.py @@ -13,6 +13,7 @@ import scipy from . 
import constants, thresholds +from .beta import b as beta_b from .beta import beta logger = logging.getLogger(__name__) @@ -67,20 +68,54 @@ def as_expanded(order, as_ref, nf, scale_from, scale_to): res = as_LO # NLO if order >= 1: - beta1 = beta(1, nf) - b1 = beta1 / beta0 + b1 = beta_b(1, nf) as_NLO = as_LO * (1 - b1 * as_LO * np.log(den)) res = as_NLO # NNLO - if order == 2: - beta2 = beta(2, nf) - b2 = beta2 / beta0 + if order >= 2: + b2 = beta_b(2, nf) res = as_LO * ( 1.0 + as_LO * (as_LO - as_ref) * (b2 - b1**2) + as_NLO * b1 * np.log(as_NLO / as_ref) ) - + # N3LO expansion is taken from Luca Rottoli + if order >= 3: + b3 = beta_b(3, nf) + log_fact = np.log(as_LO) + res += ( + as_LO**4 + / (2 * beta0**3) + * ( + -2 * b1**3 * np.log(as_ref) ** 3 + + 5 * b1**3 * log_fact**2 + + 2 * b1**3 * log_fact**3 + + b1**3 * np.log(as_ref) ** 2 * (5 + 6 * log_fact) + + 2 + * beta0 + * b1 + * log_fact + * (b2 + 2 * (b1**2 - beta0 * b2) * lmu * as_ref) + - beta0**2 + * lmu + * as_ref + * ( + -2 * b1 * b2 + + 2 * beta0 * b3 + + (b1**3 - 2 * beta0 * b1 * b2 + beta0**2 * b3) + * lmu + * as_ref + ) + - 2 + * b1 + * np.log(as_ref) + * ( + 5 * b1**2 * log_fact + + 3 * b1**2 * log_fact**2 + + beta0 * (b2 + 2 * (b1**2 - beta0 * b2) * lmu * as_ref) + ) + ) + ) return res @@ -146,8 +181,8 @@ def __init__( raise ValueError(f"alpha_s_ref has to be positive - got {alpha_s_ref}") if scale_ref <= 0: raise ValueError(f"scale_ref has to be positive - got {scale_ref}") - if order not in [0, 1, 2]: - raise NotImplementedError("a_s beyond NNLO is not implemented") + if order not in [0, 1, 2, 3]: + raise NotImplementedError("a_s beyond N3LO is not implemented") self.order = order if method not in ["expanded", "exact"]: raise ValueError(f"Unknown method {method}") @@ -260,14 +295,13 @@ def compute_exact(self, as_ref, nf, scale_from, scale_to): b_vec = [1] # NLO if self.order >= 1: - beta1 = beta(1, nf) - b1 = beta1 / beta0 - b_vec.append(b1) + b_vec.append(beta_b(1, nf)) # NNLO if self.order >= 2: 
- beta2 = beta(2, nf) - b2 = beta2 / beta0 - b_vec.append(b2) + b_vec.append(beta_b(2, nf)) + # N3LO + if self.order >= 3: + b_vec.append(beta_b(3, nf)) # integration kernel def rge(_t, a, b_vec): return -(a**2) * np.sum([a**k * b for k, b in enumerate(b_vec)]) @@ -359,9 +393,9 @@ def a_s( self.thresholds.thresholds_ratios[seg.nf - shift] ) m_coeffs = ( - compute_matching_coeffs_down(self.hqm_scheme) + compute_matching_coeffs_down(self.hqm_scheme, seg.nf - 1) if is_downward_path - else compute_matching_coeffs_up(self.hqm_scheme) + else compute_matching_coeffs_up(self.hqm_scheme, seg.nf) ) fact = 1.0 # shift @@ -374,10 +408,15 @@ def a_s( return final_as -def compute_matching_coeffs_up(mass_scheme): +def compute_matching_coeffs_up(mass_scheme, nf): r""" - Matching coefficients :cite:`Schroder:2005hy,Chetyrkin:2005ia,Vogt:2004ns` at threshold - when moving to a regime with *more* flavors. + Matching coefficients :cite:`Schroder:2005hy,Chetyrkin:2005ia,Vogt:2004ns` + at threshold when moving to a regime with *more* flavors. + + We follow notation of :cite:`Vogt:2004ns` (eq 2.43) for POLE masses + + The *inverse* |MSbar| values instead are given in :cite:`Schroder:2005hy` (eq 3.1) + multiplied by a factor of 4 (and 4^2 ...) .. 
math:: a_s^{(n_l+1)} = a_s^{(n_l)} + \sum\limits_{n=1} (a_s^{(n_l)})^n @@ -387,31 +426,63 @@ def compute_matching_coeffs_up(mass_scheme): ---------- mass_scheme: Heavy quark mass scheme: "POLE" or "MSBAR" + nf: + number of active flavors in the lower patch Returns ------- matching_coeffs_down: forward matching coefficient matrix """ - matching_coeffs_up = np.zeros((3, 3)) + matching_coeffs_up = np.zeros((4, 4)) if mass_scheme == "MSBAR": - matching_coeffs_up[2, 0] = -22.0 / 3.0 + matching_coeffs_up[2, 0] = -22.0 / 9.0 matching_coeffs_up[2, 1] = 22.0 / 3.0 + + # c30 = -d30 + matching_coeffs_up[3, 0] = -62.2116 + 5.4177 * nf + # c31 = -d31 + 5 c11 * c20 + matching_coeffs_up[3, 1] = 365.0 / 3.0 - 67.0 / 9.0 * nf + matching_coeffs_up[3, 2] = 109.0 / 3.0 + 16.0 / 9.0 * nf + elif mass_scheme == "POLE": matching_coeffs_up[2, 0] = 14.0 / 3.0 matching_coeffs_up[2, 1] = 38.0 / 3.0 + matching_coeffs_up[3, 0] = 340.729 - 16.7981 * nf + matching_coeffs_up[3, 1] = 8941.0 / 27.0 - 409.0 / 27.0 * nf + matching_coeffs_up[3, 2] = 511.0 / 9.0 matching_coeffs_up[1, 1] = 4.0 / 3.0 * constants.TR matching_coeffs_up[2, 2] = 4.0 / 9.0 + matching_coeffs_up[3, 3] = 8.0 / 27.0 + return matching_coeffs_up # inversion of the matching coefficients -def compute_matching_coeffs_down(mass_scheme): +def compute_matching_coeffs_down(mass_scheme, nf): """ - Matching coefficients :cite:`Schroder:2005hy` :cite:`Chetyrkin:2005ia` at threshold + Matching coefficients :cite:`Schroder:2005hy,Chetyrkin:2005ia` at threshold when moving to a regime with *less* flavors. 
+ Parameters + ---------- + mass_scheme: + Heavy quark mass scheme: "POLE" or "MSBAR" + nf: + number of active flavors in the lower patch + + Returns + ------- + matching_coeffs_down: + downward matching coefficient matrix + """ + c_up = compute_matching_coeffs_up(mass_scheme, nf) + return invert_matching_coeffs(c_up) + + +def invert_matching_coeffs(c_up): + """ This is the perturbative inverse of :data:`matching_coeffs_up` and has been obtained via .. code-block:: Mathematica @@ -430,18 +501,23 @@ def compute_matching_coeffs_down(mass_scheme): Parameters ---------- - mass_scheme: - Heavy quark mass scheme: "POLE" or "MSBAR" + c_up : numpy.ndarray + forward matching coefficient matrix Returns ------- matching_coeffs_down: downward matching coefficient matrix """ - _c = compute_matching_coeffs_up(mass_scheme) - matching_coeffs_down = np.zeros_like(_c) - matching_coeffs_down[1, 1] = -_c[1, 1] - matching_coeffs_down[2, 0] = -_c[2, 0] - matching_coeffs_down[2, 1] = -_c[2, 1] - matching_coeffs_down[2, 2] = 2.0 * _c[1, 1] ** 2 - _c[2, 2] + matching_coeffs_down = np.zeros_like(c_up) + matching_coeffs_down[1, 1] = -c_up[1, 1] + matching_coeffs_down[2, 0] = -c_up[2, 0] + matching_coeffs_down[2, 1] = -c_up[2, 1] + matching_coeffs_down[2, 2] = 2.0 * c_up[1, 1] ** 2 - c_up[2, 2] + matching_coeffs_down[3, 0] = -c_up[3, 0] + matching_coeffs_down[3, 1] = 5 * c_up[1, 1] * c_up[2, 0] - c_up[3, 1] + matching_coeffs_down[3, 2] = 5 * c_up[1, 1] * c_up[2, 1] - c_up[3, 2] + matching_coeffs_down[3, 3] = ( + -5 * c_up[1, 1] ** 3 + 5 * c_up[1, 1] * c_up[2, 2] - c_up[3, 3] + ) return matching_coeffs_down diff --git a/src/ekomark/benchmark/external/LHA_utils.py b/src/ekomark/benchmark/external/LHA_utils.py index 89589b831..d91624180 100644 --- a/src/ekomark/benchmark/external/LHA_utils.py +++ b/src/ekomark/benchmark/external/LHA_utils.py @@ -4,9 +4,9 @@ """ import pathlib -import matplotlib.pyplot as plt import numpy as np import yaml +from matplotlib import pyplot as plt from 
matplotlib.backends.backend_pdf import PdfPages from eko import basis_rotation as br diff --git a/src/ekomark/benchmark/external/apfel_utils.py b/src/ekomark/benchmark/external/apfel_utils.py index 39d58cf9a..dbc94a9de 100644 --- a/src/ekomark/benchmark/external/apfel_utils.py +++ b/src/ekomark/benchmark/external/apfel_utils.py @@ -72,14 +72,14 @@ def compute_apfel_data( # init evolution apfel.InitializeAPFEL() - print("Loading APFEL took %f s" % (time.perf_counter() - apf_start)) + print(f"Loading APFEL took {(time.perf_counter() - apf_start)} s") # Run apf_tabs = {} for q2 in operators["Q2grid"]: apfel.EvolveAPFEL(theory["Q0"], np.sqrt(q2)) - print("Executing APFEL took %f s" % (time.perf_counter() - apf_start)) + print(f"Executing APFEL took {(time.perf_counter() - apf_start)} s") tab = {} for pid in br.flavor_basis_pids: diff --git a/tests/benchmark_msbar_evolution.py b/tests/benchmark_msbar_evolution.py index c42ee6bc0..63f16eb63 100644 --- a/tests/benchmark_msbar_evolution.py +++ b/tests/benchmark_msbar_evolution.py @@ -2,7 +2,7 @@ """This module benchmarks MSbar mass evolution against APFEL.""" import numpy as np -from eko.msbar_masses import evolve_msbar_mass +from eko import msbar_masses from eko.strong_coupling import StrongCoupling # try to load APFEL - if not available, we'll use the cached values @@ -13,6 +13,28 @@ except ImportError: use_APFEL = False +theory_dict = { + "alphas": 0.1180, + "Qref": 91, + "nfref": 5, + "MaxNfPdf": 6, + "MaxNfAs": 6, + "Q0": 1, + "fact_to_ren_scale_ratio": 1.0, + "mc": 1.5, + "mb": 4.1, + "mt": 175.0, + "kcThr": 1.0, + "kbThr": 1.0, + "ktThr": 1.0, + "HQ": "MSBAR", + "Qmc": 18, + "Qmb": 20, + "Qmt": 175.0, + "PTO": 2, + "ModEv": "EXA", +} + class BenchmarkMSbar: def benchmark_APFEL_msbar_evolution(self): @@ -23,79 +45,201 @@ def benchmark_APFEL_msbar_evolution(self): Q2m = np.power([2.0, 4.5, 175], 2) m2 = np.power((1.4, 4.5, 175), 2) apfel_vals_dict = { - 0: np.array( - [ - [1.6046128320144937, 5.82462147889985, 
319.1659462836651], - [0.9184216270407894, 3.3338000474743867, 182.67890037277968], - [0.8892735505271812, 3.2279947658871553, 176.88119438599423], - ] - ), - 1: np.array( - [ - [1.7606365126844892, 6.707425163030458, 392.9890591164956], - [0.8227578662769597, 3.134427109507681, 183.6465604399247], - [0.7934449726444709, 3.0227549733595973, 177.10367302092334], - ] - ), - 2: np.array( - [ - [1.8315619347807859, 7.058348563796064, 417.06081590596904], - [0.8078493428925942, 3.113242546931765, 183.76436225206226], - [0.7788276351007536, 3.0014003869088755, 177.16269119698978], - ] - ), + "exact": { + 0: np.array( + [ + [1.6046128320144937, 5.82462147889985, 319.1659462836651], + [0.9184216270407894, 3.3338000474743867, 182.67890037277968], + [0.8892735505271812, 3.2279947658871553, 176.88119438599423], + ] + ), + 1: np.array( + [ + [1.7606365126844892, 6.707425163030458, 392.9890591164956], + [0.8227578662769597, 3.134427109507681, 183.6465604399247], + [0.7934449726444709, 3.0227549733595973, 177.10367302092334], + ] + ), + 2: np.array( + [ + [1.8315619347807859, 7.058348563796064, 416.630860855048], + [0.8078493428925942, 3.113242546931765, 183.76436225206226], + [0.7788276351007536, 3.0014003869088755, 177.16269119698978], + ] + ), + }, + "expanded": { + 0: np.array( + [ + [1.6046128320144941, 5.824621478899853, 319.1659462836651], + [0.9184216270407891, 3.3338000474743863, 182.67890037277968], + [0.8892735505271808, 3.227994765887154, 176.88119438599423], + ] + ), + 1: np.array( + [ + [1.7533055503995305, 6.672122790503439, 390.2255025944903], + [0.8251533585949282, 3.1400827586994855, 183.65075271254497], + [0.7957427000040153, 3.028161864236207, 177.104951823859], + ] + ), + 2: np.array( + [ + [1.8268480938423455, 7.037891257825139, 415.2638988980831], + [0.8084237419306857, 3.1144420987483485, 183.76461377981423], + [0.7793806824526442, 3.002554084550691, 177.16277079680097], + ] + ), + }, } # collect my values - for order in [0, 1, 2]: - as_VFNS = 
StrongCoupling( - alphas_ref, - scale_ref, - m2, - thresholds_ratios, - order=order, - method="exact", - hqm_scheme="MSBAR", - ) - my_vals = [] - for Q2 in Q2s: - my_masses = [] - for n in [3, 4, 5]: - my_masses.append( - evolve_msbar_mass( - m2[n - 3], - Q2m[n - 3], - strong_coupling=as_VFNS, - fact_to_ren=1.0, - q2_to=Q2, + for method in ["exact", "expanded"]: + for order in [0, 1, 2]: + as_VFNS = StrongCoupling( + alphas_ref, + scale_ref, + m2, + thresholds_ratios, + order=order, + method=method, + hqm_scheme="MSBAR", + ) + my_vals = [] + for Q2 in Q2s: + my_masses = [] + for n in [3, 4, 5]: + my_masses.append( + msbar_masses.evolve( + m2[n - 3], + Q2m[n - 3], + strong_coupling=as_VFNS, + fact_to_ren=1.0, + q2_to=Q2, + ) ) + my_vals.append(my_masses) + # get APFEL numbers - if available else use cache + apfel_vals = apfel_vals_dict[method][order] + if use_APFEL: + # run apfel + apfel.CleanUp() + apfel.SetTheory("QCD") + apfel.SetPerturbativeOrder(order) + apfel.SetAlphaEvolution(method) + apfel.SetAlphaQCDRef(alphas_ref, np.sqrt(scale_ref)) + apfel.SetVFNS() + apfel.SetMSbarMasses(*np.sqrt(m2)) + apfel.SetMassScaleReference(*np.sqrt(Q2m)) + apfel.SetRenFacRatio(1.0) + apfel.InitializeAPFEL() + # collect apfel masses + apfel_vals_cur = [] + for Q2 in Q2s: + masses = [] + for n in [4, 5, 6]: + masses.append(apfel.HeavyQuarkMass(n, np.sqrt(Q2))) + apfel_vals_cur.append(masses) + print(apfel_vals_cur) + np.testing.assert_allclose( + apfel_vals, np.array(apfel_vals_cur), err_msg=f"order={order}" ) - my_vals.append(my_masses) + # check myself to APFEL + np.testing.assert_allclose( + apfel_vals, np.sqrt(np.array(my_vals)), rtol=2e-3 + ) + + def benchmark_APFEL_msbar_solution(self): + apfel_vals_dict = { + "EXA": { + 0: np.array( + [1.9855, 4.8062, 175.0000], + ), + 1: np.array( + [2.1308, 4.9656, 175.0000], + ), + 2: np.array([2.1566, 4.9841, 175.0000]), + }, + } + # collect my values + for order in [0, 1, 2]: + theory_dict["PTO"] = order + my_masses = 
msbar_masses.compute(theory_dict) # get APFEL numbers - if available else use cache - apfel_vals = apfel_vals_dict[order] + apfel_vals = apfel_vals_dict[theory_dict["ModEv"]][order] if use_APFEL: # run apfel apfel.CleanUp() apfel.SetTheory("QCD") apfel.SetPerturbativeOrder(order) apfel.SetAlphaEvolution("exact") - apfel.SetAlphaQCDRef(alphas_ref, np.sqrt(scale_ref)) + apfel.SetAlphaQCDRef(theory_dict["alphas"], theory_dict["Qref"]) apfel.SetVFNS() - apfel.SetMSbarMasses(*np.sqrt(m2)) - apfel.SetMassScaleReference(*np.sqrt(Q2m)) - apfel.SetRenFacRatio(1.0) - apfel.InitializeAPFEL() - # collect apfel masses - apfel_vals_cur = [] - for Q2 in Q2s: - masses = [] - for n in [4, 5, 6]: - masses.append(apfel.HeavyQuarkMass(n, np.sqrt(Q2))) - apfel_vals_cur.append(masses) - print(apfel_vals_cur) - np.testing.assert_allclose( - apfel_vals, np.array(apfel_vals_cur), err_msg=f"order={order}" + apfel.SetMSbarMasses( + theory_dict["mc"], theory_dict["mb"], theory_dict["mt"] ) + apfel.SetMassScaleReference( + theory_dict["Qmc"], theory_dict["Qmb"], theory_dict["Qmt"] + ) + apfel.SetRenFacRatio(theory_dict["fact_to_ren_scale_ratio"]) + apfel.InitializeAPFEL() + apfel.EnableWelcomeMessage(1) # check myself to APFEL np.testing.assert_allclose( - apfel_vals, np.sqrt(np.array(my_vals)), rtol=2e-3 + apfel_vals, np.sqrt(np.array(my_masses)), rtol=5e-5 ) + + def benchmark_msbar_solution_kthr(self): + """ + With this test you can see that in EKO + the solution value of mb is not affected by "kbThr", + since mb is searched with an Nf=5 larger range. + While in Apfel this doesn't happen. 
+ """ + theory_dict.update( + { + "mc": 1.5, + "mb": 4.1, + "mt": 175.0, + "kcThr": 1.2, + "kbThr": 1.8, + "ktThr": 1.0, + "Qmc": 18, + "Qmb": 20, + "Qmt": 175.0, + "PTO": 0, + } + ) + my_masses_thr = msbar_masses.compute(theory_dict) + apfel_masses_thr = [1.9891, 4.5102, 175.0000] + theory_dict.update( + { + "kcThr": 1.0, + "kbThr": 1.0, + } + ) + my_masses_plain = msbar_masses.compute(theory_dict) + apfel_masses_plain = ([1.9855, 4.8062, 175.0000],) + + # Eko bottom mass is the same + np.testing.assert_allclose(my_masses_thr[1], my_masses_plain[1]) + # Eko charm mass is not the same + np.testing.assert_raises( + AssertionError, + np.testing.assert_allclose, + my_masses_thr[0], + my_masses_plain[0], + ) + + # Apfel bottom masses are not the same + np.testing.assert_raises( + AssertionError, + np.testing.assert_allclose, + apfel_masses_thr[0], + apfel_masses_plain[0], + ) + np.testing.assert_raises( + AssertionError, + np.testing.assert_allclose, + apfel_masses_thr[0], + apfel_masses_plain[0], + ) diff --git a/tests/benchmark_strong_coupling.py b/tests/benchmark_strong_coupling.py index 523239990..a4caa9e7c 100644 --- a/tests/benchmark_strong_coupling.py +++ b/tests/benchmark_strong_coupling.py @@ -165,10 +165,18 @@ def benchmark_pegasus_ffns(self): 0.009245273177012448, ] ), + 3: np.array( + [ + 0.02224042559454458, + 0.014988806602725375, + 0.01140950716164355, + 0.009245168439298405, + ] + ), } # collect my values threshold_holder = thresholds.ThresholdsAtlas.ffns(nf) - for order in [0, 1, 2]: + for order in [0, 1, 2, 3]: as_FFNS = StrongCoupling( alphas_ref, scale_ref, @@ -200,11 +208,11 @@ def benchmark_pegasus_ffns(self): ) # print(pegasus_vals_cur) np.testing.assert_allclose( - pegasus_vals, np.array(pegasus_vals_cur), err_msg="order=%d" % order + pegasus_vals, np.array(pegasus_vals_cur), err_msg=f"order={order}" ) - # check myself to APFEL + # check myself to PEGASUS np.testing.assert_allclose( - pegasus_vals, np.array(my_vals), rtol=1.5e-7, 
err_msg="order=%d" % order + pegasus_vals, np.array(my_vals), rtol=1.5e-7, err_msg=f"order={order}" ) def benchmark_APFEL_vfns(self): @@ -474,18 +482,18 @@ def benchmark_APFEL_vfns_threshold(self): np.testing.assert_allclose(apfel_vals, np.array(my_vals)) def benchmark_APFEL_vfns_msbar(self): - Q2s = np.power([30, 96, 150], 2) + Q2s = np.power([3, 96, 150], 2) alphas_ref = 0.118 scale_ref = 91.0**2 thresholds_ratios = np.power((1.0, 1.0, 1.0), 2) - Q2m = np.power([2.0, 2.0, 175], 2) - m2 = np.power((1.4, 2.0, 175), 2) + Q2m = np.power([2.0, 4.0, 175], 2) + m2 = np.power((1.4, 4.0, 175), 2) apfel_vals_dict = { 1: np.array( - [0.011285356789817751, 0.009315017128518715, 0.00873290495922747] + [0.01985110421500437, 0.009315017128518715, 0.00873290495922747] ), 2: np.array( - [0.011292233300595585, 0.009314871933701218, 0.008731890252153528] + [0.02005213805142418, 0.009314871933701218, 0.008731890252153528] ), } # collect my values @@ -524,7 +532,7 @@ def benchmark_APFEL_vfns_msbar(self): print(apfel_vals_cur) np.testing.assert_allclose(apfel_vals, np.array(apfel_vals_cur)) # check myself to APFEL - np.testing.assert_allclose(apfel_vals, np.array(my_vals), rtol=2e-4) + np.testing.assert_allclose(apfel_vals, np.array(my_vals)) def _get_Lambda2_LO(self, as_ref, scale_ref, nf): """Transformation to Lambda_QCD""" @@ -675,8 +683,16 @@ def benchmark_lhapdf_exact(self): 0.009214183512196827, ] ), + 3: np.array( + [ + 0.025955428065113105, + 0.015862752802768762, + 0.011614793662449371, + 0.009214009540864635, + ] + ), } - for order in range(2 + 1): + for order in range(3 + 1): sc = StrongCoupling( alphas_ref, scale_ref, diff --git a/tests/test_beta.py b/tests/test_beta.py index 93f7d5116..b6c61c167 100644 --- a/tests/test_beta.py +++ b/tests/test_beta.py @@ -7,6 +7,7 @@ import pytest from eko import beta +from eko.anomalous_dimensions.harmonics import zeta3 def _flav_test(function): @@ -38,14 +39,24 @@ def test_beta_2(): np.testing.assert_approx_equal(beta.beta_2(5), 4**3 * 
9769 / 3456) +def test_beta_3(): + """Test fourth beta function coefficient""" + _flav_test(beta.beta_3) + # from hep-ph/9706430 + np.testing.assert_allclose( + beta.beta_3(5), 4**4 * (11027.0 / 648.0 * zeta3 - 598391.0 / 373248.0) + ) + + def test_beta(): """beta-wrapper""" nf = 3 np.testing.assert_allclose(beta.beta(0, nf), beta.beta_0(nf)) np.testing.assert_allclose(beta.beta(1, nf), beta.beta_1(nf)) np.testing.assert_allclose(beta.beta(2, nf), beta.beta_2(nf)) + np.testing.assert_allclose(beta.beta(3, nf), beta.beta_3(nf)) with pytest.raises(ValueError): - beta.beta(3, 3) + beta.beta(4, 3) def test_b(): diff --git a/tests/test_msbar_masses.py b/tests/test_msbar_masses.py index 68d3fbc80..fbe530534 100644 --- a/tests/test_msbar_masses.py +++ b/tests/test_msbar_masses.py @@ -5,8 +5,8 @@ import numpy as np import pytest +from eko import msbar_masses from eko.evolution_operator.flavors import quark_names -from eko.msbar_masses import compute_msbar_mass, evolve_msbar_mass from eko.strong_coupling import StrongCoupling theory_dict = { @@ -36,19 +36,19 @@ class TestMsbarMasses: def test_compute_msbar_mass(self): # Test solution of msbar(m) = m for method in ["EXA", "EXP"]: - for order in [1, 2]: + for order in [1, 2, 3]: theory_dict.update({"ModEv": method, "PTO": order}) strong_coupling = StrongCoupling.from_dict(theory_dict) # compute the scale such msbar(m) = m - m2_computed = compute_msbar_mass(theory_dict) + m2_computed = msbar_masses.compute(theory_dict) m2_test = [] for nf in [3, 4, 5]: # compute msbar( m ) m2_ref = theory_dict[f"m{quark_names[nf]}"] ** 2 Q2m_ref = theory_dict[f"Qm{quark_names[nf]}"] ** 2 m2_test.append( - evolve_msbar_mass( + msbar_masses.evolve( m2_ref, Q2m_ref, strong_coupling=strong_coupling, @@ -56,7 +56,7 @@ def test_compute_msbar_mass(self): q2_to=m2_computed[nf - 3], ) ) - np.testing.assert_allclose(m2_computed, m2_test, rtol=5e-3) + np.testing.assert_allclose(m2_computed, m2_test, rtol=4e-3) def test_compute_msbar_mass_VFNS(self): # 
test the solution now with some initial contition @@ -64,7 +64,7 @@ def test_compute_msbar_mass_VFNS(self): theory_dict.update( { "ModEv": "TRN", - "PTO": 2, + "PTO": 3, "mc": 2.0, "mb": 4.0, "Qmc": 80.0, @@ -73,14 +73,14 @@ def test_compute_msbar_mass_VFNS(self): ) strong_coupling = StrongCoupling.from_dict(theory_dict) # compute the scale such msbar(m) = m - m2_computed = compute_msbar_mass(theory_dict) + m2_computed = msbar_masses.compute(theory_dict) m2_test = [] for nf in [3, 4, 5]: # compute msbar( m ) m2_ref = theory_dict[f"m{quark_names[nf]}"] ** 2 Q2m_ref = theory_dict[f"Qm{quark_names[nf]}"] ** 2 m2_test.append( - evolve_msbar_mass( + msbar_masses.evolve( m2_ref, Q2m_ref, strong_coupling=strong_coupling, @@ -88,22 +88,12 @@ def test_compute_msbar_mass_VFNS(self): q2_to=m2_computed[nf - 3], ) ) - np.testing.assert_allclose(m2_computed, m2_test, rtol=5e-3) + np.testing.assert_allclose(m2_computed, m2_test, rtol=3e-3) def test_errors(self): # test mass ordering - with pytest.raises(ValueError, match="do not preserve the correct ordering"): - theory_dict.update( - dict( - mc=1.0, - mb=1.00001, - Qmc=1.0, - Qmb=1.00001, - ) - ) - compute_msbar_mass(theory_dict) - with pytest.raises(ValueError, match="masses need to be sorted"): + with pytest.raises(ValueError, match="Msbar masses are not to be sorted"): theory_dict.update( dict( mc=1.1, @@ -112,26 +102,26 @@ def test_errors(self): Qmb=1.0, ) ) - compute_msbar_mass(theory_dict) + msbar_masses.compute(theory_dict) # test forward conditions on alphas_ref with pytest.raises(ValueError, match="should be lower than"): theory_dict.update(dict(Qmb=91.0001)) - compute_msbar_mass(theory_dict) + msbar_masses.compute(theory_dict) # test backward conditions on alphas_ref with pytest.raises(ValueError, match="should be greater than"): theory_dict.update(dict(Qmt=89.9999)) - compute_msbar_mass(theory_dict) + msbar_masses.compute(theory_dict) theory_dict.update(dict(Qmb=4.0, Qmt=175)) # test forward conditions on masses with 
pytest.raises(ValueError, match="should be lower than m"): theory_dict.update(dict(mt=174)) - compute_msbar_mass(theory_dict) + msbar_masses.compute(theory_dict) # test backward conditions on masses with pytest.raises(ValueError, match="should be greater than m"): theory_dict.update(dict(mb=4.1, mt=176)) - compute_msbar_mass(theory_dict) + msbar_masses.compute(theory_dict) diff --git a/tests/test_strong_coupling.py b/tests/test_strong_coupling.py index a52da8183..4b6e1b88c 100644 --- a/tests/test_strong_coupling.py +++ b/tests/test_strong_coupling.py @@ -85,7 +85,7 @@ def test_init(self): scale_ref, threshold_holder.area_walls[1:-1], (1.0, 1.0, 1.0), - 3, + 4, ) with pytest.raises(ValueError): StrongCoupling( @@ -127,7 +127,7 @@ def test_ref(self): alphas_ref = 0.118 scale_ref = 91.0**2 for thresh_setup in thresh_setups: - for order in [0, 1, 2]: + for order in [0, 1, 2, 3]: for method in ["exact", "expanded"]: # create sc = StrongCoupling( @@ -163,3 +163,35 @@ def test_exact_LO(self): np.testing.assert_allclose( sc_expanded.a_s(q2), sc_exact.a_s(q2), rtol=5e-4 ) + + def benchmark_expanded_n3lo(self): + """test N3LO - NNLO expansion with some reference value from Mathematica""" + Q2 = 100**2 + # use a big alpha_s to enlarge the difference + alphas_ref = 0.9 + scale_ref = 90**2 + m2c = 2 + m2b = 25 + m2t = 30625 + threshold_list = [m2c, m2b, m2t] + mathematica_val = -0.000169117 + # collect my values + as_NNLO = StrongCoupling( + alphas_ref, + scale_ref, + threshold_list, + (1.0, 1.0, 1.0), + order=2, + method="expanded", + ) + as_N3LO = StrongCoupling( + alphas_ref, + scale_ref, + threshold_list, + (1.0, 1.0, 1.0), + order=3, + method="expanded", + ) + np.testing.assert_allclose( + mathematica_val, as_N3LO.a_s(Q2) - as_NNLO.a_s(Q2), rtol=3e-6 + )