diff --git a/.gitignore b/.gitignore index 7f150f4..8762454 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,7 @@ # To ignore *.*~ **/*.*~ -git_push.sh \ No newline at end of file +git_push.sh +__pycache__ +outputs/ +tmp_lhapdf diff --git a/README b/README index e8ea136..55b1c5f 100644 --- a/README +++ b/README @@ -1,16 +1,20 @@ Readme (under construction) for fixpar_nnpdf code -Requires NNPDF code from https://docs.nnpdf.science via conda installation. Tested for: +Requires NNPDF code from https://docs.nnpdf.science either from source or conda -conda install nnpdf python=3.9 - -(I think python version may be needed for lhapdf to run though need to double check that) +``` + conda install nnpdf -c https://packages.nnpdf.science/conda -c conda-forge +``` To run simply execute: python fixpar_nnpdf.py --config=configs/**.yaml -for the corresponding .yaml file. Some explanation of the various flags is provided in the example .yaml files but clearly this needs better documentation + some specific example cases to be provided. +for the corresponding .yaml file. Some explanation of the various flags is provided in the example .yaml files but clearly this needs better documentation + some specific example cases to be provided. Adding to this, to run: + +- A fixed chi^2 output. Set fixpar to False. +- A fit. Set fixpar to True. The second column of 0/1's in the input file (in input/) specify whether a given parmeter is free (1) or fixed (0). If all are fixed this will be as if fixpar is set to True. The parameters for the best fit will be output to outputs/pars/$label.dat and the covariance matrix to outputs/cov/$label.dat. +- An eigenvector scan. Set readcov to True, with the 'cov input file' given by the output of a previous fit. Then specify if dynamic tolerance is used (dynamic_tol) and if not the size of the fixed tolerance (t2_err). 
The outputs are in the outputs/ folder: @@ -18,4 +22,4 @@ The outputs are in the outputs/ folder: /buffer : outputs from running code /cov : covariance matrix (used for error calculation) /evscans: outputs if eigenvector scan is done -/pars : the PDF parameters \ No newline at end of file +/pars : the PDF parameters diff --git a/configs/fit_example.yaml b/configs/fit_example.yaml new file mode 100644 index 0000000..5f278a0 --- /dev/null +++ b/configs/fit_example.yaml @@ -0,0 +1,68 @@ +# input files +inputs: + # PDF parameter inputs: + input file: inputdef_2parfree.dat + # input covariance matrix file (if used) + cov input file: newmin_final_remin_out.dat + # if true then read in covariance matrix and evaluate PDF errors + readcov: False + +# output files +outputs: + # label for output files + label: fitout_jcm + +basis pars: + # If true then two term gluon parameterisation uesd + g_second_term: True + # if true and g_second_term=False then includes 7th Chebyshev for gluon + g_cheb7: False + # If true then delta_S+ set to that of sea + asp_fix: True + # If true thedelta_d=delta_u for d_V and u_V (need to set delta_d fixed or will crash!) + dvd_eq_uvd: False + +pdf pars: + # use external LHAPDF grid as input + lhin: False + # if true then use lhapdf grids for theory evaluation + # (for PDF pd fits can use parameterisation directly instead, otherwise has to be true) + uselha: True + # if lhin=true, name of set input + PDFlabel_lhin: NNPDF40_nnlo_pch_as_01180 + +pdf closure: + # if true then scatters when doing direct PDF pd fit + pdfscat: False + # label of pseudodata used + pdlabel: 'NNPDF40pch_gl_l0' + # if true then do direct fit to PDF pseudodata + pdpdf: False + +pseudodata flags: + # if true then write pseudodata out to file + pdout: False + # if true then write pseudodata out to file + pdin: False + +fit pars: + # Only use t0 for chi2i and chi2o in levmar (i.e. 
no derivative at all) + t0_noderivin: True + # run with fixed input parameters (override input card flags) + fixpar: False + # NNPDF theory id (211 = NNLO pch) + theoryidi: 40001000 + # impose NNPDF positivity in fit + nnpdf_pos: False + # if true then impose 4.0 positivity constraint, if pos_const=True + pos_40: True + # if true impose weight to prefer delta_d > 0.25 + deld_const: False + # if true then generates a replica from baseline dataset with seed irep + pseud: False + # irep number - also used when generating error grids + irep: 0 + # NMC PD data - set covariance matrix to be diagonal + nmcpd_diag: False + # dataset flag - global, HERAonly, noHERA, noLHC + dset_type: 'global_new' \ No newline at end of file diff --git a/configs/fit_mshtfit_publication_longmin.yaml b/configs/fit_mshtfit_publication_longmin.yaml new file mode 100644 index 0000000..47cb4ec --- /dev/null +++ b/configs/fit_mshtfit_publication_longmin.yaml @@ -0,0 +1,72 @@ +# input files +inputs: + # PDF parameter inputs: + input file: input_mshtfit_publication_longmin.dat + # input covariance matrix file (if used) + cov input file: newmin_final_remin_out.dat + # if true then read in covariance matrix and evaluate PDF errors + readcov: False + +# output files +outputs: + # label for output files + label: mshtfit_publication_longmin_out + +basis pars: + # If true then two term gluon parameterisation uesd + g_second_term: True + # if true and g_second_term=False then includes 7th Chebyshev for gluon + g_cheb7: False + # If true then delta_S+ set to that of sea + asp_fix: True + # If true thedelta_d=delta_u for d_V and u_V (need to set delta_d fixed or will crash!) 
+ dvd_eq_uvd: False + +pdf pars: + # use external LHAPDF grid as input + lhin: False + # if true then use lhapdf grids for theory evaluation + # (for PDF pd fits can use parameterisation directly instead, otherwise has to be true) + uselha: True + # if lhin=true, name of set input + PDFlabel_lhin: NNPDF40_nnlo_pch_as_01180 + +pdf closure: + # if true then scatters when doing direct PDF pd fit + pdfscat: False + # label of pseudodata used + pdlabel: 'NNPDF40pch_gl_l0' + # if true then do direct fit to PDF pseudodata + pdpdf: False + +pseudodata flags: + # if true then write pseudodata out to file + pdout: False + # if true then write pseudodata out to file + pdin: False + +fit pars: + # Use dynamic tolerance in error calc + dynamic_tol: False + # T value for error calculation + t2_err: 1. + # Only use t0 for chi2i and chi2o in levmar (i.e. no derivative at all) + t0_noderivin: True + # run with fixed input parameters (override input card flags) + fixpar: False + # NNPDF theory id (211 = NNLO pch) + theoryidi: 40001000 + # impose NNPDF positivity in fit + nnpdf_pos: False + # if true then impose 4.0 positivity constraint, if pos_const=True + pos_40: True + # if true impose weight to prefer delta_d > 0.25 + deld_const: False + # if true then generates a replica from baseline dataset with seed irep + pseud: False + # irep number - also used when generating error grids + irep: 0 + # NMC PD data - set covariance matrix to be diagonal + nmcpd_diag: False + # dataset flag - global, HERAonly, noHERA, noLHC + dset_type: 'global_new' \ No newline at end of file diff --git a/configs/fit_mshtfit_publication_shortmin.yaml b/configs/fit_mshtfit_publication_shortmin.yaml new file mode 100644 index 0000000..58d08dc --- /dev/null +++ b/configs/fit_mshtfit_publication_shortmin.yaml @@ -0,0 +1,72 @@ +# input files +inputs: + # PDF parameter inputs: + input file: input_mshtfit_publication_shortmin.dat + # input covariance matrix file (if used) + cov input file: 
newmin_final_remin_out.dat + # if true then read in covariance matrix and evaluate PDF errors + readcov: False + +# output files +outputs: + # label for output files + label: mshtfit_publication_shortmin_out + +basis pars: + # If true then two term gluon parameterisation uesd + g_second_term: True + # if true and g_second_term=False then includes 7th Chebyshev for gluon + g_cheb7: False + # If true then delta_S+ set to that of sea + asp_fix: True + # If true thedelta_d=delta_u for d_V and u_V (need to set delta_d fixed or will crash!) + dvd_eq_uvd: False + +pdf pars: + # use external LHAPDF grid as input + lhin: False + # if true then use lhapdf grids for theory evaluation + # (for PDF pd fits can use parameterisation directly instead, otherwise has to be true) + uselha: True + # if lhin=true, name of set input + PDFlabel_lhin: NNPDF40_nnlo_pch_as_01180 + +pdf closure: + # if true then scatters when doing direct PDF pd fit + pdfscat: False + # label of pseudodata used + pdlabel: 'NNPDF40pch_gl_l0' + # if true then do direct fit to PDF pseudodata + pdpdf: False + +pseudodata flags: + # if true then write pseudodata out to file + pdout: False + # if true then write pseudodata out to file + pdin: False + +fit pars: + # Use dynamic tolerance in error calc + dynamic_tol: False + # T value for error calculation + t2_err: 1. + # Only use t0 for chi2i and chi2o in levmar (i.e. 
no derivative at all) + t0_noderivin: True + # run with fixed input parameters (override input card flags) + fixpar: False + # NNPDF theory id (211 = NNLO pch) + theoryidi: 40001000 + # impose NNPDF positivity in fit + nnpdf_pos: False + # if true then impose 4.0 positivity constraint, if pos_const=True + pos_40: True + # if true impose weight to prefer delta_d > 0.25 + deld_const: False + # if true then generates a replica from baseline dataset with seed irep + pseud: False + # irep number - also used when generating error grids + irep: 0 + # NMC PD data - set covariance matrix to be diagonal + nmcpd_diag: False + # dataset flag - global, HERAonly, noHERA, noLHC + dset_type: 'global_new' \ No newline at end of file diff --git a/fixpar_nnpdf.py b/fixpar_nnpdf.py index 628fc46..c457be7 100644 --- a/fixpar_nnpdf.py +++ b/fixpar_nnpdf.py @@ -1,15 +1,15 @@ -import warnings as wa +import warnings as wa from validphys import core from validphys.api import API from validphys.fkparser import load_fktable from validphys.loader import Loader from validphys.convolution import predictions, linear_predictions, central_predictions import validphys.convolution as conv -# from validphys.results import experiments_index +# from validphys.results import experiments_index import validphys.results as rs from validphys.covmats import covmat_from_systematics import numpy as np -from validphys.commondataparser import load_commondata +from nnpdf_data.commondataparser import load_commondata import validphys.commondata as cdat import pandas as pd from validphys.calcutils import calc_chi2 @@ -17,14 +17,12 @@ import os import lhapdf import shutil as sh -from scipy.integrate import quadrature -from scipy.integrate import fixed_quad from validphys.covmats import dataset_inputs_covmat_from_systematics from validphys.pseudodata import make_replica from scipy.optimize import minimize from scipy.optimize import least_squares import itertools as IT -from reportengine.compat import yaml +from 
reportengine.utils import yaml_safe import time import sys import argparse @@ -32,6 +30,7 @@ from validphys.loader import _get_nnpdf_profile from validphys.lhaindex import get_lha_datapath +lhapdf.setVerbosity(0) sys.path.append("src/") from global_pars import * @@ -45,6 +44,13 @@ from lhapdf_funs import * from error_calc import * +if DEBUG: +# # Fake LHAPDF folder, no longer needed + TEMP_LHAPDF = pathlib.Path("tmp_lhapdf") + TEMP_LHAPDF.mkdir(exist_ok=True) + lhapdf.pathsAppend(TEMP_LHAPDF.as_posix()) + + parser = argparse.ArgumentParser() parser.add_argument("--config", required=True, type=str, help="Path to the config file") @@ -54,7 +60,7 @@ # Loading yaml config ################## with open(args.config, 'r') as file: - config = yaml.safe_load(file) + config = yaml_safe.load(file) inp = config.get("inputs", None) inout_pars.inputnam=inp.get("input file") @@ -142,9 +148,8 @@ if fit_pars.nlo_cuts is None: fit_pars.nlo_cuts=False -fit_pars.newmin=fitp.get("newmin") -if fit_pars.newmin is None: - fit_pars.newmin=True +# Make sure that new_min is True +fit_pars.newmin=True fit_pars.dynT_group=fitp.get("dynT_group") if fit_pars.dynT_group is None: @@ -172,7 +177,7 @@ if fit_pars.pos_gluononly: fit_pars.pos_data31=fit_pars.pos_data31_gluononly fit_pars.pos_data40=fit_pars.pos_data40_gluononly - + if fit_pars.pos_dyc: fit_pars.pos_40=False fit_pars.pos_data31=fit_pars.pos_data31_dyc @@ -222,17 +227,17 @@ fit_pars.cftrue[i]=True elif fit_pars.dset_type == 'lowenergyDISDY': fit_pars.dataset_40=fit_pars.dataset_lowenergyDISDY - fit_pars.imaxdat=len(fit_pars.dataset_40) + fit_pars.imaxdat=len(fit_pars.dataset_40) for i in range(10,19): fit_pars.cftrue[i]=True elif fit_pars.dset_type == 'lowenergyDISDY_HERAonly': fit_pars.dataset_40=fit_pars.dataset_lowenergyDISDY_HERAonly - fit_pars.imaxdat=len(fit_pars.dataset_40) + fit_pars.imaxdat=len(fit_pars.dataset_40) elif fit_pars.dset_type == 'test': fit_pars.dataset_40=fit_pars.dataset_test 
fit_pars.imaxdat=len(fit_pars.dataset_40) for i in range(0,fit_pars.imaxdat): - fit_pars.cftrue[i]=False + fit_pars.cftrue[i]=False elif fit_pars.dset_type == 'global_new': fit_pars.dataset_40=fit_pars.dataset_40_new fit_pars.imaxdat=len(fit_pars.dataset_40) @@ -246,9 +251,15 @@ if fit_pars.theoryidi==212: fit_pars.dataset_40=fit_pars.dataset_40_nlo - fit_pars.imaxdat=len(fit_pars.dataset_40) + fit_pars.imaxdat=len(fit_pars.dataset_40) + +if os.environ.get("LHAPDF_DATA_PATH") is not None: + pdf_pars.lhapdfdir = os.environ["LHAPDF_DATA_PATH"] +else: + pdf_pars.lhapdfdir = get_lha_datapath() + "/" -pdf_pars.lhapdfdir=get_lha_datapath()+'/' +if DEBUG: + pdf_pars.tmp_lhapdfdir = TEMP_LHAPDF profile=_get_nnpdf_profile(None) fit_pars.datapath=pathlib.Path(profile["data_path"]) # fit_pars.theories_path=pathlib.Path(profile["theories_path"]) @@ -256,7 +267,7 @@ fit_pars.newmin=True if chi2_pars.diff_4: - chi2_pars.diff_2=True + chi2_pars.diff_2=True if not pdf_pars.uselha: pdf_pars.lhin=False @@ -278,7 +289,7 @@ # fit_pars.pos_const=False if fit_pars.nnpdf_pos: fit_pars.pos_const=True - else: + else: fit_pars.pos_const=False (afi,hess,jac)=readincov() if chi2_pars.dynamic_tol: @@ -302,28 +313,42 @@ use_levmar=True +# Prepare the initial PDF that might be used for the computation +# of the chi2, the minimization, etc. +if pdf_pars.lhin: + # If using directly an LHAPDF input, load said PDF + vp_pdf = API.pdf(pdf = pdf_pars.PDFlabel_lhin) +else: + # NB the name is irrelevant since the PDF object doesn't leave the program + pdfname = f"{inout_pars.label}_run+{pdf_pars.idir}" + + # Take the free parameters and create the parameter set + parin = initpars() + pdf_parameters_raw = parset(afi, parin, are_free = pdf_pars.par_isf) + # Modify it so the sumrules work (is this independent from the parametrization or need the function below?) 
+ pdf_parameters = sumrules(pdf_parameters_raw) + + vp_pdf = MSHTPDF(name = pdfname, pdf_parameters = pdf_parameters, pdf_function = "msht") + + + if inout_pars.readcov: print('finish!') -elif len(afi)==0: # no free pars \ - +elif len(afi)==0: # no free pars if inout_pars.pdout: print('Output PD to file...') - chi2_pars.t0=False fit_pars.pos_const=False - chi2expi=chi2min(afi) + chi2expi=chi2min(afi, vp_pdf=vp_pdf) elif(chi2_pars.chi2ind): - chi2_pars.t0=True fit_pars.pos_const=False - chi2t0i=chi2min(afi) - - + chi2t0i=chi2min(afi, vp_pdf=vp_pdf) print('chi2(t0) in (no pos pen):',chi2t0i,chi2t0i/chi2_pars.ndat) t1=time.process_time() @@ -332,17 +357,17 @@ chi2_pars.t0=False fit_pars.pos_const=False - chi2expi=chi2min(afi) + chi2expi=chi2min(afi, vp_pdf=vp_pdf) chi2_pars.t0=True - chi2t0i=chi2min(afi) + chi2t0i=chi2min(afi, vp_pdf=vp_pdf) fit_pars.pos_const=True if(inout_pars.pdin): - - chi2posi=chi2min(afi) + + chi2posi=chi2min(afi, vp_pdf=vp_pdf) pospeni=chi2posi-chi2t0i - + # print('chi2(exp) in:',chi2expi,(chi2expi)/chi2_pars.ndat) # t1=time.process_time() # print('time= ',t1-tzero) @@ -364,8 +389,8 @@ else: fit_pars.pos_const=True - - chi2posi=chi2min(afi) + + chi2posi=chi2min(afi, vp_pdf=vp_pdf) pospeni=chi2posi-chi2t0i print('chi2(exp) in:',chi2expi+pospeni,(chi2expi+pospeni)/chi2_pars.ndat) @@ -401,22 +426,35 @@ print('afo = ', afo) else: - chi2_pars.t0=False - fit_pars.pos_const=False - chi2expi=chi2min(afi) - chi2_pars.t0=True - chi2t0i=chi2min(afi) - fit_pars.pos_const=True - chi2posi=chi2min(afi) - pospeni=chi2posi-chi2t0i - print('chi2(exp) in:',chi2expi+pospeni,(chi2expi+pospeni)/chi2_pars.ndat) - print('chi2(t0) in:',(chi2t0i+pospeni),(chi2t0i+pospeni)/chi2_pars.ndat) - print('pos penalty in = ',pospeni,pospeni/chi2_pars.ndat) - + # Compute the experimental chi2 + chi2_pars.t0=False + fit_pars.pos_const=False + chi2expi=chi2min(afi, vp_pdf = vp_pdf) + + # Now the t0 chi2 + chi2_pars.t0=True + chi2t0i=chi2min(afi, vp_pdf = vp_pdf) + + # And now the 
positivity + fit_pars.pos_const=True + chi2posi=chi2min(afi, vp_pdf = vp_pdf) + + # TODO: these three calls should have t0/pos as input parameter to the chi2 function + # and possibily also the PDF *object* should be an input + pospeni=chi2posi-chi2t0i + + # Now compute the loss functions (ie., chi2+positivity) + loss_exp = chi2expi+pospeni + loss_t0 = chi2posi + ndat = chi2_pars.ndat + print(f"chi2(exp) in:{loss_exp:.8} {loss_exp/ndat:.5}") + print(f"chi2(t0) in:{loss_t0:.8} {loss_t0/ndat:.5}") + print(f"chi2(exp) in:{pospeni:.5}") + chi2_pars.t0=True if fit_pars.nnpdf_pos: fit_pars.pos_const=True - else: + else: fit_pars.pos_const=False chi2_pars.diff_4=False @@ -424,7 +462,7 @@ # chi2_pars.t0=False # chi2_pars.diff_2=False # afo=levmar(afi) - # print('afo = ', afo) + # print('afo = ', afo) chi2_pars.t0=False dload_pars.dflag=1 @@ -438,6 +476,7 @@ if fit_pars.nnpdf_pos: + # TODO: there are a few calls to levmar here that need to be treated if(inout_pars.pdin): chi2_pars.t0=False chi2_pars.t0_noderiv=False @@ -474,7 +513,7 @@ outputfile.write("\n") afo=levmar(afo) else: - + # chi2_pars.diff_2=True if(inout_pars.pdin): @@ -486,18 +525,18 @@ chi2_pars.t0=False chi2_pars.t0_noderiv=False - + chi2_pars.t0=False - chi2_pars.t0_noderiv=False + chi2_pars.t0_noderiv=True # chi2_pars.diff_2=True - + # TODO this should open a context manager and pass down the file already opened outputfile=open('outputs/buffer/'+inout_pars.label+'.dat','a') outputfile.write("diff2=False") print("diff2=False") outputfile.write(inout_pars.inputnam) outputfile.write("\n") - afo=levmar(afi) + afo = levmar(afi) if chi2_pars.t0_noderivin and not inout_pars.pdin: chi2_pars.t0_noderiv=False @@ -506,32 +545,7 @@ chi2_pars.t0=False chi2_pars.t0_noderiv=False - # chi2_pars.t0_noderiv=False - # chi2_pars.t0=True - - if not fit_pars.newmin: - chi2_pars.diff_2=True - outputfile=open('outputs/buffer/'+inout_pars.label+'.dat','a') - print('t0=True,diff2=True') - outputfile.write("t0=True,diff2=True") - 
outputfile.write(inout_pars.inputnam) - outputfile.write("\n") - afo=levmar(afo) - - - # chi2_pars.diff_2=True - # outputfile.write("lampos = 1e3, t0,d2=true") - # outputfile.write(inout_pars.inputnam) - # outputfile.write("\n") - # afo=levmar(afo) - - # else: - # afo=levmar(afi) - - # diff_2=True - # afo=levmar_meth3(afi) - - print('afo = ', afo) + print('afo = ', afo) if(pdf_closure.pdpdf): @@ -557,7 +571,7 @@ fit_pars.pos_const=True chi2posf=chi2min(afo) pospenf=chi2posf-chi2expf - + print('chi2(exp) in:',chi2expi+pospeni,(chi2expi+pospeni)/chi2_pars.ndat) print('chi2(t0) in:',(chi2t0i+pospeni),(chi2t0i+pospeni)/chi2_pars.ndat) print('pos penalty in = ',pospeni,pospeni/chi2_pars.ndat) @@ -578,7 +592,7 @@ chi2expi=chi2min(afi) chi2_pars.t0=True chi2t0i=chi2min(afi) - ## fit_pars.pos_const=True + ## fit_pars.pos_const=True fit_pars.pos_const=True chi2posi=chi2min(afi) pospeni=chi2posi-chi2t0i @@ -586,8 +600,8 @@ print('chi2(t0) in:',chi2t0i,chi2t0i/chi2_pars.ndat) print('pos penalty = ',pospeni) res = minimize(chi2min, afi, method='Nelder-Mead', tol=1e-4, options = {'maxiter': 10000, 'maxfev': 10000}) - # res = minimize(chi2min, afi, method='Newton-CG', jac=jac_fun, hess=hess_fun, options = {'disp': True}) -# res = minimize(chi2min, afi, method='CG', jac=jac_fun, options = {'maxiter': 10000}) + # res = minimize(chi2min, afi, method='Newton-CG', jac=jac_fun, hess=hess_fun, options = {'disp': True}) +# res = minimize(chi2min, afi, method='CG', jac=jac_fun, options = {'maxiter': 10000}) print('pars out =',res.x) print(chi2min(res.x),chi2min(res.x)/chi2_pars.ndat) gridout() @@ -610,3 +624,6 @@ resout(pospeni,pospenf,chi2t0i,chi2expi,chi2t0f,chi2expf,chi2_pars.ndat) t1=time.process_time() print('time= ',t1-tzero) + + +# TODO Remove TEMP_LHAPDF at the end diff --git a/input/input_mshtfit_publication_longmin.dat b/input/input_mshtfit_publication_longmin.dat new file mode 100644 index 0000000..6b174c5 --- /dev/null +++ b/input/input_mshtfit_publication_longmin.dat @@ -0,0 
+1,67 @@ +uv parameters (delu,etau,cu1-6) +0.34359110680421356 1 +3.748657630257328 1 +-1.4485203963068545 1 +0.7414364465358945 1 +-0.3388642818301362 1 +0.2280848321195746 1 +-0.09711848793057141 1 +0.0 1 +dv parameters (deld,etad,cd1-6) +0.1282679461193071 1 +3.979488631894834 1 +-0.6458627025417831 1 +-1.0084703291029582 1 +0.9089438747936651 1 +-0.037370288283984136 1 +-0.13569928165046855 1 +0.0 1 +sea parameters (AS,delS,etaS,cS1-6) +41.58449323857026 1 +-0.009876269835192724 1 +7.04310422436405 1 +-1.7767116962046965 1 +1.3784164636765508 1 +-0.8889378927790684 1 +0.46253485385558746 1 +-0.1714246671894168 1 +0.0 1 +s+ parameters (Asp,delsp,etasp,csp1-6) +0.3507526996185292 1 +-0.009876269835192724 1 +3.3216182269099597 1 +-0.12325713325327833 1 +0.7069012358154876 1 +-0.01869621773020992 1 +-0.01803450004502132 1 +0.16233132301019212 1 +-0.11341329021966999 1 +Gluon parameters (etagp,delgp,cg1-4,etagm,delgm) +5.251270998324863 1 +0.6697611015665883 1 +-0.7692192888244083 1 +1.053858946235386 1 +-0.2945725663935634 1 +0.18899337352783627 1 +-0.36797267610953976 1 +1.34544084908059 1 +-0.2780248209456775 1 +s- parameters (Asm,delsm,etasm, cs1-4) +-0.00020242971145737493 1 +0.0014906899461692123 0 +7.459056728811248 1 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +db/ub parameters (Arho,etarho,crho1-6) +3.2658603567888016 1 +2.7349338912295797 1 +-0.5233663315323309 1 +-1.1131362882747111 1 +1.5019462076096963 1 +-1.154365488496816 1 +0.5317645918880659 1 +-0.13328110490804296 1 diff --git a/input/input_mshtfit_publication_shortmin.dat b/input/input_mshtfit_publication_shortmin.dat new file mode 100644 index 0000000..043d443 --- /dev/null +++ b/input/input_mshtfit_publication_shortmin.dat @@ -0,0 +1,67 @@ +uv parameters (delu,etau,cu1-6) +0.34359110680421356 1 +3.748657630257328 1 +-1.4485203963068545 1 +0.7414364465358945 1 +-0.3388642818301362 1 +0.2280848321195746 1 +-0.09711848793057141 1 +0.0 1 +dv parameters (deld,etad,cd1-6) +0.1282679461193071 1 
+3.979488631894834 1 +-0.6458627025417831 1 +-1.0084703291029582 1 +0.9089438747936651 1 +-0.037370288283984136 1 +-0.13569928165046855 1 +0.18212207494760163 1 +sea parameters (AS,delS,etaS,cS1-6) +41.58449323857026 1 +-0.009876269835192724 1 +7.04310422436405 1 +-1.7767116962046965 1 +1.3784164636765508 1 +-0.8889378927790684 1 +0.46253485385558746 1 +-0.1714246671894168 1 +0.03857700228145249 1 +s+ parameters (Asp,delsp,etasp,csp1-6) +0.3507526996185292 1 +-0.009876269835192724 1 +3.3216182269099597 1 +-0.12325713325327833 1 +0.7069012358154876 1 +-0.01869621773020992 1 +-0.01803450004502132 1 +0.16233132301019212 1 +-0.11341329021966999 1 +Gluon parameters (etagp,delgp,cg1-4,etagm,delgm) +5.251270998324863 1 +0.6697611015665883 1 +-0.7692192888244083 1 +1.053858946235386 1 +-0.2945725663935634 1 +0.18899337352783627 1 +-0.36797267610953976 1 +1.34544084908059 1 +-0.2780248209456775 1 +s- parameters (Asm,delsm,etasm, cs1-4) +-0.00020242971145737493 1 +0.0014906899461692123 0 +7.459056728811248 1 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +db/ub parameters (Arho,etarho,crho1-6) +3.2658603567888016 1 +2.7349338912295797 1 +-0.5233663315323309 1 +-1.1131362882747111 1 +1.5019462076096963 1 +-1.154365488496816 1 +0.5317645918880659 1 +-0.13328110490804296 1 diff --git a/input/inputdef_2parfree.dat b/input/inputdef_2parfree.dat new file mode 100644 index 0000000..4f52fb4 --- /dev/null +++ b/input/inputdef_2parfree.dat @@ -0,0 +1,77 @@ +uv parameters (delu,etau,cu1-6) +0.17122373884608855 1 +3.2743726678269165 1 +-1.2122918898379094 0 +0.1517819079177516 0 +0.12285498035990065 0 +-0.010122247519152774 0 +-0.014270897304430395 0 +0.03262297673072622 0 +dv parameters (deld,etad,cd1-6) +0.13865892320621695 0 +2.3352051735263344 0 +-0.44034691419255034 0 +-0.7939290540562433 0 +0.04328245533312397 0 +0.903822027741519 0 +-0.40085984152229265 0 +0.33023220117596597 0 +sea parameters (AS,delS,etaS,cS1-6) +8.262723047435793 0 +-0.06750937068295088 0 +5.189572313265036 0 
+-1.624315151833448 0 +1.3080917021760117 0 +-0.8792934176979688 0 +0.4786052188746685 0 +-0.17873098728686534 0 +0.041566782615336216 0 +s+ parameters (Asp,delsp,etasp,csp1-6) +6.3684990056840105 0 +-0.06815580231450868 0 +5.695241421086358 0 +-1.7316660809876203 0 +1.385230907196718 0 +-0.8683490279392682 0 +0.471977126805542 0 +-0.1727444537147735 0 +0.04482507708725548 0 +Gluon parameters (etagp,delgp,cg1-4,etagm,delgm) +6.814826793215428 0 +0.440844331381777 0 +-1.650826229386586 0 +0.881190652182608 0 +-0.5084408693656719 0 +0.03265726584557637 0 +-1370.7158962827482 0 +1209.670571783354 0 +0.8728597655753609 0 +s- parameters (Asm,delsm,etasm, cs1-4) +-0.003245394946185798 0 +0.022384494068275074 0 +6.526146784622549 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +db/ub parameters (Arho,etarho,crho1-6) +1.2857208900336687 0 +1.0071680503069815 0 +0.0971452909922652 0 +-1.1783327161950718 0 +0.8044045111588943 0 +-0.506706557884737 0 +0.2164830132196875 0 +-0.04654818746580779 0 +charm parameters (Ac,etac,cc1-6) +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 diff --git a/outputs/buffer/test.dat b/outputs_old/buffer/test.dat similarity index 100% rename from outputs/buffer/test.dat rename to outputs_old/buffer/test.dat diff --git a/outputs/evgrids/TODO.dat b/outputs_old/evgrids/TODO.dat similarity index 100% rename from outputs/evgrids/TODO.dat rename to outputs_old/evgrids/TODO.dat diff --git a/outputs/evgrids/post_fit.sh b/outputs_old/evgrids/post_fit.sh similarity index 100% rename from outputs/evgrids/post_fit.sh rename to outputs_old/evgrids/post_fit.sh diff --git a/outputs/evgrids/test/nnfit/replica_1/test.exportgrid b/outputs_old/evgrids/test/nnfit/replica_1/test.exportgrid similarity index 100% rename from outputs/evgrids/test/nnfit/replica_1/test.exportgrid rename to outputs_old/evgrids/test/nnfit/replica_1/test.exportgrid diff --git a/outputs/grids/test/test.info b/outputs_old/grids/test/test.info similarity index 100% rename from 
outputs/grids/test/test.info rename to outputs_old/grids/test/test.info diff --git a/outputs/grids/test/test_0000.dat b/outputs_old/grids/test/test_0000.dat similarity index 100% rename from outputs/grids/test/test_0000.dat rename to outputs_old/grids/test/test_0000.dat diff --git a/outputs/pars/test.dat b/outputs_old/pars/test.dat similarity index 100% rename from outputs/pars/test.dat rename to outputs_old/pars/test.dat diff --git a/outputs/plots/test.dat b/outputs_old/plots/test.dat similarity index 100% rename from outputs/plots/test.dat rename to outputs_old/plots/test.dat diff --git a/outputs/res/test.dat b/outputs_old/res/test.dat similarity index 100% rename from outputs/res/test.dat rename to outputs_old/res/test.dat diff --git a/publication_tests/buffer/mshtfit_publication_longmin_out.dat b/publication_tests/buffer/mshtfit_publication_longmin_out.dat new file mode 100644 index 0000000..1b85d2a --- /dev/null +++ b/publication_tests/buffer/mshtfit_publication_longmin_out.dat @@ -0,0 +1,268 @@ +Starting new buffer... 
+inputnam = input_mshtfit_publication_cheb5a.dat +LM meth 1 +ntries = 1 +chi2i = 224491.18852929212 +step = 1 +lam = 0.001 +chi2o = 28114116.969667714 +chi2i = 224491.18852929212 +step = 2 +lam = 0.01 +chi2o = 27428.51314989846 +chi2i = 224491.18852929212 +chi2o < chi2i - next iteration +ntries = 2 +chi2i = 27428.51314989846 +step = 1 +lam = 0.001 +chi2o = 8474.104554231028 +chi2i = 27428.51314989846 +chi2o < chi2i - next iteration +ntries = 3 +chi2i = 8474.104554231028 +step = 1 +lam = 0.0001 +step = 2 +lam = 0.001 +step = 3 +lam = 0.01 +step = 4 +lam = 0.1 +chi2o = 6200.859157310142 +chi2i = 8474.104554231028 +chi2o < chi2i - next iteration +ntries = 4 +chi2i = 6200.859157310142 +step = 1 +lam = 0.001 +chi2o = 12472.525727836812 +chi2i = 6200.859157310142 +step = 2 +lam = 0.01 +chi2o = 6585.52496575855 +chi2i = 6200.859157310142 +step = 3 +lam = 0.1 +chi2o = 6030.680942074619 +chi2i = 6200.859157310142 +chi2o < chi2i - next iteration +ntries = 5 +chi2i = 6030.680942074619 +step = 1 +lam = 0.001 +chi2o = 8448.523817363162 +chi2i = 6030.680942074619 +step = 2 +lam = 0.01 +chi2o = 6055.462052951501 +chi2i = 6030.680942074619 +step = 3 +lam = 0.1 +chi2o = 5985.536674218513 +chi2i = 6030.680942074619 +chi2o < chi2i - next iteration +ntries = 6 +chi2i = 5985.536674218513 +step = 1 +lam = 0.001 +chi2o = 6984.665335303685 +chi2i = 5985.536674218513 +step = 2 +lam = 0.01 +chi2o = 5937.890342960325 +chi2i = 5985.536674218513 +chi2o < chi2i - next iteration +ntries = 7 +chi2i = 5937.890342960325 +step = 1 +lam = 0.001 +chi2o = 6085.568826105325 +chi2i = 5937.890342960325 +step = 2 +lam = 0.01 +chi2o = 5843.461176393787 +chi2i = 5937.890342960325 +chi2o < chi2i - next iteration +ntries = 8 +chi2i = 5843.461176393787 +step = 1 +lam = 0.001 +chi2o = 5905.060067253815 +chi2i = 5843.461176393787 +step = 2 +lam = 0.01 +chi2o = 5812.092612527062 +chi2i = 5843.461176393787 +chi2o < chi2i - next iteration +ntries = 9 +chi2i = 5812.092612527062 +step = 1 +lam = 0.001 +chi2o = 
5836.290572148721 +chi2i = 5812.092612527062 +step = 2 +lam = 0.01 +chi2o = 5792.33014393892 +chi2i = 5812.092612527062 +chi2o < chi2i - next iteration +ntries = 10 +chi2i = 5792.33014393892 +step = 1 +lam = 0.001 +chi2o = 5793.51710759696 +chi2i = 5792.33014393892 +step = 2 +lam = 0.01 +chi2o = 5779.336042082064 +chi2i = 5792.33014393892 +chi2o < chi2i - next iteration +ntries = 11 +chi2i = 5779.336042082064 +step = 1 +lam = 0.001 +chi2o = 5774.668412877901 +chi2i = 5779.336042082064 +chi2o < chi2i - next iteration +ntries = 12 +chi2i = 5774.668412877901 +step = 1 +lam = 0.0001 +chi2o = 5786.924471457789 +chi2i = 5774.668412877901 +step = 2 +lam = 0.001 +chi2o = 5751.955055480534 +chi2i = 5774.668412877901 +chi2o < chi2i - next iteration +ntries = 13 +chi2i = 5751.955055480534 +step = 1 +lam = 0.0001 +chi2o = 5760.356661780489 +chi2i = 5751.955055480534 +step = 2 +lam = 0.001 +chi2o = 5750.449909155048 +chi2i = 5751.955055480534 +chi2o < chi2i - next iteration +ntries = 14 +chi2i = 5750.449909155048 +step = 1 +lam = 0.0001 +chi2o = 5754.3091901395055 +chi2i = 5750.449909155048 +step = 2 +lam = 0.001 +chi2o = 5749.74124546488 +chi2i = 5750.449909155048 +chi2o < chi2i - next iteration +ntries = 15 +chi2i = 5749.74124546488 +step = 1 +lam = 0.0001 +chi2o = 5751.529495643029 +chi2i = 5749.74124546488 +step = 2 +lam = 0.001 +chi2o = 5749.4481462335925 +chi2i = 5749.74124546488 +chi2o < chi2i - next iteration +ntries = 16 +chi2i = 5749.4481462335925 +step = 1 +lam = 0.0001 +chi2o = 5749.829636490144 +chi2i = 5749.4481462335925 +step = 2 +lam = 0.001 +chi2o = 5749.206078771691 +chi2i = 5749.4481462335925 +chi2o < chi2i - next iteration +ntries = 17 +chi2i = 5749.206078771691 +step = 1 +lam = 0.0001 +chi2o = 5749.023307639162 +chi2i = 5749.206078771691 +chi2o < chi2i - next iteration +ntries = 18 +chi2i = 5749.023307639162 +step = 1 +lam = 1e-05 +chi2o = 5887.462372675723 +chi2i = 5749.023307639162 +step = 2 +lam = 0.0001 +chi2o = 5747.259449616578 +chi2i = 
5749.023307639162 +chi2o < chi2i - next iteration +ntries = 19 +chi2i = 5747.259449616578 +step = 1 +lam = 1e-05 +chi2o = 5816.9042152212205 +chi2i = 5747.259449616578 +step = 2 +lam = 0.0001 +chi2o = 5747.062489199895 +chi2i = 5747.259449616578 +chi2o < chi2i - next iteration +ntries = 20 +chi2i = 5747.062489199895 +step = 1 +lam = 1e-05 +chi2o = 5791.238423182862 +chi2i = 5747.062489199895 +step = 2 +lam = 0.0001 +chi2o = 5747.197786153675 +chi2i = 5747.062489199895 +step = 3 +lam = 0.001 +chi2o = 5747.961765014968 +chi2i = 5747.062489199895 +step = 4 +lam = 0.01 +chi2o = 5747.990979051017 +chi2i = 5747.062489199895 +step = 5 +lam = 0.1 +chi2o = 5747.935821468584 +chi2i = 5747.062489199895 +step = 6 +lam = 1.0 +chi2o = 5747.6697945260075 +chi2i = 5747.062489199895 +step = 7 +lam = 10.0 +chi2o = 5747.213308812315 +chi2i = 5747.062489199895 +step = 8 +lam = 100.0 +chi2o = 5747.078552338051 +chi2i = 5747.062489199895 +step = 9 +lam = 1000.0 +chi2o = 5747.06407643748 +chi2i = 5747.062489199895 +step = 10 +lam = 10000.0 +chi2o = 5747.062647579913 +chi2i = 5747.062489199895 +step = 11 +lam = 100000.0 +chi2o = 5747.06250484707 +chi2i = 5747.062489199895 +step = 12 +lam = 1000000.0 +chi2o = 5747.062490643631 +chi2i = 5747.062489199895 +step = 13 +lam = 10000000.0 +chi2o = 5747.062489133743 +chi2i = 5747.062489199895 +chi2o < chi2i - next iteration +ntries = 21 +chi2i - chi2o < tol : exit +diff2=Falseinput_mshtfit_publication_cheb5a.dat diff --git a/publication_tests/buffer/mshtfit_publication_shortmin_out.dat b/publication_tests/buffer/mshtfit_publication_shortmin_out.dat new file mode 100644 index 0000000..8598112 --- /dev/null +++ b/publication_tests/buffer/mshtfit_publication_shortmin_out.dat @@ -0,0 +1,93 @@ +Starting new buffer... 
+inputnam = input_mshtfit_publication_cheb5b.dat +LM meth 1 +ntries = 1 +chi2i = 14996.168159534356 +step = 1 +lam = 0.001 +chi2o = 5757.553093443755 +chi2i = 14996.168159534356 +chi2o < chi2i - next iteration +ntries = 2 +chi2i = 5757.553093443755 +step = 1 +lam = 0.0001 +chi2o = 5753.952189066632 +chi2i = 5757.553093443755 +chi2o < chi2i - next iteration +ntries = 3 +chi2i = 5753.952189066632 +step = 1 +lam = 1e-05 +chi2o = 167895.25292698928 +chi2i = 5753.952189066632 +step = 2 +lam = 0.0001 +chi2o = 5747.014812610185 +chi2i = 5753.952189066632 +chi2o < chi2i - next iteration +ntries = 4 +chi2i = 5747.014812610185 +step = 1 +lam = 1e-05 +chi2o = 185568.01416140993 +chi2i = 5747.014812610185 +step = 2 +lam = 0.0001 +chi2o = 5748.075615315986 +chi2i = 5747.014812610185 +step = 3 +lam = 0.001 +chi2o = 5748.177081861058 +chi2i = 5747.014812610185 +step = 4 +lam = 0.01 +chi2o = 5748.320584433638 +chi2i = 5747.014812610185 +step = 5 +lam = 0.1 +chi2o = 5748.216187143117 +chi2i = 5747.014812610185 +step = 6 +lam = 1.0 +chi2o = 5747.548831056266 +chi2i = 5747.014812610185 +step = 7 +lam = 10.0 +chi2o = 5746.798337847987 +chi2i = 5747.014812610185 +chi2o < chi2i - next iteration +ntries = 5 +chi2i = 5746.798337847987 +step = 1 +lam = 0.001 +chi2o = 5748.138283189406 +chi2i = 5746.798337847987 +step = 2 +lam = 0.01 +chi2o = 5748.284491926071 +chi2i = 5746.798337847987 +step = 3 +lam = 0.1 +chi2o = 5748.212478063861 +chi2i = 5746.798337847987 +step = 4 +lam = 1.0 +chi2o = 5747.675484011465 +chi2i = 5746.798337847987 +step = 5 +lam = 10.0 +chi2o = 5746.914004947866 +chi2i = 5746.798337847987 +step = 6 +lam = 100.0 +chi2o = 5746.79941299915 +chi2i = 5746.798337847987 +step = 7 +lam = 1000.0 +chi2o = 5746.798185759394 +chi2i = 5746.798337847987 +chi2o < chi2i - next iteration +ntries = 6 +chi2i - chi2o < tol : exit +diff2=Falseinput_mshtfit_publication_cheb5b.dat diff --git a/publication_tests/pars/mshtfit_publication_longmin_out.dat 
b/publication_tests/pars/mshtfit_publication_longmin_out.dat new file mode 100644 index 0000000..2b55b97 --- /dev/null +++ b/publication_tests/pars/mshtfit_publication_longmin_out.dat @@ -0,0 +1,77 @@ +uv parameters (delu,etau,cu1-6) +0.3231802826346196 0 +3.8193447177404902 0 +-1.4790548326024462 0 +0.7660025304264685 0 +-0.3507487513245998 0 +0.21760597361821568 0 +-0.09124873859308547 0 +0.050828814377725386 0 +dv parameters (deld,etad,cd1-6) +0.12662824587568577 0 +3.982330510540003 0 +-0.6585212650241054 0 +-0.9963193544674869 0 +0.921333950051162 0 +-0.0664097167591226 0 +-0.11939531921744188 0 +0.17354231943120665 0 +sea parameters (AS,delS,etaS,cS1-6) +44.548062830019695 0 +-3.818205036831553e-05 0 +7.234497361744915 0 +-1.775925111896799 0 +1.3757350892631226 0 +-0.8840404297924626 0 +0.4601395697562931 0 +-0.17014231047507294 0 +0.03865306619890488 0 +s+ parameters (Asp,delsp,etasp,csp1-6) +0.08432248873755688 0 +-3.818205036831553e-05 0 +2.634050092199919 0 +5.664559974250834 0 +-1.49914583803317 0 +3.0420745731884318 0 +-1.6412568678642165 0 +1.3542335339981215 0 +-0.5933600417058799 0 +Gluon parameters (etagp,delgp,cg1-4,etagm,delgm) +5.082584857437277 0 +0.6328402336238066 0 +-0.7256170495882449 0 +1.005814307749848 0 +-0.26507890152018887 0 +0.17103579763891572 0 +-0.8949942245437073 0 +1.7102060131747245 0 +-0.16279510930111443 0 +s- parameters (Asm,delsm,etasm, cs1-4) +-0.00021449844304874093 0 +0.0014906899461692123 0 +7.476855031950739 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +db/ub parameters (Arho,etarho,crho1-6) +2.8882928979166604 0 +2.722878946030263 0 +-0.4257715551550677 0 +-1.1925984037066522 0 +1.4868970037128817 0 +-1.0852571516825906 0 +0.4823879311091083 0 +-0.10795203062170497 0 +charm parameters (Ac,etac,cc1-6) +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 diff --git a/publication_tests/pars/mshtfit_publication_shortmin_out.dat b/publication_tests/pars/mshtfit_publication_shortmin_out.dat new file mode 100644 
index 0000000..9954eda --- /dev/null +++ b/publication_tests/pars/mshtfit_publication_shortmin_out.dat @@ -0,0 +1,77 @@ +uv parameters (delu,etau,cu1-6) +0.33869402321599046 0 +3.7911746289835935 0 +-1.4657698067657148 0 +0.7619271740728625 0 +-0.35227766637714336 0 +0.2281620215453435 0 +-0.09620238410730757 0 +0.055183834088221674 0 +dv parameters (deld,etad,cd1-6) +0.12856069297793873 0 +3.9797546235646126 0 +-0.6520851966112214 0 +-1.0010023705079036 0 +0.9167715550920073 0 +-0.05237437558262494 0 +-0.1292481879613925 0 +0.1807500511638142 0 +sea parameters (AS,delS,etaS,cS1-6) +45.33050714456857 0 +-0.0005868965682626107 0 +7.214923118760184 0 +-1.7778202968885553 0 +1.3781054404617266 0 +-0.8864061059709096 0 +0.4614854490052164 0 +-0.17075431271931935 0 +0.03875069984791884 0 +s+ parameters (Asp,delsp,etasp,csp1-6) +0.2783476494842286 0 +-0.0005868965682626107 0 +3.2784405390724864 0 +0.48141017788320234 0 +0.43531002356349435 0 +0.358105832724931 0 +-0.19590839669722512 0 +0.30688730073245785 0 +-0.15277468168399974 0 +Gluon parameters (etagp,delgp,cg1-4,etagm,delgm) +5.220200599548408 0 +0.6816593602021185 0 +-0.7221389981372724 0 +1.038528146218595 0 +-0.2705525707981772 0 +0.18264551369788973 0 +-0.596879131187242 0 +1.504815443893027 0 +-0.21035098202338054 0 +s- parameters (Asm,delsm,etasm, cs1-4) +-0.00021305540179974164 0 +0.0014906899461692123 0 +7.52815689494676 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +db/ub parameters (Arho,etarho,crho1-6) +2.960373417745138 0 +2.6800966945975553 0 +-0.4588362546068339 0 +-1.149553092425683 0 +1.4709676975053088 0 +-1.0967204556173658 0 +0.4961499163919127 0 +-0.1149656984403958 0 +charm parameters (Ac,etac,cc1-6) +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 +0.0 0 diff --git a/src/__pycache__/chebyshevs.cpython-38.pyc b/src/__pycache__/chebyshevs.cpython-38.pyc deleted file mode 100644 index 420576d..0000000 Binary files a/src/__pycache__/chebyshevs.cpython-38.pyc and /dev/null differ diff --git 
a/src/__pycache__/chebyshevs.cpython-39.pyc b/src/__pycache__/chebyshevs.cpython-39.pyc deleted file mode 100644 index 5fa3f33..0000000 Binary files a/src/__pycache__/chebyshevs.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/chi2s.cpython-38.pyc b/src/__pycache__/chi2s.cpython-38.pyc deleted file mode 100644 index 2b4831c..0000000 Binary files a/src/__pycache__/chi2s.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/chi2s.cpython-39.pyc b/src/__pycache__/chi2s.cpython-39.pyc deleted file mode 100644 index bc86629..0000000 Binary files a/src/__pycache__/chi2s.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/data_theory.cpython-38.pyc b/src/__pycache__/data_theory.cpython-38.pyc deleted file mode 100644 index dea1659..0000000 Binary files a/src/__pycache__/data_theory.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/data_theory.cpython-39.pyc b/src/__pycache__/data_theory.cpython-39.pyc deleted file mode 100644 index 4abee19..0000000 Binary files a/src/__pycache__/data_theory.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/error_calc.cpython-38.pyc b/src/__pycache__/error_calc.cpython-38.pyc deleted file mode 100644 index 4de170c..0000000 Binary files a/src/__pycache__/error_calc.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/error_calc.cpython-39.pyc b/src/__pycache__/error_calc.cpython-39.pyc deleted file mode 100644 index 2604f31..0000000 Binary files a/src/__pycache__/error_calc.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/global_pars.cpython-38.pyc b/src/__pycache__/global_pars.cpython-38.pyc deleted file mode 100644 index cfad5e8..0000000 Binary files a/src/__pycache__/global_pars.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/global_pars.cpython-39.pyc b/src/__pycache__/global_pars.cpython-39.pyc deleted file mode 100644 index 21884ba..0000000 Binary files a/src/__pycache__/global_pars.cpython-39.pyc and /dev/null differ 
diff --git a/src/__pycache__/inputs.cpython-38.pyc b/src/__pycache__/inputs.cpython-38.pyc deleted file mode 100644 index 3a8eb44..0000000 Binary files a/src/__pycache__/inputs.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/inputs.cpython-39.pyc b/src/__pycache__/inputs.cpython-39.pyc deleted file mode 100644 index 8b0ff97..0000000 Binary files a/src/__pycache__/inputs.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/levmar.cpython-38.pyc b/src/__pycache__/levmar.cpython-38.pyc deleted file mode 100644 index f2ab849..0000000 Binary files a/src/__pycache__/levmar.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/levmar.cpython-39.pyc b/src/__pycache__/levmar.cpython-39.pyc deleted file mode 100644 index 446a186..0000000 Binary files a/src/__pycache__/levmar.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/lhapdf_funs.cpython-38.pyc b/src/__pycache__/lhapdf_funs.cpython-38.pyc deleted file mode 100644 index 35d4dca..0000000 Binary files a/src/__pycache__/lhapdf_funs.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/lhapdf_funs.cpython-39.pyc b/src/__pycache__/lhapdf_funs.cpython-39.pyc deleted file mode 100644 index fc53565..0000000 Binary files a/src/__pycache__/lhapdf_funs.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/minpars.cpython-39.pyc b/src/__pycache__/minpars.cpython-39.pyc deleted file mode 100644 index f59d8ac..0000000 Binary files a/src/__pycache__/minpars.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/msht_theory.cpython-39.pyc b/src/__pycache__/msht_theory.cpython-39.pyc deleted file mode 100644 index c0babd6..0000000 Binary files a/src/__pycache__/msht_theory.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/outputs.cpython-38.pyc b/src/__pycache__/outputs.cpython-38.pyc deleted file mode 100644 index dcc442c..0000000 Binary files a/src/__pycache__/outputs.cpython-38.pyc and /dev/null differ diff --git 
a/src/__pycache__/outputs.cpython-39.pyc b/src/__pycache__/outputs.cpython-39.pyc deleted file mode 100644 index 150bc24..0000000 Binary files a/src/__pycache__/outputs.cpython-39.pyc and /dev/null differ diff --git a/src/__pycache__/pdfs.cpython-38.pyc b/src/__pycache__/pdfs.cpython-38.pyc deleted file mode 100644 index 249fd2d..0000000 Binary files a/src/__pycache__/pdfs.cpython-38.pyc and /dev/null differ diff --git a/src/__pycache__/pdfs.cpython-39.pyc b/src/__pycache__/pdfs.cpython-39.pyc deleted file mode 100644 index cbd4067..0000000 Binary files a/src/__pycache__/pdfs.cpython-39.pyc and /dev/null differ diff --git a/src/chi2s.py b/src/chi2s.py index 8fc94fc..33955dd 100644 --- a/src/chi2s.py +++ b/src/chi2s.py @@ -1,9 +1,14 @@ from global_pars import * +import functools from validphys.api import API import scipy.linalg as la from data_theory import * from validphys.calcutils import calc_chi2 from validphys.covmats import dataset_inputs_t0_covmat_from_systematics, dataset_inputs_covmat_from_systematics + +from validphys.api import API +from validphys.convolution import central_predictions + from pdfs import * from inputs import * from lhapdf_funs import * @@ -71,17 +76,15 @@ def af_matcalc(afree): return afree_mat -def chi2min_fun(afree,jac_calc,hess_calc): - - +def chi2min_fun(afree, jac_calc = False, hess_calc = False, vp_pdf=None): + """ + Compute the chi2 for a MSHTPDF for the parameters arfree. 
+ If jac_calc is True + """ err=False - parin=initpars() - pdfparsii=parset(afree,parin) - dload_pars.xarr_tot=xgrid_calc() - # print('pdfparsi = ',pdfparsi) if not jac_calc and not hess_calc: @@ -95,6 +98,15 @@ def chi2min_fun(afree,jac_calc,hess_calc): pdf_pars.pdfparsi=sumrules(pdfparsii) + # Reset the vp_pdf if not explicitly given + if vp_pdf is None: + parin = initpars() + pdf_parameters_raw = parset(afree, parin, are_free = pdf_pars.par_isf) + pdf_parameters = sumrules(pdf_parameters_raw) + vp_pdf = MSHTPDF(name = "pdf", pdf_parameters = pdf_parameters, pdf_function = "msht") + + # TODO Perhaps at this point we want to update the parameters in vp_pdf or to create a new one? + # note that upon first call at this point the parameters in pdfparsii are equal to those in the PDF if fit_pars.deld_const: pdf_pars.deld_arr=np.zeros((4*pdf_pars.npar_free+1)) @@ -103,9 +115,6 @@ def chi2min_fun(afree,jac_calc,hess_calc): pdf_pars.delu_arr[0]=pdf_pars.pdfparsi[1] # print(pdf_pars.pdfparsi[10]) - - - jac=np.zeros((pdf_pars.npar_free)) hess=np.zeros((pdf_pars.npar_free,pdf_pars.npar_free)) hessp=np.zeros((pdf_pars.npar_free,pdf_pars.npar_free)) @@ -119,11 +128,13 @@ def chi2min_fun(afree,jac_calc,hess_calc): pdflabel_p2arr=np.empty(pdf_pars.npar_free+1, dtype='U256') if(jac_calc): - eps_arr=np.zeros((pdf_pars.npar_free+1)) chi2_pars.eps_arr_newmin=eps_arr - for ip in range(0,pdf_pars.npar_free+1): + # TODO + # this loop creates the LHAPDF grids for each free parameter + pdf_pars.derivatives =[] + for ip in range(0,pdf_pars.npar_free+1): idir_j=pdf_pars.idir+ip name=inout_pars.label+'_run'+str(idir_j) # name=inout_pars.label+'_irep'+str(fit_pars.irep)+'_run'+str(idir_j) @@ -135,24 +146,28 @@ def chi2min_fun(afree,jac_calc,hess_calc): pdf_pars.PDFlabel_cent=name else: parin=pdf_pars.pdfparsi.copy() - if fit_pars.newmin: - (parin1,eps_arr[ip])=parinc(parin,ip-1,1) - chi2_pars.eps_arr_newmin[ip]=eps_arr[ip] - else: - (parin1,eps_arr[ip])=parinc(parin,ip-1,1) + 
(parin1,eps_arr[ip])=parinc(parin,ip-1,1) + chi2_pars.eps_arr_newmin[ip]=eps_arr[ip] pdf_pars.parinarr[ip,:]=parin1 if fit_pars.deld_const: pdf_pars.deld_arr[ip]=parin1[10] pdf_pars.delu_arr[ip]=parin1[1] - if(pdf_pars.uselha): + # TODO what about this chi2_pars.ipdf_newmin=ip - initlha(name,pdf_pars.lhapdfdir) + if ip > 0: + parameter_index = pdf_pars.par_free_i[ip - 1] + pdf_derivative = vp_pdf.make_derivative(parameter_index) + pdf_pars.derivatives.append(pdf_derivative) + if DEBUG: + initlha(name,pdf_pars.lhapdfdir) + writelha(name,pdf_pars.lhapdfdir,parin1) + pdf_pars.PDFlabel=name pdf_pars.parin_newmin_reset=True - writelha(name,pdf_pars.lhapdfdir,parin1) + ####################### if chi2_pars.diff_2 or fit_pars.pos_const: @@ -175,22 +190,20 @@ def chi2min_fun(afree,jac_calc,hess_calc): pdf_pars.delu_arr[pdf_pars.npar_free+ip]=parin1[1] if(pdf_pars.uselha): - + # TODO what about this 2 initlha(name,pdf_pars.lhapdfdir) pdf_pars.PDFlabel=name + print("Here 2") writelha(name,pdf_pars.lhapdfdir,parin1) - if chi2_pars.diff_2: + if chi2_pars.diff_2: + # TODO: vp_pdf should go through here as well (out0,out1,jac,hessd2)=jaccalc_d2(pdflabel_arr,pdflabel_marr,eps_arr,hess_calc,fit_pars.imindat,fit_pars.imaxdat) out=out0+out1 hess=hessd2.copy() - else: print('JACCALC') - if fit_pars.newmin: - (jac,hess,out0,out1,hessp)=jaccalc_newmin(pdflabel_arr,pdflabel_marr,eps_arr,hess_calc) - else: - (jac,hess,out0,out1,hessp)=jaccalc(pdflabel_arr,pdflabel_marr,eps_arr,hess_calc) + (jac,hess,out0,out1,hessp)=jaccalc_newmin(pdflabel_arr,pdflabel_marr,eps_arr,hess_calc, vp_pdf=vp_pdf) out=out0+out1 @@ -199,7 +212,6 @@ def chi2min_fun(afree,jac_calc,hess_calc): idir_j=pdf_pars.idir+ip name=inout_pars.label+'_run'+str(idir_j) # name=inout_pars.label+'_irep'+str(fit_pars.irep)+'_run'+str(idir_j) - dellha(name) pdf_pars.idir+=pdf_pars.npar_free+1 @@ -209,40 +221,41 @@ def chi2min_fun(afree,jac_calc,hess_calc): idir_j=pdf_pars.idir+ip name=inout_pars.label+'_run'+str(idir_j) # 
name=inout_pars.label+'_irep'+str(fit_pars.irep)+'_run'+str(idir_j) - dellha(name) pdf_pars.idir+=pdf_pars.npar_free else: + if DEBUG: + # TODO: to be removed + # when computing the chi2 no need to do anything with the PDF - name=inout_pars.label+'_run'+str(pdf_pars.idir) - # name=inout_pars.label+'_irep'+str(fit_pars.irep)+'_run'+str(pdf_pars.idir) + name=inout_pars.label+'_run'+str(pdf_pars.idir) + parin1=pdf_pars.pdfparsi.copy() + pdf_pars.parinarr[0,:]=parin1 + pdf_pars.iPDF=0 - parin1=pdf_pars.pdfparsi.copy() + # write to temp lhapdf grid to be used by nnpdf code + if(pdf_pars.uselha): + # TODO: this might not be needed at all + # in any case here writelha uses always msht + chi2_pars.ipdf_newmin=0 - pdf_pars.parinarr[0,:]=parin1 - pdf_pars.iPDF=0 + initlha(name, pdf_pars.tmp_lhapdfdir) + pdf_pars.PDFlabel=name + writelha(name,pdf_pars.tmp_lhapdfdir,parin1) - # write to temp lhapdf grid to be used by nnpdf code - if(pdf_pars.uselha): - initlha(name,pdf_pars.lhapdfdir) - pdf_pars.PDFlabel=name - chi2_pars.ipdf_newmin=0 - writelha(name,pdf_pars.lhapdfdir,parin1) - # calculate chi2 - chi=chi2totcalc() + print("> Calculating chi2 <") + chi=chi2totcalc(vp_pdf=vp_pdf) out=chi[0]+chi[1] # exp + positivity out0=chi[0] - if(pdf_pars.uselha): - dellha(name) # delete temporary folder pdf_pars.idir+=1 # iterate up so new folder - print('chi2/N_dat=',out/chi2_pars.ndat) - print('chi2tot (no pos)=',out0) - print('pos pen =',chi[1]) - print('chi2tot (no pos)/N_dat=',out0/chi2_pars.ndat) - + ndat = chi2_pars.ndat + print(f"chi2/N_dat={out/ndat:.5}") + print(f"chi2tot (no pos)={out0:.5}") + print(f"pos pen ={chi[1]:.5}") + print(f"chi2tot (no pos)/N_dat={out0/ndat:.5}") # if jac_calc and hess_calc: # print('testing...') @@ -259,13 +272,14 @@ def chi2min_fun(afree,jac_calc,hess_calc): return(out,jac,hess,err,hessp) -def chi2corr(imin,imax): - +def chi2corr(imin, imax, vp_pdf=None, theta_idx=None): + # TODO: add docstr + """ """ if(pdf_closure.pdpdf): 
(out,theorytot,cov,covin)=chi2corr_pdf() diffs_out=0. else: - (out,theorytot,cov,covin,diffs_out)=chi2corr_global(imin,imax) + (out,theorytot,cov,covin,diffs_out)=chi2corr_global(imin, imax, vp_pdf, theta_idx=theta_idx) return (out,theorytot,cov,covin,diffs_out) @@ -660,8 +674,11 @@ def chi2corr_ind(i): return (out,ndat,dlabel) -def chi2corr_global(imin,imax): - +def chi2corr_global(imin, imax, vp_pdf=None, theta_idx=None): + """Compute chi2 for the global dataset. + Takes from index 0 of the dataset all the way to index imax + as defined in ``global_pars.fit_pars.dataset_40``. + """ if fit_pars.nlo_cuts: # intersection=[{"dataset_inputs": dload_pars.dscomb, "theoryid": 212}] @@ -729,15 +746,23 @@ def chi2corr_global(imin,imax): # outputfile_label=open('outputs/pseudodata/datalabels.dat','w') + # TODO: in principle this entire loop could be replaced by a direct call to calc_chi2 with the following input + # 1. covmat -> needs information on t0 (on/off), datasets, cuts + # 2. data -> needs datasets, cuts, replica seed (if needed) + # 3. 
theory results -> data, cuts, theory + # if the replicas were prepared beforehand that would speed up things though + + vp_input = {"use_cuts": "internal", "theoryid": fit_pars.theoryidi} + if vp_pdf is not None: + vp_input["pdf"] = vp_pdf + all_ds_input = [] + for i in range(imin,imax+1): dataset_testii=fit_pars.dataset_40[i] - - # print(fit_pars.dataset_40[i]) - inptt = { - "dataset_input": dataset_testii, - "use_cuts": "internal", - "theoryid": fit_pars.theoryidi, - } + + inptt = {**vp_input, "dataset_input": dataset_testii} + all_ds_input.append(dataset_testii) + # if fit_pars.nlo_cuts: # # inptt = { # # "cuts_intersection_spec": [{"theoryid": 212, "theoryid": 211}], @@ -759,8 +784,17 @@ def chi2corr_global(imin,imax): # "use_cuts": "internal", # "theoryid": fit_pars.theoryidi, # } - - theory=theory_calc(i,dataset_testii,inptt,fit_pars.cftrue[i]) + + if vp_pdf is not None and theta_idx is not None: + # computing derivative of th prediction wrt free parameter theta_idx + theory = vp_pdf.derivative_th_predictions("internal", fit_pars.theoryidi, dataset_testii, theta_idx) + elif vp_pdf is not None: + # computing central value of th prediction. + # In order to use this aslo to compute the derivative we need to modify vp API allowing central_predictions to take 2 pdfs as input + theory = API.central_predictions(**inptt).values[:,0] + else: + theory=theory_calc(i,dataset_testii,inptt,fit_pars.cftrue[i]) + fit_pars.preds_stored[str(dataset_testii["dataset"])]=theory # for j in range (0,len(theory)): @@ -827,8 +861,41 @@ def chi2corr_global(imin,imax): # print(len(theorytot)) # exit() + if vp_pdf is not None: + # Get the covmat for all the dataset we have calculated chi2 for. 
The order is the same as the vector of theories + # TODO: eventually we'll try to skip it and compute the chi2 directly + # TODO: in principle nlo intersenction cut should already be part of vp_input at this stage + cov = API.dataset_inputs_covmat_t0_considered(**vp_input, dataset_inputs = all_ds_input, use_t0=chi2_pars.t0, t0pdfset=vp_pdf) + covin = la.inv(cov) + + if chi2_pars.t0: + dload_pars.covt0=cov + dload_pars.covt0_inv=covin + dload_pars.dcov=0 + # LHL ADDED NEW - so that t0 def is used in minimisation + elif chi2_pars.t0_noderiv: + if dload_pars.dcov==1: + try: + # cov = API.dataset_inputs_t0_covmat_from_systematics(**inpt0) + cov = API.dataset_inputs_covmat_t0_considered(**vp_input, dataset_inputs = all_ds_input, use_t0=True, t0pdfset=vp_pdf) + covin=la.inv(cov) + except (la.LinAlgError,ValueError) as err: + print('t0 cov may be ill behaved, trying exp cov instead...') + cov=dload_pars.covexp + covin=dload_pars.covexp_inv + print('t0 cov2...') + # cov = API.dataset_inputs_covmat_t0_considered(**vp_input, dataset_inputs = all_ds_input, use_t0=True, t0pdfset=vp_pdf) + # # print(cov) + # covin = la.inv(cov) + dload_pars.covt0=cov + dload_pars.covt0_inv=covin + print('...finish') + dload_pars.dcov=0 + else: + dload_pars.covexp=cov + dload_pars.covexp_inv=covin - if(chi2_pars.t0): + elif(chi2_pars.t0): print('t0 cov1...') if fit_pars.nlo_cuts: inpt0 = dict(dataset_inputs=dload_pars.dscomb, theoryid=fit_pars.theoryidi, use_cuts="fromintersection", cuts_intersection_spec=intersection, t0pdfset=pdf_pars.PDFlabel, use_t0=True) @@ -1160,12 +1227,14 @@ def jac_fun(afree): print('Jacobian = ',out) return out -def chi2min(afree): - - +def chi2min(afree = None, vp_pdf=None): + """Compute the chi2 that will be used during the minimization + But disable the computation of the hessian and jacobian. + Takes as input the free parameters of the problem. 
+ """ hess_calc=False jac_calc=False - outarr=chi2min_fun(afree,jac_calc,hess_calc) + outarr=chi2min_fun(afree,jac_calc,hess_calc, vp_pdf=vp_pdf) out=outarr[0] return out @@ -1246,11 +1315,13 @@ def chilim_fill(nd,chi,dlab): chi2_pars.chi0_ind_arr.append(chi) # print(i,chilim/chi,cl68,cl50,cl68/cl50,nd) -def chi2totcalc(): +def chi2totcalc(vp_pdf=None): + """If a VP pdf is used, use VP to compute the chi2""" - - chiarr=np.zeros(fit_pars.imaxdat-fit_pars.imindat) - chiarr[0]=chi2corr(fit_pars.imindat,fit_pars.imaxdat-1)[0] + # Prepare an array to save the chi2 for each dataset + chiarr = np.zeros(fit_pars.imaxdat - fit_pars.imindat) + + chiarr[0] = chi2corr(fit_pars.imindat, fit_pars.imaxdat - 1, vp_pdf=vp_pdf)[0] ndtot=0 chi2totind=0. @@ -1367,9 +1438,10 @@ def chi2totcalc(): out1=0. if fit_pars.pos_const: - out31=pos_calc(fit_pars.pos_data31) + + out31=pos_calc(fit_pars.pos_data31, vp_pdf=vp_pdf) if(fit_pars.pos_40): - out40=pos_calc(fit_pars.pos_data40) + out40=pos_calc(fit_pars.pos_data40, vp_pdf=vp_pdf) chi2pos=out31+out40 out1=chi2pos else: @@ -1429,6 +1501,7 @@ def hess_ij_calc_not0_new(theoryi,theoryj,outi,outj,cov,covin): return out def hess_ij_calc_newmin(diffi,diffj,covin): + """Compute off-diagonal entries of the hessian""" out=diffi@covin@diffj*2. 
@@ -1562,6 +1635,7 @@ def hess_ii_calc_d2(diff2,cov,covin): return out def hess_ii_calc_newmin(diff2,cov,covin): + """Compute diagonal entries of the hessian""" @@ -1815,7 +1889,10 @@ def jaccalc_d0(label_arr,eps_arr,il,ih): return jacarr -def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): +def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc, vp_pdf=None): + """Compute the derivative of the chi2 wrt the free parameters + (given in Eq.6 https://people.duke.edu/~hpgavin/lm.pdf) and the Hessian + (given after Eq.9 https://people.duke.edu/~hpgavin/lm.pdf)""" print('JACCALC NEWMIN') @@ -1827,7 +1904,9 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): pdf_pars.PDFlabel=label_arr[0].strip() pdf_pars.iPDF=0 - (chiarr[0],theory0,cov0,cov0in,diffs0)=chi2corr(fit_pars.imindat,imax-1) + + # compute chi2, theory predictions, cov, cov_inv, and theory - data + (chiarr[0],theory0,cov0,cov0in,diffs0)=chi2corr(fit_pars.imindat,imax-1,vp_pdf) tarr=[theory0] covarr=[cov0] @@ -1841,7 +1920,15 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): # print(ip) pdf_pars.iPDF=ip pdf_pars.PDFlabel=label_arr[ip].strip() - (chiarr[ip],theory,cov,covin,diffs_out)=chi2corr(fit_pars.imindat,imax-1) + + parameter_index = pdf_pars.par_free_i[ip - 1] + + # compute derivative of the PDF wrt the free parameter parameter_index + pdf_derivative = vp_pdf.make_derivative(parameter_index) + + # compute J_i, i.e. 
the derivative of the theory predictions wrt the free parameter i=parameter_index (theory) + # NB: for the time being vp_pdf=vp_pdf, but if using API.central_predictions to compute derivative then it should be vp_pdf=pdf_derivative + (chiarr[ip],theory,cov,covin,diffs_out)=chi2corr(fit_pars.imindat,imax-1, vp_pdf=vp_pdf, theta_idx=parameter_index) tarr.append(theory) diffsarr.append(diffs_out) if(chi2_pars.uset0cov): @@ -1860,12 +1947,16 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): # print(tarr[ip]) - test1=-2.*tarr[ip]@cov0in@diffsarr[0] + + # compute the derivative of the chi2 wrt the free parameter parameter_index + # Eq.6 https://people.duke.edu/~hpgavin/lm.pdf + test1=-2.*tarr[ip]@cov0in@diffsarr[0] jacarr[ip]=test1 # print(np.sum(tarr[ip])) if(hess_calc): + # compute the diagonal entries of the hessian, defined as J cov0in J^T if ip==1: tii0=hess_ii_calc_newmin(tarr[ip],cov0,cov0in) print(tii0) @@ -1873,12 +1964,14 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): else: tii0=hess_ii_calc_newmin(tarr[ip],cov0,cov0in) tii.append(tii0) - + # I guess this should also happen only if(hess_calc)? for jp in range(1,ip+1): if ip==jp: + # fill the diagonal hii=tii[ip-1] hessarr[ip,jp]=hii else: + # compute the off-diagonal entries of the hessian (lower-triangular entries) hij=hess_ij_calc_newmin(tarr[ip],tarr[jp],cov0in) hessarr[ip,jp]=hij @@ -1886,8 +1979,10 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): hessarr=np.delete(hessarr,0,0) hessarr=np.delete(hessarr,0,1) + # compute the missing entries of the hessian using the fact that it s symmetric hessarr=hessarr+hessarr.T-np.diag(hessarr.diagonal()) + # save the chi2 out0=chiarr[0] chiarr=np.zeros((pdf_pars.npar_free+1)) @@ -1895,6 +1990,7 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): penarr=np.zeros((pdf_pars.npar_free+1)) if fit_pars.pos_const: + # what about this? 
for ip in range(0,pdf_pars.npar_free+1): pdf_pars.PDFlabel=label_arr[ip].strip() out31=pos_calc(fit_pars.pos_data31) @@ -1907,6 +2003,7 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): chiarr[ip]=chiarr[ip]+out1 if fit_pars.pos_const: + # what about this? for ip in range(1,pdf_pars.npar_free+1): pdf_pars.PDFlabel=label_arrm[ip].strip() out31m=pos_calc(fit_pars.pos_data31) @@ -1938,7 +2035,8 @@ def jaccalc_newmin(label_arr,label_arrm,eps_arr,hess_calc): return(jacarr,hessarr,out0,out1,hessparr) -def jaccalc(label_arr,label_arrm,eps_arr,hess_calc): +def jaccalc(label_arr,label_arrm,eps_arr,hess_calc, vp_pdf=None): + # TODO print('JACCALC OLDMIN') @@ -1950,7 +2048,7 @@ def jaccalc(label_arr,label_arrm,eps_arr,hess_calc): pdf_pars.PDFlabel=label_arr[0].strip() pdf_pars.iPDF=0 - (chiarr[0],theory0,cov0,cov0in,diffs_out)=chi2corr(fit_pars.imindat,imax-1) + (chiarr[0],theory0,cov0,cov0in,diffs_out)=chi2corr(fit_pars.imindat,imax-1,vp_pdf) tarr=[theory0] covarr=[cov0] @@ -1963,7 +2061,7 @@ def jaccalc(label_arr,label_arrm,eps_arr,hess_calc): # print(ip) pdf_pars.iPDF=ip pdf_pars.PDFlabel=label_arr[ip].strip() - (chiarr[ip],theory,cov,covin,diffs_out)=chi2corr(fit_pars.imindat,imax-1) + (chiarr[ip],theory,cov,covin,diffs_out)=chi2corr(fit_pars.imindat,imax-1,vp_pdf) tarr.append(theory) if(chi2_pars.uset0cov): covarr.append(cov) @@ -2024,7 +2122,7 @@ def jaccalc(label_arr,label_arrm,eps_arr,hess_calc): hessarr=np.delete(hessarr,0,0) hessarr=np.delete(hessarr,0,1) - hessarr=hessarr+hessarr.T-np.diag(hessarr.diagonal()) + hessarr=hessarr+hessarr.T-np.diag(hessarr.diagonal()) # hessian is symmetric, so fill the remaining entries out0=chiarr[0] diff --git a/src/data_theory.py b/src/data_theory.py index f5465e7..25f008f 100644 --- a/src/data_theory.py +++ b/src/data_theory.py @@ -14,7 +14,7 @@ from validphys.kinematics import * import operator import lhapdf -from validphys.commondataparser import load_commondata +from nnpdf_data.commondataparser import 
load_commondata from validphys.convolution import OP import functools from validphys.pdfbases import evolution @@ -73,18 +73,20 @@ def del_pen_calc(): return (chi0d,chi0u,diffd,diffu,hessd,hessu,idv,iuv) -def pos_calc(pdata): +def pos_calc(pdata, vp_pdf=None): tot=0. totdiff=0. + if vp_pdf is None: + vp_pdf = pdf_pars.PDFlabel for j in range(0,len(pdata)): if fit_pars.theoryidi==40001000 or fit_pars.theoryidi==50001000: - api_predictions = API.positivity_predictions_data_result(theoryid=fit_pars.theoryidi, pdf=pdf_pars.PDFlabel, posdataset=pdata[j],use_cuts="internal") + api_predictions = API.positivity_predictions_data_result(theoryid=fit_pars.theoryidi, pdf=vp_pdf, posdataset=pdata[j],use_cuts="internal") else: - api_predictions = API.positivity_predictions_data_result(theoryid=fit_pars.theoryidi, pdf=pdf_pars.PDFlabel, posdataset=pdata[j]) + api_predictions = API.positivity_predictions_data_result(theoryid=fit_pars.theoryidi, pdf=vp_pdf, posdataset=pdata[j]) out=api_predictions.central_value # print(pdata[j]) @@ -203,6 +205,9 @@ def _predictions_2pdfs_new(dataset, fkfunc, pdf1, pdf2=None): all replicas, central, etc) according to the provided ``fkfunc``, which should have the same interface as e.g. ``fk_predictions``. """ + if not DEBUG: + print("We should not be entering here! 
Who's calling me?") + raise Exception opfunc = OP[dataset.op] if dataset.cuts is None: @@ -667,4 +672,4 @@ def plot_data_x(idat,ds,pred): # outputfile.writelines(L) # outputfile.write('\n') - \ No newline at end of file + diff --git a/src/global_pars.py b/src/global_pars.py index 3e04ece..2b6a64b 100644 --- a/src/global_pars.py +++ b/src/global_pars.py @@ -1,6 +1,9 @@ import numpy as np from validphys.loader import Loader +DEBUG = False +# TODO: The newmin parameter should always be true + class load_nnpdf: l=Loader() @@ -112,6 +115,8 @@ class pdf_pars: PDFlabel_cent='init' # absolute path to LHAPDF directory where grids are stored lhapdfdir='init' + # path to the temporary LHAPDF files + tmp_lhapdfdir = None # Path # counter to ensure new lhapdf grid used for every new theory evaluation idir=0 # if true then use lhapdf grids for theory evaluation @@ -998,7 +1003,7 @@ class fit_pars: preds_stored={} datapath='' theories_path='' - newmin=False + newmin=True # This should always be set to True dataset_ii_global='' pdf_dict=[] diff --git a/src/inputs.py b/src/inputs.py index 5654ffc..d382b63 100644 --- a/src/inputs.py +++ b/src/inputs.py @@ -1,7 +1,6 @@ from global_pars import * import numpy as np - - +from outputs import BUFFER_F def readincov(): @@ -56,15 +55,18 @@ def readincov(): return (afin,hess,jac) def readin(): + """Read the input file inputs::input_file + + Uses ``np.loadtxt`` to read the file a few times once per flavour. + These parameters are the initial parameters of the fit. 
+ For each set of parameters, the first column represents the parameter value + and the second whether it is a free parameter (1) or whether it should be considered fixed (0) + """ parfree_def=False inputfile='input/'+inout_pars.inputnam - with open(inputfile, 'r') as fp: - x = fp.readlines() - num_lines = len([l for l in x if l.strip(' \n') != '']) - # distuv=np.loadtxt(inputfile,skiprows=1,max_rows=8) nuv=basis_pars.i_uv_max-basis_pars.i_uv_min-1 distuv=np.loadtxt(inputfile,skiprows=1,max_rows=nuv) @@ -181,8 +183,7 @@ def readin(): afin=np.delete(afin,0) pdf_pars.par_free_i=np.delete(pdf_pars.par_free_i,0) - - outputfile=open('outputs/buffer/'+inout_pars.label+'.dat','w') + outputfile = (BUFFER_F / f"{inout_pars.label}.dat").open("w") outputfile.write("Starting new buffer...") outputfile.write("\n") diff --git a/src/levmar.py b/src/levmar.py index 5f35635..e4b6643 100644 --- a/src/levmar.py +++ b/src/levmar.py @@ -259,6 +259,12 @@ def levmar_meth3(afree): return afout def levmar(afree): + """ + Levenberg–Marquardt algorithm, takes the set of free parameters + and runs the minimization. + + Returns the same array with the minimized parameters. + """ lev_comb=False lev_update=True @@ -300,15 +306,13 @@ def levmar(afree): # lmin=1e-30 - hessmax=np.zeros((pdf_pars.npar_free,pdf_pars.npar_free)) hessmaxp=np.zeros((pdf_pars.npar_free,pdf_pars.npar_free)) - + + # TODO use a context manager for this file outputfile=open('outputs/buffer/'+inout_pars.label+'.dat','a') outputfile.write("LM meth 1 ") outputfile.write("\n") - - if del_grat: nsteps=35 @@ -364,7 +368,7 @@ def levmar(afree): # lam=0.001 - + # TODO skip this condition if(min_pars.sgd): hess_calc=True # think has to be for lev update else: @@ -375,11 +379,7 @@ def levmar(afree): dload_pars.dcov=1 - (chi2i,jaci,hessi,err,hessp)=chi2min_fun(af,jac_calc,hess_calc) - - - print('chi2i = ',chi2i) print('jac =',-jaci/2.) 
diff --git a/src/lhapdf_funs.py b/src/lhapdf_funs.py index 1154f60..22cc5c8 100644 --- a/src/lhapdf_funs.py +++ b/src/lhapdf_funs.py @@ -1,35 +1,31 @@ +from pathlib import Path from global_pars import * from pdfs import * import lhapdf import os import shutil as sh + def initlha(name,lhdir): # dirin=pdf_pars.lhapdfdir+'NNPDF40_nnlo_pch_as_01180/'+'NNPDF40_nnlo_pch_as_01180.info' dirin='input/MSHT20nnlo_as118_mem1.info' - - dirlha=lhdir+name+'/' - - - try: - os.mkdir(dirlha) - except OSError as error: - print(error) + lhdir = Path(lhdir) + dirlha = lhdir / name + dirlha.mkdir(exist_ok = True, parents=True) - sh.copy(dirin,dirlha+name+'.info') + sh.copy(dirin, dirlha / f"{name}.info") def writelha(name,lhdir,parin): + print(f"Writing {name}") + # import ipdb; ipdb.set_trace() - output=lhdir+name+'/'+name+'_0000.dat' - - # print(pdf_pars.parinarr[0,:]) - # print(parin) + lhdir = Path(lhdir) + output = lhdir / name / f"{name}_0000.dat" - with open(output,'w') as outputfile: - + with output.open("w") as outputfile: outputfile.write('PdfType: replica') outputfile.write('\n') outputfile.write('Format: lhagrid1') @@ -38,7 +34,7 @@ def writelha(name,lhdir,parin): # PDFlabelmsht='MSHT20nnlo_as118' PDFlabelmsht='NNPDF40_nnlo_pch_as_01180' - inputf=pdf_pars.lhapdfdir+PDFlabelmsht+'/'+PDFlabelmsht+'_0000.dat' + inputf=Path(pdf_pars.lhapdfdir) / PDFlabelmsht / f"{PDFlabelmsht}_0000.dat" filein=open(inputf,"r") content=filein.readlines() @@ -48,8 +44,6 @@ def writelha(name,lhdir,parin): # xarr=np.loadtxt(inputf,skiprows=4,max_rows=1) # msht xarr=np.loadtxt(inputf,skiprows=3,max_rows=1) - - nx=1000 @@ -113,11 +107,9 @@ def writelha(name,lhdir,parin): # os.quit() for ix in range(0,nx): - for iq in range(0,2): xin=xarr[ix] - - + # TODO: ask vp which is the starting scale for whatever theory is being used if fit_pars.theoryidi==211 or fit_pars.theoryidi==40001000 or fit_pars.theoryidi==50001000: qin=1.00 elif fit_pars.theoryidi==200: @@ -131,7 +123,7 @@ def 
writelha(name,lhdir,parin): pdfout=pdfs[fit_pars.lhrep].xfxQ(i,xin,qin) else: if iq==0: - if fit_pars.newmin and chi2_pars.ipdf_newmin > 0: + if chi2_pars.ipdf_newmin > 0: pdfout=pdfs_diff(i,xin) else: pdfout=pdfs_msht(i,parin,xin) @@ -145,7 +137,7 @@ def writelha(name,lhdir,parin): # pdfout=pdfs[0].xfxQ(i,xin,qin) else: if iq==0: - if fit_pars.newmin and chi2_pars.ipdf_newmin > 0: + if chi2_pars.ipdf_newmin > 0: pdfout=pdfs_diff(i,xin) else: pdfout=pdfs_msht(i,parin,xin) @@ -157,7 +149,7 @@ def writelha(name,lhdir,parin): pdfout=pdfs[fit_pars.lhrep].xfxQ(21,xin,qin) else: if iq==0: - if fit_pars.newmin and chi2_pars.ipdf_newmin > 0: + if chi2_pars.ipdf_newmin > 0: pdfout=pdfs_diff(0,xin) else: pdfout=pdfs_msht(0,parin,xin) @@ -165,7 +157,6 @@ def writelha(name,lhdir,parin): pdfout=1. pdfarr[5]=pdfout -# np.savetxt(outputfile,pdfarr,fmt="%.7E",delimiter=' ', newline=' ') np.savetxt(outputfile,pdfarr,fmt="%.14E",delimiter=' ', newline=' ') outputfile.write('\n') @@ -265,7 +256,3 @@ def writelha_end(name,lhdir,parin): outputfile.write('\n') outputfile.write('---') - -def dellha(name): - dirlha=pdf_pars.lhapdfdir+name+'/' - sh.rmtree(dirlha) diff --git a/src/outputs.py b/src/outputs.py index 393b8d9..383fb38 100644 --- a/src/outputs.py +++ b/src/outputs.py @@ -2,16 +2,26 @@ from pdfs import * import numpy as np import os -from reportengine.compat import yaml +from reportengine.utils import yaml_safe from lhapdf_funs import * +OUTPUT_F = Path("outputs") +BUFFER_F = OUTPUT_F / "buffer" +PARS_F = OUTPUT_F / "pars" +EVGRIDS_F = OUTPUT_F / "evgrids" +PLOTS_F = OUTPUT_F / "plots" +RES_F = OUTPUT_F / "res" +COV_F = OUTPUT_F / "cov" +for PF in [BUFFER_F, PARS_F, EVGRIDS_F, PLOTS_F, RES_F, COV_F]: + PF.mkdir(exist_ok=True, parents=True) + + def covmatout(hessi,jaci): # hessin=hessi.copy() # hessin=la.inv(hessin) pars=pdf_pars.pdfparsi.copy() - - output='outputs/cov/'+inout_pars.label+'.dat' + output = COV_F / f"{inout_pars.label}.dat" print('call covmatout') print(output) @@ 
-132,36 +142,19 @@ def evgrido(): test[195,13]=0. if inout_pars.pd_output: - dirgrid0='outputs/evgrids/'+inout_pars.pd_output_lab - dirgrid1='outputs/evgrids/'+inout_pars.pd_output_lab+'/nnfit' - dirgrid='outputs/evgrids/'+inout_pars.pd_output_lab+'/nnfit/replica_'+str(fit_pars.irep+1)+'/' + dirgrid0 = EVGRIDS_F / inout_pars.pd_output_lab else: - dirgrid0='outputs/evgrids/'+inout_pars.label - dirgrid1='outputs/evgrids/'+inout_pars.label+'/nnfit' - dirgrid='outputs/evgrids/'+inout_pars.label+'/nnfit/replica_'+str(fit_pars.irep+1)+'/' - + dirgrid0 = EVGRIDS_F / inout_pars.label - try: - os.mkdir(dirgrid0) - except OSError as error: - print(error) - - try: - os.mkdir(dirgrid1) - except OSError as error: - print(error) - - try: - os.mkdir(dirgrid) - except OSError as error: - print(error) + dirgrid1 = dirgrid0 / "nnfit" + dirgrid = dirgrid1 / f"replica_{str(fit_pars.irep+1)}" + + dirgrid.mkdir(exist_ok = True, parents = True) if inout_pars.pd_output: - output=dirgrid+inout_pars.pd_output_lab+'.exportgrid' + output = dirgrid / f"{inout_pars.pd_output_lab}.exportgrid" else: - output=dirgrid+inout_pars.label+'.exportgrid' - - + output = dirgrid / f"{inout_pars.label}.exportgrid" data = { "replica": 1, @@ -173,7 +166,7 @@ def evgrido(): with open(output,'w') as outputfile: - yaml.dump(data, outputfile) + yaml_safe.dump(data, outputfile) def resout_nofit(pospeni,chi2t0i,chi2expi,n): outputfile=open('outputs/res/'+inout_pars.label+'.dat','w') @@ -313,9 +306,7 @@ def parsout(): def plotout(): - outputdir='outputs/plots/' - - output=outputdir+inout_pars.label+'.dat' + output = PLOTS_F / f"{inout_pars.label}.dat" msht='MSHT20nnlo_as118' nnpdf='NNPDF40_nnlo_pch_as_01180' diff --git a/src/pdfs.py b/src/pdfs.py index 6aed497..019ee12 100644 --- a/src/pdfs.py +++ b/src/pdfs.py @@ -1,11 +1,188 @@ +import functools from global_pars import * from chebyshevs import * +from copy import deepcopy import numpy as np -import os -from scipy.integrate import quadrature -from scipy.misc import 
derivative as deriv - -def func_pdfs_diff(eps,ipdf,x,iorder): +try: + from scipy.integrate import quadrature + from scipy.misc import derivative as deriv +except ImportError: + from scipy.integrate import quad as quadrature + from scipy.differentiate import derivative as deriv + +from validphys.core import PDF +from validphys.lhapdfset import LHAPDFSet +from validphys.api import API + +def _derivative(func, fl, parameters, x, eps): + """The scipy.misc.derivative function is deprecated and won't exist in newer versions + of scipy. + This implements the same derivative for order 5, evaluated at 0 + already specialized for the PDF functions which take only the variation as input + From: https://github.com/scipy/scipy/blob/92d2a8592782ee19a1161d0bf3fc2241ba78bb63/scipy/_lib/_finite_differences.py#L69 + """ + # TODO: we can probably have the exact derivative and that's it? + weights = np.array([1, -8, 8, -1]) / 12.0 + funvals = np.array([func(fl, p, x) for p in parameters]) + return np.sum(funvals * weights) / np.sum(eps) + +def _derivative_th_prediction(func, use_cuts, theoryid, dataset_input, parameters, eps): + """As _derivative but specialized for the th_predictions function""" + weights = np.array([1, -8, 8, -1]) / 12.0 + funvals = np.array([func(use_cuts, theoryid, dataset_input, p) for p in parameters]) + return np.sum(funvals * weights[:,np.newaxis],axis=0) / np.sum(eps) + + +class MSHTSet(LHAPDFSet): + """Provides a few lhapdf-like functions to trick vp into thinking it is an LHAPDFset + Can work with any function with a signature of (flavour, parameters, x) + """ + + def __init__( + self, + pdf_function, + parameters, + name, + error_type="replicas", + variation=None, + theta_idx=None, + ): + self._error_type = error_type + self._name = name + self._flavors = None + self._lhapdf_set = [None] + + self._parameters = parameters + self._pdf_function = pdf_function + self._derivatives = [] + self._variation = variation + + if variation is not None: + # Compute the 
4 variations needed for the derivative + for k in [-2, -1, 1, 2]: + self._derivatives.append( + parinc_newmin( + self._parameters, theta_idx, k * variation, true_idx=True + )[0] + ) + + @functools.cached_property + def is_derivative(self): + """Whether this is a PDF or its derivative""" + return self._variation is not None + + def xfxQ(self, x, Q, n, fl): + """Return the PDF value for one single point for one single member + Note that in this case both scale (Q) and member (n) are ignored. + """ + if fl == 21: + fl = 0 + if self.is_derivative: + return _derivative( + self._pdf_function, fl, self._derivatives, x, self._variation + ) + else: + return self._pdf_function(fl, self._parameters, x) + + def grid_values(self, flavors: np.ndarray, xgrid: np.ndarray, qgrid: np.ndarray): + """Returns the PDF values for every member for the required flavors, x, q + + For the time being assume there is only one Q and only one member + Return shape: (members, flavours, xgrid, qgrid) + """ + out_ret = [] + for fl in flavors: + tmp = [] + for x in xgrid: + tmp.append(self.xfxQ(x, None, None, fl)) + out_ret.append(tmp) + return np.array(out_ret).reshape(1, len(flavors), -1, 1) + + +class MSHTPDF(PDF): + """ + Creates a MSHT PDF object, extends validphys' PDF object + to utilize a MSHT set in the background instead of lhapdf / pdfflow + """ + + def __init__( + self, + name="msht", + boundary=None, + pdf_function="msht", + pdf_parameters=None, + variation=None, + theta_idx=None, + Q=1.0, + ): + if isinstance(pdf_function, str): + if pdf_function != "msht": + raise NotImplementedError(f"{pdf_function=}") + pdf_function = pdfs_msht + + self._pdf_function = pdf_function + self._pdf_parameters = pdf_parameters + self._variation = variation + self._theta_idx = theta_idx + + # Add a hash of the parameter to the name to make them unique and update cached quantities + name = f"{name}_{hash(pdf_parameters.tostring())}" + super().__init__(name, None) + + # Create some fake info: + self._info = 
{"NumMembers": 1} + + @functools.cached_property + def _lhapdf_set(self): + return MSHTSet( + self._pdf_function, + self._pdf_parameters, + self.name, + variation=self._variation, + theta_idx=self._theta_idx + ) + + def make_derivative(self, idx, eps=1e-5): + """Constructs the derivative of the PDF with respect the parameter idx.""" + variation = np.maximum(1e-12, eps * np.abs(self._pdf_parameters[idx])) + return self.__class__( + "derivative", + pdf_function=self._pdf_function, + pdf_parameters=self._pdf_parameters, + variation=variation, + theta_idx=idx, + ) + + def load(self): + return self._lhapdf_set + + def load_t0(self): + t0_version = deepcopy(self._lhapdf_set) + t0_version._error_type = "t0" + return t0_version + + def th_predictions(self, use_cuts, theoryid, dataset_input, parameters): + """Compute theory predictions given the PDF""" + pdf = self.__class__( + pdf_function=self._pdf_function, + pdf_parameters=parameters + ) + return API.central_predictions(use_cuts=use_cuts, theoryid=theoryid, pdf=pdf, dataset_input=dataset_input).values[:,0] + + def derivative_th_predictions(self, use_cuts, theoryid, dataset_input, theta_idx): + """Compute the derivative of the theory predictions wrt the free parameter theta_idx""" + derivatives = [] + variation = np.maximum(1e-12, 1e-5 * np.abs(self._pdf_parameters[theta_idx])) + # Compute the 4 variations needed for the derivative + for k in [-2, -1, 1, 2]: + derivatives.append( + parinc_newmin( + self._pdf_parameters, theta_idx, k * variation, true_idx=True + )[0] + ) + return _derivative_th_prediction(self.th_predictions, use_cuts, theoryid, dataset_input, derivatives, variation) + +def func_pdfs_diff(eps,ipdf=1,x=0.0,iorder=5): # print(eps) @@ -25,36 +202,33 @@ def func_pdfs_diff(eps,ipdf,x,iorder): return out -def pdfs_diff(ipdf,x): - +def pdfs_diff(ipdf, x=None, parin=None): eps=1e-5 # (pars,eps_out)=parinc_newmin(pdf_pars.parinarr[0,:],chi2_pars.ipdf_newmin-1,eps) - 
eps=parinc_eps(pdf_pars.parinarr[0,:],chi2_pars.ipdf_newmin-1,eps) - - # # pdfout=pdfs_msht(ipdf,pars,x)-pdfs_msht(ipdf,pdf_pars.parinarr[0,:],x) - # pdfout=func_pdfs_diff(eps,ipdf,x)-func_pdfs_diff(0.,ipdf,x) - # # pdfout/=chi2_pars.eps_arr_newmin[chi2_pars.ipdf_newmin] - # pdfout/=eps - # derivative(func, x0, dx=1.0 + eps=parinc_eps(pdf_pars.parinarr[0,:],chi2_pars.ipdf_newmin-1,eps) pdf_pars.parin_newmin_counter=0 iorder=5 pdfout=deriv(func_pdfs_diff,0.,eps,args=(ipdf,x,iorder),order=iorder) - pdf_pars.parin_newmin_reset=False +# test = functools.partial(func_pdfs_diff, x=x, ipdf=ipdf) +# ret = _derivative(test, eps, ipdf, pdf_pars.parinarr[0,:], x) - # print(pdfout,test) - # os.quit() + mypdf = pdf_pars.derivatives + if pdfout > 1e-5 and chi2_pars.ipdf_newmin >0: + pass + + pdf_pars.parin_newmin_reset=False return pdfout def pdfs_msht(ipdf,pars,x): - - + """ + Compute a MSHT PDF given a set of parameters ``pars'' for x=x + """ etaq=pars[57] - if etaq < 0.0: etaqt=np.power(1.-x,-etaq) else: @@ -766,6 +940,8 @@ def int_g2_msht(ain,xmin): def sumrules(parin): + """ + Modify the set of input parameters so that the sum rules are fulfilled""" out=parin.copy() @@ -845,7 +1021,7 @@ def sumrules(parin): return out def initpars(): - + """Prepare the whole set of initial parameters""" auv=uv_init() adv=dv_init() asea=sea_init() @@ -855,7 +1031,6 @@ def initpars(): adbub=dbub_init() fitcharm=fitcharm_init() - # pdfpars=np.concatenate((auv,adv,asea,asp,ag,asm,adbub)) pdfpars=np.concatenate((auv,adv,asea,asp,ag,asm,adbub,fitcharm)) @@ -922,19 +1097,20 @@ def dbub_init(): return a -def parset(af,parin): - - afi=af.copy() - out=parin.copy() - - - for i in range(1,basis_pars.n_pars): - if pdf_pars.par_isf[i]: - out[i]=afi[0] - afi=np.delete(afi,0) - +def parset(af, parin, are_free=None): + """ + Takes an array of free parameters (``af``) + and an array with all parameters in the problem (``parin``). 
+ Returns an array where the free parameters of ``parin`` are substituted with those in ``af``. + Whether the parameters are to be considered free or not is given by the ``are_free`` array. + Note: the parameters in ``af`` should be ordered like in ``parin``. + """ + if are_free is None: + are_free = pdf_pars.par_isf + out = parin.copy() + out[are_free.astype(bool)] = af return out def parcheck(pars): @@ -1049,8 +1225,8 @@ def parcheck(pars): if(basis_pars.g_second_term): - if etagm < -1: - print('PARCHECK : etagm < -1') + if etagm < 0: + print('PARCHECK : etagm < 0') err=True if delgm < -1: @@ -1072,9 +1248,12 @@ def parinc_eps(parin,ipar,eps): return eps_n -def parinc_newmin(parin,ipar,eps): +def parinc_newmin(parin,ipar,eps, true_idx=False): - npar=pdf_pars.par_free_i[ipar] + if true_idx: + npar = ipar + else: + npar=pdf_pars.par_free_i[ipar] # eps_n=eps*np.abs(parin[npar]) eps_n=eps