Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 7 additions & 8 deletions benchmarks/bench_yadism.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def yadism_like(self):

def dis_tp_like(self):
new_t_card = {}
new_t_card["grids"] = True
new_t_card["grids"] = False
new_t_card["hid"] = self.t_card["NfFF"]
new_t_card["mass"] = parameters.default_masses(new_t_card["hid"])
new_t_card["fns"] = "fonll"
Expand All @@ -50,7 +50,7 @@ class Observable_card:
def __init__(self, obs_names, q_min, q_max, restype, x_fixed=0.01, q_fixed=30):

x_grid = make_grid(30, 30, x_min=1e-6)
q2_grid = np.geomspace(q_min**2, q_max**2, 60)
q2_grid = np.geomspace(q_min**2, q_max**2, 30)
q2_fixed = q_fixed**2

obs = observables.default_card
Expand All @@ -63,7 +63,7 @@ def __init__(self, obs_names, q_min, q_max, restype, x_fixed=0.01, q_fixed=30):
{"x": float(x_fixed), "Q2": float(q2), "y": 0.5} for q2 in q2_grid
]
kinematics.extend(
[{"x": float(x), "Q2": float(q2_fixed), "y": 0.5} for x in x_grid[4:-3]]
[{"x": float(x), "Q2": float(q2_fixed), "y": 0.5} for x in x_grid[15:-15]]
)
for fx in obs_names:
obs["observables"][fx] = kinematics
Expand Down Expand Up @@ -149,17 +149,16 @@ def benchmarkFO_bottom(pto, pdf_name):
obj = BenchmarkRunner(th_obj, obs_obj, pdf_name)
obj.run()

# TODO: how do we check bottom mass effects in this code?
def benchmarkF_M_charm(pto, pdf_name):
obs_names = [f"F2_charm", f"FL_charm"]
obs_names = ["XSHERANCAVG_charm"] #[f"F2_charm", f"FL_charm"]
obs_obj = Observable_card(obs_names, q_min=1.5, q_max=5, q_fixed=3, restype="M")
th_obj = TheoryCard(pto, hid=4)
obj = BenchmarkRunner(th_obj, obs_obj, pdf_name)
obj.run()


def benchmarkFO_charm(pto, pdf_name):
obs_names = [f"F2_charm", f"FL_charm"]
obs_names = ["XSHERANCAVG_charm"] # [f"F2_charm", f"FL_charm"]
obs_obj = Observable_card(
obs_names, q_min=1.2, q_max=1.5, q_fixed=1.4, restype="FO"
)
Expand All @@ -171,8 +170,8 @@ def benchmarkFO_charm(pto, pdf_name):
if __name__ == "__main__":

pdf_name = "NNPDF40_nnlo_pch_as_01180"
# obj = benchmarkF_M_bottom(pto=1, pdf_name=pdf_name)
# obj = benchmarkF_M_bottom(pto=2, pdf_name=pdf_name)
# obj = benchmarkFO_bottom(pto=1, pdf_name=pdf_name)

obj = benchmarkF_M_charm(pto=2, pdf_name=pdf_name)
# obj = benchmarkFO_charm(pto=2, pdf_name=pdf_name)
obj = benchmarkFO_charm(pto=2, pdf_name=pdf_name)
1 change: 1 addition & 0 deletions dis_tp.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@ theory_cards = "project/theory_cards"
operator_cards = "project/operator_cards"
# outputs
results = "project/results"
ymldb = "project/ymldb"
8 changes: 4 additions & 4 deletions src/dis_tp/TildeCoeffFunc.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,8 +108,8 @@ def Cg_3_til_reg(z, Q, p, nf, use_analytic=False):
+ P2(p) * Cg_1_m_reg(z, Q, p, nf - 1)
- (
Cg_1_m_reg(z, Q, p, nf - 1) * Mgg_2_loc(z, p, nf)
+ Convolute(Cg_1_m_reg, Mgg_2_reg, z, Q, p, nf, nf-1)
+ Convolute_plus_matching(Cg_1_m_reg, Mgg_2_sing, z, Q, p, nf, nf-1)
+ Convolute(Cg_1_m_reg, Mgg_2_reg, z, Q, p, nf, nf - 1)
+ Convolute_plus_matching(Cg_1_m_reg, Mgg_2_sing, z, Q, p, nf, nf - 1)
)
- 2
* Cb_0_loc(z, Q, p, nf)
Expand Down Expand Up @@ -152,7 +152,7 @@ def Cq_3_til_reg(z, Q, p, nf, use_analytic=False):
return (
Cq_3_m_reg(z, Q, p, nf - 1)
+ 2 * Cq_2_m_reg(z, Q, p, nf - 1) * Mgg_1_loc(z, p, nf)
- Convolute(Cg_1_m_reg, Mgq_2_reg, z, Q, p, nf, nf-1)
- Convolute(Cg_1_m_reg, Mgq_2_reg, z, Q, p, nf, nf - 1)
- 2
* (
Cb_1_loc(z, Q, p, nf) * Mbq_2(z, p, nf)
Expand Down Expand Up @@ -185,7 +185,7 @@ def CLg_3_til_reg(z, Q, p, nf, use_analytic=False):
+ P2(p) * CLg_1_m_reg(z, Q, p, nf - 1)
- (
CLg_1_m_reg(z, Q, p, nf - 1) * Mgg_2_loc(z, p, nf)
+ Convolute(CLg_1_m_reg, Mgg_2_reg, z, Q, p, nf, nf-1)
+ Convolute(CLg_1_m_reg, Mgg_2_reg, z, Q, p, nf, nf - 1)
+ Convolute_plus_matching(CLg_1_m_reg, Mgg_2_sing, z, Q, p, nf, nf - 1)
)
- 2
Expand Down
38 changes: 33 additions & 5 deletions src/dis_tp/cli/compute.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,14 +124,42 @@ def plot_observable(plot_dir: str, obs: str, order: str, h_id: str):
@pdf
@h_id
@n_cores
@click.argument(
"author",
type=str,
)
@click.option(
"-yad",
"--use_yadism",
is_flag=True,
type=bool,
default=False,
required=False,
help="If True compute the k-factor w.r.t. Yadism",
)
@click.option(
"-th",
"--th_description",
type=str,
default="NNPDF4.0 pch with alphas(MZ)=0.118",
required=False,
help="TheoryInput to be stored in the CF file",
)
def generate_matching_grids(
o_card: str, t_card: str, pdf: str, h_id: int, n_cores: int
o_card: str,
t_card: str,
pdf: str,
h_id: int,
author: str,
n_cores: int,
use_yadism: bool,
th_description: str,
):
"""Generate k-factors.
USAGE: dis_tp k-factors HERA_NC_318GEV_EAVG_SIGMARED_CHARM 400 NNPDF40_nnlo_as_01180 4 -n 4

USAGE: dis_tp k-factors HERACOMB_SIGMARED_C 400 NNPDF40_nnlo_pch_as_01180 4 "Your Name" [-n 4 -yad -th "Theory Input"]
Comment thread
andreab1997 marked this conversation as resolved.
"""

obj = k_factors.KfactorRunner(t_card, o_card, pdf)
obj = k_factors.KfactorRunner(t_card, o_card, pdf, use_yadism)
obj.compute(int(h_id), n_cores)
obj.save_results()
obj.save_results(author, th_input=th_description)
6 changes: 1 addition & 5 deletions src/dis_tp/configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,7 @@ def enhance_paths(configs_):
configuration
"""
# required keys without default
for key in [
"theory_cards",
"operator_cards",
"results",
]:
for key in ["theory_cards", "operator_cards", "results", "ymldb"]:
if key not in configs_["paths"]:
raise ValueError(f"Configuration is missing a 'paths.{key}' key")
if pathlib.Path(configs_["paths"][key]).anchor == "":
Expand Down
176 changes: 141 additions & 35 deletions src/dis_tp/k_factors.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import pandas as pd
import yadism
import yaml
from datetime import date

from .runner import Runner
from . import configs, parameters
Expand Down Expand Up @@ -39,6 +40,8 @@ def dis_tp_like(self, hid):
new_t_card["mass"] = parameters.default_masses(hid)
new_t_card["fns"] = "fonll"
new_t_card["order"] = "N" * self.t_card["PTO"] + "LO"
if self.t_card["PTO"] == 3:
new_t_card["order"] = "N3LO"
return new_t_card


Expand All @@ -60,6 +63,7 @@ def __init__(self, configs, name):
print("Warning, setting TargetDIS = proton")
obs["TargetDIS"] = "proton"
self.o_card = obs
self.dataset_name = name

def yadism_like(self):
return self.o_card
Expand All @@ -82,52 +86,154 @@ def dis_tp_like(self, pdf_name, restype):


class KfactorRunner:
def __init__(self, t_card_name, o_card_name, pdf_name):
def __init__(self, t_card_name, dataset_name, pdf_name, use_yadism):
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is the use_yadism flag needed for the NNLO part?

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes this is a nice feature, you can do kfact both DISTP/DISTP ((pto+1)/pto) or DISTB/YADISM (same order)

Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

And can you also do YADISM/YADISM in principle?

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

no you can't... do you want to support this feature?

Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe in the future this can be handy but for the moment let's just open an issue (I am doing that)

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nice

cfg = configs.load()
cfg = configs.defaults(cfg)

# Load the ymldb file
with open(
cfg["paths"]["ymldb"] / f"{dataset_name}.yaml", encoding="utf-8"
) as f:
ymldb = yaml.safe_load(f)

self.theory = TheoryCard(cfg, t_card_name)
self.observables = Observable_card(cfg, o_card_name)

o_card_names = ymldb["operands"]
self.observables = []
for ocard_list in o_card_names:
for o_card_name in ocard_list:
self.observables.append(Observable_card(cfg, o_card_name))

self.pdf_name = pdf_name
self.use_yadism = use_yadism
self.result_path = cfg["paths"]["results"]
self.dataset_name = ymldb["target_dataset"]
self.operation = ymldb["operation"]
# self.conversion_factor = ymldb["conversion_factor"]
self._results = None

def run_yadism(self):
output = yadism.run_yadism(
self.theory.yadism_like(), self.observables.yadism_like()
)
yad_pred = output.apply_pdf(lhapdf.mkPDF(self.pdf_name))
yad_pred = {}
for observable in self.observables:
output = yadism.run_yadism(
self.theory.yadism_like(), observable.yadism_like()
)
yad_pred[observable.dataset_name] = output.apply_pdf(
lhapdf.mkPDF(self.pdf_name)
)
return yad_pred

def run_dis_tp(self, hid, n_cores):
# TODO: here we nned to run FO and M type
restype = "FO"
runner = Runner(
self.observables.dis_tp_like(self.pdf_name, restype),
self.theory.dis_tp_like(hid),
)
runner.compute(n_cores)
return runner.results
# TODO: how do we treat bottom mass effects in this code?
# TODO: here we need to run FO and M type
restype = "M"
distp_pred = {}
for observable in self.observables:
runner = Runner(
observable.dis_tp_like(self.pdf_name, restype),
self.theory.dis_tp_like(hid),
)
runner.compute(n_cores)
distp_pred[observable.dataset_name] = runner.results
return distp_pred

def compute(self, hid, n_cores):
# TODO: cache results somewahere
yad_log = self.run_yadism()
dis_tp_log = self.run_dis_tp(hid, n_cores)
self._results = self._log(dis_tp_log, yad_log)
# TODO: cache results somewhere
mumerator_log = self.run_dis_tp(hid, n_cores)
if self.use_yadism:
denominator_log = self.run_yadism()
else:
self.theory.t_card["PTO"] -= 1
denominator_log = self.run_dis_tp(hid, n_cores)

@staticmethod
def _log(dis_tp_log, yad_log):
for obs in yad_log:
my_obs = obs.split("_")[0]
yad_df = pd.DataFrame(yad_log[obs]).rename(columns={"result": "yadism"})
dis_tp_df = dis_tp_log[my_obs].rename(columns={"result": "dis_tp"})
log_df = pd.concat([yad_df, dis_tp_df], axis=1).T.drop_duplicates().T

# construct some nice log table
log_df.drop("q", axis=1, inplace=True)
log_df.drop("y", axis=1, inplace=True)
log_df.drop("error", axis=1, inplace=True)
log_df["k-factor"] = np.abs(log_df.dis_tp - log_df.yadism)
return log_df

# TODO: add a save mathod
def save_results(self):
logs_df = self._log(mumerator_log, denominator_log, self.use_yadism)
self._results = self.build_kfactor(logs_df)
print(self._results)

@staticmethod
def _log(mumerator_log, denominator_log, use_yadism):
    """Merge numerator and denominator predictions into one log table per dataset.

    Parameters
    ----------
    mumerator_log : dict
        Dataset name -> per-observable DIS_TP result frames (numerator run).
        NOTE(review): "mumerator" is a typo for "numerator"; kept as-is since
        renaming a public parameter would break keyword callers.
    denominator_log : dict
        Dataset name -> per-observable results for the denominator run: raw
        yadism output tables when ``use_yadism`` is True, otherwise DIS_TP
        result frames.
    use_yadism : bool
        Selects the prediction column names: ``dis_tp``/``yadism`` when
        comparing against yadism, ``N3LO``/``NNLO`` for two DIS_TP runs.

    Returns
    -------
    dict
        Dataset name -> ``pandas.DataFrame`` holding the kinematics plus the
        renamed prediction columns (no ``k-factor`` column yet).
    """
    logs_df = {}
    # loop on operands (datasets); numerator and denominator are assumed to
    # list the same datasets in the same order -- enforced by name check below
    for (num_name, num), (den_name, den) in zip(
        mumerator_log.items(), denominator_log.items()
    ):

        if num_name != den_name:
            raise ValueError(
                "Numerator dataset name do not coincide with denominator."
            )

        # loop on SF (one entry per observable, e.g. "F2_charm")
        for obs in den:
            # DIS_TP results appear to be keyed by the bare SF name
            # ("F2", "FL", ...) -- TODO confirm against Runner.results
            my_obs = obs.split("_")[0]
            if use_yadism:
                den_df = pd.DataFrame(den[obs]).rename(columns={"result": "yadism"})
                num_df = num[my_obs].rename(columns={"result": "dis_tp"})
            else:
                den_df = den[my_obs].rename(columns={"result": "NNLO"})
                num_df = num[my_obs].rename(columns={"result": "N3LO"})
            # side-by-side concat, then drop duplicated kinematic columns via
            # transpose -> drop_duplicates -> transpose back
            log_df = pd.concat([den_df, num_df], axis=1).T.drop_duplicates().T
            # construct some nice log table: strip columns not needed downstream
            log_df.drop("y", axis=1, inplace=True)
            if use_yadism:
                log_df.drop("q", axis=1, inplace=True)
                log_df.drop("error", axis=1, inplace=True)
                # log_df["k-factor"] = log_df.dis_tp / log_df.yadism
            else:
                # DIS_TP tables carry q; expose Q2 instead for readability
                log_df["Q2"] = log_df.q**2
                log_df.drop("q", axis=1, inplace=True)
                # log_df["k-factor"] = log_df.N3LO / log_df.NNLO
            logs_df[num_name] = log_df
    return logs_df

def build_kfactor(self, logs_df):
    """Combine the per-dataset log tables into the final k-factor table.

    Parameters
    ----------
    logs_df : dict
        Mapping of dataset name to a ``pandas.DataFrame`` as produced by
        ``_log``. When ``self.use_yadism`` is True the frames carry the
        columns ``dis_tp`` and ``yadism``; otherwise ``N3LO`` and ``NNLO``.

    Returns
    -------
    pandas.DataFrame
        For ``operation == "null"``: the (last) frame with a ``k-factor``
        column added in place — the single-dataset use case. For
        ``operation == "RATIO"``: the bin-by-bin ratio of the two operand
        tables, with its own ``k-factor`` column.

    Raises
    ------
    ValueError
        If ``logs_df`` is empty or ``self.operation`` is not supported.
    """
    if not logs_df:
        # previously this fell through to an obscure NameError on `log_df`
        raise ValueError("No log tables available to build k-factors from.")

    if self.operation == "null":
        # annotate every frame in place; the last one is returned,
        # which matches the expected single-dataset case
        for log_df in logs_df.values():
            if self.use_yadism:
                log_df["k-factor"] = log_df.dis_tp / log_df.yadism
            else:
                log_df["k-factor"] = log_df.N3LO / log_df.NNLO
        return log_df

    if self.operation == "RATIO":
        # ratio observable: divide the two operand predictions bin by bin,
        # then take numerator/denominator of the combined columns
        data1, data2 = (*logs_df,)
        k_fact_log = logs_df[data1]
        if self.use_yadism:
            k_fact_log["dis_tp"] = logs_df[data1].dis_tp / logs_df[data2].dis_tp
            k_fact_log["yadism"] = logs_df[data1].yadism / logs_df[data2].yadism
            k_fact_log["k-factor"] = k_fact_log.dis_tp / k_fact_log.yadism
        else:
            k_fact_log["N3LO"] = logs_df[data1].N3LO / logs_df[data2].N3LO
            k_fact_log["NNLO"] = logs_df[data1].NNLO / logs_df[data2].NNLO
            k_fact_log["k-factor"] = k_fact_log.N3LO / k_fact_log.NNLO
        return k_fact_log

    raise ValueError(f"Operation {self.operation} not implemented")


def save_results(self, author, th_input):
    """Write the computed k-factors to ``CF_QCD_<dataset>.dat``.

    Parameters
    ----------
    author : str
        Name recorded in the file header.
    th_input : str
        Theory-input description recorded in the file header.

    The output format is a commented header block followed by one
    ``<k-factor> 0.0000`` line per kinematic point, taken from
    ``self._results["k-factor"]`` (so ``compute`` must run first).
    """
    if self.use_yadism:
        k_factor_type = "N3LO FONLL DIS_TP / N3LO ZM-VFNS Yadism"
    else:
        k_factor_type = "N3LO FONLL DIS_TP / NNLO FONLL DIS_TP"
    intro = [
        "********************************************************************************\n",
        f"SetName: {self.dataset_name}\n",
        f"Author: {author}\n",
        f"Date: {date.today()}\n",
        "CodesUsed: https://github.com/andreab1997/DIS_TP\n",
        f"TheoryInput: {th_input}\n",
        f"PDFset: {self.pdf_name}\n",
        # fixed typo "Warinings" and added the comma that previously glued
        # this line to the closing separator via implicit concatenation
        f"Warnings: {k_factor_type}\n",
        "********************************************************************************\n",
    ]
    res_path = self.result_path / f"CF_QCD_{self.dataset_name}.dat"
    print(f"Saving the k-factors in: {res_path}")
    with open(res_path, "w", encoding="utf-8") as f:
        f.writelines(intro)
        # ".4f" (4 decimal places) — the original ":4f" meant a field width
        # of 4, which printed 6 decimals, inconsistent with the 0.0000 column
        f.writelines([f"{k:.4f} 0.0000\n" for k in self._results["k-factor"]])
Loading