From 4433b17b13eba7d77ab7419e1f17ca55886a3708 Mon Sep 17 00:00:00 2001 From: Zahari Date: Wed, 20 Jan 2021 14:01:09 +0000 Subject: [PATCH 1/9] Add similarity cuts Add a cut option that throws away data such that predictions for two different namespaces are too different, specifically those that do not fulfill (threshold < ratio) & (ratio < 1/threshold) where the threshold is set as the `cut_similarity_threshold` parameter in the runcard. The two namespaces are defined as the cuts_intersection_spec list. Some discussion: - This isn't the smartest code I have ever written. - The use of central_predictions inside load is a bit of a hack, but it is not clear to me it can be done much better in a more "reportengine idiomatic" style. The logic is rather convoluted if you sit down to think about it. In any case validphys.convolution is too convenient for this to use anything else. - Moreover central_predictions is slow because it needs to load fktables as csv. We should probably stick lru_cache calls in various places. - Tests and documentation are missing at the moment. 
--- validphys2/src/validphys/config.py | 42 +++++++++++++++++++++++++++--- validphys2/src/validphys/core.py | 23 ++++++++++++++++ 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py index 0faab45183..7c69bf8d1f 100644 --- a/validphys2/src/validphys/config.py +++ b/validphys2/src/validphys/config.py @@ -35,6 +35,7 @@ ExperimentInput, CutsPolicy, MatchedCuts, + SimilarCuts, ThCovMatSpec, ) from validphys.loader import ( @@ -362,7 +363,20 @@ def produce_commondata(self, *, dataset_input, use_fitcommondata=False, fit=None except InconsistentMetaDataError as e: raise ConfigError(e) from e - def produce_cuts(self, *, commondata, use_cuts, rules, fit=None, theoryid=None): + def parse_cut_similarity_threshold(self, th: numbers.Real): + """Maximum relative ratio when using `fromsimilarpredictons` cuts.""" + return th + + def produce_cuts( + self, + *, + commondata, + use_cuts, + rules, + fit=None, + theoryid=None, + cut_similarity_threshold=None + ): """Obtain cuts for a given dataset input, based on the appropriate policy.""" # TODO: Put this bit of logic into loader.check_cuts @@ -384,7 +398,10 @@ def produce_cuts(self, *, commondata, use_cuts, rules, fit=None, theoryid=None): if not theoryid: raise ConfigError("theoryid must be specified for internal cuts") return self.loader.check_internal_cuts(commondata, rules) - elif use_cuts is CutsPolicy.FROM_CUT_INTERSECTION_NAMESPACE: + elif ( + use_cuts is CutsPolicy.FROM_CUT_INTERSECTION_NAMESPACE + or use_cuts is CutsPolicy.FROM_SIMILAR_PREDICTIONS_NAMESPACE + ): cut_list = [] _, nss = self.parse_from_(None, "cuts_intersection_spec", write=False) self._check_dataspecs_type(nss) @@ -401,7 +418,26 @@ def produce_cuts(self, *, commondata, use_cuts, rules, fit=None, theoryid=None): _, nscuts = self.parse_from_(None, "cuts", write=False) cut_list.append(nscuts) ndata = commondata.ndata - return MatchedCuts(cut_list, ndata=ndata) + matched_cuts = 
MatchedCuts(cut_list, ndata=ndata) + if use_cuts is CutsPolicy.FROM_CUT_INTERSECTION_NAMESPACE: + return matched_cuts + else: + if cut_similarity_threshold is None: + raise ConfigError("Expecting `cut_similarity_threshold` key.") + if len(nss) != 2: + raise ConfigError("Can only work with two namespaces") + inps = [] + for ns in nss: + with self.set_context( + ns=self._curr_ns.new_child({"cuts": matched_cuts, **ns,}) + ): + _, ds = self.parse_from_(None, "dataset", write=False) + _, pdf = self.parse_from_(None, "pdf", write=False) + print(ds.cuts) + inps.append((ds, pdf)) + return SimilarCuts(tuple(inps), cut_similarity_threshold) + + raise TypeError("Wrong use_cuts") diff --git a/validphys2/src/validphys/core.py b/validphys2/src/validphys/core.py index f862cd0627..5e7499b49a 100644 --- a/validphys2/src/validphys/core.py +++ b/validphys2/src/validphys/core.py @@ -359,6 +359,8 @@ class CutsPolicy(enum.Enum): NOCUTS = "nocuts" FROMFIT = "fromfit" FROM_CUT_INTERSECTION_NAMESPACE = "fromintersection" + FROM_SIMILAR_PREDICTIONS_NAMESPACE = "fromsimilarpredictions" + class Cuts(TupleComp): def __init__(self, name, path): @@ -396,6 +398,27 @@ def load(self): self._full = True return np.arange(self.ndata) +class SimilarCuts(TupleComp): + def __init__(self, inputs, threshold): + if len(inputs) != 2: + raise ValueError("Expecting two input tuples") + firstcuts, secondcuts = inputs[0][0].cuts, inputs[1][0].cuts + if firstcuts != secondcuts: + raise ValueError("Expecting cuts to be the same for all datasets") + self.inputs = inputs + self.threshold = threshold + super().__init__(self.inputs, self.threshold) + + def load(self): + from validphys.convolution import central_predictions + + first, second = self.inputs + # Compute matched predictions + ratio = central_predictions(*first) / central_predictions(*second) + ratio = ratio.squeeze() + passed = (self.threshold < ratio) & (ratio < 1 / self.threshold) + return passed[passed].index + def cut_mask(cuts): """Return an objects 
that will act as the cuts when applied as a slice""" From 8c785af7b5e75c457c23c5110025320643cf3315 Mon Sep 17 00:00:00 2001 From: Zahari Date: Thu, 21 Jan 2021 12:30:13 +0000 Subject: [PATCH 2/9] Use the config mechanism to load cuts Use from_: None with the right namespace override instead of calling the production rule directly. There are a few reasons for this: - The rather trivial problem that the cut_similarity_threshold parameter was not propagated because it was not being passed to the function call. The alternative would have been to add the parameter everywhere `cuts` is required. - The more serious problem that we do want to resolve things like dataset with some funky namespace settings. For this to work properly we need to execute the production rule within the right context, namely one containing the relevant dataset_input within each experiment/data_input. Alternatives are not clear since the construction of `dataset` for the similarity cuts requires inspecting the cuts. 
--- validphys2/src/validphys/config.py | 50 +++++++----------------------- 1 file changed, 12 insertions(+), 38 deletions(-) diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py index 7c69bf8d1f..e29a75c479 100644 --- a/validphys2/src/validphys/config.py +++ b/validphys2/src/validphys/config.py @@ -375,7 +375,6 @@ def produce_cuts( rules, fit=None, theoryid=None, - cut_similarity_threshold=None ): """Obtain cuts for a given dataset input, based on the appropriate policy.""" @@ -422,10 +421,12 @@ def produce_cuts( if use_cuts is CutsPolicy.FROM_CUT_INTERSECTION_NAMESPACE: return matched_cuts else: - if cut_similarity_threshold is None: - raise ConfigError("Expecting `cut_similarity_threshold` key.") if len(nss) != 2: raise ConfigError("Can only work with two namespaces") + _, cut_similarity_threshold = self.parse_from_( + None, "cut_similarity_threshold", write=False + ) + inps = [] for ns in nss: with self.set_context( @@ -437,8 +438,6 @@ def produce_cuts( inps.append((ds, pdf)) return SimilarCuts(tuple(inps), cut_similarity_threshold) - - raise TypeError("Wrong use_cuts") def produce_dataset( @@ -514,22 +513,10 @@ def parse_experiment( ) dsinputs = [self.parse_dataset_input(ds) for ds in datasets] - cds = [ - self.produce_commondata( - dataset_input=dsinp, use_fitcommondata=use_fitcommondata, fit=fit - ) - for dsinp in dsinputs - ] - cutinps = [ - self.produce_cuts( - rules=rules, - commondata=cd, - use_cuts=use_cuts, - fit=fit, - theoryid=theoryid, - ) - for cd in cds - ] + cutinps = [] + for dsinp in dsinputs: + with self.set_context(ns=self._curr_ns.new_child({"dataset_input": dsinp})): + cutinps.append(self.parse_from_(None, "cuts", write=False)[1]) # autogenerated func, from elemet_of datasets = [ @@ -1221,23 +1208,10 @@ def produce_data( """A set of datasets where correlated systematics are taken into account """ - # TODO: extract the commondata and cuts and seperate from dataset - cds = [ - self.produce_commondata( - 
dataset_input=dsinp, use_fitcommondata=use_fitcommondata, fit=fit - ) - for dsinp in data_input - ] - cutinps = [ - self.produce_cuts( - rules=rules, - commondata=cd, - use_cuts=use_cuts, - fit=fit, - theoryid=theoryid, - ) - for cd in cds - ] + cutinps = [] + for dsinp in data_input: + with self.set_context(ns=self._curr_ns.new_child({"dataset_input": dsinp})): + cutinps.append(self.parse_from_(None, "cuts", write=False)[1]) # autogenerated func, from element_of datasets = [ From 0eb5139449f85c01620161dff3c50f5e4bedaa92 Mon Sep 17 00:00:00 2001 From: Zahari Date: Fri, 22 Jan 2021 18:16:52 +0000 Subject: [PATCH 3/9] Simplify dataset parsing Trust automated mechanisms to resolve each dataset given the correct context instead of resolving some dependencies manually for no particular reason. This also allows to clean up the signatures. Take advantage of the code to process `data` for the handling of experiments. --- validphys2/src/validphys/config.py | 57 +++--------------------------- 1 file changed, 5 insertions(+), 52 deletions(-) diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py index e29a75c479..b1f087a65c 100644 --- a/validphys2/src/validphys/config.py +++ b/validphys2/src/validphys/config.py @@ -490,17 +490,7 @@ def produce_dataset( return ds @configparser.element_of("experiments") - def parse_experiment( - self, - experiment: dict, - *, - theoryid, - use_cuts, - rules, - fit=None, - check_plotting: bool = False, - use_fitcommondata=False, - ): + def parse_experiment(self, experiment: dict): """A set of datasets where correlated systematics are taken into account. 
It is a mapping where the keys are the experiment name 'experiment' and a list of datasets.""" @@ -510,29 +500,12 @@ def parse_experiment( raise ConfigError( "'experiment' must be a mapping with " "'experiment' and 'datasets', but %s is missing" % e - ) + ) from e dsinputs = [self.parse_dataset_input(ds) for ds in datasets] - cutinps = [] - for dsinp in dsinputs: - with self.set_context(ns=self._curr_ns.new_child({"dataset_input": dsinp})): - cutinps.append(self.parse_from_(None, "cuts", write=False)[1]) - # autogenerated func, from elemet_of - datasets = [ - self.produce_dataset( - rules=rules, - dataset_input=dsinp, - theoryid=theoryid, - cuts=cuts, - fit=fit, - check_plotting=check_plotting, - use_fitcommondata=use_fitcommondata, - ) - for (dsinp, cuts) in zip(dsinputs, cutinps) - ] - return DataGroupSpec(name=name, datasets=datasets, dsinputs=dsinputs) + return self.produce_data(group_name=name, data_input=dsinputs) @configparser.element_of("experiment_inputs") def parse_experiment_input(self, ei: dict): @@ -1197,36 +1170,16 @@ def produce_data( self, data_input, *, - theoryid, - use_cuts, - rules, - fit=None, - check_plotting: bool = False, - use_fitcommondata=False, group_name="data", ): """A set of datasets where correlated systematics are taken into account """ - cutinps = [] + datasets = [] for dsinp in data_input: with self.set_context(ns=self._curr_ns.new_child({"dataset_input": dsinp})): - cutinps.append(self.parse_from_(None, "cuts", write=False)[1]) + datasets.append(self.parse_from_(None, "dataset", write=False)[1]) - # autogenerated func, from element_of - datasets = [ - self.produce_dataset( - rules=rules, - dataset_input=dsinp, - theoryid=theoryid, - cuts=cuts, - fit=fit, - check_plotting=check_plotting, - use_fitcommondata=use_fitcommondata, - ) - for (dsinp, cuts) in zip(data_input, cutinps) - ] - # TODO: get rid of libnnpdf Experiment return DataGroupSpec(name=group_name, datasets=datasets, dsinputs=data_input) def _parse_data_input_from_( From 
9213e0822b61adfd8384240a60bedcc036dffc9f Mon Sep 17 00:00:00 2001 From: Zaharid Date: Mon, 25 Jan 2021 14:08:53 +0100 Subject: [PATCH 4/9] Update validphys2/src/validphys/config.py Co-authored-by: wilsonmr <33907451+wilsonmr@users.noreply.github.com> --- validphys2/src/validphys/config.py | 1 - validphys2/src/validphys/dataplots.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py index b1f087a65c..c76c2ca4bb 100644 --- a/validphys2/src/validphys/config.py +++ b/validphys2/src/validphys/config.py @@ -434,7 +434,6 @@ def produce_cuts( ): _, ds = self.parse_from_(None, "dataset", write=False) _, pdf = self.parse_from_(None, "pdf", write=False) - print(ds.cuts) inps.append((ds, pdf)) return SimilarCuts(tuple(inps), cut_similarity_threshold) diff --git a/validphys2/src/validphys/dataplots.py b/validphys2/src/validphys/dataplots.py index bcbcd94db1..72fe6ff6d7 100644 --- a/validphys2/src/validphys/dataplots.py +++ b/validphys2/src/validphys/dataplots.py @@ -960,9 +960,9 @@ def plot_dataspecs_positivity( @make_argcheck def _check_display_cuts_requires_use_cuts(display_cuts, use_cuts): check( - not (display_cuts - and use_cuts not in (CutsPolicy.FROMFIT, CutsPolicy.INTERNAL)), - "The display_cuts option requires setting use_cuts to True") + not (display_cuts and use_cuts is CutsPolicy.NOCUTS), + "The display_cuts option requires setting some cuts", + ) @make_argcheck def _check_marker_by(marker_by): From 77c7df4caadec2d2582b4b6b63141e23b6f8b27e Mon Sep 17 00:00:00 2001 From: Zahari Date: Wed, 3 Feb 2021 21:50:29 +0000 Subject: [PATCH 5/9] Change the logic so the ratio is to exp unc --- validphys2/src/validphys/core.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/validphys2/src/validphys/core.py b/validphys2/src/validphys/core.py index 5e7499b49a..3b18d1f2d4 100644 --- a/validphys2/src/validphys/core.py +++ 
b/validphys2/src/validphys/core.py @@ -410,13 +410,25 @@ def __init__(self, inputs, threshold): super().__init__(self.inputs, self.threshold) def load(self): + # TODO: Update this when a suitable interface becomes available from validphys.convolution import central_predictions + from validphys.commondataparser import load_commondata + from validphys.covmats import covmat_from_systematics first, second = self.inputs + first_ds = first[0] + exp_err = np.sqrt( + np.diag( + covmat_from_systematics( + load_commondata(first_ds.commondata).with_cuts(first_ds.cuts) + ) + ) + ) # Compute matched predictions - ratio = central_predictions(*first) / central_predictions(*second) - ratio = ratio.squeeze() - passed = (self.threshold < ratio) & (ratio < 1 / self.threshold) + delta = (central_predictions(*first) - central_predictions(*second)).squeeze() + ratio = delta / exp_err + print(ratio) + passed = ratio < self.threshold return passed[passed].index From df014332bf2acdba6ea9cce18319616a52ee9657 Mon Sep 17 00:00:00 2001 From: Zahari Date: Fri, 5 Feb 2021 13:53:31 +0000 Subject: [PATCH 6/9] Read datasets properly Read datasets from a dataset_inputs list defined for each of the namespaces. Currently this is much slower than it could be (since we are parsing the list every time) but I cannot think of a good way of doing it quickly. 
--- validphys2/src/validphys/config.py | 32 ++++++++++++++++++++---------- validphys2/src/validphys/core.py | 5 +++-- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py index c76c2ca4bb..b0514b4cc8 100644 --- a/validphys2/src/validphys/config.py +++ b/validphys2/src/validphys/config.py @@ -368,13 +368,7 @@ def parse_cut_similarity_threshold(self, th: numbers.Real): return th def produce_cuts( - self, - *, - commondata, - use_cuts, - rules, - fit=None, - theoryid=None, + self, *, commondata, use_cuts, rules, fit=None, theoryid=None, ): """Obtain cuts for a given dataset input, based on the appropriate policy.""" @@ -426,15 +420,33 @@ def produce_cuts( _, cut_similarity_threshold = self.parse_from_( None, "cut_similarity_threshold", write=False ) - + name = commondata.name inps = [] for ns in nss: + with self.set_context(ns=self._curr_ns.new_child({**ns,})): + # TODO: find a way to not duplicate this and use a dict + # instead of a linear search + _, dins = self.parse_from_(None, "dataset_inputs", write=False) + try: + di = next(d for d in dins if d.name == name) + except StopIteration as e: + raise ConfigError( + f"cuts_intersection_spec dataset inputs must define {name}" + ) from e + with self.set_context( - ns=self._curr_ns.new_child({"cuts": matched_cuts, **ns,}) + ns=self._curr_ns.new_child( + { + "dataset_input": di, + "use_cuts": CutsPolicy.FROM_CUT_INTERSECTION_NAMESPACE, + **ns, + } + ) ): _, ds = self.parse_from_(None, "dataset", write=False) _, pdf = self.parse_from_(None, "pdf", write=False) - inps.append((ds, pdf)) + print(ds, ds.fkspecs[0].cfactors) + inps.append((ds, pdf)) return SimilarCuts(tuple(inps), cut_similarity_threshold) raise TypeError("Wrong use_cuts") diff --git a/validphys2/src/validphys/core.py b/validphys2/src/validphys/core.py index 3b18d1f2d4..6ddead9b80 100644 --- a/validphys2/src/validphys/core.py +++ b/validphys2/src/validphys/core.py @@ -425,9 
+425,10 @@ def load(self): ) ) # Compute matched predictions - delta = (central_predictions(*first) - central_predictions(*second)).squeeze() + delta = np.abs( + (central_predictions(*first) - central_predictions(*second)).squeeze(axis=1) + ) ratio = delta / exp_err - print(ratio) passed = ratio < self.threshold return passed[passed].index From d9c6e63f0b918b4a9215be413772af86667a101c Mon Sep 17 00:00:00 2001 From: Zahari Date: Wed, 3 Mar 2021 14:16:48 +0000 Subject: [PATCH 7/9] Address various code review comments Improve error message a bit and re-use the already computed cuts. --- validphys2/src/validphys/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py index b0514b4cc8..d51d849203 100644 --- a/validphys2/src/validphys/config.py +++ b/validphys2/src/validphys/config.py @@ -422,7 +422,7 @@ def produce_cuts( ) name = commondata.name inps = [] - for ns in nss: + for i, ns in enumerate(nss): with self.set_context(ns=self._curr_ns.new_child({**ns,})): # TODO: find a way to not duplicate this and use a dict # instead of a linear search @@ -431,7 +431,7 @@ def produce_cuts( di = next(d for d in dins if d.name == name) except StopIteration as e: raise ConfigError( - f"cuts_intersection_spec dataset inputs must define {name}" + f"cuts_intersection_spec namespace {i}: dataset inputs must define {name}" ) from e with self.set_context( @@ -439,13 +439,13 @@ def produce_cuts( { "dataset_input": di, "use_cuts": CutsPolicy.FROM_CUT_INTERSECTION_NAMESPACE, + "cuts": matched_cuts, **ns, } ) ): _, ds = self.parse_from_(None, "dataset", write=False) _, pdf = self.parse_from_(None, "pdf", write=False) - print(ds, ds.fkspecs[0].cfactors) inps.append((ds, pdf)) return SimilarCuts(tuple(inps), cut_similarity_threshold) From 608191c9d6d387fa81ab971d7ec6179bea9271e4 Mon Sep 17 00:00:00 2001 From: Zahari Date: Wed, 3 Mar 2021 14:39:29 +0000 Subject: [PATCH 8/9] Add minimal tests for 
similarity cuts --- validphys2/src/validphys/tests/test_cuts.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 validphys2/src/validphys/tests/test_cuts.py diff --git a/validphys2/src/validphys/tests/test_cuts.py b/validphys2/src/validphys/tests/test_cuts.py new file mode 100644 index 0000000000..415759608d --- /dev/null +++ b/validphys2/src/validphys/tests/test_cuts.py @@ -0,0 +1,18 @@ +from validphys.api import API +from validphys.core import SimilarCuts +from validphys.tests.conftest import THEORYID, PDF, DATA + + + +def test_similarity_cuts(): + plain = [{"dataset": dt["dataset"]} for dt in DATA] + inp = { + "theoryid": THEORYID, + "pdf": PDF, + "cut_similarity_threshold": 1.5, + "use_cuts": "fromsimilarpredictions", + "cuts_intersection_spec": [{"dataset_inputs": DATA}, {"dataset_inputs": plain}], + "dataset_input": DATA[1], + } + ds = API.dataset(**inp) + assert isinstance(ds.cuts, SimilarCuts) From 3f9228231153984a630a6a168beba1feb571bd1f Mon Sep 17 00:00:00 2001 From: Zahari Date: Wed, 3 Mar 2021 14:51:37 +0000 Subject: [PATCH 9/9] Add some docs on similarity cuts This is added to the guide, which is otherwise outdated, but this stuff isn't anywhere at the moment. --- doc/validphys2/guide.md | 74 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/doc/validphys2/guide.md b/doc/validphys2/guide.md index d4c17a5a09..42a7f9aac1 100644 --- a/doc/validphys2/guide.md +++ b/doc/validphys2/guide.md @@ -1602,8 +1602,16 @@ configuration setting: the cuts for the given dataset. This is useful for example for requiring the common subset of points that pass the cuts at NLO and NNLO. 
+`use_cuts: 'fromsimilarpredictions'` + ~ Compute the intersection between two namespaces (similar to for + `fromintersection`) but additionally require that the predictions computed for + each dataset across the namespaces are *similar*, specifically that the ratio + between the absolute difference in the predictions and the total experimental + uncertainty is smaller than a given value, `cut_similarity_threshold` that + must be provided. Note that for this to work with different cfactors across + the namespaces, one must provide a different `dataset_inputs` list for each. -The following example demonstrates these options: +The following example demonstrates the first three options: ```yaml meta: @@ -1671,6 +1679,70 @@ for each one individually. With these settings the later three [dataspecs](#general-data-specification-the-dataspec-api) give the same result. +The following example demonstrates the use of `fromsimilarpredictions`: + +```yaml +meta: + title: "Test similarity cuts: Threshold 1,2" + author: Zahari Kassabov + keywords: [test] + +show_total: True + +NNLODatasts: &NNLODatasts +- {dataset: ATLAS_SINGLETOP_TCH_R_7TEV, frac: 1.0, cfac: [QCD]} # N +- {dataset: ATLAS_SINGLETOP_TCH_R_13TEV, frac: 1.0, cfac: [QCD]} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_7TEV_T_RAP_NORM, frac: 1.0, cfac: [QCD]} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_7TEV_TBAR_RAP_NORM, frac: 1.0, cfac: [QCD]} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_8TEV_T_RAP_NORM, frac: 0.75, cfac: [QCD]} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_8TEV_TBAR_RAP_NORM, frac: 0.75, cfac: [QCD]} # N + +NLODatasts: &NLODatasts +- {dataset: ATLAS_SINGLETOP_TCH_R_7TEV, frac: 1.0, cfac: []} # N +- {dataset: ATLAS_SINGLETOP_TCH_R_13TEV, frac: 1.0, cfac: []} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_7TEV_T_RAP_NORM, frac: 1.0, cfac: []} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_7TEV_TBAR_RAP_NORM, frac: 1.0, cfac: []} # N +- {dataset: ATLAS_SINGLETOP_TCH_DIFF_8TEV_T_RAP_NORM, frac: 0.75, cfac: []} # N +- 
{dataset: ATLAS_SINGLETOP_TCH_DIFF_8TEV_TBAR_RAP_NORM, frac: 0.75, cfac: []} # N + + +dataset_inputs: *NLODatasts + +cuts_intersection_spec: + - theoryid: 52 + pdf: NNPDF31_nlo_as_0118 + dataset_inputs: *NLODatasts + + - theoryid: 53 + pdf: NNPDF31_nlo_as_0118 + dataset_inputs: *NNLODatasts + + +theoryid: 52 +pdf: NNPDF31_nlo_as_0118 + +dataspecs: + + - use_cuts: internal + speclabel: "No cuts" + + + - cut_similarity_threshold: 2 + speclabel: "Threshold 2" + use_cuts: fromsimilarpredictions + + + - cut_similarity_threshold: 1 + speclabel: "Threshold 1" + use_cuts: fromsimilarpredictions + +template_text: | + {@dataspecs_chi2_table@} + +actions_: + - report(main=True) +``` + ### Data theory comparison The name of the data-theory comparison tool is `plot_fancy`. You can