Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions n3fit/src/n3fit/backends/keras_backend/operations.py
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,13 @@ def tmp(x):
return layer_op(tensor)


def gather(*args, **kwargs):
    """
    Gather elements from a tensor along an axis

    Thin pass-through wrapper around ``tf.gather`` so that layers depend
    only on this backend module rather than on TensorFlow directly; all
    positional and keyword arguments are forwarded unchanged.
    """
    return tf.gather(*args, **kwargs)


#
# Tensor operations
# f(x: tensor[s]) -> y: tensor
Expand Down Expand Up @@ -326,12 +333,12 @@ def split(*args, **kwargs):
return tf.split(*args, **kwargs)


def scatter_to_one(values, indices, output_shape):
    """
    Like scatter_nd initialized to one instead of zero
    see full `docs <https://www.tensorflow.org/api_docs/python/tf/scatter_nd>`_

    Parameters
    ----------
    values: tensor
        values to scatter into the ones tensor
    indices: tensor or nested list of int
        indices (one entry per value) at which ``values`` are placed
    output_shape: tuple or tf.TensorShape
        shape of the output tensor

    Returns
    -------
    tensor of shape ``output_shape`` that is one everywhere except at
    ``indices``, where the corresponding ``values`` are placed
    """
    # Start from ones (float32 to match the default Keras dtype) and
    # overwrite only the requested positions.
    ones = np.ones(output_shape, dtype=np.float32)
    return tf.tensor_scatter_nd_update(ones, indices, values)


Expand Down
103 changes: 72 additions & 31 deletions n3fit/src/n3fit/layers/msr_normalization.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,57 @@
"""
Definition of the imposition of the Momentum Sum Rule and Valence Sum Rules in the PDF fit.

In the module level constants ``{MSR/VSR}_COMPONENTS`` the flavours affected by the MSR and VSR are defined.
For the Valence Sum Rules, ``VSR_DENOMINATORS`` instead defines which flavour's integral is used
to compute the normalization. Note that for an Nf=4 fit ``v35 = v24 = v``.
If the number of flavours were to be changed in the future, this would need to be updated accordingly.
"""
from n3fit.backends import MetaLayer
Comment thread
APJansen marked this conversation as resolved.
from n3fit.backends import operations as op

# Position of each flavour in the 14-dimensional PDF basis used by the fit.
IDX = {
    'photon': 0,
    'sigma': 1,
    'g': 2,
    'v': 3,
    'v3': 4,
    'v8': 5,
    'v15': 6,
    'v24': 7,
    'v35': 8,
}
# Flavours normalized by the momentum sum rule; the denominator of the
# MSR factor for component f is the integral of MSR_DENOMINATORS[f].
MSR_COMPONENTS = ['g']
MSR_DENOMINATORS = {'g': 'g'}
# The VSR normalization factor of component f is given by
# VSR_CONSTANTS[f] / VSR_DENOMINATORS[f]
VSR_COMPONENTS = ['v', 'v35', 'v24', 'v3', 'v8', 'v15']
VSR_CONSTANTS = {
    'v': 3.0,
    'v35': 3.0,
    'v24': 3.0,
    'v3': 1.0,
    'v8': 3.0,
    'v15': 3.0,
}
VSR_DENOMINATORS = {
    'v': 'v',
    'v35': 'v',
    'v24': 'v',
    'v3': 'v3',
    'v8': 'v8',
    'v15': 'v15',
}


class MSR_Normalization(MetaLayer):
"""
Applies the normalisation so that the PDF output fullfills the sum rules
Computes the normalisation factors for the sum rules of the PDFs.
"""

_msr_enabled = False
_vsr_enabled = False

def __init__(self, output_dim=14, mode="ALL", **kwargs):
def __init__(self, mode="ALL", **kwargs):
if mode == True or mode.upper() == "ALL":
self._msr_enabled = True
self._vsr_enabled = True
Expand All @@ -27,20 +62,23 @@ def __init__(self, output_dim=14, mode="ALL", **kwargs):
else:
raise ValueError(f"Mode {mode} not accepted for sum rules")

idx = []
indices = []
self.divisor_indices = []
if self._msr_enabled:
idx += GLUON_IDX
indices += [IDX[c] for c in MSR_COMPONENTS]
self.divisor_indices += [IDX[MSR_DENOMINATORS[c]] for c in MSR_COMPONENTS]
if self._vsr_enabled:
idx += V_IDX + V3_IDX + V8_IDX + V15_IDX

self._out_scatter = op.as_layer(
op.scatter_to_one, op_kwargs={"indices": idx, "output_dim": output_dim}
Comment thread
APJansen marked this conversation as resolved.
)
self.divisor_indices += [IDX[VSR_DENOMINATORS[c]] for c in VSR_COMPONENTS]
indices += [IDX[c] for c in VSR_COMPONENTS]
self.vsr_factors = op.numpy_to_tensor([VSR_CONSTANTS[c] for c in VSR_COMPONENTS])
# Need this extra dimension for the scatter_to_one operation
self.indices = [[i] for i in indices]

super().__init__(**kwargs)

def call(self, pdf_integrated, photon_integral):
"""Imposes the valence and momentum sum rules:
"""
Computes the normalization factors for the PDFs:
A_g = (1-sigma-photon)/g
A_v = A_v24 = A_v35 = 3/V
A_v3 = 1/V_3
Expand All @@ -51,30 +89,33 @@ def call(self, pdf_integrated, photon_integral):

Parameters
----------
pdf_integrated: (Tensor(1,None,14))
pdf_integrated: (Tensor(1, 14))
the integrated PDF
photon_integral: (Tensor(1)):
the integrated photon, not included in PDF
photon_integral: (Tensor(1, 1))
the integrated photon PDF

Returns
-------
normalization_factor: Tensor(14)
The normalization factors per flavour.
"""
y = op.flatten(pdf_integrated)
norm_constants = []
y = pdf_integrated[0] # get rid of the batch dimension
photon_integral = photon_integral[0] # get rid of the batch dimension
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does the photon integral need a batch dimension? You could as well remove the [[ in below (or is it because of symmetry arguments?)

(you can also do directly here photon_integral[0][0] btw)

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I suppose this was more relevant when I actually joined them together before being passed to this layer, but still having the same shapes makes it easier to change things I think, even if it's a bit superfluous now.

numerators = []

if self._msr_enabled:
n_ag = [(1.0 - y[GLUON_IDX[0][0] - 1] - photon_integral[0]) / y[GLUON_IDX[0][0]]] * len(
GLUON_IDX
)
norm_constants += n_ag

numerators += [
op.batchit(1.0 - y[IDX['sigma']] - photon_integral[0], batch_dimension=0)
]
Comment thread
APJansen marked this conversation as resolved.
if self._vsr_enabled:
n_av = [3.0 / y[V_IDX[0][0]]] * len(V_IDX)
n_av3 = [1.0 / y[V3_IDX[0][0]]] * len(V3_IDX)
n_av8 = [3.0 / y[V8_IDX[0][0]]] * len(V8_IDX)
n_av15 = [3.0 / y[V15_IDX[0][0]]] * len(V15_IDX)
norm_constants += n_av + n_av3 + n_av8 + n_av15
numerators += [self.vsr_factors]

numerators = op.concatenate(numerators, axis=0)
divisors = op.gather(y, self.divisor_indices, axis=0)

# Fill in the rest of the flavours with 1
norm_constants = op.scatter_to_one(
numerators / divisors, indices=self.indices, output_shape=y.shape
)

return self._out_scatter(norm_constants)
return norm_constants
Comment thread
APJansen marked this conversation as resolved.
3 changes: 2 additions & 1 deletion n3fit/src/n3fit/model_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -680,8 +680,9 @@ def compute_unnormalized_pdf(x, neural_network, compute_preprocessing_factor):
"pdf_xgrid_integration": pdf_integration_grid,
"xgrid_integration": integrator_input,
# The photon is treated separately, need to get its integrals to normalize the pdf
"photon_integral": op.numpy_to_tensor(
"photon_integral": op.numpy_to_tensor([[
0.0 if not photons else photons.integral[i_replica]
]]
),
}
)
Expand Down
5 changes: 2 additions & 3 deletions n3fit/src/n3fit/msr.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,11 +86,10 @@ def generate_msr_model_and_grid(
pdf_integrated = xIntegrator(weights_array, input_shape=(nx,))(pdf_integrand)

# 5. THe input for the photon integral, will be set to 0 if no photons
photon_integral = Input(shape=(), batch_size=1, name='photon_integral')
photon_integral = Input(shape=(1,), batch_size=1, name='photon_integral')

# 6. Compute the normalization factor
# For now set the photon component to None
normalization_factor = MSR_Normalization(output_dim, mode, name="msr_weights")(
normalization_factor = MSR_Normalization(mode, name="msr_weights")(
pdf_integrated, photon_integral
)

Expand Down
85 changes: 85 additions & 0 deletions n3fit/src/n3fit/tests/test_msr.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
import numpy as np

from n3fit.backends import operations as op
from n3fit.layers import MSR_Normalization


def apply_layer_to_fixed_input(layer):
    """
    Evaluate ``layer`` on a reproducible random input.

    Seeds the legacy numpy RNG so every call draws the same integrated
    PDF (shape (1, 14)) and photon integral (shape (1, 1)); the draw
    order must not change or the regression values in the tests break.
    """
    np.random.seed(422)
    integrated_pdf = op.numpy_to_tensor(np.random.normal(size=(1, 14)))
    integrated_photon = op.numpy_to_tensor([np.random.normal(size=(1,))])
    return layer(integrated_pdf, integrated_photon)


def test_all():
    """Regression test of the combined MSR+VSR normalization factors."""
    output = apply_layer_to_fixed_input(MSR_Normalization(mode='ALL'))
    # All untouched flavours keep a normalization factor of one
    expected = [1.0] * 14
    expected[2] = 6.7740397  # gluon (MSR)
    expected[3] = expected[7] = expected[8] = -3.8735497  # v, v24, v35
    expected[4] = 15.276561  # v3
    expected[5] = 5.9522753  # v8
    expected[6] = 8.247783  # v15
    np.testing.assert_allclose(output, op.numpy_to_tensor(expected), rtol=1e-5)


def test_msr():
    """Only the gluon entry deviates from one when just the MSR is active."""
    output = apply_layer_to_fixed_input(MSR_Normalization(mode='MSR'))
    expected = [1.0] * 14
    expected[2] = 6.7740397  # gluon (MSR)
    np.testing.assert_allclose(output, op.numpy_to_tensor(expected), rtol=1e-5)


def test_vsr():
    """Only the valence entries deviate from one when just the VSR is active."""
    output = apply_layer_to_fixed_input(MSR_Normalization(mode='VSR'))
    expected = [1.0] * 14
    expected[3] = expected[7] = expected[8] = -3.8735497  # v, v24, v35
    expected[4] = 15.276561  # v3
    expected[5] = 5.9522753  # v8
    expected[6] = 8.247783  # v15
    np.testing.assert_allclose(output, op.numpy_to_tensor(expected), rtol=1e-5)