From 995c73e436934873a7fce54badac1c8d37ab53b0 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Wed, 1 Oct 2025 09:53:31 +0200 Subject: [PATCH 01/32] vectorize integration for single parameters --- .../physical_models/crop/leaf_dynamics.py | 76 +++++++++---------- 1 file changed, 34 insertions(+), 42 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index da24eb3..b652bb8 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -9,7 +9,6 @@ from pcse.decorators import prepare_states from pcse.traitlets import Any from pcse.util import AfgenTrait -from pcse.util import limit DTYPE = torch.float64 # Default data type for tensors in this module @@ -113,6 +112,9 @@ class WOFOST_Leaf_Dynamics(SimulationObject): LAI, TWLV """ + START_DATE = None + MAX_DAYS = 300 + class Parameters(ParamTemplate): RGRLAI = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) SPAN = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) @@ -156,6 +158,7 @@ def initialize(self, day, kiosk, parvalues): :param parvalues: `ParameterProvider` object providing parameters as key/value pairs """ + self.START_DATE = day self.kiosk = kiosk # TODO check if parvalues are already torch.nn.Parameters self.params = self.Parameters(parvalues) @@ -177,10 +180,12 @@ def initialize(self, day, kiosk, parvalues): DWLV = torch.tensor(0.0, dtype=DTYPE) TWLV = WLV + DWLV - # First leaf class (SLA, age and weight) - SLA = torch.tensor([params.SLATB(DVS)], dtype=DTYPE) - LVAGE = torch.tensor([0.0], dtype=DTYPE) - LV = torch.stack([WLV]) + # Initialize leaf classes (SLA, age and weight) + SLA = torch.zeros(self.MAX_DAYS, dtype=DTYPE) + LVAGE = torch.zeros(self.MAX_DAYS, dtype=DTYPE) + LV = torch.zeros(self.MAX_DAYS, dtype=DTYPE) + SLA[0] = params.SLATB(DVS) + LV[0] = WLV # Initial values for leaf area LAIEM = LV[0] * SLA[0] @@ -236,14 +241,14 @@ def 
calc_rates(self, day, drv): # death due to self shading cause by high LAI DVS = self.kiosk["DVS"] LAICR = 3.2 / p.KDIFTB(DVS) - r.DSLV2 = mask * s.WLV * limit(0.0, 0.03, 0.03 * (s.LAI - LAICR) / LAICR) + r.DSLV2 = mask * s.WLV * torch.clamp(0.03 * (s.LAI - LAICR) / LAICR, 0.0, 0.03) # Death of leaves due to frost damage as determined by # Reduction Factor Frost "RF_FROST" if "RF_FROST" in self.kiosk: r.DSLV3 = mask * s.WLV * k.RF_FROST else: - r.DSLV3 = torch.tensor(0.0, dtype=DTYPE) + r.DSLV3 = torch.zeros_like(s.WLV, dtype=DTYPE) # leaf death equals maximum of water stress, shading and frost r.DSLV = torch.max(torch.stack([r.DSLV1, r.DSLV2, r.DSLV3])) @@ -253,18 +258,9 @@ def calc_rates(self, day, drv): # in DALV. # Note that the actual leaf death is imposed on the array LV during the # state integration step. - DALV = torch.tensor(0.0, dtype=DTYPE) - if p.SPAN.requires_grad: # replacing hard threshold `if lvage > p.SPAN`` - sharpness = 1000.0 # FIXEME - for lv, lvage in zip(s.LV, s.LVAGE, strict=False): - weight = torch.sigmoid((lvage - p.SPAN) * sharpness) - DALV = DALV + weight * lv - else: - for lv, lvage in zip(s.LV, s.LVAGE, strict=False): - if lvage > p.SPAN: - DALV = DALV + lv - - r.DALV = DALV + sharpness = torch.tensor(1000., dtype=DTYPE) # FIXME + weight = torch.sigmoid((s.LVAGE - p.SPAN) * sharpness) + r.DALV = torch.sum(weight * s.LV) # Total death rate leaves r.DRLV = torch.max(r.DSLV, r.DALV) @@ -301,34 +297,30 @@ def integrate(self, day, delt=1.0): tLVAGE = states.LVAGE.clone() tDRLV = rates.DRLV - # leaf death is imposed on leaves by removing leave classes from the - # right side. 
- for LVweigth in reversed(states.LV): - if tDRLV > 0.0: - if tDRLV >= LVweigth: # remove complete leaf class - tDRLV = tDRLV - LVweigth - tLV = tLV[:-1] # Remove last element - tLVAGE = tLVAGE[:-1] - tSLA = tSLA[:-1] - else: # Decrease value of oldest (rightmost) leave class - tLV[-1] = tLV[-1] - tDRLV - tDRLV = torch.tensor(0.0, dtype=DTYPE) - else: - break - + # Leaf death is imposed on leaves from the oldest ones. + # Calculate the cumulative sum of weights after leaf death, and + # find out which leaf classes are dead (negative weights) + weight_cumsum = tLV.cumsum(0) - tDRLV + is_dead = weight_cumsum < 0 + # Adjust value of oldest leaf class (first non-zero weights) + idx_alive, = (~is_dead).nonzero(as_tuple=True) + idx_oldest = idx_alive[0] + tLV[idx_oldest] = weight_cumsum[idx_oldest] + # Zero out all dead leaf classes + tLV = tLV.where(~is_dead, 0.) # Integration of physiological age - tLVAGE = torch.tensor([age + rates.FYSAGE for age in tLVAGE], dtype=DTYPE) + tLVAGE = tLVAGE + rates.FYSAGE + tLVAGE = tLVAGE.where(~is_dead, 0.) + tSLA = tSLA.where(~is_dead, 0.) # --------- leave growth --------- - # new leaves in class 1 - tLV = torch.cat((torch.tensor([rates.GRLV], dtype=DTYPE), tLV)) - tSLA = torch.cat((torch.tensor([rates.SLAT], dtype=DTYPE), tSLA)) - tLVAGE = torch.cat((torch.tensor([0.0], dtype=DTYPE), tLVAGE)) + idx = int((day - self.START_DATE).days / delt) + tLV[idx] = rates.GRLV + tSLA[idx] = rates.SLAT + tLVAGE[idx] = 0. 
# calculation of new leaf area - states.LASUM = torch.sum( - torch.stack([lv * sla for lv, sla in zip(tLV, tSLA, strict=False)]) - ) + states.LASUM = torch.sum(tLV * tSLA) states.LAI = self._calc_LAI() states.LAIMAX = torch.max(states.LAI, states.LAIMAX) From 61283414fdbf08f568504ab8bbce21c72872e235 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Wed, 1 Oct 2025 09:59:39 +0200 Subject: [PATCH 02/32] apply ruff formatting --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index b652bb8..9e1fc65 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -258,7 +258,7 @@ def calc_rates(self, day, drv): # in DALV. # Note that the actual leaf death is imposed on the array LV during the # state integration step. - sharpness = torch.tensor(1000., dtype=DTYPE) # FIXME + sharpness = torch.tensor(1000.0, dtype=DTYPE) # FIXME weight = torch.sigmoid((s.LVAGE - p.SPAN) * sharpness) r.DALV = torch.sum(weight * s.LV) @@ -303,21 +303,21 @@ def integrate(self, day, delt=1.0): weight_cumsum = tLV.cumsum(0) - tDRLV is_dead = weight_cumsum < 0 # Adjust value of oldest leaf class (first non-zero weights) - idx_alive, = (~is_dead).nonzero(as_tuple=True) + (idx_alive,) = (~is_dead).nonzero(as_tuple=True) idx_oldest = idx_alive[0] tLV[idx_oldest] = weight_cumsum[idx_oldest] # Zero out all dead leaf classes - tLV = tLV.where(~is_dead, 0.) + tLV = tLV.where(~is_dead, 0.0) # Integration of physiological age tLVAGE = tLVAGE + rates.FYSAGE - tLVAGE = tLVAGE.where(~is_dead, 0.) - tSLA = tSLA.where(~is_dead, 0.) + tLVAGE = tLVAGE.where(~is_dead, 0.0) + tSLA = tSLA.where(~is_dead, 0.0) # --------- leave growth --------- idx = int((day - self.START_DATE).days / delt) tLV[idx] = rates.GRLV tSLA[idx] = rates.SLAT - tLVAGE[idx] = 0. 
+ tLVAGE[idx] = 0.0 # calculation of new leaf area states.LASUM = torch.sum(tLV * tSLA) From 3ea31bfe2036a7de5acee331da5f08b86f8b3566 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Tue, 14 Oct 2025 14:02:50 +0200 Subject: [PATCH 03/32] fix typo --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 9e1fc65..7ef4be0 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -121,8 +121,8 @@ class Parameters(ParamTemplate): TBASE = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) PERDL = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) TDWI = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) - SLATB = AfgenTrait() # FIXEME - KDIFTB = AfgenTrait() # FIXEME + SLATB = AfgenTrait() # FIXME + KDIFTB = AfgenTrait() # FIXME class StateVariables(StatesTemplate): LV = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) @@ -338,7 +338,7 @@ def integrate(self, day, delt=1.0): self.states.LVAGE = tLVAGE @prepare_states - def _set_variable_LAI(self, nLAI): # FIXEME + def _set_variable_LAI(self, nLAI): # FIXME """Updates the value of LAI to to the new value provided as input. 
Related state variables will be updated as well and the increments From c1d771d67e3273733337d539a05fd1bbe402dce4 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Tue, 14 Oct 2025 14:05:58 +0200 Subject: [PATCH 04/32] adapt to any shape of parameters --- .../physical_models/crop/leaf_dynamics.py | 71 ++++++++++--------- 1 file changed, 39 insertions(+), 32 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 7ef4be0..08cca38 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -181,14 +181,15 @@ def initialize(self, day, kiosk, parvalues): TWLV = WLV + DWLV # Initialize leaf classes (SLA, age and weight) - SLA = torch.zeros(self.MAX_DAYS, dtype=DTYPE) - LVAGE = torch.zeros(self.MAX_DAYS, dtype=DTYPE) - LV = torch.zeros(self.MAX_DAYS, dtype=DTYPE) - SLA[0] = params.SLATB(DVS) - LV[0] = WLV + dims = WLV.shape + SLA = torch.zeros((*dims, self.MAX_DAYS), dtype=DTYPE) + LVAGE = torch.zeros((*dims, self.MAX_DAYS), dtype=DTYPE) + LV = torch.zeros((*dims, self.MAX_DAYS), dtype=DTYPE) + SLA[..., 0] = params.SLATB(DVS) + LV[..., 0] = WLV # Initial values for leaf area - LAIEM = LV[0] * SLA[0] + LAIEM = LV[..., 0] * SLA[..., 0] LASUM = LAIEM LAIEXP = LAIEM LAIMAX = LAIEM @@ -251,38 +252,38 @@ def calc_rates(self, day, drv): r.DSLV3 = torch.zeros_like(s.WLV, dtype=DTYPE) # leaf death equals maximum of water stress, shading and frost - r.DSLV = torch.max(torch.stack([r.DSLV1, r.DSLV2, r.DSLV3])) + r.DSLV = torch.maximum(torch.maximum(r.DSLV1, r.DSLV2), r.DSLV3) # Determine how much leaf biomass classes have to die in states.LV, # given the a life span > SPAN, these classes will be accumulated # in DALV. # Note that the actual leaf death is imposed on the array LV during the # state integration step. 
+ tSPAN = _broadcast_to(p.SPAN, s.LVAGE.shape) # Broadcast to same shape sharpness = torch.tensor(1000.0, dtype=DTYPE) # FIXME - weight = torch.sigmoid((s.LVAGE - p.SPAN) * sharpness) - r.DALV = torch.sum(weight * s.LV) + weight = torch.sigmoid((s.LVAGE - tSPAN) * sharpness) + r.DALV = torch.sum(weight * s.LV, dim=-1) # Total death rate leaves - r.DRLV = torch.max(r.DSLV, r.DALV) + r.DRLV = torch.maximum(r.DSLV, r.DALV) # physiologic ageing of leaves per time step FYSAGE = (drv.TEMP - p.TBASE) / (35.0 - p.TBASE) - r.FYSAGE = mask * torch.max(torch.tensor(0.0, dtype=DTYPE), FYSAGE) + r.FYSAGE = mask * torch.clamp(FYSAGE, 0.0) # specific leaf area of leaves per time step r.SLAT = mask * torch.tensor(p.SLATB(DVS), dtype=DTYPE) # leaf area not to exceed exponential growth curve - if s.LAIEXP < 6.0: - DTEFF = torch.max(torch.tensor(0.0, dtype=DTYPE), drv.TEMP - p.TBASE) - r.GLAIEX = s.LAIEXP * p.RGRLAI * DTEFF - # source-limited increase in leaf area - r.GLASOL = r.GRLV * r.SLAT - # sink-limited increase in leaf area - GLA = torch.min(r.GLAIEX, r.GLASOL) - # adjustment of specific leaf area of youngest leaf class - if r.GRLV > 0.0: - r.SLAT = GLA / r.GRLV + is_lai_exp = s.LAIEXP < 6.0 + DTEFF = torch.clamp(drv.TEMP - p.TBASE, 0.0) + r.GLAIEX = torch.where(is_lai_exp, s.LAIEXP * p.RGRLAI * DTEFF, r.GLAIEX) + # source-limited increase in leaf area + r.GLASOL = torch.where(is_lai_exp, r.GRLV * r.SLAT, r.GLASOL) + # sink-limited increase in leaf area + GLA = torch.minimum(r.GLAIEX, r.GLASOL) + # adjustment of specific leaf area of youngest leaf class + r.SLAT = torch.where(is_lai_exp & (r.GRLV > 0.0), GLA / r.GRLV, r.SLAT) @prepare_states def integrate(self, day, delt=1.0): @@ -295,34 +296,34 @@ def integrate(self, day, delt=1.0): tLV = states.LV.clone() tSLA = states.SLA.clone() tLVAGE = states.LVAGE.clone() - tDRLV = rates.DRLV + tDRLV = _broadcast_to(rates.DRLV, tLV.shape) # Leaf death is imposed on leaves from the oldest ones. 
# Calculate the cumulative sum of weights after leaf death, and # find out which leaf classes are dead (negative weights) - weight_cumsum = tLV.cumsum(0) - tDRLV + weight_cumsum = tLV.cumsum(dim=-1) - tDRLV is_dead = weight_cumsum < 0 # Adjust value of oldest leaf class (first non-zero weights) - (idx_alive,) = (~is_dead).nonzero(as_tuple=True) + (idx_alive, *_) = (~is_dead).nonzero(as_tuple=True) idx_oldest = idx_alive[0] tLV[idx_oldest] = weight_cumsum[idx_oldest] # Zero out all dead leaf classes - tLV = tLV.where(~is_dead, 0.0) + tLV = torch.where(~is_dead, tLV, 0.0) # Integration of physiological age tLVAGE = tLVAGE + rates.FYSAGE - tLVAGE = tLVAGE.where(~is_dead, 0.0) - tSLA = tSLA.where(~is_dead, 0.0) + tLVAGE = torch.where(~is_dead, tLVAGE, 0.0) + tSLA = torch.where(~is_dead, tSLA, 0.0) # --------- leave growth --------- idx = int((day - self.START_DATE).days / delt) - tLV[idx] = rates.GRLV - tSLA[idx] = rates.SLAT - tLVAGE[idx] = 0.0 + tLV[..., idx] = rates.GRLV + tSLA[..., idx] = rates.SLAT + tLVAGE[..., idx] = 0.0 # calculation of new leaf area - states.LASUM = torch.sum(tLV * tSLA) + states.LASUM = torch.sum(tLV * tSLA, dim=-1) states.LAI = self._calc_LAI() - states.LAIMAX = torch.max(states.LAI, states.LAIMAX) + states.LAIMAX = torch.maximum(states.LAI, states.LAIMAX) # exponential growth curve states.LAIEXP = states.LAIEXP + rates.GLAIEX @@ -397,3 +398,9 @@ def _exist_required_external_variables(kiosk): f" Ensure that all required variables {required_external_vars_at_init}" " are provided." 
) + +def _broadcast_to(x, shape): + """Create a view of tensor X with the given shape.""" + # The last dimension has size equal to the number of integration steps + *dims, n_steps = shape + return x.view(*dims, -1).expand((*dims, n_steps)) From 69320a23ed4638b23c1e46aa720bc597d49b6b26 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Tue, 14 Oct 2025 14:13:12 +0200 Subject: [PATCH 05/32] add ruff formatting --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 08cca38..841c0c4 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -259,7 +259,7 @@ def calc_rates(self, day, drv): # in DALV. # Note that the actual leaf death is imposed on the array LV during the # state integration step. - tSPAN = _broadcast_to(p.SPAN, s.LVAGE.shape) # Broadcast to same shape + tSPAN = _broadcast_to(p.SPAN, s.LVAGE.shape) # Broadcast to same shape sharpness = torch.tensor(1000.0, dtype=DTYPE) # FIXME weight = torch.sigmoid((s.LVAGE - tSPAN) * sharpness) r.DALV = torch.sum(weight * s.LV, dim=-1) @@ -399,6 +399,7 @@ def _exist_required_external_variables(kiosk): " are provided." 
) + def _broadcast_to(x, shape): """Create a view of tensor X with the given shape.""" # The last dimension has size equal to the number of integration steps From 4241d4d3bbd7c06c4a0f9fea19df25773325c99d Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Tue, 14 Oct 2025 23:25:41 +0200 Subject: [PATCH 06/32] fix in order to deal with arbitrary shapes together with 0-d params --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 841c0c4..ff0eab2 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -402,6 +402,11 @@ def _exist_required_external_variables(kiosk): def _broadcast_to(x, shape): """Create a view of tensor X with the given shape.""" - # The last dimension has size equal to the number of integration steps + if x.dim() == 0: + # For 0-d tensors, we simply broadcast to the given shape + return torch.broadcast_to(x, shape) + # The given shape should match x in all but the last axis, which represents + # the dimension along which the time integration is carried out *dims, n_steps = shape - return x.view(*dims, -1).expand((*dims, n_steps)) + # We first append an axis to x, then expand the last axis + return x.unsqueeze(-1).expand((*dims, n_steps)) From ecfe10c6fdd91445e3d7378288e48e624b8ea5ed Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Wed, 15 Oct 2025 15:02:18 +0200 Subject: [PATCH 07/32] remove BMI-related function --- .../physical_models/crop/leaf_dynamics.py | 49 ------------------- 1 file changed, 49 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index ff0eab2..1e0d924 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ 
-338,55 +338,6 @@ def integrate(self, day, delt=1.0): self.states.SLA = tSLA self.states.LVAGE = tLVAGE - @prepare_states - def _set_variable_LAI(self, nLAI): # FIXME - """Updates the value of LAI to to the new value provided as input. - - Related state variables will be updated as well and the increments - to all adjusted state variables will be returned as a dict. - """ - states = self.states - - # Store old values of states - oWLV = states.WLV - oLAI = states.LAI - oTWLV = states.TWLV - oLASUM = states.LASUM - - # Reduce oLAI for pod and stem area. SAI and PAI will not be adjusted - # because this is often only a small component of the total leaf - # area. For all current crop files in WOFOST SPA and SSA are zero - # anyway - SAI = self.kiosk["SAI"] - PAI = self.kiosk["PAI"] - adj_nLAI = torch.max(nLAI - SAI - PAI, 0.0) - adj_oLAI = torch.max(oLAI - SAI - PAI, 0.0) - - # LAI Adjustment factor for leaf biomass LV (rLAI) - if adj_oLAI > 0: - rLAI = adj_nLAI / adj_oLAI - LV = [lv * rLAI for lv in states.LV] - # If adj_oLAI == 0 then add the leave biomass directly to the - # youngest leave age class (LV[0]) - else: - LV = [nLAI / states.SLA[0]] - - states.LASUM = torch.sum( - torch.tensor([lv * sla for lv, sla in zip(LV, states.SLA, strict=False)], dtype=DTYPE) - ) - states.LV = LV - states.LAI = self._calc_LAI() - states.WLV = torch.sum(states.LV) - states.TWLV = states.WLV + states.DWLV - - increments = { - "LAI": states.LAI - oLAI, - "LAISUM": states.LASUM - oLASUM, - "WLV": states.WLV - oWLV, - "TWLV": states.TWLV - oTWLV, - } - return increments - def _exist_required_external_variables(kiosk): """Check if all required external variables are available in the kiosk.""" From 4768a263ca09d05d81ddbe519a00587c4634f4ec Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 17 Oct 2025 11:45:54 +0200 Subject: [PATCH 08/32] fix bug in dimensions handling --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 
deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 1e0d924..bef4697 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -303,10 +303,11 @@ def integrate(self, day, delt=1.0): # find out which leaf classes are dead (negative weights) weight_cumsum = tLV.cumsum(dim=-1) - tDRLV is_dead = weight_cumsum < 0 - # Adjust value of oldest leaf class (first non-zero weights) - (idx_alive, *_) = (~is_dead).nonzero(as_tuple=True) + # Adjust value of oldest leaf class, i.e. the first non-zero + # weight along the time axis (the last one) + (*_, idx_alive) = (~is_dead).nonzero(as_tuple=True) idx_oldest = idx_alive[0] - tLV[idx_oldest] = weight_cumsum[idx_oldest] + tLV[..., idx_oldest] = weight_cumsum[..., idx_oldest] # Zero out all dead leaf classes tLV = torch.where(~is_dead, tLV, 0.0) # Integration of physiological age @@ -329,7 +330,7 @@ def integrate(self, day, delt=1.0): states.LAIEXP = states.LAIEXP + rates.GLAIEX # Update leaf biomass states - states.WLV = torch.sum(tLV) + states.WLV = torch.sum(tLV, dim=-1) states.DWLV = states.DWLV + rates.DRLV states.TWLV = states.WLV + states.DWLV From a9b9baa2e3e8cb992bd52e9cb125345fa57251a1 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 17 Oct 2025 11:45:59 +0200 Subject: [PATCH 09/32] add tests --- .../crop/test_leaf_dynamics.py | 190 ++++++++++++++++++ 1 file changed, 190 insertions(+) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 3c1a600..ee71eb3 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -122,6 +122,196 @@ def test_leaf_dynamics_with_engine(self): config_path, ) + def test_leaf_dynamics_with_one_parameter_vector(self): + # prepare model input + test_data_path = phy_data_folder / 
"test_leafdynamics_wofost72_01.yaml" + crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] + ( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + external_states, + ) = prepare_engine_input(test_data_path, crop_model_params) + config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") + + # Setting a vector (with one value) for the TDWI parameter + param = "TDWI" + repeated = crop_model_params_provider[param].repeat(10) + crop_model_params_provider.set_override(param, repeated, check=False) + + engine = EngineTestHelper( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + config_path, + external_states, + ) + engine.run_till_terminate() + actual_results = engine.get_output() + + # get expected results from YAML test data + expected_results, expected_precision = get_test_data(test_data_path) + + assert len(actual_results) == len(expected_results) + + for reference, model in zip(expected_results, actual_results, strict=False): + assert reference["DAY"] == model["day"] + assert all( + all(abs(reference[var] - model[var]) < precision) + for var, precision in expected_precision.items() + ) + + def test_leaf_dynamics_with_different_parameter_values(self): + # prepare model input + test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" + crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] + ( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + external_states, + ) = prepare_engine_input(test_data_path, crop_model_params) + config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") + + # Setting a vector with multiple values for the TDWI parameter + param = "TDWI" + test_value = crop_model_params_provider[param] + # We set the value for which test data are available as the last element + param_vec = torch.tensor([test_value - 0.1, test_value + 0.1, test_value]) + crop_model_params_provider.set_override(param, param_vec, 
check=False) + + engine = EngineTestHelper( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + config_path, + external_states, + ) + engine.run_till_terminate() + actual_results = engine.get_output() + + # get expected results from YAML test data + expected_results, expected_precision = get_test_data(test_data_path) + + assert len(actual_results) == len(expected_results) + + for reference, model in zip(expected_results, actual_results, strict=False): + assert reference["DAY"] == model["day"] + assert all( + # The value for which test data are available is the last element + abs(reference[var] - model[var][-1]) < precision + for var, precision in expected_precision.items() + ) + + def test_leaf_dynamics_with_multiple_parameter_vectors(self): + # prepare model input + test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" + crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] + ( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + external_states, + ) = prepare_engine_input(test_data_path, crop_model_params) + config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") + + # Setting a vector (with one value) for the TDWI and SPAN parameters + for param in ("TDWI", "SPAN"): + repeated = crop_model_params_provider[param].repeat(10) + crop_model_params_provider.set_override(param, repeated, check=False) + + engine = EngineTestHelper( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + config_path, + external_states, + ) + engine.run_till_terminate() + actual_results = engine.get_output() + + # get expected results from YAML test data + expected_results, expected_precision = get_test_data(test_data_path) + + assert len(actual_results) == len(expected_results) + + for reference, model in zip(expected_results, actual_results, strict=False): + assert reference["DAY"] == model["day"] + assert all( + all(abs(reference[var] - model[var]) < precision) 
+ for var, precision in expected_precision.items() + ) + + def test_leaf_dynamics_with_multiple_parameter_arrays(self): + # prepare model input + test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" + crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] + ( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + external_states, + ) = prepare_engine_input(test_data_path, crop_model_params) + config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") + + # Setting an array with arbitrary shape (and one value) for the + # TDWI and SPAN parameters + for param in ("TDWI", "SPAN"): + repeated = crop_model_params_provider[param].broadcast_to((30, 5)) + crop_model_params_provider.set_override(param, repeated, check=False) + + engine = EngineTestHelper( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + config_path, + external_states, + ) + engine.run_till_terminate() + actual_results = engine.get_output() + + # get expected results from YAML test data + expected_results, expected_precision = get_test_data(test_data_path) + + assert len(actual_results) == len(expected_results) + + for reference, model in zip(expected_results, actual_results, strict=False): + assert reference["DAY"] == model["day"] + assert all( + torch.all(abs(reference[var] - model[var]) < precision) + for var, precision in expected_precision.items() + ) + + def test_leaf_dynamics_with_incompatible_parameter_vectors(self): + # prepare model input + test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" + crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] + ( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + external_states, + ) = prepare_engine_input(test_data_path, crop_model_params) + config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") + + # Setting a vector (with one value) for the TDWI and SPAN parameters, + # but with different 
lengths + crop_model_params_provider.set_override( + "TDWI", crop_model_params_provider["TDWI"].repeat(10), check=False + ) + crop_model_params_provider.set_override( + "SPAN", crop_model_params_provider["SPAN"].repeat(5), check=False + ) + + with pytest.raises(RuntimeError): + EngineTestHelper( + crop_model_params_provider, + weather_data_provider, + agro_management_inputs, + config_path, + external_states, + ) + def test_wofost_pp_with_leaf_dynamics(self): # prepare model input test_data_path = phy_data_folder / "test_potentialproduction_wofost72_01.yaml" From af9dd7c8d81937008d8c4a240e91a26c0a9aa626 Mon Sep 17 00:00:00 2001 From: Francesco Nattino <49899980+fnattino@users.noreply.github.com> Date: Wed, 22 Oct 2025 15:46:52 +0200 Subject: [PATCH 10/32] Update tests/physical_models/crop/test_leaf_dynamics.py Co-authored-by: SarahAlidoost <55081872+SarahAlidoost@users.noreply.github.com> --- tests/physical_models/crop/test_leaf_dynamics.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index d085a56..364b5ae 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -281,6 +281,10 @@ def test_leaf_dynamics_with_multiple_parameter_arrays(self): torch.all(abs(reference[var] - model[var]) < precision) for var, precision in expected_precision.items() ) + assert all ( + model[var].shape == (30, 5) + for var in expected_precision.keys() + ) # check the output shapes def test_leaf_dynamics_with_incompatible_parameter_vectors(self): # prepare model input From fd755d71d335e234591123808063100f094c53d5 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Wed, 22 Oct 2025 16:01:44 +0200 Subject: [PATCH 11/32] add comments to the class attributes --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index bef4697..574ccbf 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -112,8 +112,10 @@ class WOFOST_Leaf_Dynamics(SimulationObject): LAI, TWLV """ - START_DATE = None - MAX_DAYS = 300 + # The following parameters are used to initialize and control the arrays that store information + # on the leaf classes during the time integration: leaf area, age, and biomass. + START_DATE = None # Start date of the simulation + MAX_DAYS = 300 # Maximum number of days that can be simulated in one run (i.e. array lengths) class Parameters(ParamTemplate): RGRLAI = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) SPAN = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) TBASE = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) PERDL = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) TDWI = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) SLATB = AfgenTrait() # FIXME KDIFTB = AfgenTrait() # FIXME class StateVariables(StatesTemplate): LV = Any(default_value=[torch.tensor(-99.0, dtype=DTYPE)]) From bc275db032cf835ab1e06b5309945003aabbc453 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Wed, 22 Oct 2025 16:40:58 +0200 Subject: [PATCH 12/32] add comments on trackability of gradients it can be problematic with conditional statements --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 574ccbf..613a64f 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -262,6 +262,8 @@ def calc_rates(self, day, drv): # Note that the actual leaf death is imposed on the array LV during the # state integration step. tSPAN = _broadcast_to(p.SPAN, s.LVAGE.shape) # Broadcast to same shape + # Using a sigmoid here instead of a conditional statement on the value of + # SPAN because the latter would not allow for the gradient to be tracked.
sharpness = torch.tensor(1000.0, dtype=DTYPE) # FIXME weight = torch.sigmoid((s.LVAGE - tSPAN) * sharpness) r.DALV = torch.sum(weight * s.LV, dim=-1) @@ -279,6 +281,10 @@ def calc_rates(self, day, drv): # leaf area not to exceed exponential growth curve is_lai_exp = s.LAIEXP < 6.0 DTEFF = torch.clamp(drv.TEMP - p.TBASE, 0.0) + # NOTE: conditional statements do not allow for the gradient to be + # tracked through the condition. Thus, the gradient with respect to + # parameters that contribute to `is_lai_exp` (e.g. RGRLAI and TBASE) + # are expected to be incorrect. r.GLAIEX = torch.where(is_lai_exp, s.LAIEXP * p.RGRLAI * DTEFF, r.GLAIEX) # source-limited increase in leaf area r.GLASOL = torch.where(is_lai_exp, r.GRLV * r.SLAT, r.GLASOL) @@ -311,6 +317,9 @@ def integrate(self, day, delt=1.0): idx_oldest = idx_alive[0] tLV[..., idx_oldest] = weight_cumsum[..., idx_oldest] # Zero out all dead leaf classes + # NOTE: conditional statements do not allow for the gradient to be + # tracked through the condition. Thus, the gradient with respect to + # parameters that contribute to `is_dead` are expected to be incorrect. 
tLV = torch.where(~is_dead, tLV, 0.0) # Integration of physiological age tLVAGE = tLVAGE + rates.FYSAGE From 8b9b941cb0d286989d5255edbca3029c33729d1f Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Wed, 22 Oct 2025 17:05:24 +0200 Subject: [PATCH 13/32] apply ruff format --- tests/physical_models/crop/test_leaf_dynamics.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 364b5ae..480e32b 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -281,9 +281,8 @@ def test_leaf_dynamics_with_multiple_parameter_arrays(self): torch.all(abs(reference[var] - model[var]) < precision) for var, precision in expected_precision.items() ) - assert all ( - model[var].shape == (30, 5) - for var in expected_precision.keys() + assert all( + model[var].shape == (30, 5) for var in expected_precision.keys() ) # check the output shapes def test_leaf_dynamics_with_incompatible_parameter_vectors(self): From 4c7877084319d71894b4f1dc19eca40a058ec019 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 10:33:03 +0200 Subject: [PATCH 14/32] bug fix on oldest leaf class identification the same index was used for all parameter values --- .../physical_models/crop/leaf_dynamics.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 613a64f..2074c6c 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -310,21 +310,22 @@ def integrate(self, day, delt=1.0): # Calculate the cumulative sum of weights after leaf death, and # find out which leaf classes are dead (negative weights) weight_cumsum = tLV.cumsum(dim=-1) - tDRLV - is_dead = weight_cumsum < 0 + is_alive = 
weight_cumsum >= 0 # Adjust value of oldest leaf class, i.e. the first non-zero - # weight along the time axis (the last one) - (*_, idx_alive) = (~is_dead).nonzero(as_tuple=True) - idx_oldest = idx_alive[0] - tLV[..., idx_oldest] = weight_cumsum[..., idx_oldest] + # weight along the time axis (the last dimension). + # Cast argument to int because torch.argmax requires it to be numeric + idx_oldest = torch.argmax(is_alive.type(torch.int), dim=-1, keepdim=True) + new_biomass = torch.take_along_dim(weight_cumsum, indices=idx_oldest, dim=-1) + tLV = torch.scatter(tLV, dim=-1, index=idx_oldest, src=new_biomass) # Zero out all dead leaf classes # NOTE: conditional statements do not allow for the gradient to be # tracked through the condition. Thus, the gradient with respect to - # parameters that contribute to `is_dead` are expected to be incorrect. - tLV = torch.where(~is_dead, tLV, 0.0) + # parameters that contribute to `is_alive` are expected to be incorrect. + tLV = torch.where(is_alive, tLV, 0.0) # Integration of physiological age tLVAGE = tLVAGE + rates.FYSAGE - tLVAGE = torch.where(~is_dead, tLVAGE, 0.0) - tSLA = torch.where(~is_dead, tSLA, 0.0) + tLVAGE = torch.where(is_alive, tLVAGE, 0.0) + tSLA = torch.where(is_alive, tSLA, 0.0) # --------- leave growth --------- idx = int((day - self.START_DATE).days / delt) From c8c260995805ce907c01d2fd41851b7e654e5333 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 15:58:27 +0200 Subject: [PATCH 15/32] fix initialization of variables also check that all parameters have the same shape --- .../physical_models/crop/leaf_dynamics.py | 37 ++++++++++++++++--- 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 2074c6c..59f403a 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -8,7 +8,7 @@ from 
pcse.decorators import prepare_rates from pcse.decorators import prepare_states from pcse.traitlets import Any -from pcse.util import AfgenTrait +from pcse.util import Afgen, AfgenTrait DTYPE = torch.float64 # Default data type for tensors in this module @@ -176,17 +176,17 @@ def initialize(self, day, kiosk, parvalues): DVS = self.kiosk["DVS"] params = self.params + shape = _get_params_shape(params) # Initial leaf biomass WLV = (params.TDWI * (1 - FR)) * FL - DWLV = torch.tensor(0.0, dtype=DTYPE) + DWLV = torch.zeros(shape, dtype=DTYPE) TWLV = WLV + DWLV # Initialize leaf classes (SLA, age and weight) - dims = WLV.shape - SLA = torch.zeros((*dims, self.MAX_DAYS), dtype=DTYPE) - LVAGE = torch.zeros((*dims, self.MAX_DAYS), dtype=DTYPE) - LV = torch.zeros((*dims, self.MAX_DAYS), dtype=DTYPE) + SLA = torch.zeros((*shape, self.MAX_DAYS), dtype=DTYPE) + LVAGE = torch.zeros((*shape, self.MAX_DAYS), dtype=DTYPE) + LV = torch.zeros((*shape, self.MAX_DAYS), dtype=DTYPE) SLA[..., 0] = params.SLATB(DVS) LV[..., 0] = WLV @@ -364,6 +364,31 @@ def _exist_required_external_variables(kiosk): ) +def _get_params_shape(params): + """Get the parameters shape. + + Parameters can have arbitrary number of dimensions, but all parameters that are not zero- + dimensional should have the same shape. 
+ """ + shape = () + for parname in params.trait_names(): + # Skip special traitlets attributes + if parname.startswith("trait"): + continue + param = getattr(params, parname) + # Skip Afgen parameters: + if isinstance(param, Afgen): + continue + # Parameters that are not zero dimensional should all have the same shape + if param.shape and not shape: + shape = param.shape + elif param.shape: + assert param.shape == shape, ( + "All parameters should have the same shape (or have no dimensions)" + ) + return shape + + def _broadcast_to(x, shape): """Create a view of tensor X with the given shape.""" if x.dim() == 0: From 919bb7a3d0f57b6017d647ad82d8185216535624 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 15:58:53 +0200 Subject: [PATCH 16/32] refactor function safer and easier to read --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index 59f403a..b7956c7 100644 --- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -395,7 +395,6 @@ def _broadcast_to(x, shape): # For 0-d tensors, we simply broadcast to the given shape return torch.broadcast_to(x, shape) # The given shape should match x in all but the last axis, which represents - # the dimension along which the time integration is carried out - *dims, n_steps = shape - # We first append an axis to x, then expand the last axis - return x.unsqueeze(-1).expand((*dims, n_steps)) + # the dimension along which the time integration is carried out. 
+ # We first append an axis to x, then expand to the given shape + return x.unsqueeze(-1).expand(shape) From af72abd1946621888af1650b4d78068d829901a9 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 16:00:24 +0200 Subject: [PATCH 17/32] add parametrization to tests --- .../crop/test_leaf_dynamics.py | 203 +++++------------- 1 file changed, 52 insertions(+), 151 deletions(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 480e32b..5579241 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -2,8 +2,7 @@ from unittest.mock import patch import pytest import torch -import torch.testing -from numpy.testing import assert_almost_equal +from numpy.testing import assert_array_almost_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.leaf_dynamics import WOFOST_Leaf_Dynamics @@ -61,9 +60,7 @@ def forward(self, params_dict): engine.run_till_terminate() results = engine.get_output() - return torch.stack( - [torch.stack([item["LAI"], item["TWLV"]]) for item in results] - ).unsqueeze(0) # shape: [1, time_steps, 2] + return {var: torch.stack([item[var] for item in results]) for var in ["LAI", "TWLV"]} class TestLeafDynamics: @@ -122,7 +119,8 @@ def test_leaf_dynamics_with_engine(self): config_path, ) - def test_leaf_dynamics_with_one_parameter_vector(self): + @pytest.mark.parametrize("param", ["TDWI", "SPAN"]) + def test_leaf_dynamics_with_one_parameter_vector(self, param): # prepare model input test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] @@ -134,8 +132,7 @@ def test_leaf_dynamics_with_one_parameter_vector(self): ) = prepare_engine_input(test_data_path, crop_model_params) config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") - # Setting a vector (with one value) 
for the TDWI parameter - param = "TDWI" + # Setting a vector (with one value) for the selected parameter repeated = crop_model_params_provider[param].repeat(10) crop_model_params_provider.set_override(param, repeated, check=False) @@ -161,7 +158,11 @@ def test_leaf_dynamics_with_one_parameter_vector(self): for var, precision in expected_precision.items() ) - def test_leaf_dynamics_with_different_parameter_values(self): + @pytest.mark.parametrize("param,delta", [ + ("TDWI", 0.1), + ("SPAN", 5), + ]) + def test_leaf_dynamics_with_different_parameter_values(self, param, delta): # prepare model input test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" crop_model_params = ["SPAN", "TDWI", "TBASE", "PERDL", "RGRLAI"] @@ -173,11 +174,10 @@ def test_leaf_dynamics_with_different_parameter_values(self): ) = prepare_engine_input(test_data_path, crop_model_params) config_path = str(phy_data_folder / "WOFOST_Leaf_Dynamics.conf") - # Setting a vector with multiple values for the TDWI parameter - param = "TDWI" + # Setting a vector with multiple values for the selected parameter test_value = crop_model_params_provider[param] # We set the value for which test data are available as the last element - param_vec = torch.tensor([test_value - 0.1, test_value + 0.1, test_value]) + param_vec = torch.tensor([test_value - delta, test_value + delta, test_value]) crop_model_params_provider.set_override(param, param_vec, check=False) engine = EngineTestHelper( @@ -343,157 +343,58 @@ def test_wofost_pp_with_leaf_dynamics(self): ) -class TestDiffLeafDynamicsTDWI: - def test_gradients_tdwi_lai_leaf_dynamics(self): +class TestDiffLeafDynamics: + @pytest.mark.parametrize("param_name,param_value,out_name", [ + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), + ("SPAN", 
torch.tensor(30, dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), + ]) + def test_gradients_leaf_dynamics(self, param_name, param_value, out_name): model = get_test_diff_leaf_model() - tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float32)) - output = model({"TDWI": tdwi}) - lai = output[0, :, 0] - loss = lai.sum() + param = torch.nn.Parameter(param_value) + output = model({param_name: param}) + loss = output[out_name].sum() # this is ∂loss/∂tdwi without calling loss.backward(). # this is called forward gradient here because it is calculated without backpropagation. - grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] - assert grads is not None, "Gradients for TDWI should not be None" + grads = torch.autograd.grad(loss, param, retain_graph=True)[0] + assert grads is not None, "Gradients should not be None" - tdwi.grad = None # clear any existing gradient + param.grad = None # clear any existing gradient loss.backward() # this is ∂loss/∂tdwi calculated using backpropagation - grad_backward = tdwi.grad - - assert grad_backward is not None, "Backward gradients for TDWI should not be None" - assert grad_backward == grads, "Forward and backward gradients for TDWI should match" - - def test_gradients_tdwi_lai_leaf_dynamics_numerical(self): + grad_backward = param.grad + + assert grad_backward is not None, "Backward gradients should not be None" + assert torch.all(grad_backward == grads), "Forward and backward gradients should match" + + @pytest.mark.parametrize("param_name,param_value,out_name", [ + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), + ("SPAN", 
torch.tensor(30, dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), + ]) + def test_gradients_leaf_dynamics_numerical(self, param_name, param_value, out_name): # first check if the numerical gradient isnot zero i.e. the parameter has an effect - tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float64)) - output_index = 0 # LAI is at index 0 + param = torch.nn.Parameter(param_value) numerical_grad = calculate_numerical_grad( - get_test_diff_leaf_model, "TDWI", tdwi, output_index + get_test_diff_leaf_model, param_name, param.data, out_name ) # this is Δloss/Δtdwi model = get_test_diff_leaf_model() - output = model({"TDWI": tdwi}) - lai = output[0, :, output_index] - loss = lai.sum() - # this is ∂loss/∂tdwi, for comparison with numerical gradient - grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] - - assert_almost_equal(numerical_grad, grads.item(), decimal=3) - - def test_gradients_tdwi_twlv_leaf_dynamics(self): - # prepare model input - model = get_test_diff_leaf_model() - tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float32)) - output = model({"TDWI": tdwi}) - twlv = output[0, :, 1] - loss = twlv.sum() - - # this is ∂loss/∂tdwi - # this is called forward gradient here because it is calculated without backpropagation. 
- grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] - assert grads is not None, "Gradients for TDWI should not be None" - - tdwi.grad = None # clear any existing gradient - loss.backward() - # this is ∂loss/∂tdwi calculated using backpropagation - grad_backward = tdwi.grad - - assert grad_backward is not None, "Backward gradients for TDWI should not be None" - assert grad_backward == grads, "Forward and backward gradients for TDWI should match" - - def test_gradients_tdwi_twlv_leaf_dynamics_numerical(self): - # first check if the numerical gradient isnot zero i.e. the parameter has an effect - tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float64)) - output_index = 1 # TWLV is at index 1 - numerical_grad = calculate_numerical_grad( - get_test_diff_leaf_model, "TDWI", tdwi, output_index - ) # this is Δloss/Δtdwi - - model = get_test_diff_leaf_model() - output = model({"TDWI": tdwi}) - twlv = output[0, :, output_index] - loss = twlv.sum() - # this is ∂loss/∂tdwi, for comparison with numerical gradient - grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] - - assert_almost_equal(numerical_grad, grads.item(), decimal=3) - - -class TestDiffLeafDynamicsSPAN: - def test_gradients_span_lai_leaf_dynamics(self): - # prepare model input - model = get_test_diff_leaf_model() - span = torch.nn.Parameter(torch.tensor(30, dtype=torch.float32)) - output = model({"SPAN": span}) - lai = output[0, :, 0] - loss = lai.sum() + output = model({param_name: param}) + loss = output[out_name].sum() - # this is ∂loss/∂span - # this is called forward gradient here because it is calculated without backpropagation. 
- grads = torch.autograd.grad(loss, span, retain_graph=True)[0] - assert grads is not None, "Gradients for SPAN should not be None" - - span.grad = None # clear any existing gradient - loss.backward() - # this is ∂loss/∂span calculated using backpropagation - grad_backward = span.grad - - assert grad_backward is not None, "Backward gradients for SPAN should not be None" - assert grad_backward == grads, "Forward and backward gradients for SPAN should match" - - def test_gradients_span_lai_leaf_dynamics_numerical(self): - # first check if the numerical gradient isnot zero i.e. the parameter has an effect - span = torch.nn.Parameter(torch.tensor(30, dtype=torch.float64)) - output_index = 0 # LAI is at index 0 - numerical_grad = calculate_numerical_grad( - get_test_diff_leaf_model, "SPAN", span, output_index - ) # this is Δloss/Δspan - - model = get_test_diff_leaf_model() - output = model({"SPAN": span}) - lai = output[0, :, output_index] - loss = lai.sum() - # this is ∂loss/∂tdwi, for comparison with numerical gradient - grads = torch.autograd.grad(loss, span, retain_graph=True)[0] - - assert_almost_equal(numerical_grad, grads.item(), decimal=3) - - def test_gradients_span_twlv_leaf_dynamics(self): - # prepare model input - model = get_test_diff_leaf_model() - span = torch.nn.Parameter(torch.tensor(30, dtype=torch.float32)) - output = model({"SPAN": span}) - twlv = output[0, :, 1] - loss = twlv.sum() - - # this is ∂loss/∂span - # this is called forward gradient here because it is calculated without backpropagation. 
- grads = torch.autograd.grad(loss, span, retain_graph=True)[0] - assert grads is not None, "Gradients for SPAN should not be None" - - span.grad = None # clear any existing gradient - loss.backward() - # this is ∂loss/∂span calculated using backpropagation - grad_backward = span.grad - - assert grad_backward is not None, "Backward gradients for SPAN should not be None" - assert grad_backward == grads, "Forward and backward gradients for SPAN should match" - - def test_gradients_span_twlv_leaf_dynamics_numerical(self): - # first check if the numerical gradient isnot zero i.e. the parameter has an effect - span = torch.nn.Parameter(torch.tensor(30, dtype=torch.float64)) - output_index = 1 # TWLV is at index 1 - numerical_grad = calculate_numerical_grad( - get_test_diff_leaf_model, "SPAN", span, output_index - ) # this is Δloss/Δspan - - model = get_test_diff_leaf_model() - output = model({"SPAN": span}) - twlv = output[0, :, output_index] - loss = twlv.sum() # this is ∂loss/∂tdwi, for comparison with numerical gradient - grads = torch.autograd.grad(loss, span, retain_graph=True)[0] + grads = torch.autograd.grad(loss, param, retain_graph=True)[0] - assert numerical_grad == 0.0 - assert_almost_equal(numerical_grad, grads.item(), decimal=3) + assert_array_almost_equal(numerical_grad, grads.data, decimal=3) From 2752563330200fe1d859187a7bd0a8d0e2b40eaf Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 16:00:59 +0200 Subject: [PATCH 18/32] fix expected error --- tests/physical_models/crop/test_leaf_dynamics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 5579241..83f86e5 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -306,7 +306,7 @@ def test_leaf_dynamics_with_incompatible_parameter_vectors(self): "SPAN", crop_model_params_provider["SPAN"].repeat(5), 
check=False ) - with pytest.raises(RuntimeError): + with pytest.raises(AssertionError): EngineTestHelper( crop_model_params_provider, weather_data_provider, From 8679bc3c82238929069ed5a3d2b0e477a783b1f2 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 16:01:46 +0200 Subject: [PATCH 19/32] adapt function to calculate gradients numerically --- src/diffwofost/physical_models/utils.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/diffwofost/physical_models/utils.py b/src/diffwofost/physical_models/utils.py index dea2a9c..68ddfb7 100644 --- a/src/diffwofost/physical_models/utils.py +++ b/src/diffwofost/physical_models/utils.py @@ -286,18 +286,18 @@ def get_test_data(file_path): return inputs["ModelResults"], inputs["Precision"] -def calculate_numerical_grad(get_model_fn, param_name, param_value, output_index): +def calculate_numerical_grad(get_model_fn, param_name, param_value, out_name): """Calculate the numerical gradient of output with respect to a parameter.""" delta = 1e-6 - p_plus = param_value.item() + delta - p_minus = param_value.item() - delta + p_plus = param_value + delta + p_minus = param_value - delta model = get_model_fn() - output = model({param_name: torch.nn.Parameter(torch.tensor(p_plus, dtype=torch.float64))}) - loss_plus = output[0, :, output_index].sum() + output = model({param_name: torch.nn.Parameter(p_plus)}) + loss_plus = output[out_name].sum(dim=0) model = get_model_fn() - output = model({param_name: torch.nn.Parameter(torch.tensor(p_minus, dtype=torch.float64))}) - loss_minus = output[0, :, output_index].sum() + output = model({param_name: torch.nn.Parameter(p_minus)}) + loss_minus = output[out_name].sum(dim=0) - return (loss_plus.item() - loss_minus.item()) / (2 * delta) + return (loss_plus.data - loss_minus.data) / (2 * delta) From eb4e0849587370a7484047401275fbe5e69022c4 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 16:02:17 +0200 Subject: [PATCH 
20/32] adapt root dynamics test to fit changes in leaf dynamics --- .../crop/test_root_dynamics.py | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/tests/physical_models/crop/test_root_dynamics.py b/tests/physical_models/crop/test_root_dynamics.py index 02ed12f..cfffdf4 100644 --- a/tests/physical_models/crop/test_root_dynamics.py +++ b/tests/physical_models/crop/test_root_dynamics.py @@ -2,7 +2,7 @@ from unittest.mock import patch import pytest import torch -from numpy.testing import assert_almost_equal +from numpy.testing import assert_array_almost_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.root_dynamics import WOFOST_Root_Dynamics @@ -60,9 +60,7 @@ def forward(self, params_dict): engine.run_till_terminate() results = engine.get_output() - return torch.stack([torch.stack([item["RD"], item["TWRT"]]) for item in results]).unsqueeze( - 0 - ) # shape: [1, time_steps, 2] + return {var: torch.stack([item[var] for item in results]) for var in ["RD", "TWRT"]} class TestRootDynamics: @@ -154,7 +152,7 @@ def test_gradients_tdwi_rd_root_dynamics(self): model = get_test_diff_root_model() tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float32)) output = model({"TDWI": tdwi}) - rd = output[0, :, 0] + rd = output["RD"] loss = rd.sum() # this is ∂loss/∂tdwi without calling loss.backward(). 
@@ -174,28 +172,28 @@ def test_gradients_tdwi_rd_root_dynamics(self): def test_gradients_tdwi_rd_root_dynamics_numerical(self): tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float64)) - output_index = 0 # Index 0 is "RD" + output_name = "RD" numerical_grad = calculate_numerical_grad( - get_test_diff_root_model, "TDWI", tdwi, output_index + get_test_diff_root_model, "TDWI", tdwi, output_name ) model = get_test_diff_root_model() output = model({"TDWI": tdwi}) - rd = output[0, :, output_index] # Index 0 is "RD" + rd = output[output_name] loss = rd.sum() # this is ∂loss/∂tdwi, for comparison with numerical gradient grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] # in this test, grads is very small - assert_almost_equal(numerical_grad, grads.item(), decimal=3) + assert_array_almost_equal(numerical_grad, grads.item(), decimal=3) def test_gradients_tdwi_twrt_root_dynamics(self): # prepare model input model = get_test_diff_root_model() tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float32)) output = model({"TDWI": tdwi}) - twrt = output[0, :, 1] # Index 1 is "TWRT" + twrt = output["TWRT"] loss = twrt.sum() # this is ∂loss/∂tdwi @@ -215,18 +213,18 @@ def test_gradients_tdwi_twrt_root_dynamics(self): def test_gradients_tdwi_twrt_root_dynamics_numerical(self): tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float64)) - output_index = 1 # Index 1 is "TWRT" + output_name = "TWRT" # Index 1 is "TWRT" numerical_grad = calculate_numerical_grad( - get_test_diff_root_model, "TDWI", tdwi, output_index + get_test_diff_root_model, "TDWI", tdwi.data, output_name ) model = get_test_diff_root_model() output = model({"TDWI": tdwi}) - twrt = output[0, :, output_index] + twrt = output[output_name] loss = twrt.sum() # this is ∂loss/∂tdwi, for comparison with numerical gradient grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] # in this test, grads is very small - assert_almost_equal(numerical_grad, grads.item(), decimal=3) + 
assert_array_almost_equal(numerical_grad, grads.item(), decimal=3) From 7148a5d7d47bd5ac0a574f67d566123e965dc817 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 16:12:07 +0200 Subject: [PATCH 21/32] fix comments to the more generic parameter --- tests/physical_models/crop/test_leaf_dynamics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 83f86e5..8923a4f 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -360,14 +360,14 @@ def test_gradients_leaf_dynamics(self, param_name, param_value, out_name): output = model({param_name: param}) loss = output[out_name].sum() - # this is ∂loss/∂tdwi without calling loss.backward(). + # this is ∂loss/∂param without calling loss.backward(). # this is called forward gradient here because it is calculated without backpropagation. grads = torch.autograd.grad(loss, param, retain_graph=True)[0] assert grads is not None, "Gradients should not be None" param.grad = None # clear any existing gradient loss.backward() - # this is ∂loss/∂tdwi calculated using backpropagation + # this is ∂loss/∂param calculated using backpropagation grad_backward = param.grad assert grad_backward is not None, "Backward gradients should not be None" @@ -388,13 +388,13 @@ def test_gradients_leaf_dynamics_numerical(self, param_name, param_value, out_na param = torch.nn.Parameter(param_value) numerical_grad = calculate_numerical_grad( get_test_diff_leaf_model, param_name, param.data, out_name - ) # this is Δloss/Δtdwi + ) # this is Δloss/Δparam model = get_test_diff_leaf_model() output = model({param_name: param}) loss = output[out_name].sum() - # this is ∂loss/∂tdwi, for comparison with numerical gradient + # this is ∂loss/∂param, for comparison with numerical gradient grads = torch.autograd.grad(loss, param, retain_graph=True)[0] 
assert_array_almost_equal(numerical_grad, grads.data, decimal=3) From d7b311b2f4b74ddc1733c9eeb25f4ce2b72e3e4a Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Thu, 23 Oct 2025 16:21:53 +0200 Subject: [PATCH 22/32] apply ruff format --- .../crop/test_leaf_dynamics.py | 57 +++++++++++-------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 8923a4f..b63e2a7 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -158,10 +158,13 @@ def test_leaf_dynamics_with_one_parameter_vector(self, param): for var, precision in expected_precision.items() ) - @pytest.mark.parametrize("param,delta", [ - ("TDWI", 0.1), - ("SPAN", 5), - ]) + @pytest.mark.parametrize( + "param,delta", + [ + ("TDWI", 0.1), + ("SPAN", 5), + ], + ) def test_leaf_dynamics_with_different_parameter_values(self, param, delta): # prepare model input test_data_path = phy_data_folder / "test_leafdynamics_wofost72_01.yaml" @@ -344,16 +347,19 @@ def test_wofost_pp_with_leaf_dynamics(self): class TestDiffLeafDynamics: - @pytest.mark.parametrize("param_name,param_value,out_name", [ - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), - ]) + @pytest.mark.parametrize( + "param_name,param_value,out_name", + [ + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], 
dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), + ("SPAN", torch.tensor(30, dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), + ], + ) def test_gradients_leaf_dynamics(self, param_name, param_value, out_name): model = get_test_diff_leaf_model() param = torch.nn.Parameter(param_value) @@ -373,16 +379,19 @@ def test_gradients_leaf_dynamics(self, param_name, param_value, out_name): assert grad_backward is not None, "Backward gradients should not be None" assert torch.all(grad_backward == grads), "Forward and backward gradients should match" - @pytest.mark.parametrize("param_name,param_value,out_name", [ - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), - ]) + @pytest.mark.parametrize( + "param_name,param_value,out_name", + [ + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), + ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), + ("SPAN", torch.tensor(30, dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), + ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), + ], + ) def 
test_gradients_leaf_dynamics_numerical(self, param_name, param_value, out_name): # first check if the numerical gradient isnot zero i.e. the parameter has an effect param = torch.nn.Parameter(param_value) From c952e0f197fae3d06815ce1b9c05e309da6143e7 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:13:38 +0200 Subject: [PATCH 23/32] upack some of the parametrized tests add one class per parameter --- .../crop/test_leaf_dynamics.py | 142 ++++++++++++++---- 1 file changed, 111 insertions(+), 31 deletions(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index b63e2a7..6222abb 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -2,7 +2,7 @@ from unittest.mock import patch import pytest import torch -from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_array_almost_equal, assert_array_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.leaf_dynamics import WOFOST_Leaf_Dynamics @@ -346,64 +346,144 @@ def test_wofost_pp_with_leaf_dynamics(self): ) -class TestDiffLeafDynamics: +class TestDiffLeafDynamicsTDWI: @pytest.mark.parametrize( - "param_name,param_value,out_name", + "param_value,out_name", [ - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), + (torch.tensor(0.2, dtype=torch.float64), "LAI"), + (torch.tensor(0.2, dtype=torch.float64), "TWLV"), + 
(torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), + (torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), ], ) - def test_gradients_leaf_dynamics(self, param_name, param_value, out_name): + def test_gradients_leaf_dynamics(self, param_value, out_name): model = get_test_diff_leaf_model() - param = torch.nn.Parameter(param_value) - output = model({param_name: param}) + tdwi = torch.nn.Parameter(param_value) + output = model({"TDWI": tdwi}) loss = output[out_name].sum() # this is ∂loss/∂param without calling loss.backward(). # this is called forward gradient here because it is calculated without backpropagation. - grads = torch.autograd.grad(loss, param, retain_graph=True)[0] + grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] assert grads is not None, "Gradients should not be None" - param.grad = None # clear any existing gradient + tdwi.grad = None # clear any existing gradient loss.backward() # this is ∂loss/∂param calculated using backpropagation - grad_backward = param.grad + grad_backward = tdwi.grad assert grad_backward is not None, "Backward gradients should not be None" assert torch.all(grad_backward == grads), "Forward and backward gradients should match" @pytest.mark.parametrize( - "param_name,param_value,out_name", + "param_value,out_name", [ - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor(0.2, dtype=torch.float64), "TWLV"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), - ("TDWI", torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor(30, dtype=torch.float64), "TWLV"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "LAI"), - ("SPAN", torch.tensor([25, 30, 35], dtype=torch.float64), "TWLV"), + (torch.tensor(0.2, dtype=torch.float64), "LAI"), + (torch.tensor(0.2, dtype=torch.float64), "TWLV"), + (torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "LAI"), + 
(torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64), "TWLV"), ], ) - def test_gradients_leaf_dynamics_numerical(self, param_name, param_value, out_name): + def test_gradients_leaf_dynamics_numerical(self, param_value, out_name): # first check if the numerical gradient isnot zero i.e. the parameter has an effect - param = torch.nn.Parameter(param_value) + tdwi = torch.nn.Parameter(param_value) numerical_grad = calculate_numerical_grad( - get_test_diff_leaf_model, param_name, param.data, out_name + get_test_diff_leaf_model, "TDWI", tdwi.data, out_name ) # this is Δloss/Δparam model = get_test_diff_leaf_model() - output = model({param_name: param}) + output = model({"TDWI": tdwi}) loss = output[out_name].sum() # this is ∂loss/∂param, for comparison with numerical gradient - grads = torch.autograd.grad(loss, param, retain_graph=True)[0] + grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] + + assert_array_almost_equal(numerical_grad, grads.data, decimal=3) + + +class TestDiffLeafDynamicsSPAN: + @pytest.mark.parametrize( + "param_value", + [torch.tensor(30, dtype=torch.float64), torch.tensor([25, 30, 35], dtype=torch.float64)], + ) + def test_gradients_lai_leaf_dynamics(self, param_value): + model = get_test_diff_leaf_model() + span = torch.nn.Parameter(param_value) + output = model({"SPAN": span}) + loss = output["LAI"].sum() + + # this is ∂loss/∂param without calling loss.backward(). + # this is called forward gradient here because it is calculated without backpropagation. 
+ grads = torch.autograd.grad(loss, span, retain_graph=True)[0] + assert grads is not None, "Gradients should not be None" + + span.grad = None # clear any existing gradient + loss.backward() + # this is ∂loss/∂param calculated using backpropagation + grad_backward = span.grad + + assert grad_backward is not None, "Backward gradients should not be None" + assert torch.all(grad_backward == grads), "Forward and backward gradients should match" + + @pytest.mark.parametrize( + "param_value", + [torch.tensor(30, dtype=torch.float64), torch.tensor([25, 30, 35], dtype=torch.float64)], + ) + def test_gradients_lai_leaf_dynamics_numerical(self, param_value): + # first check if the numerical gradient isnot zero i.e. the parameter has an effect + span = torch.nn.Parameter(param_value) + numerical_grad = calculate_numerical_grad( + get_test_diff_leaf_model, "SPAN", span.data, "LAI" + ) # this is Δloss/Δparam + + model = get_test_diff_leaf_model() + output = model({"SPAN": span}) + loss = output["LAI"].sum() + + # this is ∂loss/∂param, for comparison with numerical gradient + grads = torch.autograd.grad(loss, span, retain_graph=True)[0] + + assert_array_almost_equal(numerical_grad, grads.data, decimal=3) + + @pytest.mark.parametrize( + "param_value", + [torch.tensor(30, dtype=torch.float64), torch.tensor([25, 30, 35], dtype=torch.float64)], + ) + def test_gradients_twlv_leaf_dynamics(self, param_value): + model = get_test_diff_leaf_model() + span = torch.nn.Parameter(param_value) + output = model({"SPAN": span}) + loss = output["TWLV"].sum() + + # this is ∂loss/∂param without calling loss.backward(). + # this is called forward gradient here because it is calculated without backpropagation. 
+ grads = torch.autograd.grad(loss, span, retain_graph=True)[0] + assert grads is not None, "Gradients should not be None" + + span.grad = None # clear any existing gradient + loss.backward() + # this is ∂loss/∂param calculated using backpropagation + grad_backward = span.grad + + assert grad_backward is not None, "Backward gradients should not be None" + assert torch.all(grad_backward == grads), "Forward and backward gradients should match" + + @pytest.mark.parametrize( + "param_value", + [torch.tensor(30, dtype=torch.float64), torch.tensor([25, 30, 35], dtype=torch.float64)], + ) + def test_gradients_leaf_twlv_dynamics_numerical(self, param_value): + # first check if the numerical gradient isnot zero i.e. the parameter has an effect + span = torch.nn.Parameter(param_value) + numerical_grad = calculate_numerical_grad( + get_test_diff_leaf_model, "SPAN", span.data, "TWLV" + ) # this is Δloss/Δparam + + model = get_test_diff_leaf_model() + output = model({"SPAN": span}) + loss = output["TWLV"].sum() + + # this is ∂loss/∂param, for comparison with numerical gradient + grads = torch.autograd.grad(loss, span, retain_graph=True)[0] assert_array_almost_equal(numerical_grad, grads.data, decimal=3) From 74f0b1773af2119dfd180234fceb9ca56a246d49 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:14:35 +0200 Subject: [PATCH 24/32] readd check on gradient being zero for TWLV w.r.t. 
SPAN --- tests/physical_models/crop/test_leaf_dynamics.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 6222abb..4e58976 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -486,4 +486,5 @@ def test_gradients_leaf_twlv_dynamics_numerical(self, param_value): # this is ∂loss/∂param, for comparison with numerical gradient grads = torch.autograd.grad(loss, span, retain_graph=True)[0] + assert_array_equal(numerical_grad, 0.0) assert_array_almost_equal(numerical_grad, grads.data, decimal=3) From 85a5b55b4d7b8fa452fdb40655b764f88a98be9d Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:19:10 +0200 Subject: [PATCH 25/32] fix tests on gradient of RD w.r.t. TDWI --- .../crop/test_root_dynamics.py | 28 ++----------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/tests/physical_models/crop/test_root_dynamics.py b/tests/physical_models/crop/test_root_dynamics.py index cfffdf4..7e12d27 100644 --- a/tests/physical_models/crop/test_root_dynamics.py +++ b/tests/physical_models/crop/test_root_dynamics.py @@ -2,7 +2,7 @@ from unittest.mock import patch import pytest import torch -from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_array_almost_equal, assert_array_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.root_dynamics import WOFOST_Root_Dynamics @@ -155,20 +155,7 @@ def test_gradients_tdwi_rd_root_dynamics(self): rd = output["RD"] loss = rd.sum() - # this is ∂loss/∂tdwi without calling loss.backward(). - # this is called forward gradient here because it is calculated without backpropagation. 
- grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] - - assert grads is not None, "Gradients for TDWI should not be None" - - tdwi.grad = None # clear any existing gradient - loss.backward() - - # this is ∂loss/∂tdwi calculated using backpropagation - grad_backward = tdwi.grad - - assert grad_backward is not None, "Backward gradients for TDWI should not be None" - assert grad_backward == grads, "Forward and backward gradients for TDWI should match" + assert loss.grad_fn is None # tdwi does not contribute to rd def test_gradients_tdwi_rd_root_dynamics_numerical(self): tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float64)) @@ -177,16 +164,7 @@ def test_gradients_tdwi_rd_root_dynamics_numerical(self): get_test_diff_root_model, "TDWI", tdwi, output_name ) - model = get_test_diff_root_model() - output = model({"TDWI": tdwi}) - rd = output[output_name] - loss = rd.sum() - - # this is ∂loss/∂tdwi, for comparison with numerical gradient - grads = torch.autograd.grad(loss, tdwi, retain_graph=True)[0] - - # in this test, grads is very small - assert_array_almost_equal(numerical_grad, grads.item(), decimal=3) + assert_array_equal(numerical_grad, 0.0) # tdwi does not contribute to rd def test_gradients_tdwi_twrt_root_dynamics(self): # prepare model input From 8aaa76f5bc31f637e6294e450f732263b5ef71c8 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:25:44 +0200 Subject: [PATCH 26/32] lint fix of import statements --- tests/physical_models/crop/test_leaf_dynamics.py | 3 ++- tests/physical_models/crop/test_root_dynamics.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 4e58976..24a210c 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -2,7 +2,8 @@ from unittest.mock import patch import pytest import torch -from numpy.testing import 
assert_array_almost_equal, assert_array_equal +from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_array_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.leaf_dynamics import WOFOST_Leaf_Dynamics diff --git a/tests/physical_models/crop/test_root_dynamics.py b/tests/physical_models/crop/test_root_dynamics.py index 7e12d27..a214bdf 100644 --- a/tests/physical_models/crop/test_root_dynamics.py +++ b/tests/physical_models/crop/test_root_dynamics.py @@ -2,7 +2,8 @@ from unittest.mock import patch import pytest import torch -from numpy.testing import assert_array_almost_equal, assert_array_equal +from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_array_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.root_dynamics import WOFOST_Root_Dynamics From 6abf44ea9c34e0d5cd9d62e06ee9328f3b5ca482 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:27:38 +0200 Subject: [PATCH 27/32] add tolerance to test --- tests/physical_models/crop/test_leaf_dynamics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index 24a210c..ba2da09 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -487,5 +487,5 @@ def test_gradients_leaf_twlv_dynamics_numerical(self, param_value): # this is ∂loss/∂param, for comparison with numerical gradient grads = torch.autograd.grad(loss, span, retain_graph=True)[0] - assert_array_equal(numerical_grad, 0.0) + assert_array_almost_equal(numerical_grad, 0.0) assert_array_almost_equal(numerical_grad, grads.data, decimal=3) From 053f62f90c8207d33d4400068fa3e9772387c6f4 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:31:53 +0200 Subject: [PATCH 28/32] 
linting fix --- tests/physical_models/crop/test_leaf_dynamics.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index ba2da09..fc81e5a 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -3,7 +3,6 @@ import pytest import torch from numpy.testing import assert_array_almost_equal -from numpy.testing import assert_array_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.leaf_dynamics import WOFOST_Leaf_Dynamics From c03fb0668eea58a3dca993a3fe7c4fdb6610e494 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:37:01 +0200 Subject: [PATCH 29/32] replace comparison to use autogradient --- tests/physical_models/crop/test_leaf_dynamics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/physical_models/crop/test_leaf_dynamics.py b/tests/physical_models/crop/test_leaf_dynamics.py index fc81e5a..eded0c5 100644 --- a/tests/physical_models/crop/test_leaf_dynamics.py +++ b/tests/physical_models/crop/test_leaf_dynamics.py @@ -486,5 +486,5 @@ def test_gradients_leaf_twlv_dynamics_numerical(self, param_value): # this is ∂loss/∂param, for comparison with numerical gradient grads = torch.autograd.grad(loss, span, retain_graph=True)[0] - assert_array_almost_equal(numerical_grad, 0.0) + assert_array_almost_equal(grads.data, 0.0) assert_array_almost_equal(numerical_grad, grads.data, decimal=3) From 234a17cdfc571c40428fb4e40126da26be33f58b Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Fri, 24 Oct 2025 14:42:01 +0200 Subject: [PATCH 30/32] linting fix --- src/diffwofost/physical_models/crop/leaf_dynamics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/diffwofost/physical_models/crop/leaf_dynamics.py b/src/diffwofost/physical_models/crop/leaf_dynamics.py index b7956c7..8e24d1a 100644 
--- a/src/diffwofost/physical_models/crop/leaf_dynamics.py +++ b/src/diffwofost/physical_models/crop/leaf_dynamics.py @@ -8,7 +8,8 @@ from pcse.decorators import prepare_rates from pcse.decorators import prepare_states from pcse.traitlets import Any -from pcse.util import Afgen, AfgenTrait +from pcse.util import Afgen +from pcse.util import AfgenTrait DTYPE = torch.float64 # Default data type for tensors in this module From de555c22966414c30d3f31a0277cc0330fcff979 Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Mon, 27 Oct 2025 09:52:03 +0100 Subject: [PATCH 31/32] remove test on numerical gradient of twrt wrt rd (is zero) --- tests/physical_models/crop/test_root_dynamics.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/physical_models/crop/test_root_dynamics.py b/tests/physical_models/crop/test_root_dynamics.py index a214bdf..9404a4b 100644 --- a/tests/physical_models/crop/test_root_dynamics.py +++ b/tests/physical_models/crop/test_root_dynamics.py @@ -158,15 +158,6 @@ def test_gradients_tdwi_rd_root_dynamics(self): assert loss.grad_fn is None # tdwi does not contribute to rd - def test_gradients_tdwi_rd_root_dynamics_numerical(self): - tdwi = torch.nn.Parameter(torch.tensor(0.2, dtype=torch.float64)) - output_name = "RD" - numerical_grad = calculate_numerical_grad( - get_test_diff_root_model, "TDWI", tdwi, output_name - ) - - assert_array_equal(numerical_grad, 0.0) # tdwi does not contribute to rd - def test_gradients_tdwi_twrt_root_dynamics(self): # prepare model input model = get_test_diff_root_model() From 0bf92eb6da43a9f952ad88d43907ce748ff5e17b Mon Sep 17 00:00:00 2001 From: Francesco Nattino Date: Mon, 27 Oct 2025 09:55:15 +0100 Subject: [PATCH 32/32] remove unused import statement --- tests/physical_models/crop/test_root_dynamics.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/physical_models/crop/test_root_dynamics.py b/tests/physical_models/crop/test_root_dynamics.py index 9404a4b..7ae9893 100644 --- 
a/tests/physical_models/crop/test_root_dynamics.py +++ b/tests/physical_models/crop/test_root_dynamics.py @@ -3,7 +3,6 @@ import pytest import torch from numpy.testing import assert_array_almost_equal -from numpy.testing import assert_array_equal from pcse.engine import Engine from pcse.models import Wofost72_PP from diffwofost.physical_models.crop.root_dynamics import WOFOST_Root_Dynamics