Skip to content

Fixing behaviour of T Model with zero DBH values #525

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 8 commits into
base: develop
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions docs/source/users/demography/t_model.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ The {meth}`~pyrealm.demography.tmodel.StemAllometry` class provides the
data for data exploration.

```{code-cell} ipython3
single_allometry.to_pandas()
single_allometry.to_pandas().transpose()
```

However, the DBH values can also be a column array (an `N` x 1 array). In this case, the
Expand All @@ -98,8 +98,8 @@ predictions arranged with each PFT as a column and each DBH prediction as a row.
makes them convenient to plot using `matplotlib`.

```{code-cell} ipython3
# Column array of DBH values from 0 to 1.6 metres
dbh_col = np.arange(0, 1.6, 0.01)[:, None]
# Column array of DBH values from 0.01 to 1.6 metres
dbh_col = np.arange(0.01, 1.6, 0.01)[:, None]
# Get the predictions
allometries = StemAllometry(stem_traits=flora, at_dbh=dbh_col)
```
Expand Down Expand Up @@ -135,7 +135,7 @@ The {meth}`~pyrealm.demography.core.PandasExporter.to_pandas()` method of the
the values are stacked into columns along with an index showing the different cohorts.

```{code-cell} ipython3
allometries.to_pandas()
allometries.to_pandas().transpose()
```

## Productivity allocation
Expand Down Expand Up @@ -181,7 +181,7 @@ The {meth}`~pyrealm.demography.core.PandasExporter.to_pandas()` method of the
export data for exploration.

```{code-cell} ipython3
single_allocation.to_pandas()
single_allocation.to_pandas().transpose()
```

A column array of potential GPP values can be used to predict multiple estimates of
Expand Down Expand Up @@ -286,5 +286,5 @@ As before, the {meth}`~pyrealm.demography.core.PandasExporter.to_pandas()` metho
the data for each stem:

```{code-cell} ipython3
allocation.to_pandas()
allocation.to_pandas().transpose()
```
38 changes: 30 additions & 8 deletions pyrealm/demography/community.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@
import json
import sys
import uuid
from dataclasses import dataclass, field
from dataclasses import InitVar, dataclass, field
from pathlib import Path
from typing import Any, ClassVar

Expand Down Expand Up @@ -173,15 +173,16 @@ class Cohorts(PandasExporter, CohortMethods):
count_attr: ClassVar[str] = "n_cohorts"

# Instance attributes
dbh_values: NDArray[np.float64]
n_individuals: NDArray[np.int_]
pft_names: NDArray[np.str_]
_cohort_id: NDArray[np.str_] = field(init=False)
_dbh_values: NDArray[np.float64] = field(init=False)
n_cohorts: int = field(init=False)
dbh_values: InitVar[NDArray[np.float64]]

__experimental__ = True

def __post_init__(self) -> None:
def __post_init__(self, dbh_values: NDArray[np.float64]) -> None:
"""Validation of cohorts data."""

# TODO - validation - maybe make this optional to reduce workload within
Expand All @@ -191,19 +192,23 @@ def __post_init__(self) -> None:

# Check cohort data types
if not (
isinstance(self.dbh_values, np.ndarray)
isinstance(dbh_values, np.ndarray)
and isinstance(self.n_individuals, np.ndarray)
and isinstance(self.pft_names, np.ndarray)
):
raise ValueError("Cohort data not passed as numpy arrays")

# Check the cohort inputs are of equal length
try:
check_input_shapes(self.dbh_values, self.n_individuals, self.dbh_values)
check_input_shapes(self.pft_names, self.n_individuals, dbh_values)
except ValueError:
raise ValueError("Cohort arrays are of unequal length")

self.n_cohorts = self.dbh_values.size
# Set the DBH values to trigger validation
setattr(self, "dbh_values", dbh_values)

# Additional attributes
self.n_cohorts = dbh_values.size
self._cohort_id = np.array([str(uuid.uuid4()) for _ in range(self.n_cohorts)])

@property
Expand All @@ -218,6 +223,23 @@ def cohort_id(self, values: NDArray[np.str_]) -> None:

self._cohort_id = values

# Ignoring this redefinition - the name is used above as an init-only argument that
# is then redefined as a class property with a setter enforcing strictly positive
# values (zero and negative DBH values are rejected).

@property # type: ignore [no-redef]
def dbh_values(self) -> NDArray[np.float64]:
"""The diameter at breast height of the cohorts (m)."""
return self._dbh_values

@dbh_values.setter
def dbh_values(self, values: NDArray[np.float64]) -> None:
"""Setter function for DBH values, enforcing strictly positive values."""
if np.any(values <= 0):
raise ValueError("DBH values must be strictly positive")

self._dbh_values = values


class CohortSchema(Schema):
"""A validation schema for Cohort data objects.
Expand Down Expand Up @@ -477,7 +499,7 @@ def __post_init__(

# Populate the stem allometry
self.stem_allometry = StemAllometry(
stem_traits=self.stem_traits, at_dbh=self.cohorts.dbh_values
stem_traits=self.stem_traits, at_dbh=self.cohorts._dbh_values
)

@classmethod
Expand Down Expand Up @@ -602,7 +624,7 @@ def add_cohorts(self, new_data: Cohorts) -> None:
self.stem_traits.add_cohort_data(new_data=new_stem_traits)

new_stem_allometry = StemAllometry(
stem_traits=new_stem_traits, at_dbh=new_data.dbh_values
stem_traits=new_stem_traits, at_dbh=new_data._dbh_values
)
self.stem_allometry.add_cohort_data(new_data=new_stem_allometry)

Expand Down
Loading