13 changes: 11 additions & 2 deletions pysteps/nowcasts/anvil.py
@@ -23,6 +23,7 @@
from scipy.ndimage import gaussian_filter
from pysteps import cascade, extrapolation
from pysteps.nowcasts.utils import nowcast_main_loop
from pysteps.nowcasts import utils as nowcast_utils
from pysteps.timeseries import autoregression
from pysteps import utils

@@ -184,7 +185,7 @@ def forecast(
starttime_init = time.time()

m, n = vil.shape[1:]
vil = vil.copy()
vil = vil[-(ar_order + 2) :].copy()

if rainrate is None and apply_rainrate_mask:
rainrate_mask = vil[-1, :] < 0.1
@@ -273,6 +274,10 @@ def worker(vil, i):
gamma[i, 0, :], gamma[i, 1, :]
)

nowcast_utils.print_corrcoefs(np.nanmean(gamma, axis=(-2, -1)))

# estimate the parameters of the ARI models
phi = []
for i in range(n_cascade_levels):
@@ -284,6 +289,10 @@ def worker(vil, i):
phi_ = _estimate_ar1_params(gamma[i, :])
phi.append(phi_)

nowcast_utils.print_ar_params(np.nanmean(np.array(phi), axis=(-2, -1)))

vil_dec = vil_dec[:, -(ar_order + 1) :, :]

if measure_time:
@@ -339,7 +348,7 @@ def _check_inputs(vil, rainrate, velocity, timesteps, ar_order):
"rainrate.shape = %s, but a two-dimensional array expected"
% str(rainrate.shape)
)
if vil.shape[0] != ar_order + 2:
if vil.shape[0] < ar_order + 2:
raise ValueError(
"vil.shape[0] = %d, but vil.shape[0] = ar_order + 2 = %d required"
% (vil.shape[0], ar_order + 2)
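With the relaxed `_check_inputs` test and the new slicing above, `anvil.forecast` now accepts a VIL stack longer than `ar_order + 2` fields and uses only the most recent ones. A minimal sketch with made-up inputs (random fields and a zero motion field are placeholders, array sizes chosen for illustration):

```python
import numpy as np

from pysteps.nowcasts import anvil

ar_order = 2

# Six VIL fields, although only the last ar_order + 2 = 4 are needed.
# Before this change, vil.shape[0] != ar_order + 2 raised a ValueError.
vil = np.random.rand(6, 200, 200)
velocity = np.zeros((2, 200, 200))  # placeholder advection field (2, y, x)

# Internally the forecast keeps vil[-(ar_order + 2):] and proceeds as before.
precip_forecast = anvil.forecast(vil, velocity, timesteps=3, ar_order=ar_order)
print(precip_forecast.shape)  # (3, 200, 200)
```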
110 changes: 101 additions & 9 deletions pysteps/verification/detcatscores.py
@@ -18,9 +18,9 @@

import collections
import numpy as np
from pysteps.verification.spatialscores import smooth_fields, normalize_forecast_obs


def det_cat_fct(pred, obs, thr, scores="", axis=None):
def det_cat_fct(pred, obs, thr, scores="", axis=None, return_table=False):
"""
Calculate simple and skill scores for deterministic categorical
(dichotomous) forecasts.
@@ -94,7 +94,7 @@ def det_cat_fct(pred, obs, thr, scores="", axis=None):

contab = det_cat_fct_init(thr, axis)
det_cat_fct_accum(contab, pred, obs)
return det_cat_fct_compute(contab, scores)
return det_cat_fct_compute(contab, scores, return_table)


def det_cat_fct_init(thr, axis=None):
@@ -198,11 +198,22 @@ def det_cat_fct_accum(contab, pred, obs):
obs = obs[None, :]
axis = (0,)
axis = tuple([a for a in axis if a >= 0])


# Auto-detect binary inputs: unique values subset of {0, 1} or {False, True}
def is_binary(arr):
    vals = np.unique(arr[~np.isnan(arr)])
    return np.all(np.isin(vals, [0, 1, False, True]))

binary_input = is_binary(pred) and is_binary(obs)

# apply threshold
predb = pred > contab["thr"]
obsb = obs > contab["thr"]

if binary_input:
    predb = pred.astype(bool)
    obsb = obs.astype(bool)
else:
    predb = pred > contab["thr"]
    obsb = obs > contab["thr"]

# calculate hits, misses, false positives, correct rejects
H_idx = np.logical_and(predb == 1, obsb == 1)
F_idx = np.logical_and(predb == 1, obsb == 0)
@@ -263,7 +274,7 @@ def det_cat_fct_merge(contab_1, contab_2):
return contab


def det_cat_fct_compute(contab, scores=""):
def det_cat_fct_compute(contab, scores="", return_table=False):
"""
Compute simple and skill scores for deterministic categorical
(dichotomous) forecasts from a contingency table object.
@@ -332,8 +343,14 @@ def get_iterable(x):
M = 1.0 * contab["misses"] # false negatives
F = 1.0 * contab["false_alarms"] # false positives
R = 1.0 * contab["correct_negatives"] # true negatives

result = {}
if return_table:
    result["H"] = H
    result["M"] = M
    result["F"] = F
    result["R"] = R

for score in scores:
# catch None passed as score
if score is None:
@@ -401,3 +418,78 @@ def get_iterable(x):
result["F1"] = F1

return result


def det_cat_fct_spatial(
    pred, obs, thr, scales, scores="", axis=None, return_table=False, smooth_thr=0.01
):
    """
    Perform neighborhood-based categorical verification across thresholds and scales.

    Parameters
    ----------
    pred : np.ndarray
        Forecast array. Shape can be:
        - (n_members, n_leadtimes, y, x)
        - (n_leadtimes, y, x)
        - (n_members, y, x)
        - (y, x)
    obs : np.ndarray
        Observation array. Shape can be:
        - (n_leadtimes, y, x)
        - (y, x)
    thr : float or list of float
        Threshold(s) for binarization.
    scales : list of int
        Smoothing window sizes.
    scores : str or list of str, optional
        Score names to compute. If empty, all supported scores are used.
    axis : int or tuple of int, optional
        Axes over which to aggregate the contingency table.
    return_table : bool, optional
        If True, also return the raw contingency-table counts (H, M, F, R).
    smooth_thr : float, optional
        Threshold used to re-binarize the smoothed fields.

    Returns
    -------
    results : dict
        Dictionary of score arrays:
        results[score_name] = np.ndarray of shape
        (n_thresholds, n_scales, n_members, n_leadtimes)
    """
    thr = np.atleast_1d(thr)
    scales = np.atleast_1d(scales)

    # Normalize forecast and observation shapes
    pred, obs = normalize_forecast_obs(pred, obs)
    n_members, n_leadtimes, y, x = pred.shape
    n_thresholds = len(thr)
    n_scales = len(scales)

    results = None  # Delay initialization until the first score_dict is available

    # Loop over thresholds, scales, members, and lead times
    for i, thr_val in enumerate(thr):
        for s, scale in enumerate(scales):
            for m in range(n_members):
                for t in range(n_leadtimes):
                    fcst_bin = (pred[m, t] > thr_val).astype(float)
                    obs_bin = (obs[t] > thr_val).astype(float)

                    # Smooth and re-binarize
                    S_f, S_o = smooth_fields(fcst_bin, obs_bin, scale)
                    S_f_bin = S_f > smooth_thr
                    S_o_bin = S_o > smooth_thr

                    # Compute scores using det_cat_fct
                    score_dict = det_cat_fct(
                        S_f_bin, S_o_bin, thr_val, scores, axis, return_table
                    )

                    # Initialize the results dictionary on the first pass
                    if results is None:
                        results = {
                            key: np.empty(
                                (n_thresholds, n_scales, n_members, n_leadtimes)
                            )
                            for key in score_dict.keys()
                        }

                    # Store each score
                    for score_name, value in score_dict.items():
                        results[score_name][i, s, m, t] = value

    return results
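Together, the `return_table` flag, the binary-input detection in `det_cat_fct_accum`, and `det_cat_fct_spatial` enable neighborhood (fuzzy) categorical verification on top of the existing point scores. A hedged usage sketch with synthetic data; it assumes the `smooth_fields` and `normalize_forecast_obs` helpers imported from `pysteps.verification.spatialscores` behave as described in the docstring above:

```python
import numpy as np

from pysteps.verification.detcatscores import det_cat_fct, det_cat_fct_spatial

# Synthetic forecast and observation fields (e.g. rain rates in mm/h).
pred = np.random.gamma(1.0, 2.0, size=(10, 2, 64, 64))  # (members, leadtimes, y, x)
obs = np.random.gamma(1.0, 2.0, size=(2, 64, 64))  # (leadtimes, y, x)

# Point scores for one member and lead time; return_table=True also exposes the
# contingency-table counts under the keys "H", "M", "F" and "R".
single = det_cat_fct(pred[0, 0], obs[0], thr=1.0, scores=["csi", "pod"], return_table=True)
print(single["CSI"], single["POD"], single["H"], single["F"])

# Neighborhood verification over two thresholds and three window sizes.
results = det_cat_fct_spatial(pred, obs, thr=[0.5, 2.0], scales=[1, 5, 9], scores=["csi"])
print(results["CSI"].shape)  # per the docstring: (n_thresholds, n_scales, n_members, n_leadtimes) = (2, 3, 10, 2)
```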

27 changes: 21 additions & 6 deletions pysteps/verification/interface.py
@@ -87,6 +87,8 @@ def get_method(name, type="deterministic"):
+------------+--------------------------------------------------------+
| FSS | fractions skill score |
+------------+--------------------------------------------------------+
| FSS_ens | fractions skill score for ensemble forecast |
+------------+--------------------------------------------------------+
| SAL | Structure-Amplitude-Location score |
+------------+--------------------------------------------------------+

@@ -164,12 +166,12 @@ def get_method(name, type="deterministic"):
type = type.lower()

if type == "deterministic":
from .detcatscores import det_cat_fct
from .detcatscores import det_cat_fct, det_cat_fct_spatial
from .detcontscores import det_cont_fct
from .spatialscores import binary_mse, fss
from .spatialscores import binary_mse, fss, fss_ens
from .salscores import sal

# categorical
# categorical or spatial categorical
if name in [
"acc",
"bias",
@@ -184,12 +186,23 @@
"pod",
"sedi",
]:

def f(fct, obs, **kwargs):
return det_cat_fct(fct, obs, kwargs.pop("thr"), [name])

if "scales" in kwargs:
return det_cat_fct_spatial(
fct,
obs,
thr=kwargs.pop("thr", 0.0),
scales=kwargs.pop("scales"),
axis=kwargs.pop("axis", None),
scores=[name],
smooth_thr=kwargs.pop("smooth_thr", 0.0),
)[name.upper()]
else:
return det_cat_fct(fct, obs, kwargs.pop("thr"), [name])[name.upper()]

return f


# continuous
elif name in [
"beta",
@@ -217,6 +230,8 @@ def f(fct, obs, **kwargs):
return binary_mse
elif name == "fss":
return fss
elif name == "fss_ens":
return fss_ens
elif name == "sal":
return sal

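With this routing in place, the wrapper returned by `get_method` can produce either a point score or a neighborhood score, depending on whether a `scales` keyword is supplied. A sketch with synthetic fields; the exact shape of the spatial result for 2-D inputs depends on the new `normalize_forecast_obs` helper, which is not shown in this diff:

```python
import numpy as np

from pysteps.verification.interface import get_method

csi = get_method("csi", type="deterministic")

fct = np.random.gamma(1.0, 2.0, size=(64, 64))  # synthetic forecast field
obs = np.random.gamma(1.0, 2.0, size=(64, 64))  # synthetic observation field

# Point-wise categorical score (the wrapper now returns the value directly).
print(csi(fct, obs, thr=1.0))

# Passing `scales` routes the same call through det_cat_fct_spatial.
csi_fuzzy = csi(fct, obs, thr=1.0, scales=[1, 5, 9])
print(csi_fuzzy.shape)  # 4-D: (n_thresholds, n_scales, n_members, n_leadtimes)
```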