116 changes: 3 additions & 113 deletions pysteps/io/nowcast_importers.py
@@ -72,6 +72,7 @@

from pysteps.decorators import postprocess_import
from pysteps.exceptions import MissingOptionalDependency, DataModelError
import xarray as xr

try:
import netCDF4
@@ -81,7 +82,6 @@
NETCDF4_IMPORTED = False


@postprocess_import(dtype="single")
def import_netcdf_pysteps(filename, onerror="warn", **kwargs):
"""
Read a nowcast or an ensemble of nowcasts from a NetCDF file conforming
@@ -119,122 +119,12 @@ def import_netcdf_pysteps(filename, onerror="warn", **kwargs):
onerror = onerror.lower()
if onerror not in ["warn", "raise"]:
raise ValueError("'onerror' keyword must be 'warn' or 'raise'.")

try:
ds = netCDF4.Dataset(filename, "r")

var_names = list(ds.variables.keys())

if "precip_intensity" in var_names:
precip = ds.variables["precip_intensity"]
unit = "mm/h"
accutime = None
transform = None
elif "precip_accum" in var_names:
precip = ds.variables["precip_accum"]
unit = "mm"
accutime = None
transform = None
elif "hourly_precip_accum" in var_names:
precip = ds.variables["hourly_precip_accum"]
unit = "mm"
accutime = 60.0
transform = None
elif "reflectivity" in var_names:
precip = ds.variables["reflectivity"]
unit = "dBZ"
accutime = None
transform = "dB"
else:
raise DataModelError(
"Non CF compilant file: "
"the netCDF file does not contain any "
"supported variable name.\n"
"Supported names: 'precip_intensity', 'hourly_precip_accum', "
"or 'reflectivity'\n"
"file: " + filename
)

precip = precip[...].squeeze().astype(float)

if isinstance(precip, np.ma.MaskedArray):
invalid_mask = np.ma.getmaskarray(precip)
precip = precip.data
precip[invalid_mask] = np.nan

metadata = {}

time_var = ds.variables["time"]
leadtimes = time_var[:] / 60.0 # minutes leadtime
metadata["leadtimes"] = leadtimes
timestamps = netCDF4.num2date(time_var[:], time_var.units)
metadata["timestamps"] = timestamps

if "polar_stereographic" in var_names:
vn = "polar_stereographic"

attr_dict = {}
for attr_name in ds.variables[vn].ncattrs():
attr_dict[attr_name] = ds[vn].getncattr(attr_name)

proj_str = _convert_grid_mapping_to_proj4(attr_dict)
metadata["projection"] = proj_str

# geodata
metadata["xpixelsize"] = abs(ds.variables["x"][1] - ds.variables["x"][0])
metadata["ypixelsize"] = abs(ds.variables["y"][1] - ds.variables["y"][0])

xmin = np.min(ds.variables["x"]) - 0.5 * metadata["xpixelsize"]
xmax = np.max(ds.variables["x"]) + 0.5 * metadata["xpixelsize"]
ymin = np.min(ds.variables["y"]) - 0.5 * metadata["ypixelsize"]
ymax = np.max(ds.variables["y"]) + 0.5 * metadata["ypixelsize"]

# TODO: this is only a quick solution
metadata["x1"] = xmin
metadata["y1"] = ymin
metadata["x2"] = xmax
metadata["y2"] = ymax

metadata["yorigin"] = "upper" # TODO: check this

# TODO: Read the metadata to the dictionary.
if (accutime is None) and (leadtimes.size > 1):
accutime = leadtimes[1] - leadtimes[0]
metadata["accutime"] = accutime
metadata["unit"] = unit
metadata["transform"] = transform
metadata["zerovalue"] = np.nanmin(precip)
if metadata["zerovalue"] == np.nanmax(precip):
metadata["threshold"] = metadata["zerovalue"]
else:
metadata["threshold"] = np.nanmin(precip[precip > metadata["zerovalue"]])

ds.close()

return precip, metadata
dataset = xr.open_dataset(filename)
return dataset
except Exception as er:
if onerror == "warn":
print("There was an error processing the file", er)
return None, None
else:
raise er


def _convert_grid_mapping_to_proj4(grid_mapping):
gm_keys = list(grid_mapping.keys())

# TODO: implement more projection types here
if grid_mapping["grid_mapping_name"] == "polar_stereographic":
proj_str = "+proj=stere"
proj_str += " +lon_0=%s" % grid_mapping["straight_vertical_longitude_from_pole"]
proj_str += " +lat_0=%s" % grid_mapping["latitude_of_projection_origin"]
if "standard_parallel" in gm_keys:
proj_str += " +lat_ts=%s" % grid_mapping["standard_parallel"]
if "scale_factor_at_projection_origin" in gm_keys:
proj_str += " +k_0=%s" % grid_mapping["scale_factor_at_projection_origin"]
proj_str += " +x_0=%s" % grid_mapping["false_easting"]
proj_str += " +y_0=%s" % grid_mapping["false_northing"]

return proj_str
else:
return None
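With this refactor, `import_netcdf_pysteps` returns the result of `xr.open_dataset` instead of a `(precip, metadata)` tuple. A minimal usage sketch, assuming a pysteps-style NetCDF nowcast file; the file name is a placeholder and the `precip_var` attribute convention is borrowed from the updated tests in this PR, not guaranteed by the importer itself:

```python
# Minimal sketch, not the canonical API: "nowcast.nc" is a placeholder path and
# the "precip_var" dataset attribute is an assumption taken from the updated
# tests; import_netcdf_pysteps itself simply returns xr.open_dataset(filename).
from pysteps.io.nowcast_importers import import_netcdf_pysteps

dataset = import_netcdf_pysteps("nowcast.nc", onerror="raise")

# The return value is an xarray.Dataset rather than a (precip, metadata) tuple.
precip_var = dataset.attrs.get("precip_var", "precip_intensity")
precip = dataset[precip_var].values  # plain NumPy array, e.g. (time, y, x)
print(precip.shape)
```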
2 changes: 1 addition & 1 deletion pysteps/pystepsrc
@@ -49,7 +49,7 @@
"fmi_geotiff": {
"root_path": "./radar/fmi/geotiff",
"path_fmt": "%Y%m%d",
"fn_pattern": "%Y%m%d%H%M_FINUTM.tif",
"fn_pattern": "%Y%m%d%H%M_FINUTM",
"fn_ext": "tif",
"importer": "geotiff",
"timestep": 5,
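The corrected entry keeps the file extension only in `fn_ext`, so `fn_pattern` no longer duplicates ".tif". A sketch of how the entry would typically be consumed, assuming the usual `pysteps.io.find_by_date` signature and a local copy of the FMI GeoTIFF archive:

```python
# Sketch of how the corrected data-source entry is typically consumed; assumes
# the usual pysteps.io.find_by_date signature and a local FMI GeoTIFF archive.
from datetime import datetime

import pysteps

source = pysteps.rcparams.data_sources["fmi_geotiff"]

# With ".tif" removed from fn_pattern, the extension now comes from fn_ext only,
# so the lookup resolves to 20160928/201609281600_FINUTM.tif (not ...tif.tif).
filenames, timestamps = pysteps.io.find_by_date(
    datetime(2016, 9, 28, 16, 0),
    source["root_path"],
    source["path_fmt"],
    source["fn_pattern"],
    source["fn_ext"],
    source["timestep"],
)
```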
20 changes: 18 additions & 2 deletions pysteps/tests/helpers.py
@@ -19,12 +19,14 @@
_reference_dates = dict()
_reference_dates["bom"] = datetime(2018, 6, 16, 10, 0)
_reference_dates["fmi"] = datetime(2016, 9, 28, 16, 0)
_reference_dates["fmi_geotiff"] = datetime(2016, 9, 28, 16, 0)
_reference_dates["knmi"] = datetime(2010, 8, 26, 0, 0)
_reference_dates["mch"] = datetime(2015, 5, 15, 16, 30)
_reference_dates["dwd"] = datetime(2025, 6, 4, 17, 0)
_reference_dates["opera"] = datetime(2018, 8, 24, 18, 0)
_reference_dates["saf"] = datetime(2018, 6, 1, 7, 0)
_reference_dates["mrms"] = datetime(2019, 6, 10, 0, 0)
_reference_dates["rmi"] = datetime(2021, 7, 4, 18, 5)


def assert_dataset_equivalent(dataset1: xr.Dataset, dataset2: xr.Dataset) -> None:
@@ -65,11 +67,14 @@ def get_precipitation_fields(
Get a precipitation field from the archive to be used as reference.

Source: bom
Reference time: 2018/06/16 10000 UTC
Reference time: 2018/06/16 1000 UTC

Source: fmi
Reference time: 2016/09/28 1600 UTC

Source: fmi_geotiff
Reference time: 2016/09/28 1600 UTC

Source: knmi
Reference time: 2010/08/26 0000 UTC

@@ -88,6 +93,9 @@
Source: mrms
Reference time: 2019/06/10 0000 UTC

Source: rmi
Reference time: 2021/07/04 1805 UTC

Parameters
----------

@@ -110,7 +118,7 @@
If it is a float, represents the length of the space window that is
used to upscale the fields.

source: {"bom", "fmi" , "knmi", "mch", "opera", "saf", "mrms"}, optional
source: {"bom", "fmi" , "fmi_geotiff", "knmi", "mch", "dwd", "opera", "saf", "mrms", "rmi"}, optional
Name of the data source to be used.

log_transform: bool
@@ -138,6 +146,10 @@
if source == "fmi":
pytest.importorskip("pyproj")

if source == "fmi_geotiff":
pytest.importorskip("pyproj")
pytest.importorskip("osgeo")

if source == "knmi":
pytest.importorskip("h5py")

@@ -156,6 +168,10 @@
if source == "mrms":
pytest.importorskip("pygrib")

if source == "rmi":
pytest.importorskip("rasterio")
pytest.importorskip("pyproj")

try:
date = _reference_dates[source]
except KeyError:
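A sketch mirroring the updated test modules, showing how the newly registered "rmi" reference case would be fetched through the helper; it assumes the pysteps example data for that source is available locally:

```python
# Mirrors the calls in the updated test modules; assumes the pysteps example
# data for the new "rmi" source is installed locally.
import pytest

from pysteps.tests.helpers import get_precipitation_fields

pytest.importorskip("rasterio")
pytest.importorskip("pyproj")

precip_dataset = get_precipitation_fields(
    num_prev_files=0,
    num_next_files=0,
    return_raw=True,
    metadata=True,
    source="rmi",
    log_transform=False,
)
precip_var = precip_dataset.attrs["precip_var"]
print(precip_dataset[precip_var].shape)
```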
7 changes: 6 additions & 1 deletion pysteps/tests/test_ensscores.py
@@ -7,9 +7,14 @@
from pysteps.tests.helpers import get_precipitation_fields
from pysteps.verification import ensscores

precip = get_precipitation_fields(num_next_files=10, return_raw=True)
precip_dataset = get_precipitation_fields(num_next_files=10, return_raw=True)
np.random.seed(42)

# XR: extract values from array because score functions are not xarray compatible
precip_var = precip_dataset.attrs["precip_var"]
precip = precip_dataset[precip_var].values


# rankhist
test_data = [
(precip[:10], precip[-1], None, True, 11),
58 changes: 31 additions & 27 deletions pysteps/tests/test_io_dwd_hdf5.py
@@ -2,27 +2,26 @@

import pytest

import pysteps
from pysteps.tests.helpers import smart_assert, get_precipitation_fields

pytest.importorskip("h5py")

# Test for RADOLAN RY product

precip_ry, metadata_ry = get_precipitation_fields(
precip_dataset = get_precipitation_fields(
num_prev_files=0,
num_next_files=0,
return_raw=False,
return_raw=True,
metadata=True,
source="dwd",
log_transform=False,
importer_kwargs=dict(qty="RATE"),
)

precip_var = precip_dataset.attrs["precip_var"]
precip_dataarray = precip_dataset[precip_var]


def test_io_import_dwd_hdf5_ry_shape():
"""Test the importer DWD HDF5."""
assert precip_ry.shape == (1200, 1100)
assert precip_dataarray.shape == (1, 1200, 1100)


# Test_metadata
@@ -36,29 +35,34 @@ def test_io_import_dwd_hdf5_ry_shape():

# List of (variable,expected,tolerance) tuples
test_ry_attrs = [
("projection", expected_proj, None),
("ll_lon", 3.566994635, 1e-10),
("ll_lat", 45.69642538, 1e-10),
("ur_lon", 18.73161645, 1e-10),
("ur_lat", 55.84543856, 1e-10),
("x1", -500.0, 1e-6),
("y1", -1199500.0, 1e-6),
("x2", 1099500.0, 1e-6),
("y2", 500.0, 1e-6),
("xpixelsize", 1000.0, 1e-10),
("xpixelsize", 1000.0, 1e-10),
("cartesian_unit", "m", None),
("yorigin", "upper", None),
("institution", "ORG:78,CTY:616,CMT:Deutscher Wetterdienst [email protected]", None),
("accutime", 5.0, 1e-10),
("unit", "mm/h", None),
("transform", None, None),
("zerovalue", 0.0, 1e-6),
("threshold", 0.12, 1e-6),
(precip_dataset.attrs["projection"], expected_proj, None),
(float(precip_dataset.lon.isel(x=0, y=0).values), 3.57220017, 1e-8),
(float(precip_dataset.lat.isel(x=0, y=0).values), 45.70099971, 1e-8),
(float(precip_dataset.lon.isel(x=-1, y=-1).values), 18.72270377, 1e-8),
(float(precip_dataset.lat.isel(x=-1, y=-1).values), 55.84175857, 1e-8),
(precip_dataset.x.isel(x=0).values, 0.0, 1e-3),
(precip_dataset.y.isel(y=0).values, -1199000.0, 1e-6),
(precip_dataset.x.isel(x=-1).values, 1099000.0, 1e-6),
(precip_dataset.y.isel(y=-1).values, 0.0, 1e-3),
(precip_dataset.x.attrs["stepsize"], 1000.0, 1e-10),
(precip_dataset.y.attrs["stepsize"], 1000.0, 1e-10),
(precip_dataset.x.attrs["units"], "m", None),
(precip_dataset.y.attrs["units"], "m", None),
(
precip_dataset.attrs["institution"],
"ORG:78,CTY:616,CMT:Deutscher Wetterdienst [email protected]",
None,
),
(precip_dataarray.attrs["accutime"], 5.0, 1e-10),
(precip_dataset.time.attrs["stepsize"], 5.0, 1e-10),
(precip_dataarray.attrs["units"], "mm/h", None),
(precip_dataarray.attrs["transform"], None, None),
(precip_dataarray.attrs["zerovalue"], 0.0, 1e-6),
(precip_dataarray.attrs["threshold"], 0.12, 1e-6),
]


@pytest.mark.parametrize("variable, expected, tolerance", test_ry_attrs)
def test_io_import_dwd_hdf5_ry_metadata(variable, expected, tolerance):
"""Test the importer OPERA HDF5."""
smart_assert(metadata_ry[variable], expected, tolerance)
smart_assert(variable, expected, tolerance)
47 changes: 24 additions & 23 deletions pysteps/tests/test_io_fmi_geotiff.py
@@ -1,25 +1,25 @@
import os
# -*- coding: utf-8 -*-

import pytest

import pysteps
from pysteps.tests.helpers import smart_assert
from pysteps.tests.helpers import smart_assert, get_precipitation_fields

pytest.importorskip("pyproj")
pytest.importorskip("osgeo")

root_path = pysteps.rcparams.data_sources["fmi_geotiff"]["root_path"]
filename = os.path.join(
root_path,
"20160928",
"201609281600_FINUTM.tif",
precip_dataset = get_precipitation_fields(
num_prev_files=0,
num_next_files=0,
return_raw=True,
metadata=True,
source="fmi_geotiff",
log_transform=False,
)
precip, _, metadata = pysteps.io.import_fmi_geotiff(filename)

precip_var = precip_dataset.attrs["precip_var"]
precip_dataarray = precip_dataset[precip_var]


def test_io_import_fmi_geotiff_shape():
"""Test the shape of the read file."""
assert precip.shape == (7316, 4963)
assert precip_dataarray.shape == (1, 7316, 4963)


expected_proj = (
@@ -28,19 +28,20 @@ def test_io_import_fmi_geotiff_shape():

# test_geodata: list of (variable,expected,tolerance) tuples
test_geodata = [
("projection", expected_proj, None),
("x1", -196593.0043142295908183, 1e-10),
("x2", 1044176.9413554778, 1e-10),
("y1", 6255329.6988206729292870, 1e-10),
("y2", 8084432.005259146, 1e-10),
("xpixelsize", 250.0040188736061566, 1e-6),
("ypixelsize", 250.0139839309011904, 1e-6),
("cartesian_unit", "m", None),
("yorigin", "upper", None),
(precip_dataset.attrs["projection"], expected_proj, None),
(precip_dataset.attrs["institution"], "Finnish Meteorological Institute", None),
(precip_dataset.x.isel(x=0).values, -196468.00230479, 1e-10),
(precip_dataset.y.isel(y=0).values, 6255454.70581264, 1e-10),
(precip_dataset.x.isel(x=-1).values, 1044051.93934604, 1e-10),
(precip_dataset.y.isel(y=-1).values, 8084306.99826718, 1e-10),
(precip_dataset.x.attrs["stepsize"], 250.0040188736061566, 1e-10),
(precip_dataset.y.attrs["stepsize"], 250.0139839309011904, 1e-10),
(precip_dataset.x.attrs["units"], "m", None),
(precip_dataset.y.attrs["units"], "m", None),
]


@pytest.mark.parametrize("variable, expected, tolerance", test_geodata)
def test_io_import_fmi_pgm_geodata(variable, expected, tolerance):
"""Test the GeoTIFF and metadata reading."""
smart_assert(metadata[variable], expected, tolerance)
smart_assert(variable, expected, tolerance)
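The geodata checks now read dataset coordinates and their `stepsize` attributes instead of a metadata dictionary (`x1`, `x2`, `xpixelsize`, ...). A hedged sketch of how the old-style outer grid bounds could be recovered, continuing from the `precip_dataset` loaded above and assuming `x`/`y` hold cell-centre coordinates (consistent with the expected values in this test, e.g. -196468.002 - 250.004/2 ≈ -196593.004, the old `x1`):

```python
# Sketch only: continues from the precip_dataset loaded in the test above and
# assumes x/y hold cell-centre coordinates with "stepsize" giving the cell size.
xs = precip_dataset.x.values
ys = precip_dataset.y.values
dx = precip_dataset.x.attrs["stepsize"]
dy = precip_dataset.y.attrs["stepsize"]

x1, x2 = xs.min() - dx / 2, xs.max() + dx / 2  # old-style outer grid bounds
y1, y2 = ys.min() - dy / 2, ys.max() + dy / 2
```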