diff --git a/.gitignore b/.gitignore
index 90f4a10ed5f..686c7efa701 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,9 @@ __pycache__
 .hypothesis/
 
 # temp files from docs build
+doc/*.nc
 doc/auto_gallery
-doc/example.nc
+doc/rasm.zarr
 doc/savefig
 
 # C extensions
@@ -72,4 +73,3 @@ xarray/tests/data/*.grib.*.idx
 Icon*
 
 .ipynb_checkpoints
-doc/rasm.zarr
diff --git a/doc/conf.py b/doc/conf.py
index 93174c6aaec..bc56033891a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -28,9 +28,9 @@
 print("python exec:", sys.executable)
 print("sys.path:", sys.path)
 
-if "conda" in sys.executable:
+if "CONDA_DEFAULT_ENV" in os.environ or "conda" in sys.executable:
     print("conda environment:")
-    subprocess.run(["conda", "list"])
+    subprocess.run([os.environ.get("CONDA_EXE", "conda"), "list"])
 else:
     print("pip environment:")
     subprocess.run([sys.executable, "-m", "pip", "list"])
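The ``doc/conf.py`` hunk above makes the environment detection more robust: conda's shell activation typically exports ``CONDA_DEFAULT_ENV`` and ``CONDA_EXE``, so a conda environment is recognized even when ``sys.executable`` does not contain ``conda`` in its path (for example, a ``venv`` created inside a conda environment). A minimal standalone sketch of the same pattern, using only the standard library::

    import os
    import subprocess
    import sys

    # `conda activate` sets CONDA_DEFAULT_ENV; CONDA_EXE points at the conda
    # executable even when plain `conda` is not on PATH.
    if "CONDA_DEFAULT_ENV" in os.environ or "conda" in sys.executable:
        subprocess.run([os.environ.get("CONDA_EXE", "conda"), "list"])
    else:
        # Fall back to listing pip packages for non-conda environments.
        subprocess.run([sys.executable, "-m", "pip", "list"])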
diff --git a/doc/getting-started-guide/quick-overview.rst b/doc/getting-started-guide/quick-overview.rst
index 5bb5bb88ad3..bdd4e0df413 100644
--- a/doc/getting-started-guide/quick-overview.rst
+++ b/doc/getting-started-guide/quick-overview.rst
@@ -215,13 +215,15 @@ You can directly read and write xarray objects to disk using :py:meth:`~xarray.D
 
 .. ipython:: python
 
     ds.to_netcdf("example.nc")
-    xr.open_dataset("example.nc")
+    reopened = xr.open_dataset("example.nc")
+    reopened
 
 .. ipython:: python
     :suppress:
 
     import os
 
+    reopened.close()
     os.remove("example.nc")
 
diff --git a/doc/internals/zarr-encoding-spec.rst b/doc/internals/zarr-encoding-spec.rst
index 082d7984f59..6f8501a6b11 100644
--- a/doc/internals/zarr-encoding-spec.rst
+++ b/doc/internals/zarr-encoding-spec.rst
@@ -63,3 +63,10 @@ re-open it directly with Zarr:
     print(os.listdir("rasm.zarr"))
     print(zgroup.tree())
     dict(zgroup["Tair"].attrs)
+
+.. ipython:: python
+    :suppress:
+
+    import shutil
+
+    shutil.rmtree("rasm.zarr")
diff --git a/doc/user-guide/dask.rst b/doc/user-guide/dask.rst
index 4998cc68828..ebbbaa199f6 100644
--- a/doc/user-guide/dask.rst
+++ b/doc/user-guide/dask.rst
@@ -55,6 +55,8 @@ argument to :py:func:`~xarray.open_dataset` or using the
 .. ipython:: python
     :suppress:
 
+    import os
+
     import numpy as np
     import pandas as pd
     import xarray as xr
@@ -129,6 +131,11 @@ will return a ``dask.delayed`` object that can be computed later.
     with ProgressBar():
         results = delayed_obj.compute()
 
+.. ipython:: python
+    :suppress:
+
+    os.remove("manipulated-example-data.nc")  # This file was written but never opened.
+
 .. note::
 
     When using Dask's distributed scheduler to write NETCDF4 files,
@@ -147,13 +154,6 @@ A dataset can also be converted to a Dask DataFrame using :py:meth:`~xarray.Data
 Dask DataFrames do not support multi-indexes so the coordinate variables from
 the dataset are included as columns in the Dask DataFrame.
 
-.. ipython:: python
-    :suppress:
-
-    import os
-
-    os.remove("example-data.nc")
-    os.remove("manipulated-example-data.nc")
 
 Using Dask with xarray
 ----------------------
@@ -210,7 +210,7 @@ Dask arrays using the :py:meth:`~xarray.Dataset.persist` method:
 
 .. ipython:: python
 
-    ds = ds.persist()
+    persisted = ds.persist()
 
 :py:meth:`~xarray.Dataset.persist` is particularly useful when using a
 distributed cluster because the data will be loaded into distributed memory
@@ -232,11 +232,6 @@ chunk size depends both on your data and on the operations you want to perform.
 With xarray, both converting data to a Dask arrays and converting the chunk sizes
 of Dask arrays is done with the :py:meth:`~xarray.Dataset.chunk` method:
 
-.. ipython:: python
-    :suppress:
-
-    ds = ds.chunk({"time": 10})
-
 .. ipython:: python
 
     rechunked = ds.chunk({"latitude": 100, "longitude": 100})
@@ -508,6 +503,11 @@ Notice that the 0-shaped sizes were not printed to screen. Since ``template`` ha
     expected = ds + 10 + 10
     mapped.identical(expected)
 
+.. ipython:: python
+    :suppress:
+
+    ds.close()  # Closes "example-data.nc".
+    os.remove("example-data.nc")
 
 .. tip::
 
diff --git a/doc/user-guide/io.rst b/doc/user-guide/io.rst
index 16b8708231e..713b5d7382b 100644
--- a/doc/user-guide/io.rst
+++ b/doc/user-guide/io.rst
@@ -11,6 +11,8 @@ format (recommended).
 .. ipython:: python
     :suppress:
 
+    import os
+
     import numpy as np
     import pandas as pd
     import xarray as xr
@@ -84,6 +86,13 @@ We can load netCDF files to create a new Dataset using
     ds_disk = xr.open_dataset("saved_on_disk.nc")
     ds_disk
 
+.. ipython:: python
+    :suppress:
+
+    # Close "saved_on_disk.nc", but retain the file until after closing or deleting
+    # other datasets that will refer to it.
+    ds_disk.close()
+
 Similarly, a DataArray can be saved to disk using the
 :py:meth:`DataArray.to_netcdf` method, and loaded
 from disk using the :py:func:`open_dataarray` function. As netCDF files
@@ -204,11 +213,6 @@ You can view this encoding information (among others) in the
 Note that all operations that manipulate variables other than indexing
 will remove encoding information.
 
-.. ipython:: python
-    :suppress:
-
-    ds_disk.close()
-
 
 .. _combining multiple files:
 
@@ -484,13 +488,13 @@ and currently raises a warning unless ``invalid_netcdf=True`` is set:
 
     da.to_netcdf("complex.nc", engine="h5netcdf", invalid_netcdf=True)
     # Reading it back
-    xr.open_dataarray("complex.nc", engine="h5netcdf")
+    reopened = xr.open_dataarray("complex.nc", engine="h5netcdf")
+    reopened
 
 .. ipython:: python
     :suppress:
 
-    import os
-
+    reopened.close()
     os.remove("complex.nc")
 
 .. warning::
@@ -723,16 +727,19 @@ To export just the dataset schema without the data itself, use the
 
     ds.to_dict(data=False)
 
-This can be useful for generating indices of dataset contents to expose to
-search indices or other automated data discovery tools.
-
 .. ipython:: python
     :suppress:
 
-    import os
-
+    # We're now done with the dataset named `ds`. Although the `with` statement closed
+    # the dataset, displaying the dataset unpickled from `ds` re-opened "saved_on_disk.nc",
+    # and it is `ds` (not the unpickled copy) that refers to the open file. Delete `ds`
+    # to close the file.
+    del ds
     os.remove("saved_on_disk.nc")
 
+This can be useful for generating indices of dataset contents to expose to
+search indices or other automated data discovery tools.
+
 .. _io.rasterio:
 
 Rasterio
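The relocated cleanup at the end of the ``io.rst`` hunk rests on a subtlety: unpickling a file-backed dataset re-opens the file it came from, and the diff's comment notes that the original object, not the unpickled copy, holds the open handle. A hedged, self-contained sketch of that behavior (hypothetical variable names; assumes xarray with a netCDF backend is installed)::

    import os
    import pickle

    import numpy as np
    import xarray as xr

    # Create and reopen a small file-backed dataset.
    xr.Dataset({"foo": ("x", np.arange(4))}).to_netcdf("saved_on_disk.nc")
    ds = xr.open_dataset("saved_on_disk.nc")

    unpickled = pickle.loads(pickle.dumps(ds))
    ds.close()
    unpickled.foo.values  # Accessing the data re-opens "saved_on_disk.nc" lazily.

    del ds, unpickled  # Drop every reference so the cached handle is released.
    os.remove("saved_on_disk.nc")  # Safe once nothing holds the file open.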
diff --git a/doc/user-guide/weather-climate.rst b/doc/user-guide/weather-climate.rst
index 893e7b50429..6db0396ee4e 100644
--- a/doc/user-guide/weather-climate.rst
+++ b/doc/user-guide/weather-climate.rst
@@ -218,13 +218,15 @@ For data indexed by a :py:class:`~xarray.CFTimeIndex` xarray currently supports:
 .. ipython:: python
 
     da.to_netcdf("example-no-leap.nc")
-    xr.open_dataset("example-no-leap.nc")
+    reopened = xr.open_dataset("example-no-leap.nc")
+    reopened
 
 .. ipython:: python
     :suppress:
 
     import os
+    reopened.close()
     os.remove("example-no-leap.nc")
 
 - And resampling along the time dimension for data indexed by a
   :py:class:`~xarray.CFTimeIndex`:
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
index 93271cca3e4..f7249f192bd 100644
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
@@ -38,6 +38,9 @@ Bug fixes
 Documentation
 ~~~~~~~~~~~~~
 
+- Delete files of datasets saved to disk while building the documentation and enable
+  building on Windows via ``sphinx-build`` (:pull:`6237`).
+  By `Stan West <https://github.com/stanwest>`_.
 
 Internal Changes
 ~~~~~~~~~~~~~~~~
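As the new whats-new entry says, these close-before-delete pairs are what let the documentation build on Windows: unlike POSIX filesystems, Windows refuses to delete a file that is still held open, so ``os.remove`` raises ``PermissionError`` during cleanup. A minimal sketch of the failure mode the diff avoids (hypothetical filename; assumes xarray with a netCDF backend)::

    import os

    import numpy as np
    import xarray as xr

    xr.Dataset({"t": ("x", np.arange(3))}).to_netcdf("tmp-example.nc")
    reopened = xr.open_dataset("tmp-example.nc")

    # Removing the file here would raise PermissionError on Windows, because
    # `reopened` still holds it open.
    reopened.close()
    os.remove("tmp-example.nc")  # Succeeds on every platform once closed.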