Deprecate old pandas support #1530

Merged (8 commits) on Aug 31, 2017

5 changes: 0 additions & 5 deletions .travis.yml
@@ -76,11 +76,6 @@ before_install:
 install:
   - conda env create --file ci/requirements-$CONDA_ENV.yml
   - source activate test_env
-  # scipy should not have been installed, but it's included in older versions of
-  # the conda pandas package
-  - if [[ "$CONDA_ENV" == "py27-min" ]]; then
-      conda remove scipy;
-    fi
   - python setup.py install

script:
4 changes: 2 additions & 2 deletions ci/requirements-py27-min.yml
@@ -2,8 +2,8 @@ name: test_env
 dependencies:
   - python=2.7
   - pytest
-  - numpy==1.9.3
-  - pandas==0.15.0
+  - numpy==1.11
+  - pandas==0.18.0
   - pip:
     - coveralls
     - pytest-cov
4 changes: 2 additions & 2 deletions doc/installing.rst
@@ -7,8 +7,8 @@ Required dependencies
---------------------

- Python 2.7, 3.4, 3.5, or 3.6
-- `numpy <http://www.numpy.org/>`__ (1.7 or later)
-- `pandas <http://pandas.pydata.org/>`__ (0.15.0 or later)
+- `numpy <http://www.numpy.org/>`__ (1.11 or later)
+- `pandas <http://pandas.pydata.org/>`__ (0.18.0 or later)

Optional dependencies
---------------------
7 changes: 7 additions & 0 deletions doc/whats-new.rst
@@ -18,6 +18,13 @@ What's New
v0.9.7 (unreleased)
-------------------

+Backward Incompatible Changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Old numpy < 1.11 and pandas < 0.18 are no longer supported (:issue:`1512`).
+  By `Keisuke Fujii <https://github.com/fujiisoup>`_.
+
+
Enhancements
~~~~~~~~~~~~

2 changes: 1 addition & 1 deletion setup.py
@@ -35,7 +35,7 @@
    'Topic :: Scientific/Engineering',
]

-INSTALL_REQUIRES = ['numpy >= 1.7', 'pandas >= 0.15.0']
+INSTALL_REQUIRES = ['numpy >= 1.11', 'pandas >= 0.18.0']
TESTS_REQUIRE = ['pytest >= 2.7.1']

DESCRIPTION = "N-D labeled arrays and datasets in Python"
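For reference, a quick way to confirm an environment meets the new floor declared in INSTALL_REQUIRES (illustrative only, not part of this diff; it reuses the LooseVersion comparison style already present in the test suite):

    # Illustrative environment check against the new minimum versions.
    from distutils.version import LooseVersion

    import numpy as np
    import pandas as pd

    assert LooseVersion(np.__version__) >= LooseVersion('1.11'), np.__version__
    assert LooseVersion(pd.__version__) >= LooseVersion('0.18.0'), pd.__version__
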
7 changes: 3 additions & 4 deletions xarray/core/duck_array_ops.py
@@ -83,10 +83,10 @@ def isnull(data):
where = _dask_or_eager_func('where', n_array_args=3)
insert = _dask_or_eager_func('insert')
take = _dask_or_eager_func('take')
-broadcast_to = _dask_or_eager_func('broadcast_to', npcompat)
+broadcast_to = _dask_or_eager_func('broadcast_to')

concatenate = _dask_or_eager_func('concatenate', list_of_args=True)
-stack = _dask_or_eager_func('stack', npcompat, list_of_args=True)
+stack = _dask_or_eager_func('stack', list_of_args=True)

array_all = _dask_or_eager_func('all')
array_any = _dask_or_eager_func('any')
@@ -232,8 +232,7 @@ def f(values, axis=None, skipna=None, **kwargs):
std = _create_nan_agg_method('std', numeric_only=True)
var = _create_nan_agg_method('var', numeric_only=True)
median = _create_nan_agg_method('median', numeric_only=True)
-prod = _create_nan_agg_method('prod', numeric_only=True, np_compat=True,
-                              no_bottleneck=True)
+prod = _create_nan_agg_method('prod', numeric_only=True, no_bottleneck=True)
cumprod = _create_nan_agg_method('cumprod', numeric_only=True, np_compat=True,
                                 no_bottleneck=True, keep_dims=True)
cumsum = _create_nan_agg_method('cumsum', numeric_only=True, np_compat=True,
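For context on the dropped second argument: _dask_or_eager_func resolves a function name against dask.array or numpy at call time, and the extra npcompat argument previously redirected the eager path to the vendored backports. The sketch below illustrates the dispatch idea only; it is a simplified stand-in under assumed behaviour, not xarray's actual implementation:

    # Simplified sketch of dask-or-eager dispatch; not xarray's real code.
    # With numpy >= 1.11 required, names resolve against numpy directly,
    # so no compat-module argument is needed.
    import numpy as np

    def _dask_or_eager_func(name, n_array_args=1, list_of_args=False):
        def wrapper(*args, **kwargs):
            arrays = args[0] if list_of_args else args[:n_array_args]
            try:
                import dask.array as da
                if any(isinstance(a, da.Array) for a in arrays):
                    return getattr(da, name)(*args, **kwargs)
            except ImportError:
                pass
            return getattr(np, name)(*args, **kwargs)
        return wrapper

    broadcast_to = _dask_or_eager_func('broadcast_to')
    stack = _dask_or_eager_func('stack', list_of_args=True)
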
201 changes: 2 additions & 199 deletions xarray/core/npcompat.py
@@ -4,139 +4,11 @@
import numpy as np

try:
-    from numpy import broadcast_to, stack, nanprod, nancumsum, nancumprod
+    from numpy import nancumsum, nancumprod
except ImportError: # pragma: no cover
-    # Code copied from newer versions of NumPy (v1.10 to v1.12).
+    # Code copied from newer versions of NumPy (v1.12).
    # Used under the terms of NumPy's license, see licenses/NUMPY_LICENSE.

-    def _maybe_view_as_subclass(original_array, new_array):
-        if type(original_array) is not type(new_array):
-            # if input was an ndarray subclass and subclasses were OK,
-            # then view the result as that subclass.
-            new_array = new_array.view(type=type(original_array))
-            # Since we have done something akin to a view from original_array, we
-            # should let the subclass finalize (if it has it implemented, i.e., is
-            # not None).
-            if new_array.__array_finalize__:
-                new_array.__array_finalize__(original_array)
-        return new_array
-
-    def _broadcast_to(array, shape, subok, readonly):
-        shape = tuple(shape) if np.iterable(shape) else (shape,)
-        array = np.array(array, copy=False, subok=subok)
-        if not shape and array.shape:
-            raise ValueError('cannot broadcast a non-scalar to a scalar array')
-        if any(size < 0 for size in shape):
-            raise ValueError('all elements of broadcast shape must be non-'
-                             'negative')
-        broadcast = np.nditer(
-            (array,), flags=['multi_index', 'zerosize_ok', 'refs_ok'],
-            op_flags=['readonly'], itershape=shape, order='C').itviews[0]
-        result = _maybe_view_as_subclass(array, broadcast)
-        if not readonly and array.flags.writeable:
-            result.flags.writeable = True
-        return result
-
-    def broadcast_to(array, shape, subok=False):
-        """Broadcast an array to a new shape.
-
-        Parameters
-        ----------
-        array : array_like
-            The array to broadcast.
-        shape : tuple
-            The shape of the desired array.
-        subok : bool, optional
-            If True, then sub-classes will be passed-through, otherwise
-            the returned array will be forced to be a base-class array (default).
-
-        Returns
-        -------
-        broadcast : array
-            A readonly view on the original array with the given shape. It is
-            typically not contiguous. Furthermore, more than one element of a
-            broadcasted array may refer to a single memory location.
-
-        Raises
-        ------
-        ValueError
-            If the array is not compatible with the new shape according to NumPy's
-            broadcasting rules.
-
-        Examples
-        --------
-        >>> x = np.array([1, 2, 3])
-        >>> np.broadcast_to(x, (3, 3))
-        array([[1, 2, 3],
-               [1, 2, 3],
-               [1, 2, 3]])
-        """
-        return _broadcast_to(array, shape, subok=subok, readonly=True)
-
-    def stack(arrays, axis=0):
-        """
-        Join a sequence of arrays along a new axis.
-
-        .. versionadded:: 1.10.0
-
-        Parameters
-        ----------
-        arrays : sequence of ndarrays
-            Each array must have the same shape.
-        axis : int, optional
-            The axis along which the arrays will be stacked.
-
-        Returns
-        -------
-        stacked : ndarray
-            The stacked array has one more dimension than the input arrays.
-        See Also
-        --------
-        concatenate : Join a sequence of arrays along an existing axis.
-        split : Split array into a list of multiple sub-arrays of equal size.
-
-        Examples
-        --------
-        >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
-        >>> np.stack(arrays, axis=0).shape
-        (10, 3, 4)
-
-        >>> np.stack(arrays, axis=1).shape
-        (3, 10, 4)
-
-        >>> np.stack(arrays, axis=2).shape
-        (3, 4, 10)
-
-        >>> a = np.array([1, 2, 3])
-        >>> b = np.array([2, 3, 4])
-        >>> np.stack((a, b))
-        array([[1, 2, 3],
-               [2, 3, 4]])
-
-        >>> np.stack((a, b), axis=-1)
-        array([[1, 2],
-               [2, 3],
-               [3, 4]])
-        """
-        arrays = [np.asanyarray(arr) for arr in arrays]
-        if not arrays:
-            raise ValueError('need at least one array to stack')
-
-        shapes = set(arr.shape for arr in arrays)
-        if len(shapes) != 1:
-            raise ValueError('all input arrays must have the same shape')
-
-        result_ndim = arrays[0].ndim + 1
-        if not -result_ndim <= axis < result_ndim:
-            msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
-            raise IndexError(msg)
-        if axis < 0:
-            axis += result_ndim
-
-        sl = (slice(None),) * axis + (np.newaxis,)
-        expanded_arrays = [arr[sl] for arr in arrays]
-        return np.concatenate(expanded_arrays, axis=axis)

    def _replace_nan(a, val):
        """
        If `a` is of inexact type, make a copy of `a`, replace NaNs with
@@ -178,75 +50,6 @@ def _replace_nan(a, val):
        np.copyto(a, val, where=mask)
        return a, mask

-    def nanprod(a, axis=None, dtype=None, out=None, keepdims=0):
-        """
-        Return the product of array elements over a given axis treating Not a
-        Numbers (NaNs) as zero.
-
-        One is returned for slices that are all-NaN or empty.
-
-        .. versionadded:: 1.10.0
-
-        Parameters
-        ----------
-        a : array_like
-            Array containing numbers whose sum is desired. If `a` is not an
-            array, a conversion is attempted.
-        axis : int, optional
-            Axis along which the product is computed. The default is to compute
-            the product of the flattened array.
-        dtype : data-type, optional
-            The type of the returned array and of the accumulator in which the
-            elements are summed. By default, the dtype of `a` is used. An
-            exception is when `a` has an integer type with less precision than
-            the platform (u)intp. In that case, the default will be either
-            (u)int32 or (u)int64 depending on whether the platform is 32 or 64
-            bits. For inexact inputs, dtype must be inexact.
-        out : ndarray, optional
-            Alternate output array in which to place the result. The default
-            is ``None``. If provided, it must have the same shape as the
-            expected output, but the type will be cast if necessary. See
-            `doc.ufuncs` for details. The casting of NaN to integer can yield
-            unexpected results.
-        keepdims : bool, optional
-            If True, the axes which are reduced are left in the result as
-            dimensions with size one. With this option, the result will
-            broadcast correctly against the original `arr`.
-
-        Returns
-        -------
-        y : ndarray or numpy scalar
-
-        See Also
-        --------
-        numpy.prod : Product across array propagating NaNs.
-        isnan : Show which elements are NaN.
-
-        Notes
-        -----
-        Numpy integer arithmetic is modular. If the size of a product exceeds
-        the size of an integer accumulator, its value will wrap around and the
-        result will be incorrect. Specifying ``dtype=double`` can alleviate
-        that problem.
-
-        Examples
-        --------
-        >>> np.nanprod(1)
-        1
-        >>> np.nanprod([1])
-        1
-        >>> np.nanprod([1, np.nan])
-        1.0
-        >>> a = np.array([[1, 2], [3, np.nan]])
-        >>> np.nanprod(a)
-        6.0
-        >>> np.nanprod(a, axis=0)
-        array([ 3., 2.])
-
-        """
-        a, mask = _replace_nan(a, 1)
-        return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)

    def nancumsum(a, axis=None, dtype=None, out=None):
        """
        Return the cumulative sum of array elements over a given axis treating
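Every numpy release still supported ships the removed functions natively (the deleted docstrings note stack and nanprod arrived in numpy 1.10, and broadcast_to did as well; only nancumsum/nancumprod, added in numpy 1.12, keep a fallback). A short illustration of the direct calls that replace the vendored copies:

    # Direct numpy equivalents of the backports removed above.
    import numpy as np

    x = np.array([1, 2, 3])
    print(np.broadcast_to(x, (3, 3)))      # read-only broadcast view
    print(np.stack([x, -x], axis=-1))      # join along a new last axis
    print(np.nanprod([1.0, np.nan, 2.0]))  # NaN treated as 1 -> 2.0
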
4 changes: 2 additions & 2 deletions xarray/tests/test_computation.py
@@ -275,7 +275,7 @@ def test_apply_output_core_dimension():

    def stack_negative(obj):
        def func(x):
-            return xr.core.npcompat.stack([x, -x], axis=-1)
+            return np.stack([x, -x], axis=-1)
        result = apply_ufunc(func, obj, output_core_dims=[['sign']])
        if isinstance(result, (xr.Dataset, xr.DataArray)):
            result.coords['sign'] = [1, -1]
@@ -303,7 +303,7 @@ def func(x):

    def original_and_stack_negative(obj):
        def func(x):
-            return (x, xr.core.npcompat.stack([x, -x], axis=-1))
+            return (x, np.stack([x, -x], axis=-1))
        result = apply_ufunc(func, obj, output_core_dims=[[], ['sign']])
        if isinstance(result[1], (xr.Dataset, xr.DataArray)):
            result[1].coords['sign'] = [1, -1]
2 changes: 0 additions & 2 deletions xarray/tests/test_dataarray.py
@@ -1442,8 +1442,6 @@ def test_reduce(self):
        expected = DataArray(5, {'c': -999})
        self.assertDataArrayIdentical(expected, actual)

-    @pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'),
-                        reason='requires numpy version 1.10.0 or later')
    # skip due to bug in older versions of numpy.nanpercentile
    def test_quantile(self):
        for q in [0.25, [0.50], [0.25, 0.75]]:
2 changes: 0 additions & 2 deletions xarray/tests/test_dataset.py
@@ -2923,8 +2923,6 @@ def mean_only_one_axis(x, axis):
        with self.assertRaisesRegexp(TypeError, 'non-integer axis'):
            ds.reduce(mean_only_one_axis, ['x', 'y'])

-    @pytest.mark.skipif(LooseVersion(np.__version__) < LooseVersion('1.10.0'),
-                        reason='requires numpy version 1.10.0 or later')
    def test_quantile(self):

        ds = create_test_data(seed=123)
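The skipif guards removed above existed only to dodge a numpy.nanpercentile bug in releases before 1.10; with the minimum now at 1.11, the quantile tests can call it unconditionally. A minimal illustration with made-up data:

    # nanpercentile is reliable on numpy >= 1.11; no version-based skip needed.
    import numpy as np

    data = np.array([[1.0, np.nan], [3.0, 4.0]])
    print(np.nanpercentile(data, [25, 50, 75]))  # NaN ignored, not propagated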