Skip to content

Backport PR #45212 on branch 1.4.x (CLN: suppress warnings) #45269

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion doc/source/whatsnew/v1.4.0.rst
Original file line number Diff line number Diff line change
Expand Up @@ -815,7 +815,7 @@ Indexing

Missing
^^^^^^^
- Bug in :meth:`DataFrame.fillna` with limit and no method ignores ``axis='columns'`` or ``axis = 1`` (:issue:`40989`)
- Bug in :meth:`DataFrame.fillna` with limit and no method ignores ``axis='columns'`` or ``axis = 1`` (:issue:`40989`, :issue:`17399`)
- Bug in :meth:`DataFrame.fillna` not replacing missing values when using a dict-like ``value`` and duplicate column names (:issue:`43476`)
- Bug in constructing a :class:`DataFrame` with a dictionary ``np.datetime64`` as a value and ``dtype='timedelta64[ns]'``, or vice-versa, incorrectly casting instead of raising (:issue:`44428`)
- Bug in :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` with ``inplace=True`` not writing to the underlying array(s) in-place (:issue:`44749`)
Expand Down
4 changes: 2 additions & 2 deletions pandas/_libs/lib.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -714,7 +714,7 @@ cpdef ndarray[object] ensure_string_array(
return out

arr = arr.to_numpy()
elif not isinstance(arr, np.ndarray):
elif not util.is_array(arr):
arr = np.array(arr, dtype="object")

result = np.asarray(arr, dtype="object")
Expand All @@ -729,7 +729,7 @@ cpdef ndarray[object] ensure_string_array(
continue

if not checknull(val):
if not isinstance(val, np.floating):
if not util.is_float_object(val):
# f"{val}" is faster than str(val)
result[i] = f"{val}"
else:
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/arrays/masked.py
Original file line number Diff line number Diff line change
Expand Up @@ -767,7 +767,7 @@ def _quantile(
We assume that all impacted cases are 1D-only.
"""
mask = np.atleast_2d(np.asarray(self.isna()))
npvalues = np.atleast_2d(np.asarray(self))
npvalues: np.ndarray = np.atleast_2d(np.asarray(self))

res = quantile_with_mask(
npvalues,
Expand Down
20 changes: 10 additions & 10 deletions pandas/core/dtypes/cast.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,7 +459,7 @@ def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
# TODO: ExtensionDtype.can_hold_na?
return dtype
elif dtype.kind == "b":
return np.dtype(object)
return _dtype_obj
elif dtype.kind in ["i", "u"]:
return np.dtype(np.float64)
return dtype
Expand Down Expand Up @@ -522,7 +522,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
# with object dtype there is nothing to promote, and the user can
# pass pretty much any weird fill_value they like
raise ValueError("fill_value must be a scalar")
dtype = np.dtype(object)
dtype = _dtype_obj
return dtype, fill_value

kinds = ["i", "u", "f", "c", "m", "M"]
Expand All @@ -532,7 +532,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
return dtype, fv

elif isna(fill_value):
dtype = np.dtype(object)
dtype = _dtype_obj
if fill_value is None:
# but we retain e.g. pd.NA
fill_value = np.nan
Expand All @@ -551,7 +551,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
# fv = dta._validate_setitem_value(fill_value)
# return dta.dtype, fv
# except (ValueError, TypeError):
# return np.dtype(object), fill_value
# return _dtype_obj, fill_value
if isinstance(fill_value, date) and not isinstance(fill_value, datetime):
# deprecate casting of date object to match infer_dtype_from_scalar
# and DatetimeArray._validate_setitem_value
Expand Down Expand Up @@ -699,7 +699,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj,
If False, scalar belongs to pandas extension types is inferred as
object
"""
dtype: DtypeObj = np.dtype(object)
dtype: DtypeObj = _dtype_obj

# a 1-element ndarray
if isinstance(val, np.ndarray):
Expand All @@ -718,13 +718,13 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj,
# instead of np.empty (but then you still don't want things
# coming out as np.str_!

dtype = np.dtype(object)
dtype = _dtype_obj

elif isinstance(val, (np.datetime64, datetime)):
try:
val = Timestamp(val)
except OutOfBoundsDatetime:
return np.dtype(object), val
return _dtype_obj, val

# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
Expand All @@ -736,13 +736,13 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> tuple[DtypeObj,
dtype = DatetimeTZDtype(unit="ns", tz=val.tz)
else:
# return datetimetz as object
return np.dtype(object), val
return _dtype_obj, val

elif isinstance(val, (np.timedelta64, timedelta)):
try:
val = Timedelta(val)
except (OutOfBoundsTimedelta, OverflowError):
dtype = np.dtype(object)
dtype = _dtype_obj
else:
dtype = np.dtype("m8[ns]")
val = np.timedelta64(val.value, "ns")
Expand Down Expand Up @@ -1911,7 +1911,7 @@ def construct_1d_arraylike_from_scalar(
try:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
except OutOfBoundsDatetime:
dtype = np.dtype(object)
dtype = _dtype_obj

if isinstance(dtype, ExtensionDtype):
cls = dtype.construct_array_type()
Expand Down
2 changes: 2 additions & 0 deletions pandas/core/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from typing import (
TYPE_CHECKING,
Hashable,
Sequence,
)
import warnings

Expand Down Expand Up @@ -2016,6 +2017,7 @@ def _ensure_iterable_column_indexer(self, column_indexer):
"""
Ensure that our column indexer is something that can be iterated over.
"""
ilocs: Sequence[int]
if is_integer(column_indexer):
ilocs = [column_indexer]
elif isinstance(column_indexer, slice):
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/internals/array_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -1203,7 +1203,7 @@ def make_empty(self, axes=None) -> SingleArrayManager:
"""Return an empty ArrayManager with index/array of length 0"""
if axes is None:
axes = [Index([], dtype=object)]
array = np.array([], dtype=self.dtype)
array: np.ndarray = np.array([], dtype=self.dtype)
return type(self)([array], axes)

@classmethod
Expand Down
6 changes: 2 additions & 4 deletions pandas/core/internals/managers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1090,14 +1090,12 @@ def iset(
# containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))

# Check if we can use _iset_single fastpath
loc = cast(int, loc)
blkno = self.blknos[loc]
blk = self.blocks[blkno]
if len(blk._mgr_locs) == 1: # TODO: fastest way to check this?
return self._iset_single(
# error: Argument 1 to "_iset_single" of "BlockManager" has
# incompatible type "Union[int, slice, ndarray[Any, Any]]";
# expected "int"
loc, # type:ignore[arg-type]
loc,
value,
inplace=inplace,
blkno=blkno,
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/reshape/reshape.py
Original file line number Diff line number Diff line change
Expand Up @@ -1025,7 +1025,7 @@ def get_empty_frame(data) -> DataFrame:
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
index = Index(range(len(data)))
return DataFrame(index=index)

# if all NaN
Expand All @@ -1035,7 +1035,7 @@ def get_empty_frame(data) -> DataFrame:
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
levels = levels.insert(len(levels), np.nan)

# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
Expand Down
5 changes: 5 additions & 0 deletions pandas/tests/frame/test_ufunc.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,6 +269,11 @@ def test_alignment_deprecation_many_inputs(request):
)
request.node.add_marker(mark)

mark = pytest.mark.filterwarnings(
"ignore:`np.MachAr` is deprecated.*:DeprecationWarning"
)
request.node.add_marker(mark)

@vectorize([float64(float64, float64, float64)])
def my_ufunc(x, y, z):
return x + y + z
Expand Down
3 changes: 3 additions & 0 deletions pandas/tests/io/test_parquet.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,9 @@
with catch_warnings():
# `np.bool` is a deprecated alias...
filterwarnings("ignore", "`np.bool`", category=DeprecationWarning)
# accessing pd.Int64Index in pd namespace
filterwarnings("ignore", ".*Int64Index.*", category=FutureWarning)

import fastparquet

_HAVE_FASTPARQUET = True
Expand Down
3 changes: 2 additions & 1 deletion pandas/tests/plotting/test_datetimelike.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
Index,
NaT,
Series,
concat,
isna,
to_datetime,
)
Expand Down Expand Up @@ -244,7 +245,7 @@ def test_fake_inferred_business(self):
_, ax = self.plt.subplots()
rng = date_range("2001-1-1", "2001-1-10")
ts = Series(range(len(rng)), index=rng)
ts = ts[:3].append(ts[5:])
ts = concat([ts[:3], ts[5:]])
ts.plot(ax=ax)
assert not hasattr(ax, "freq")

Expand Down
4 changes: 4 additions & 0 deletions pandas/tests/test_downstream.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,10 @@ def test_oo_optimized_datetime_index_unpickle():
# patsy needs to update their imports
"ignore:Using or importing the ABCs from 'collections:DeprecationWarning"
)
@pytest.mark.filterwarnings(
# numpy 1.22
"ignore:`np.MachAr` is deprecated.*:DeprecationWarning"
)
def test_statsmodels():

statsmodels = import_module("statsmodels") # noqa:F841
Expand Down
8 changes: 8 additions & 0 deletions pandas/util/_test_decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,14 @@ def safe_import(mod_name: str, min_version: str | None = None):
message=".*decorator is deprecated since Python 3.8.*",
)

# fastparquet import accesses pd.Int64Index
warnings.filterwarnings(
"ignore",
category=FutureWarning,
module="fastparquet",
message=".*Int64Index.*",
)

try:
mod = __import__(mod_name)
except ImportError:
Expand Down