TST: fix FIXMEs #44151

Merged · 4 commits · Oct 24, 2021

2 changes: 1 addition & 1 deletion pandas/core/dtypes/cast.py
@@ -978,7 +978,7 @@ def astype_dt64_to_dt64tz(
stacklevel=level,
)

# FIXME: GH#33401 this doesn't match DatetimeArray.astype, which
# GH#33401 this doesn't match DatetimeArray.astype, which
# goes through the `not via_utc` path
return values.tz_localize("UTC").tz_convert(dtype.tz)

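For context on the comment kept above: GH#33401 is about the mismatch between the naive→tz-aware conversion here and `DatetimeArray.astype`. A minimal sketch of the two behaviors the comment contrasts, using only public pandas calls rather than the internal code path:

```python
import pandas as pd

naive = pd.Series(pd.to_datetime(["2021-01-01 12:00"]))

# The branch above keeps the "via UTC" behavior: naive values are read as UTC
# wall times and then converted to the target zone.
via_utc = naive.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")

# DatetimeArray.astype goes through the `not via_utc` path, which (as I read
# the issue) amounts to localizing the same wall times directly in the target zone.
direct = naive.dt.tz_localize("US/Eastern")

print(via_utc[0])  # 2021-01-01 07:00:00-05:00
print(direct[0])   # 2021-01-01 12:00:00-05:00
```
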
2 changes: 1 addition & 1 deletion pandas/core/indexes/category.py
@@ -384,7 +384,7 @@ def fillna(self, value, downcast=None):
cat = self._data.fillna(value)
except (ValueError, TypeError):
# invalid fill_value
if not self.isna().any():
if not self.hasnans:
# nothing to fill, we can get away without casting
return self.copy()
return self.astype(object).fillna(value, downcast=downcast)
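
The `hasnans` swap is behavior-preserving: it asks the same question as `isna().any()` but through a cached property. A tiny sketch of the equivalence this fallback branch relies on:

```python
import pandas as pd

ci = pd.CategoricalIndex(["a", "b", None])

# Index.hasnans answers the same question as isna().any(), but is cached,
# so the invalid-fill_value branch above avoids recomputing the NA mask.
assert ci.hasnans == ci.isna().any()
assert not pd.CategoricalIndex(["a", "b"]).hasnans
```
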
2 changes: 1 addition & 1 deletion pandas/core/tools/timedeltas.py
@@ -145,7 +145,7 @@ def to_timedelta(arg, unit=None, errors="raise"):

def _coerce_scalar_to_timedelta_type(r, unit="ns", errors="raise"):
"""Convert string 'r' to a timedelta object."""
result: Timedelta | NaTType # TODO: alias?
result: Timedelta | NaTType

try:
result = Timedelta(r, unit)
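
The dropped `# TODO: alias?` note was about the `Timedelta | NaTType` annotation itself; the union is needed because the helper can hand back `NaT` when coercing. A quick illustration via the public `to_timedelta` wrapper:

```python
import pandas as pd

# A parseable scalar produces a Timedelta ...
print(pd.to_timedelta("1 day"))  # Timedelta('1 days 00:00:00')

# ... while errors="coerce" turns unparseable input into NaT instead of raising,
# which is why the result is annotated as Timedelta | NaTType.
print(pd.to_timedelta("garbage", errors="coerce"))  # NaT
```
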
6 changes: 2 additions & 4 deletions pandas/tests/arrays/boolean/test_logical.py
@@ -38,10 +38,8 @@ def test_empty_ok(self, all_logical_operators):
result = getattr(a, op_name)(False)
tm.assert_extension_array_equal(a, result)

# FIXME: dont leave commented-out
# TODO: pd.NA
# result = getattr(a, op_name)(pd.NA)
# tm.assert_extension_array_equal(a, result)
result = getattr(a, op_name)(pd.NA)
tm.assert_extension_array_equal(a, result)

def test_logical_length_mismatch_raises(self, all_logical_operators):
op_name = all_logical_operators
11 changes: 7 additions & 4 deletions pandas/tests/extension/base/casting.py
@@ -1,6 +1,7 @@
import numpy as np
import pytest

from pandas.compat import np_version_under1p20
import pandas.util._test_decorators as td

import pandas as pd
@@ -30,10 +31,12 @@ def test_astype_object_frame(self, all_data):
assert isinstance(result._mgr.arrays[0], np.ndarray)
assert result._mgr.arrays[0].dtype == np.dtype(object)

# FIXME: these currently fail; dont leave commented-out
# check that we can compare the dtypes
# cmp = result.dtypes.equals(df.dtypes)
# assert not cmp.any()
# earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
# instead of returning False
if not np_version_under1p20:
# check that we can compare the dtypes
comp = result.dtypes == df.dtypes
assert not comp.any()

def test_tolist(self, data):
result = pd.Series(data).tolist()
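
The `np_version_under1p20` gate exists because the dtype comparison itself used to raise. A small standalone check of the numpy behavior the new comment describes (version boundary taken from the test, not re-verified here):

```python
import numpy as np

# numpy >= 1.20 returns False when a dtype is compared to a string it cannot
# interpret; older numpy raised TypeError, which is what the version gate guards.
try:
    print(np.dtype(np.int64) == "Int64")  # False on recent numpy
except TypeError:
    print("older numpy raises instead of returning False")
```
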
11 changes: 7 additions & 4 deletions pandas/tests/extension/test_sparse.py
@@ -17,6 +17,7 @@
import numpy as np
import pytest

from pandas.compat import np_version_under1p20
from pandas.errors import PerformanceWarning

from pandas.core.dtypes.common import is_object_dtype
@@ -374,10 +375,12 @@ def test_astype_object_frame(self, all_data):
result = df.astype(object)
assert is_object_dtype(result._mgr.arrays[0].dtype)

# FIXME: these currently fail; dont leave commented-out
# check that we can compare the dtypes
# comp = result.dtypes.equals(df.dtypes)
# assert not comp.any()
# earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
# instead of returning False
if not np_version_under1p20:
# check that we can compare the dtypes
comp = result.dtypes == df.dtypes
assert not comp.any()

def test_astype_str(self, data):
result = pd.Series(data[:5]).astype(str)
8 changes: 3 additions & 5 deletions pandas/tests/frame/indexing/test_indexing.py
@@ -576,13 +576,11 @@ def test_ix_multi_take(self):
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)

# FIXME: dont leave commented-out
""" #1321
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
"""

def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
57 changes: 22 additions & 35 deletions pandas/tests/generic/test_frame.py
@@ -87,18 +87,9 @@ def test_metadata_propagation_indiv_resample(self):
result = df.resample("1T")
self.check_metadata(df, result)

def test_metadata_propagation_indiv(self):
def test_metadata_propagation_indiv(self, monkeypatch):
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__

np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"])
DataFrame._metadata = ["filename"]
df1.filename = "fname1.csv"
df2.filename = "fname2.csv"

def finalize(self, other, method=None, **kwargs):

@@ -107,41 +98,37 @@ def finalize(self, other, method=None, **kwargs):
left, right = other.left, other.right
value = getattr(left, name, "") + "|" + getattr(right, name, "")
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ""))

return self

DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
assert result.filename == "fname1.csv|fname2.csv"

# concat
# GH 6927
DataFrame._metadata = ["filename"]
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab"))
df1.filename = "foo"

def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == "concat":
elif method == "concat":
value = "+".join(
[getattr(o, name) for o in other.objs if getattr(o, name, None)]
)
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
object.__setattr__(self, name, getattr(other, name, ""))

return self

DataFrame.__finalize__ = finalize
with monkeypatch.context() as m:
m.setattr(DataFrame, "_metadata", ["filename"])
m.setattr(DataFrame, "__finalize__", finalize)

np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"])
DataFrame._metadata = ["filename"]
df1.filename = "fname1.csv"
df2.filename = "fname2.csv"

result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
assert result.filename == "fname1.csv|fname2.csv"

result = pd.concat([df1, df1])
assert result.filename == "foo+foo"
# concat
# GH#6927
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab"))
df1.filename = "foo"

# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize # FIXME: use monkeypatch
result = pd.concat([df1, df1])
assert result.filename == "foo+foo"

def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
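
The rewrite leans on `monkeypatch.context()` so the patched `DataFrame` class attributes are restored even if an assertion fails, replacing the manual save/restore the old `# FIXME: use monkeypatch` pointed at. A minimal sketch of that pattern (a hypothetical standalone test, not part of this diff):

```python
from pandas import DataFrame


def test_patch_is_reverted(monkeypatch):
    # Inside the context the class attribute is patched ...
    with monkeypatch.context() as m:
        m.setattr(DataFrame, "_metadata", ["filename"])
        assert DataFrame._metadata == ["filename"]

    # ... and on exit pytest restores the original value automatically,
    # even when the body raises.
    assert "filename" not in DataFrame._metadata
```
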
23 changes: 9 additions & 14 deletions pandas/tests/generic/test_series.py
@@ -109,7 +109,7 @@ def test_metadata_propagation_indiv_resample(self):
result = ts.resample("1T").apply(lambda x: x.sum())
self.check_metadata(ts, result)

def test_metadata_propagation_indiv(self):
def test_metadata_propagation_indiv(self, monkeypatch):
# check that the metadata matches up on the resulting ops

ser = Series(range(3), range(3))
@@ -120,12 +120,6 @@ def test_metadata_propagation_indiv(self):
result = ser.T
self.check_metadata(ser, result)

_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ["name", "filename"]
ser.filename = "foo"
ser2.filename = "bar"

def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == "concat" and name == "filename":
@@ -142,12 +136,13 @@ def finalize(self, other, method=None, **kwargs):

return self

Series.__finalize__ = finalize
with monkeypatch.context() as m:
m.setattr(Series, "_metadata", ["name", "filename"])
m.setattr(Series, "__finalize__", finalize)

result = pd.concat([ser, ser2])
assert result.filename == "foo+bar"
assert result.name is None
ser.filename = "foo"
ser2.filename = "bar"

# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize # FIXME: use monkeypatch
result = pd.concat([ser, ser2])
assert result.filename == "foo+bar"
assert result.name is None
7 changes: 5 additions & 2 deletions pandas/tests/indexes/timedeltas/methods/test_insert.py
@@ -87,14 +87,17 @@ def test_insert_nat(self, null):
def test_insert_invalid_na(self):
idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx")

# FIXME: assert_index_equal fails if we pass a different
# instance of np.datetime64("NaT")
item = np.datetime64("NaT")
result = idx.insert(0, item)

expected = Index([item] + list(idx), dtype=object, name="idx")
tm.assert_index_equal(result, expected)

# Also works if we pass a different dt64nat object
item2 = np.datetime64("NaT")
result = idx.insert(0, item2)
tm.assert_index_equal(result, expected)

@pytest.mark.parametrize(
"item", [0, np.int64(0), np.float64(0), np.array(0), np.datetime64(456, "us")]
)
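
The extra assertion with a second `np.datetime64("NaT")` instance matters because NaT scalars are distinct objects that never compare equal, which is what the removed FIXME said used to trip up `assert_index_equal`. A small standalone illustration:

```python
import numpy as np

nat1 = np.datetime64("NaT")
nat2 = np.datetime64("NaT")

# Two separately constructed NaT scalars are different objects, and NaT never
# compares equal -- not even to itself -- so the test now has to show that
# index insertion and comparison handle a "different" NaT instance correctly.
print(nat1 is nat2)  # False
print(nat1 == nat2)  # False
```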