REF: collect methods by topic #36173

Merged (2 commits, Sep 7, 2020)
148 changes: 82 additions & 66 deletions pandas/core/arrays/categorical.py
@@ -393,56 +393,6 @@ def __init__(
self._dtype = self._dtype.update_dtype(dtype)
self._codes = coerce_indexer_dtype(codes, dtype.categories)

@property
def categories(self):
"""
The categories of this categorical.

Setting assigns new values to each category (effectively a rename of
each individual category).

The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.

Assigning to `categories` is an inplace operation!

Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is not equal to the number of old categories

See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories

@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype

@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered

@property
def dtype(self) -> CategoricalDtype:
"""
@@ -458,10 +408,6 @@ def _constructor(self) -> Type["Categorical"]:
def _from_sequence(cls, scalars, dtype=None, copy=False):
return Categorical(scalars, dtype=dtype)

def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None

def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
"""
Coerce this type to another dtype
@@ -640,6 +586,59 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None):

return cls(codes, dtype=dtype, fastpath=True)

# ------------------------------------------------------------------
# Categories/Codes/Ordered

@property
def categories(self):
"""
The categories of this categorical.

Setting assigns new values to each category (effectively a rename of
each individual category).

The assigned value has to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.

Assigning to `categories` is an inplace operation!

Raises
------
ValueError
If the new categories do not validate as categories or if the
number of new categories is not equal to the number of old categories

See Also
--------
rename_categories : Rename categories.
reorder_categories : Reorder categories.
add_categories : Add new categories.
remove_categories : Remove the specified categories.
remove_unused_categories : Remove categories which are not used.
set_categories : Set the categories to the specified ones.
"""
return self.dtype.categories

@categories.setter
def categories(self, categories):
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if self.dtype.categories is not None and len(self.dtype.categories) != len(
new_dtype.categories
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
self._dtype = new_dtype

@property
def ordered(self) -> Ordered:
"""
Whether the categories have an ordered relationship.
"""
return self.dtype.ordered

@property
def codes(self) -> np.ndarray:
"""
@@ -1104,6 +1103,8 @@ def remove_unused_categories(self, inplace=False):
if not inplace:
return cat

# ------------------------------------------------------------------

def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
@@ -1192,6 +1193,9 @@ def map(self, mapper):
__le__ = _cat_compare_op(operator.le)
__ge__ = _cat_compare_op(operator.ge)

# -------------------------------------------------------------
# Validators; ideally these can be de-duplicated

def _validate_insert_value(self, value) -> int:
code = self.categories.get_indexer([value])
if (code == -1) and not (is_scalar(value) and isna(value)):
@@ -1241,6 +1245,8 @@ def _validate_fill_value(self, fill_value):
)
return fill_value

# -------------------------------------------------------------

def __array__(self, dtype=None) -> np.ndarray:
"""
The numpy array interface.
@@ -1758,6 +1764,10 @@ def __contains__(self, key) -> bool:
# ------------------------------------------------------------------
# Rendering Methods

def _formatter(self, boxed=False):
# Defer to CategoricalFormatter's formatter.
return None

def _tidy_repr(self, max_vals=10, footer=True) -> str:
"""
a short repr displaying only max_vals and an optional (but default
@@ -1987,7 +1997,9 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]:
result = dict(zip(categories, _result))
return result

# reduction ops #
# ------------------------------------------------------------------
# Reductions

def _reduce(self, name: str, skipna: bool = True, **kwargs):
func = getattr(self, name, None)
if func is None:
@@ -2090,6 +2102,9 @@ def mode(self, dropna=True):
codes = sorted(htable.mode_int64(ensure_int64(codes), dropna))
return self._constructor(values=codes, dtype=self.dtype, fastpath=True)

# ------------------------------------------------------------------
# ExtensionArray Interface

def unique(self):
"""
Return the ``Categorical`` which ``categories`` and ``codes`` are
@@ -2179,6 +2194,18 @@ def equals(self, other: object) -> bool:
return np.array_equal(self._codes, other_codes)
return False

@property
def _can_hold_na(self):
return True

@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import union_categoricals

return union_categoricals(to_concat)

# ------------------------------------------------------------------

def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
@@ -2217,17 +2244,6 @@ def describe(self):

return result

# Implement the ExtensionArray interface
@property
def _can_hold_na(self):
return True

@classmethod
def _concat_same_type(self, to_concat):
from pandas.core.dtypes.concat import union_categoricals

return union_categoricals(to_concat)

def isin(self, values) -> np.ndarray:
"""
Check whether `values` are contained in Categorical.
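The `categories` property and setter collected above keep their documented semantics: assigning a same-length list-like renames each category in place, while a length mismatch raises ValueError. A minimal usage sketch of that behavior (the example data is illustrative only, not part of this PR):

import pandas as pd

cat = pd.Categorical(["a", "b", "a", "c"])

# Same-length assignment renames the categories in place: a -> x, b -> y, c -> z.
cat.categories = ["x", "y", "z"]

# Assigning a different number of categories is rejected by the setter above.
try:
    cat.categories = ["only", "two"]
except ValueError as err:
    print(err)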
29 changes: 17 additions & 12 deletions pandas/core/indexes/category.py
@@ -433,11 +433,6 @@ def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.astype("object")

def _maybe_cast_indexer(self, key):
code = self.categories.get_loc(key)
code = self.codes.dtype.type(code)
return code

@doc(Index.where)
def where(self, cond, other=None):
# TODO: Investigate an alternative implementation with
@@ -537,6 +532,14 @@ def _reindex_non_unique(self, target):

return new_target, indexer, new_indexer

# --------------------------------------------------------------------
# Indexing Methods

def _maybe_cast_indexer(self, key):
code = self.categories.get_loc(key)
code = self.codes.dtype.type(code)
return code

@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
@@ -619,6 +622,15 @@ def _convert_arr_indexer(self, keyarr):
def _convert_index_indexer(self, keyarr):
return self._shallow_copy(keyarr)

@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side, kind):
if kind == "loc":
return label

return super()._maybe_cast_slice_bound(label, side, kind)

# --------------------------------------------------------------------

def take_nd(self, *args, **kwargs):
"""Alias for `take`"""
warnings.warn(
@@ -628,13 +640,6 @@ def take_nd(self, *args, **kwargs):
)
return self.take(*args, **kwargs)

@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side, kind):
if kind == "loc":
return label

return super()._maybe_cast_slice_bound(label, side, kind)

def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
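The indexing methods grouped together in pandas/core/indexes/category.py back label-based lookup on a CategoricalIndex: `_maybe_cast_indexer` converts a category label to its integer code, and `_maybe_cast_slice_bound` leaves `.loc` slice bounds as labels. A rough sketch of the user-facing behavior these helpers support (values invented for illustration):

import pandas as pd

idx = pd.CategoricalIndex(["a", "b", "c", "d"], ordered=True)
ser = pd.Series([10, 20, 30, 40], index=idx)

ser.loc["b"]        # the label is resolved to its category code internally
ser.loc["b":"d"]    # slice bounds are passed through as labels (kind == "loc")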
3 changes: 3 additions & 0 deletions pandas/core/indexes/datetimes.py
@@ -509,6 +509,9 @@ def snap(self, freq="S"):
dta = DatetimeArray(snapped, dtype=self.dtype)
return DatetimeIndex._simple_new(dta, name=self.name)

# --------------------------------------------------------------------
# Indexing Methods

def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
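The new "Indexing Methods" block in pandas/core/indexes/datetimes.py starts with `_parsed_string_to_bounds`, the helper behind partial-string indexing: a string such as "2020-01" is parsed at month resolution and widened to the first and last timestamps of that month. A hedged illustration of the behavior it enables (dates chosen only for the example):

import pandas as pd

ser = pd.Series(range(60), index=pd.date_range("2020-01-01", periods=60, freq="D"))

# "2020-01" expands to the bounds 2020-01-01 00:00:00 through
# 2020-01-31 23:59:59.999999999, selecting the 31 January rows.
jan = ser["2020-01"]
print(len(jan))  # 31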