
ENH: add DataFrame.is_unique method #37565

Closed
wants to merge 7 commits into from
1 change: 1 addition & 0 deletions doc/source/reference/frame.rst
@@ -190,6 +190,7 @@ Reindexing / selection / label manipulation
DataFrame.head
DataFrame.idxmax
DataFrame.idxmin
DataFrame.is_unique
DataFrame.last
DataFrame.reindex
DataFrame.reindex_like
1 change: 1 addition & 0 deletions doc/source/whatsnew/v1.3.0.rst
@@ -57,6 +57,7 @@ Other enhancements
- :meth:`.Styler.set_tooltips_class` and :meth:`.Styler.set_table_styles` amended to optionally allow certain css-string input arguments (:issue:`39564`)
- :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None``. (:issue:`39359`)
- :meth:`Series.loc.__getitem__` and :meth:`Series.loc.__setitem__` with :class:`MultiIndex` now raising helpful error message when indexer has too many dimensions (:issue:`35349`)
- Added :meth:`DataFrame.is_unique` method for finding columns with unique values (:issue:`37565`)
- :meth:`pandas.read_stata` and :class:`StataReader` support reading data from compressed files.


2 changes: 1 addition & 1 deletion pandas/core/base.py
@@ -1108,7 +1108,7 @@ def nunique(self, dropna: bool = True) -> int:
    @property
    def is_unique(self) -> bool:
        """
        Return boolean if values in the object are unique.
        Return True if values in the object are unique, else False.

        Returns
        -------
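
For context, the Series-level property whose docstring is reworded above works like this (a quick sketch of existing pandas behaviour, not part of this diff):

>>> import pandas as pd
>>> pd.Series([1, 2, 3]).is_unique
True
>>> pd.Series([1, 2, 2]).is_unique
False
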
54 changes: 54 additions & 0 deletions pandas/core/frame.py
@@ -5372,6 +5372,59 @@ def drop_duplicates(
        else:
            return result

    def is_unique(
        self, subset: Optional[Union[Hashable, Sequence[Hashable]]] = None
    ) -> Series:
        """
        Return boolean Series denoting which columns have unique values.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        subset : column label or sequence of labels, optional
            Only check subset of columns for uniques. By default checks all columns.

        Returns
        -------
        Series

        See Also
        --------
        DataFrame.duplicated : Indicate duplicate rows.

        Examples
        --------
        >>> df = pd.DataFrame([('falcon', 'bird', 389.0),
        ...                    ('parrot', 'bird', 24.0),
        ...                    ('lion', 'mammal', 80.5),
        ...                    ('monkey', 'mammal', np.nan)],
        ...                   columns=('name', 'class', 'max_speed'))
        >>> df
             name   class  max_speed
        0  falcon    bird      389.0
        1  parrot    bird       24.0
        2    lion  mammal       80.5
        3  monkey  mammal        NaN
        >>> df.is_unique()
        name          True
        class        False
        max_speed     True
        dtype: bool
        >>> df.is_unique(["name", "class"])
        name      True
        class    False
        dtype: bool
        """
        if subset is not None:
            subset = com.maybe_make_list(subset)
            self = self[subset]

        if len(self.columns):
            return self.apply(lambda x: x.is_unique)
Contributor:
why does this not work on empties?

Contributor Author:
A dataframe with no columns doesn't call the inner func in apply (here lambda x: x.is_unique), so the output (empty) series has to guess its own dtype. It guesses float64, which is wrong, so we have to special-case it.
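
A minimal sketch of that behaviour (existing pandas behaviour, shown only for illustration):

>>> import pandas as pd
>>> pd.DataFrame().apply(lambda col: col.is_unique)
Series([], dtype: float64)
>>> pd.Series(dtype=bool)
Series([], dtype: bool)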

        else:
            return self._constructor_sliced(dtype=bool)

    def duplicated(
        self,
        subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
@@ -5405,6 +5458,7 @@ def duplicated(
        Series.duplicated : Equivalent method on Series.
        Series.drop_duplicates : Remove duplicate values from Series.
        DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
        DataFrame.is_unique : Indicate columns with unique values.

        Examples
        --------
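
As a usage note, the per-column check that the new method wraps can already be spelled with apply today; the sketch below (not part of the diff) mirrors the docstring example and is exactly what the implementation does for non-empty frames:

>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
...                    ('parrot', 'bird', 24.0),
...                    ('lion', 'mammal', 80.5),
...                    ('monkey', 'mammal', np.nan)],
...                   columns=('name', 'class', 'max_speed'))
>>> df.apply(lambda col: col.is_unique)  # equivalent to the proposed df.is_unique()
name          True
class        False
max_speed     True
dtype: bool
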
57 changes: 57 additions & 0 deletions pandas/tests/frame/methods/test_is_unique.py
@@ -0,0 +1,57 @@
import pytest

from pandas import DataFrame, Series
import pandas._testing as tm


@pytest.mark.parametrize(
    "frame, expected",
    [
        # single column
        [DataFrame(), Series(dtype=bool)],
        [DataFrame({"a": ["x"]}), Series({"a": True})],
        [DataFrame({"a": ["x", "y"]}), Series({"a": True})],
        [DataFrame({"a": ["x", "x"]}), Series({"a": False})],
        [DataFrame({"a": ["x", "y", "y"]}), Series({"a": False})],
        # multiple columns
        [DataFrame(columns=["a", "b"]), Series({"a": True, "b": True})],
        [DataFrame({"a": ["x"], "b": ["y"]}), Series({"a": True, "b": True})],
        [
            DataFrame({"a": ["x", "y"], "b": ["x", "x"]}),
            Series({"a": True, "b": False}),
        ],
        # multiple columns, same column name
        [DataFrame(columns=["a", "a"]), Series([True, True], index=["a", "a"])],
        [
            DataFrame([["x", "y"]], columns=["a", "a"]),
            Series([True, True], index=["a", "a"]),
        ],
        [
            DataFrame([["x", "y"], ["y", "y"]], columns=["a", "a"]),
            Series([True, False], index=["a", "a"]),
        ],
    ],
)
def test_is_unique(frame, expected):
    # GH37565
    result = frame.is_unique()
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "frame, subset, expected",
    [
        [DataFrame(columns=["a", "b"]), ["a"], Series({"a": True})],
        [DataFrame({"a": ["x"], "b": ["y"]}), "a", Series({"a": True})],
        [DataFrame({"a": ["x"], "b": ["y"]}), ["a"], Series({"a": True})],
        [
            DataFrame({"a": ["x", "y"], "b": ["x", "x"]}),
            ["a", "b"],
            Series({"a": True, "b": False}),
        ],
    ],
)
def test_is_unique_subsetting(frame, subset, expected):
    # GH37565
    result = frame.is_unique(subset=subset)
    tm.assert_series_equal(result, expected)