STY: De-privatize imported names #36235


Merged Sep 9, 2020 (2 commits; the diff below shows changes from 1 commit)
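
The change is mechanical throughout: names that are imported across module boundaries lose their leading underscore, so the spelling matches their actual cross-module use. A minimal before/after sketch (not part of the diff), using one of the renamed names below:

# Before: the leading underscore marked the mapping as module-private,
# even though pandas/core/computation/eval.py imported it anyway.
# from pandas.core.computation.engines import _engines

# After: the same object, spelled as a public module-level constant.
from pandas.core.computation.engines import ENGINES

print(sorted(ENGINES))  # ['numexpr', 'python']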
4 changes: 2 additions & 2 deletions pandas/_libs/interval.pyx
@@ -46,7 +46,7 @@ from pandas._libs.tslibs.util cimport (
is_timedelta64_object,
)

-_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
+VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])


cdef class IntervalMixin:
@@ -318,7 +318,7 @@ cdef class Interval(IntervalMixin):
self._validate_endpoint(left)
self._validate_endpoint(right)

-if closed not in _VALID_CLOSED:
+if closed not in VALID_CLOSED:
raise ValueError(f"invalid option for 'closed': {closed}")
if not left <= right:
raise ValueError("left side of interval must be <= right side")
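As a usage note (not part of the diff): the constant keeps its value under the new public name, so downstream validation code can import it directly. A minimal sketch, mirroring the membership test this file updates:

from pandas._libs.interval import VALID_CLOSED

def check_closed(closed: str) -> None:
    # Same check as Interval.__init__ in the hunk above.
    if closed not in VALID_CLOSED:
        raise ValueError(f"invalid option for 'closed': {closed}")

check_closed("left")    # passes silently
# check_closed("open")  # would raise ValueError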
4 changes: 2 additions & 2 deletions pandas/core/arrays/_arrow_utils.py
@@ -4,7 +4,7 @@
import numpy as np
import pyarrow

-from pandas.core.arrays.interval import _VALID_CLOSED
+from pandas.core.arrays.interval import VALID_CLOSED

_pyarrow_version_ge_015 = LooseVersion(pyarrow.__version__) >= LooseVersion("0.15")

@@ -83,7 +83,7 @@ class ArrowIntervalType(pyarrow.ExtensionType):
def __init__(self, subtype, closed):
# attributes need to be set first before calling
# super init (as that calls serialize)
-assert closed in _VALID_CLOSED
+assert closed in VALID_CLOSED
self._closed = closed
if not isinstance(subtype, pyarrow.DataType):
subtype = pyarrow.type_for_alias(str(subtype))
12 changes: 8 additions & 4 deletions pandas/core/arrays/interval.py
@@ -5,7 +5,12 @@

from pandas._config import get_option

-from pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds
+from pandas._libs.interval import (
+    VALID_CLOSED,
+    Interval,
+    IntervalMixin,
+    intervals_to_interval_bounds,
+)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender

@@ -42,7 +47,6 @@
from pandas.core.indexers import check_array_indexer
from pandas.core.indexes.base import ensure_index

-_VALID_CLOSED = {"left", "right", "both", "neither"}
_interval_shared_docs = {}

_shared_docs_kwargs = dict(
@@ -475,7 +479,7 @@ def _validate(self):
* left and right have the same missing values
* left is always below right
"""
-if self.closed not in _VALID_CLOSED:
+if self.closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {self.closed}"
raise ValueError(msg)
if len(self.left) != len(self.right):
@@ -1012,7 +1016,7 @@ def closed(self):
)
)
def set_closed(self, closed):
-if closed not in _VALID_CLOSED:
+if closed not in VALID_CLOSED:
msg = f"invalid option for 'closed': {closed}"
raise ValueError(msg)

2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/__init__.py
@@ -5,6 +5,6 @@
BlockIndex,
IntIndex,
SparseArray,
-_make_index,
+make_sparse_index,
)
from pandas.core.arrays.sparse.dtype import SparseDtype
4 changes: 2 additions & 2 deletions pandas/core/arrays/sparse/array.py
@@ -1556,15 +1556,15 @@ def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy
else:
indices = mask.nonzero()[0].astype(np.int32)

-index = _make_index(length, indices, kind)
+index = make_sparse_index(length, indices, kind)
sparsified_values = arr[mask]
if dtype is not None:
sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)
# TODO: copy
return sparsified_values, index, fill_value


-def _make_index(length, indices, kind):
+def make_sparse_index(length, indices, kind):

if kind == "block" or isinstance(kind, BlockIndex):
locs, lens = splib.get_blocks(indices)
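For illustration, a small sketch of the renamed helper in use; make_sparse_index is re-exported from pandas.core.arrays.sparse per the __init__.py change above. The printed attribute is an assumption about the returned SparseIndex objects, not something this diff shows:

import numpy as np
from pandas.core.arrays.sparse import make_sparse_index

# Positions 1, 2 and 5 of a length-10 array hold non-fill values.
indices = np.array([1, 2, 5], dtype=np.int32)

block_index = make_sparse_index(10, indices, kind="block")    # BlockIndex
int_index = make_sparse_index(10, indices, kind="integer")    # IntIndex
print(block_index.npoints, int_index.npoints)  # 3 3 (assumed)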
2 changes: 1 addition & 1 deletion pandas/core/computation/engines.py
@@ -130,7 +130,7 @@ def _evaluate(self) -> None:
pass


-_engines: Dict[str, Type[AbstractEngine]] = {
+ENGINES: Dict[str, Type[AbstractEngine]] = {
"numexpr": NumExprEngine,
"python": PythonEngine,
}
14 changes: 7 additions & 7 deletions pandas/core/computation/eval.py
@@ -9,8 +9,8 @@
from pandas._libs.lib import no_default
from pandas.util._validators import validate_bool_kwarg

-from pandas.core.computation.engines import _engines
-from pandas.core.computation.expr import Expr, _parsers
+from pandas.core.computation.engines import ENGINES
+from pandas.core.computation.expr import PARSERS, Expr
from pandas.core.computation.parsing import tokenize_string
from pandas.core.computation.scope import ensure_scope

@@ -43,8 +43,8 @@ def _check_engine(engine: Optional[str]) -> str:
if engine is None:
engine = "numexpr" if NUMEXPR_INSTALLED else "python"

-if engine not in _engines:
-valid_engines = list(_engines.keys())
+if engine not in ENGINES:
+valid_engines = list(ENGINES.keys())
raise KeyError(
f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
)
@@ -75,9 +75,9 @@ def _check_parser(parser: str):
KeyError
* If an invalid parser is passed
"""
-if parser not in _parsers:
+if parser not in PARSERS:
raise KeyError(
f"Invalid parser '{parser}' passed, valid parsers are {_parsers.keys()}"
f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
)


@@ -341,7 +341,7 @@ def eval(
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)

# construct the engine and evaluate the parsed expression
-eng = _engines[engine]
+eng = ENGINES[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()

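At the public-API level nothing changes: the engine and parser strings passed to pandas.eval are still resolved through these tables, now spelled ENGINES and PARSERS. A quick usage example:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# "python" must be a key of ENGINES and "pandas" a key of PARSERS;
# unknown names raise KeyError via _check_engine / _check_parser.
result = pd.eval("df.a + df.b", engine="python", parser="pandas")
print(result.tolist())  # [5, 7, 9]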
4 changes: 2 additions & 2 deletions pandas/core/computation/expr.py
@@ -782,7 +782,7 @@ def __init__(
self.env = env or Scope(level=level + 1)
self.engine = engine
self.parser = parser
-self._visitor = _parsers[parser](self.env, self.engine, self.parser)
+self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
self.terms = self.parse()

@property
@@ -814,4 +814,4 @@ def names(self):
return frozenset(term.name for term in com.flatten(self.terms))


-_parsers = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}
+PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}
4 changes: 2 additions & 2 deletions pandas/core/config_init.py
@@ -314,9 +314,9 @@ def use_numba_cb(key):


def table_schema_cb(key):
-from pandas.io.formats.printing import _enable_data_resource_formatter
+from pandas.io.formats.printing import enable_data_resource_formatter

-_enable_data_resource_formatter(cf.get_option(key))
+enable_data_resource_formatter(cf.get_option(key))


def is_terminal() -> bool:
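The callback runs when the table-schema display option is toggled; a short sketch of what triggers it (the option name comes from pandas' public options, not from this diff):

import pandas as pd

# Flipping the option invokes table_schema_cb, which now calls the
# public enable_data_resource_formatter to register or unregister
# the IPython table-schema repr.
pd.set_option("display.html.table_schema", True)
pd.set_option("display.html.table_schema", False)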
10 changes: 5 additions & 5 deletions pandas/core/groupby/generic.py
@@ -70,9 +70,9 @@
GroupBy,
_agg_template,
_apply_docs,
-_group_selection_context,
_transform_template,
get_groupby,
+group_selection_context,
)
from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba
from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same
@@ -230,7 +230,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
raise NotImplementedError(
"Numba engine can only be used with a single function."
)
-with _group_selection_context(self):
+with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
@@ -685,7 +685,7 @@ def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):

-from pandas.core.reshape.merge import _get_join_indexers
+from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut

if bins is not None and not np.iterable(bins):
@@ -787,7 +787,7 @@ def value_counts(

right = [diff.cumsum() - 1, codes[-1]]

-_, idx = _get_join_indexers(left, right, sort=False, how="left")
+_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)

if sort:
@@ -942,7 +942,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
raise NotImplementedError(
"Numba engine can only be used with a single function."
)
-with _group_selection_context(self):
+with group_selection_context(self):
data = self._selected_obj
result, index = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
20 changes: 10 additions & 10 deletions pandas/core/groupby/groupby.py
@@ -459,9 +459,9 @@ def f(self):


@contextmanager
-def _group_selection_context(groupby: "_GroupBy"):
+def group_selection_context(groupby: "_GroupBy"):
"""
-Set / reset the _group_selection_context.
+Set / reset the group_selection_context.
"""
groupby._set_group_selection()
try:
@@ -737,7 +737,7 @@ def pipe(self, func, *args, **kwargs):
def _make_wrapper(self, name: str) -> Callable:
assert name in self._apply_allowlist

-with _group_selection_context(self):
+with group_selection_context(self):
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._obj_with_exclusions, name)
@@ -868,7 +868,7 @@ def f(g):
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column

-with _group_selection_context(self):
+with group_selection_context(self):
return self._python_apply_general(f, self._selected_obj)

return result
@@ -994,7 +994,7 @@ def _agg_general(
alias: str,
npfunc: Callable,
):
-with _group_selection_context(self):
+with group_selection_context(self):
# try a cython aggregation if we can
try:
return self._cython_agg_general(
@@ -1499,7 +1499,7 @@ def var(self, ddof: int = 1):
)
else:
func = lambda x: x.var(ddof=ddof)
-with _group_selection_context(self):
+with group_selection_context(self):
return self._python_agg_general(func)

@Substitution(name="groupby")
@@ -1658,7 +1658,7 @@ def ohlc(self) -> DataFrame:

@doc(DataFrame.describe)
def describe(self, **kwargs):
-with _group_selection_context(self):
+with group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
@@ -1963,7 +1963,7 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra
nth_values = list(set(n))

nth_array = np.array(nth_values, dtype=np.intp)
-with _group_selection_context(self):
+with group_selection_context(self):

mask_left = np.in1d(self._cumcount_array(), nth_array)
mask_right = np.in1d(
@@ -2226,7 +2226,7 @@ def ngroup(self, ascending: bool = True):
5 0
dtype: int64
"""
-with _group_selection_context(self):
+with group_selection_context(self):
index = self._selected_obj.index
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
if not ascending:
@@ -2287,7 +2287,7 @@ def cumcount(self, ascending: bool = True):
5 0
dtype: int64
"""
-with _group_selection_context(self):
+with group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return self._obj_1d_constructor(cumcounts, index)
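For orientation only, a sketch of the renamed context manager in use. It is internal API, and the printed selection is an assumption about how _selected_obj behaves inside the context, not something this diff states:

import pandas as pd
from pandas.core.groupby.groupby import group_selection_context

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
gb = df.groupby("key")

# Inside the context the selected object excludes the grouping key,
# which the reductions above (describe, var, nth, ...) rely on.
with group_selection_context(gb):
    print(gb._selected_obj.columns.tolist())  # ['val'] (assumed)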
4 changes: 2 additions & 2 deletions pandas/core/indexes/base.py
@@ -3660,15 +3660,15 @@ def _join_multi(self, other, how, return_indexers=True):
return result

def _join_non_unique(self, other, how="left", return_indexers=False):
-from pandas.core.reshape.merge import _get_join_indexers
+from pandas.core.reshape.merge import get_join_indexers

# We only get here if dtypes match
assert self.dtype == other.dtype

lvalues = self._get_engine_target()
rvalues = other._get_engine_target()

-left_idx, right_idx = _get_join_indexers(
+left_idx, right_idx = get_join_indexers(
[lvalues], [rvalues], how=how, sort=True
)

1 change: 0 additions & 1 deletion pandas/core/indexes/interval.py
@@ -59,7 +59,6 @@
if TYPE_CHECKING:
from pandas import CategoricalIndex # noqa:F401

-_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)

_index_doc_kwargs.update(
4 changes: 2 additions & 2 deletions pandas/core/reshape/merge.py
@@ -859,7 +859,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):

def _get_join_indexers(self):
""" return the join indexers """
-return _get_join_indexers(
+return get_join_indexers(
self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
)

@@ -1298,7 +1298,7 @@ def _validate(self, validate: str):
raise ValueError("Not a valid argument for validate")


-def _get_join_indexers(
+def get_join_indexers(
left_keys, right_keys, sort: bool = False, how: str = "inner", **kwargs
):
"""
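A small sketch of the renamed join helper, matching the call sites in groupby/generic.py and indexes/base.py above; the keys are lists of arrays, and the exact output shown is an assumption:

import numpy as np
from pandas.core.reshape.merge import get_join_indexers

left = [np.array([1, 2, 3], dtype=np.int64)]
right = [np.array([2, 3, 4], dtype=np.int64)]

# Positional indexers into left/right; -1 marks a missing match,
# which the value_counts caller above exploits.
left_idx, right_idx = get_join_indexers(left, right, sort=False, how="left")
print(left_idx)   # [0 1 2]   (assumed)
print(right_idx)  # [-1 0 1]  (assumed)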
2 changes: 1 addition & 1 deletion pandas/io/formats/printing.py
@@ -243,7 +243,7 @@ def pprint_thing_encoded(
return value.encode(encoding, errors)


-def _enable_data_resource_formatter(enable: bool) -> None:
+def enable_data_resource_formatter(enable: bool) -> None:
if "IPython" not in sys.modules:
# definitely not in IPython
return