pandas/_libs/tslibs/timestamps.pyx (6 changes: 2 additions & 4 deletions)

@@ -36,7 +36,6 @@ from pandas._libs.tslibs.tzconversion import (
 # Constants
 _zero_time = datetime_time(0, 0)
 _no_input = object()
-PY36 = sys.version_info >= (3, 6)

 # ----------------------------------------------------------------------

@@ -982,9 +981,8 @@ default 'raise'
         else:
             kwargs = {'year': dts.year, 'month': dts.month, 'day': dts.day,
                       'hour': dts.hour, 'minute': dts.min, 'second': dts.sec,
-                      'microsecond': dts.us, 'tzinfo': _tzinfo}
-            if PY36:
-                kwargs['fold'] = fold
+                      'microsecond': dts.us, 'tzinfo': _tzinfo,
+                      'fold': fold}
             ts_input = datetime(**kwargs)

         ts = convert_datetime_to_tsobject(ts_input, _tzinfo)

pandas/compat/__init__.py (1 change: 0 additions & 1 deletion)

@@ -12,7 +12,6 @@
 import sys
 import warnings

-PY36 = sys.version_info >= (3, 6)
 PY37 = sys.version_info >= (3, 7)
 PY38 = sys.version_info >= (3, 8)
 PYPY = platform.python_implementation() == "PyPy"

pandas/core/common.py (11 changes: 2 additions & 9 deletions)

@@ -5,7 +5,7 @@
 """

 import collections
-from collections import OrderedDict, abc
+from collections import abc
 from datetime import datetime, timedelta
 from functools import partial
 import inspect

@@ -14,7 +14,6 @@
 import numpy as np

 from pandas._libs import lib, tslibs
-from pandas.compat import PY36

 from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
 from pandas.core.dtypes.common import (

@@ -216,13 +215,7 @@ def try_sort(iterable):


 def dict_keys_to_ordered_list(mapping):
-    # when pandas drops support for Python < 3.6, this function
-    # can be replaced by a simple list(mapping.keys())
-    if PY36 or isinstance(mapping, OrderedDict):
-        keys = list(mapping.keys())
-    else:
-        keys = try_sort(mapping)
-    return keys
+    return list(mapping.keys())


 def asarray_tuplesafe(values, dtype=None):

pandas/core/dtypes/common.py (7 changes: 1 addition & 6 deletions)

@@ -6,7 +6,6 @@

 from pandas._libs import algos, lib
 from pandas._libs.tslibs import conversion
-from pandas.compat import PY36

 from pandas.core.dtypes.dtypes import (
     CategoricalDtype,

@@ -1278,11 +1277,7 @@ def _is_unorderable_exception(e: TypeError) -> bool:
     boolean
         Whether or not the exception raised is an unorderable exception.
     """
-
-    if PY36:
-        return "'>' not supported between instances of" in str(e)
-
-    return "unorderable" in str(e)
+    return "'>' not supported between instances of" in str(e)

Review comment (Member): and update docstring here
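A possible docstring update along the lines the reviewer asks for (a sketch only, assuming the current docstring still references the pre-3.6 error message):

def _is_unorderable_exception(e: TypeError) -> bool:
    """
    Check if the exception raised is an unorderable exception.

    On Python >= 3.6, comparing unorderable types raises a TypeError whose
    message starts with "'>' not supported between instances of".

    Parameters
    ----------
    e : TypeError
        The exception to check.

    Returns
    -------
    boolean
        Whether or not the exception raised is an unorderable exception.
    """
    return "'>' not supported between instances of" in str(e)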



 def is_numeric_v_string_like(a, b):

pandas/core/frame.py (18 changes: 2 additions & 16 deletions)

@@ -34,7 +34,6 @@
 from pandas._config import get_option

 from pandas._libs import algos as libalgos, lib
-from pandas.compat import PY36
 from pandas.compat.numpy import function as nv
 from pandas.util._decorators import (
     Appender,

@@ -3548,21 +3547,8 @@ def assign(self, **kwargs):
         """
         data = self.copy()

-        # >= 3.6 preserve order of kwargs
-        if PY36:
-            for k, v in kwargs.items():
-                data[k] = com.apply_if_callable(v, data)
-        else:
-            # <= 3.5: do all calculations first...
-            results = OrderedDict()
-            for k, v in kwargs.items():
-                results[k] = com.apply_if_callable(v, data)
-
-            # <= 3.5 and earlier
-            results = sorted(results.items())
-            # ... and then assign
-            for k, v in results:
-                data[k] = v
+        for k, v in kwargs.items():
+            data[k] = com.apply_if_callable(v, data)
         return data

     def _sanitize_column(self, key, value, broadcast=True):

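The simplified loop relies on keyword arguments keeping their call order on Python >= 3.6 (PEP 468). A small illustrative sketch of the resulting behaviour (not part of the diff):

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
# Later keyword arguments can refer to columns created by earlier ones,
# because **kwargs is evaluated and assigned in call order.
out = df.assign(C=lambda d: d.A + d.B, D=lambda d: d.C * 2)
print(list(out.columns))  # ['A', 'B', 'C', 'D']
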
pandas/core/generic.py (5 changes: 1 addition & 4 deletions)

@@ -192,10 +192,7 @@ class NDFrame(PandasObject, SelectionMixin):
     _data = None  # type: BlockManager

     if TYPE_CHECKING:
-        # TODO(PY36): replace with _attrs : Dict[Hashable, Any]
-        # We need the TYPE_CHECKING, because _attrs is not a class attribute
-        # and Py35 doesn't support the new syntax.
-        _attrs = {}  # type: Dict[Optional[Hashable], Any]
+        _attrs = {}  # type: Dict[Hashable, Any]

Review comment (Member): I don't think the Optional should be removed. This looks like an inconsistency between the code and the TODO.

Review comment (Member): and the intent was to remove the TYPE_CHECKING block and use py3.6 syntax for the variable annotation.
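A sketch of what the reviewers appear to be suggesting, assuming the Optional is kept and the TYPE_CHECKING guard is replaced by a PEP 526 class-level annotation (illustrative, not the merged code):

from typing import Any, Dict, Hashable, Optional


class NDFrame:
    # A bare class-level annotation declares the attribute's type without
    # creating a class attribute, so the TYPE_CHECKING guard becomes
    # unnecessary on Python >= 3.6.
    _attrs: Dict[Optional[Hashable], Any]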


 # ----------------------------------------------------------------------
 # Constructors

pandas/core/groupby/generic.py (8 changes: 0 additions & 8 deletions)

@@ -28,7 +28,6 @@
 import numpy as np

 from pandas._libs import Timestamp, lib
-from pandas.compat import PY36
 from pandas.util._decorators import Appender, Substitution

 from pandas.core.dtypes.cast import (

@@ -233,10 +232,6 @@ def aggregate(self, func=None, *args, **kwargs):
         no_arg_message = "Must provide 'func' or named aggregation **kwargs."
         if relabeling:
             columns = list(kwargs)
-            if not PY36:
-                # sort for 3.5 and earlier
-                columns = list(sorted(columns))
-
             func = [kwargs[col] for col in columns]
             kwargs = {}
             if not columns:

@@ -1814,9 +1809,6 @@ def _normalize_keyword_aggregation(kwargs):
     >>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
     (OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
     """
-    if not PY36:
-        kwargs = OrderedDict(sorted(kwargs.items()))
-
     # Normalize the aggregation functions as Dict[column, List[func]],
     # process normally, then fixup the names.
     # TODO(Py35): When we drop python 3.5, change this to

pandas/core/internals/construction.py (9 changes: 2 additions & 7 deletions)

@@ -8,8 +8,6 @@
 import numpy.ma as ma

 from pandas._libs import lib
-import pandas.compat as compat
-from pandas.compat import PY36

 from pandas.core.dtypes.cast import (
     construct_1d_arraylike_from_scalar,

@@ -331,16 +329,13 @@ def extract_index(data):
     have_raw_arrays = False
     have_series = False
     have_dicts = False
-    have_ordered = False

     for val in data:
         if isinstance(val, ABCSeries):
             have_series = True
             indexes.append(val.index)
         elif isinstance(val, dict):
             have_dicts = True
-            if isinstance(val, OrderedDict):
-                have_ordered = True
             indexes.append(list(val.keys()))
         elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
             have_raw_arrays = True

@@ -352,7 +347,7 @@
     if have_series:
         index = _union_indexes(indexes)
     elif have_dicts:
-        index = _union_indexes(indexes, sort=not (compat.PY36 or have_ordered))
+        index = _union_indexes(indexes, sort=False)

     if have_raw_arrays:
         lengths = list(set(raw_lengths))

@@ -550,7 +545,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
     """
     if columns is None:
         gen = (list(x.keys()) for x in data)
-        types = (dict, OrderedDict) if PY36 else OrderedDict
+        types = (dict, OrderedDict)
         sort = not any(isinstance(d, types) for d in data)
         columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)

Review comment (Member): maybe update docstring and inline types

Review comment (Member): I think it's also worth inlining types now that types is a less complex expression and only used once.
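A sketch of the suggested inlining (hypothetical helper name; OrderedDict subclasses dict, so a single isinstance check against dict covers both):

from collections import OrderedDict


def keys_need_sorting(data):
    # Mirrors the decision in _list_of_dict_to_arrays: if any input mapping
    # is a dict (OrderedDict included), keep insertion order; otherwise sort
    # the union of keys.
    return not any(isinstance(d, dict) for d in data)


print(keys_need_sorting([{"b": 1, "a": 2}]))        # False -> keep insertion order
print(keys_need_sorting([OrderedDict(b=1, a=2)]))   # False as well
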
pandas/core/series.py (9 changes: 0 additions & 9 deletions)

@@ -1,7 +1,6 @@
 """
 Data structure for 1-dimensional cross-sectional and time series data
 """
-from collections import OrderedDict
 from io import StringIO
 from shutil import get_terminal_size
 from textwrap import dedent

@@ -13,7 +12,6 @@
 from pandas._config import get_option

 from pandas._libs import index as libindex, lib, reshape, tslibs
-from pandas.compat import PY36
 from pandas.compat.numpy import function as nv
 from pandas.util._decorators import Appender, Substitution, deprecate
 from pandas.util._validators import validate_bool_kwarg, validate_percentile

@@ -362,13 +360,6 @@ def _init_dict(self, data, index=None, dtype=None):
         # Now we just make sure the order is respected, if any
         if data and index is not None:
             s = s.reindex(index, copy=False)
-        elif not PY36 and not isinstance(data, OrderedDict) and data:
-            # Need the `and data` to avoid sorting Series(None, index=[...])
-            # since that isn't really dict-like
-            try:
-                s = s.sort_index()
-            except TypeError:
-                pass
         return s._data, s.index

     @classmethod
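The removed fallback only existed because pre-3.6 dicts had no guaranteed order; with the 3.6+ floor, Series construction from a plain dict keeps insertion order. A small illustrative sketch (not part of the diff):

import pandas as pd

# No sorting fallback is needed: a plain dict preserves insertion order on
# Python >= 3.6, so the resulting index follows the dict's key order.
s = pd.Series({"b": 1, "a": 2})
print(list(s.index))  # ['b', 'a']
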
pandas/io/pickle.py (6 changes: 2 additions & 4 deletions)

@@ -2,7 +2,7 @@
 import pickle
 import warnings

-from pandas.compat import PY36, pickle_compat as pc
+from pandas.compat import pickle_compat as pc

 from pandas.io.common import _get_handle, _stringify_path

@@ -140,9 +140,7 @@ def read_pickle(path, compression="infer"):
     # 1) try standard library Pickle
     # 2) try pickle_compat (older pandas version) to handle subclass changes

-    excs_to_catch = (AttributeError, ImportError)
-    if PY36:
-        excs_to_catch += (ModuleNotFoundError,)
+    excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError)

     try:
         with warnings.catch_warnings(record=True):

pandas/tests/extension/json/test_json.py (10 changes: 0 additions & 10 deletions)

@@ -3,8 +3,6 @@

 import pytest

-from pandas.compat import PY36
-
 import pandas as pd
 from pandas.tests.extension import base
 import pandas.util.testing as tm

@@ -180,9 +178,6 @@ def test_fillna_frame(self):


 unhashable = pytest.mark.skip(reason="Unhashable")
-unstable = pytest.mark.skipif(
-    not PY36, reason="Dictionary order unstable"  # 3.6 or higher
-)


 class TestReduce(base.BaseNoReduceTests):

@@ -199,20 +194,16 @@ def test_sort_values_frame(self):
         # TODO (EA.factorize): see if _values_for_factorize allows this.
         pass

-    @unstable
     def test_argsort(self, data_for_sorting):
         super().test_argsort(data_for_sorting)

-    @unstable
     def test_argsort_missing(self, data_missing_for_sorting):
         super().test_argsort_missing(data_missing_for_sorting)

-    @unstable
     @pytest.mark.parametrize("ascending", [True, False])
     def test_sort_values(self, data_for_sorting, ascending):
         super().test_sort_values(data_for_sorting, ascending)

-    @unstable
     @pytest.mark.parametrize("ascending", [True, False])
     def test_sort_values_missing(self, data_missing_for_sorting, ascending):
         super().test_sort_values_missing(data_missing_for_sorting, ascending)

@@ -280,7 +271,6 @@ def test_groupby_extension_apply(self):
         we'll be able to dispatch unique.
         """

-    @unstable
     @pytest.mark.parametrize("as_index", [True, False])
     def test_groupby_extension_agg(self, as_index, data_for_grouping):
         super().test_groupby_extension_agg(as_index, data_for_grouping)

pandas/tests/frame/test_constructors.py (17 changes: 2 additions & 15 deletions)

@@ -8,7 +8,7 @@
 import numpy.ma.mrecords as mrecords
 import pytest

-from pandas.compat import PY36, is_platform_little_endian
+from pandas.compat import is_platform_little_endian

 from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
 from pandas.core.dtypes.common import is_integer_dtype

@@ -387,7 +387,6 @@ def test_constructor_dict_nan_tuple_key(self, value):
         result = DataFrame(data, index=idx, columns=cols)
         tm.assert_frame_equal(result, expected)

-    @pytest.mark.skipif(not PY36, reason="Insertion order for Python>=3.6")
     def test_constructor_dict_order_insertion(self):
         datetime_series = tm.makeTimeSeries(nper=30)
         datetime_series_short = tm.makeTimeSeries(nper=25)

@@ -399,18 +398,6 @@ def test_constructor_dict_order_insertion(self):
         expected = DataFrame(data=d, columns=list("ba"))
         tm.assert_frame_equal(frame, expected)

-    @pytest.mark.skipif(PY36, reason="order by value for Python<3.6")
-    def test_constructor_dict_order_by_values(self):
-        datetime_series = tm.makeTimeSeries(nper=30)
-        datetime_series_short = tm.makeTimeSeries(nper=25)
-
-        # GH19018
-        # initialization ordering: by value if python<3.6
-        d = {"b": datetime_series_short, "a": datetime_series}
-        frame = DataFrame(data=d)
-        expected = DataFrame(data=d, columns=list("ab"))
-        tm.assert_frame_equal(frame, expected)
-
     def test_constructor_multi_index(self):
         # GH 4078
         # construction error with mi and all-nan frame

@@ -1373,7 +1360,7 @@ def test_constructor_list_of_dict_order(self):
             }
         )
         result = DataFrame(data)
-        tm.assert_frame_equal(result, expected, check_like=not PY36)
+        tm.assert_frame_equal(result, expected, check_like=False)

     def test_constructor_orient(self, float_string_frame):
         data_dict = float_string_frame.T._series

pandas/tests/frame/test_mutate_columns.py (26 changes: 1 addition & 25 deletions)

@@ -3,8 +3,6 @@
 import numpy as np
 import pytest

-from pandas.compat import PY36
-
 from pandas import DataFrame, Index, MultiIndex, Series
 import pandas.util.testing as tm

@@ -60,10 +58,7 @@ def test_assign_order(self):
         df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
         result = df.assign(D=df.A + df.B, C=df.A - df.B)

-        if PY36:
-            expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]], columns=list("ABDC"))
-        else:
-            expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]], columns=list("ABCD"))
+        expected = DataFrame([[1, 2, 3, -1], [3, 4, 7, -1]], columns=list("ABDC"))
         tm.assert_frame_equal(result, expected)
         result = df.assign(C=df.A - df.B, D=df.A + df.B)

@@ -80,25 +75,6 @@ def test_assign_bad(self):
         with pytest.raises(AttributeError):
             df.assign(C=df.A, D=df.A + df.C)

-    @pytest.mark.skipif(
-        PY36,
-        reason="""Issue #14207: valid for python
-    3.6 and above""",
-    )
-    def test_assign_dependent_old_python(self):
-        df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
-
-        # Key C does not exist at definition time of df
-        with pytest.raises(KeyError, match="^'C'$"):
-            df.assign(C=lambda df: df.A, D=lambda df: df["A"] + df["C"])
-        with pytest.raises(KeyError, match="^'C'$"):
-            df.assign(C=df.A, D=lambda x: x["A"] + x["C"])
-
-    @pytest.mark.skipif(
-        not PY36,
-        reason="""Issue #14207: not valid for
-    python 3.5 and below""",
-    )
     def test_assign_dependent(self):
         df = DataFrame({"A": [1, 2], "B": [3, 4]})