diff --git a/pandas/conftest.py b/pandas/conftest.py
index 131a011c5a101..7463b2b579c0c 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -106,8 +106,8 @@ def axis(request):
@pytest.fixture(params=[0, "index"], ids=lambda x: f"axis {repr(x)}")
def axis_series(request):
"""
- Fixture for returning the axis numbers of a Series.
- """
+ Fixture for returning the axis numbers of a Series.
+ """
return request.param
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py
index be652ca0e6a36..22bc772da8f28 100644
--- a/pandas/core/computation/pytables.py
+++ b/pandas/core/computation/pytables.py
@@ -601,8 +601,7 @@ def __init__(self, value, converted, kind: str):
self.kind = kind
def tostring(self, encoding) -> str:
- """ quote the string if not encoded
- else encode and return """
+ """ quote the string if not encoded else encode and return """
if self.kind == "string":
if encoding is not None:
return str(self.converted)
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index be1b78eeb146e..e7a132b73e076 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -57,8 +57,9 @@ def get_sheet_data(self, sheet, convert_float):
epoch1904 = self.book.datemode
def _parse_cell(cell_contents, cell_typ):
- """converts the contents of the cell into a pandas
- appropriate object"""
+ """
+            converts the contents of the cell into a pandas-appropriate object
+ """
if cell_typ == XL_CELL_DATE:
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 149533bf0c238..35a6870c1194b 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -979,7 +979,7 @@ def to_html(
border : int
A ``border=border`` attribute is included in the opening
             ``<table>`` tag. Default ``pd.options.display.html.border``.
- """
+ """
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8bc8470ae7658..b1d032eb1aaff 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1457,8 +1457,10 @@ def _should_parse_dates(self, i):
def _extract_multi_indexer_columns(
self, header, index_names, col_names, passed_names=False
):
- """ extract and return the names, index_names, col_names
- header is a list-of-lists returned from the parsers """
+ """
+ extract and return the names, index_names, col_names
+ header is a list-of-lists returned from the parsers
+ """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 0e2b909d5cdc7..cde2b07e2a21d 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -569,9 +569,10 @@ def __getattr__(self, name: str):
)
def __contains__(self, key: str) -> bool:
- """ check for existence of this key
- can match the exact pathname or the pathnm w/o the leading '/'
- """
+ """
+ check for existence of this key
+        can match the exact pathname or the pathname w/o the leading '/'
+ """
node = self.get_node(key)
if node is not None:
name = node._v_pathname
@@ -1831,18 +1832,19 @@ def get_result(self, coordinates: bool = False):
class IndexCol:
- """ an index column description class
+ """
+ an index column description class
- Parameters
- ----------
+ Parameters
+ ----------
- axis : axis which I reference
- values : the ndarray like converted values
- kind : a string description of this type
- typ : the pytables type
- pos : the position in the pytables
+ axis : axis which I reference
+ values : the ndarray like converted values
+ kind : a string description of this type
+ typ : the pytables type
+ pos : the position in the pytables
- """
+ """
is_an_indexable = True
is_data_indexable = True
@@ -1999,9 +2001,11 @@ def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None):
- """ maybe set a string col itemsize:
- min_itemsize can be an integer or a dict with this columns name
- with an integer size """
+ """
+ maybe set a string col itemsize:
+        min_itemsize can be an integer or a dict with this column's name
+ with an integer size
+ """
if _ensure_decoded(self.kind) == "string":
if isinstance(min_itemsize, dict):
@@ -2051,8 +2055,10 @@ def validate_attr(self, append: bool):
)
def update_info(self, info):
- """ set/update the info for this indexable with the key/value
- if there is a conflict raise/warn as needed """
+ """
+ set/update the info for this indexable with the key/value
+ if there is a conflict raise/warn as needed
+ """
for key in self._info_fields:
@@ -2140,17 +2146,18 @@ def set_attr(self):
class DataCol(IndexCol):
- """ a data holding column, by definition this is not indexable
+ """
+ a data holding column, by definition this is not indexable
- Parameters
- ----------
+ Parameters
+ ----------
- data : the actual data
- cname : the column name in the table to hold the data (typically
- values)
- meta : a string description of the metadata
- metadata : the actual metadata
- """
+ data : the actual data
+ cname : the column name in the table to hold the data (typically
+ values)
+ meta : a string description of the metadata
+ metadata : the actual metadata
+ """
is_an_indexable = False
is_data_indexable = False
@@ -2460,16 +2467,17 @@ class GenericDataIndexableCol(DataIndexableCol):
class Fixed:
- """ represent an object in my store
- facilitate read/write of various types of objects
- this is an abstract base class
+ """
+ represent an object in my store
+ facilitate read/write of various types of objects
+ this is an abstract base class
- Parameters
- ----------
- parent : HDFStore
- group : Node
- The group node where the table resides.
- """
+ Parameters
+ ----------
+ parent : HDFStore
+ group : Node
+ The group node where the table resides.
+ """
pandas_kind: str
format_type: str = "fixed" # GH#30962 needed by dask
@@ -2596,8 +2604,10 @@ def validate_version(self, where=None):
return True
def infer_axes(self):
- """ infer the axes of my storer
- return a boolean indicating if we have a valid storer or not """
+ """
+ infer the axes of my storer
+ return a boolean indicating if we have a valid storer or not
+ """
s = self.storable
if s is None:
@@ -3105,29 +3115,29 @@ class FrameFixed(BlockManagerFixed):
class Table(Fixed):
- """ represent a table:
- facilitate read/write of various types of tables
-
- Attrs in Table Node
- -------------------
- These are attributes that are store in the main table node, they are
- necessary to recreate these tables when read back in.
-
- index_axes : a list of tuples of the (original indexing axis and
- index column)
- non_index_axes: a list of tuples of the (original index axis and
- columns on a non-indexing axis)
- values_axes : a list of the columns which comprise the data of this
- table
- data_columns : a list of the columns that we are allowing indexing
- (these become single columns in values_axes), or True to force all
- columns
- nan_rep : the string to use for nan representations for string
- objects
- levels : the names of levels
- metadata : the names of the metadata columns
-
- """
+ """
+ represent a table:
+ facilitate read/write of various types of tables
+
+ Attrs in Table Node
+ -------------------
+    These are attributes that are stored in the main table node; they are
+ necessary to recreate these tables when read back in.
+
+ index_axes : a list of tuples of the (original indexing axis and
+ index column)
+    non_index_axes : a list of tuples of the (original index axis and
+ columns on a non-indexing axis)
+ values_axes : a list of the columns which comprise the data of this
+ table
+ data_columns : a list of the columns that we are allowing indexing
+ (these become single columns in values_axes), or True to force all
+ columns
+ nan_rep : the string to use for nan representations for string
+ objects
+ levels : the names of levels
+ metadata : the names of the metadata columns
+ """
pandas_kind = "wide_table"
format_type: str = "table" # GH#30962 needed by dask
@@ -4080,10 +4090,11 @@ def read_column(
class WORMTable(Table):
- """ a write-once read-many table: this format DOES NOT ALLOW appending to a
- table. writing is a one-time operation the data are stored in a format
- that allows for searching the data on disk
- """
+ """
+ a write-once read-many table: this format DOES NOT ALLOW appending to a
+    table. Writing is a one-time operation; the data are stored in a format
+ that allows for searching the data on disk
+ """
table_type = "worm"
@@ -4094,14 +4105,16 @@ def read(
start: Optional[int] = None,
stop: Optional[int] = None,
):
- """ read the indices and the indexing array, calculate offset rows and
- return """
+ """
+ read the indices and the indexing array, calculate offset rows and return
+ """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
- """ write in a format that we can search later on (but cannot append
- to): write out the indices and the values using _write_array
- (e.g. a CArray) create an indexing table so that we can search
+ """
+ write in a format that we can search later on (but cannot append
+ to): write out the indices and the values using _write_array
+        (e.g. a CArray) and create an indexing table so that we can search
"""
raise NotImplementedError("WORMTable needs to implement write")
@@ -4170,8 +4183,9 @@ def write(
table.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize: Optional[int], dropna: bool = False):
- """ we form the data into a 2-d including indexes,values,mask
- write chunk-by-chunk """
+ """
+        we form the data into a 2-d structure (indexes, values, mask) and write chunk-by-chunk
+ """
names = self.dtype.names
nrows = self.nrows_expected
diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py
index 9b40778dbcfdf..d47dd2c71b86f 100644
--- a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -120,8 +120,10 @@ def column_data_offsets(self):
return np.asarray(self._column_data_offsets, dtype=np.int64)
def column_types(self):
- """Returns a numpy character array of the column types:
- s (string) or d (double)"""
+ """
+ Returns a numpy character array of the column types:
+ s (string) or d (double)
+ """
return np.asarray(self._column_types, dtype=np.dtype("S1"))
def close(self):
diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py
index de09460bb833d..3f47d325d86ef 100644
--- a/pandas/plotting/_matplotlib/core.py
+++ b/pandas/plotting/_matplotlib/core.py
@@ -509,10 +509,11 @@ def _adorn_subplots(self):
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
- """ Tick creation within matplotlib is reasonably expensive and is
- internally deferred until accessed as Ticks are created/destroyed
- multiple times per draw. It's therefore beneficial for us to avoid
- accessing unless we will act on the Tick.
+ """
+ Tick creation within matplotlib is reasonably expensive and is
+ internally deferred until accessed as Ticks are created/destroyed
+ multiple times per draw. It's therefore beneficial for us to avoid
+        accessing them unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py
index a60607d586ada..3aa188098620d 100644
--- a/pandas/tests/extension/test_datetime.py
+++ b/pandas/tests/extension/test_datetime.py
@@ -44,9 +44,9 @@ def data_missing_for_sorting(dtype):
@pytest.fixture
def data_for_grouping(dtype):
"""
- Expected to be like [B, B, NA, NA, A, A, B, C]
+ Expected to be like [B, B, NA, NA, A, A, B, C]
- Where A < B < C and NA is missing
+ Where A < B < C and NA is missing
"""
a = pd.Timestamp("2000-01-01")
b = pd.Timestamp("2000-01-02")
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 7645c6b4cf709..0c78facd5fd12 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -23,9 +23,11 @@ def _axes(self):
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
- """ construct an object for the given shape
- if value is specified use that if its a scalar
- if value is an array, repeat it as needed """
+ """
+ construct an object for the given shape
+        if value is specified, use that if it's a scalar
+ if value is an array, repeat it as needed
+ """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index b7d7124a3a5e5..5662d41e19885 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1496,7 +1496,7 @@ def test_groupby_reindex_inside_function():
def agg_before(hour, func, fix=False):
"""
- Run an aggregate func on the subset of data.
+ Run an aggregate func on the subset of data.
"""
def _func(data):
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 65066fd0099ba..f968144286bd4 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -225,9 +225,8 @@ def test_round_dst_border_nonexistent(self, method, ts_str, freq):
],
)
def test_round_int64(self, timestamp, freq):
- """check that all rounding modes are accurate to int64 precision
- see GH#22591
- """
+ # check that all rounding modes are accurate to int64 precision
+ # see GH#22591
dt = Timestamp(timestamp)
unit = to_offset(freq).nanos