diff --git a/.gitignore b/.gitignore
index 4388c376..b3526c7c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,11 +31,14 @@ bld/
 [Ll]og/
 [Ll]ogs/
+# JetBrains
+.idea
+
 # Visual Studio 2015/2017 cache/options directory
 .vs/
 
 # Uncomment if you have tasks that create the project's static files in wwwroot
 #wwwroot/
-!.vscode/settings.json
+.vscode
 
 # Visual Studio 2017 auto generated files
 Generated\ Files/
@@ -351,4 +354,9 @@ MigrationBackup/
 .ionide/
 
 # Python virtual environment
-.venv/
+.venv
+
+# Build files from utils
+utils/stubsplit/.eggs
+utils/stubsplit/build
+utils/stubsplit/stubsplit.egg-info
diff --git a/doc/pandas/core/frame.pyi.ds b/doc/pandas/core/frame.pyi.ds
index 6708e13d..17dab8e6 100644
--- a/doc/pandas/core/frame.pyi.ds
+++ b/doc/pandas/core/frame.pyi.ds
@@ -1286,7 +1286,8 @@ sorted_obj : DataFrame or None
         group_keys: _bool = ...,
         squeeze: _bool = ...,
         observed: _bool = ...,
-    ) -> DataFrameGroupBy:
+        dropna: _bool = ...,
+    ) -> DataFrameGroupBy: ...
         """Group DataFrame using a mapper or by a Series of columns.
 
 A groupby operation involves some combination of splitting the
diff --git a/doc/pandas/core/series.pyi.ds b/doc/pandas/core/series.pyi.ds
index 138b5d74..9cf1ec45 100644
--- a/doc/pandas/core/series.pyi.ds
+++ b/doc/pandas/core/series.pyi.ds
@@ -80,7 +80,7 @@ starting from the end of the object, just like with Python lists.
 3 lion mammal 80.5
 """
         pass
-    def to_markdown(self, buf: Optional[FilePathOrBuffer], mode: Optional[_str] = ..., **kwargs) -> None:
+    def to_markdown(self, buf: Optional[FilePathOrBuffer], mode: Optional[_str] = ..., index: _bool = ..., storage_options: Optional[dict] = ..., **kwargs) -> None:
         """Print Series in Markdown-friendly format.
 
 .. versionadded:: 1.0.0
@@ -150,7 +150,8 @@ Index : 2, Value : C
         group_keys: _bool = ...,
         squeeze: _bool = ...,
         observed: _bool = ...,
-    ) -> SeriesGroupBy:
+        dropna: _bool = ...
+    ) -> SeriesGroupBy: ...
         """Group Series using a mapper or by a Series of columns.
 
 A groupby operation involves some combination of splitting the
@@ -550,7 +551,7 @@ Returns
         pass
     def fillna(
         self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
+        value: Optional[Scalar|Dict|Series[S1]|DataFrame] = ...,
         method: Optional[Union[_str, Literal["backfill", "bfill", "pad", "ffill"]]] = ...,
         axis: SeriesAxisType = ...,
         limit: Optional[int] = ...,
diff --git a/pandas/_libs/tslibs/timedeltas.pyi b/pandas/_libs/tslibs/timedeltas.pyi
index 5b66ec60..bafe9eea 100644
--- a/pandas/_libs/tslibs/timedeltas.pyi
+++ b/pandas/_libs/tslibs/timedeltas.pyi
@@ -1,10 +1,11 @@
 from __future__ import annotations
+from datetime import timedelta
 from typing import Any, Union, Tuple, Type, Optional, Sequence
 
 import numpy as np
 from pandas._typing import Dtype
 
-class _Timedelta:
+class _Timedelta(timedelta):
     def __hash__(self) -> int: ...
     def __richcmp__(self, other, op: int) -> Any: ...
     def to_timedelta64(self) -> np.timedelta64: ...
diff --git a/pandas/_libs/tslibs/timestamps.pyi b/pandas/_libs/tslibs/timestamps.pyi
index c9247b0d..9f9155e6 100644
--- a/pandas/_libs/tslibs/timestamps.pyi
+++ b/pandas/_libs/tslibs/timestamps.pyi
@@ -1,4 +1,5 @@
 from __future__ import annotations
+from datetime import datetime, timestamp
 import sys
 from typing import Any, Optional, Union
 
@@ -7,7 +8,7 @@ if sys.version_info >= (3, 8):
 else:
     from typing_extensions import Literal
 
-class Timestamp(datetime.datetime):
+class Timestamp(datetime):
     def __init__(
         self,
         ts_input: Any,
@@ -35,7 +36,7 @@ class Tick(SingleConstructorOffset):
     def delta(self) -> int: ...
     @property
     def nanos(self) -> int: ...
-    def is_on_offset(self, dt: datetime.datetime) -> bool: ...
+    def is_on_offset(self, dt: datetime) -> bool: ...
     def is_anchored(self) -> bool: ...
     def __eq__(self, other) -> bool: ...
     def __ne__(self, other) -> bool: ...
@@ -49,7 +50,7 @@ class Tick(SingleConstructorOffset):
     def apply(self, other) -> Any: ...
     def __setstate__(self, state: Mapping) -> None: ...
 
-class Timestamp:
+class Timestamp(timestamp):
     @staticmethod
     def combine(date, time) -> Timestamp: ...
     @staticmethod
@@ -108,7 +109,7 @@ class Timestamp:
     def to_datetime64(self) -> _np.datetime64: ...
     def to_numpy(self) -> _np.datetime64: ...
     def to_period(self, freq: Optional[str] = ...) -> Any: ...
-    def to_pydatetime(self) -> datetime.datetime: ...
+    def to_pydatetime(self, warn: bool = ...) -> datetime: ...
     def toordinal(self) -> Any: ...
     def tz_convert(self, tz: Any) -> Timestamp: ...
     def tz_localize(self, tz: Any, ambiguous: Any = ..., nonexistent: Any = ...) -> Timestamp: ...
diff --git a/pandas/_testing.pyi b/pandas/_testing.pyi
index a7938723..57f463a3 100644
--- a/pandas/_testing.pyi
+++ b/pandas/_testing.pyi
@@ -48,7 +48,23 @@ def assert_numpy_array_equal(left, right, strict_nan: bool = ..., check_dtype: b
 def assert_extension_array_equal(
     left, right, check_dtype: bool = ..., check_less_precise: bool = ..., check_exact: bool = ...,
 ) -> None: ...
-def assert_series_equal(left: Series, right: Series) -> None: ...
+def assert_series_equal(left: Series, right: Series,
+    check_dtype: bool = ...,
+    check_index_type: bool|str = ...,
+    check_series_type: bool = ...,
+    check_less_precise: bool|int = ...,
+    check_names : bool = ...,
+    check_exact: bool = ...,
+    check_datetimelike_compat: bool = ...,
+    check_categorical: bool = ...,
+    check_category_order: bool = ...,
+    check_freq: bool = ...,
+    check_flags: bool = ...,
+    rtol: float = ...,
+    atol: float = ...,
+    obj: str = ...,
+    *,
+    check_index: bool = ...) -> None: ...
 def assert_frame_equal(left: DataFrame, right: DataFrame, check_like: Optional[bool] = ...) -> None: ...
 def assert_equal(left, right, **kwargs) -> None: ...
 def box_expected(expected, box_cls, transpose: bool = ...): ...
diff --git a/pandas/core/reshape/merge.pyi b/pandas/core/reshape/merge.pyi
index 69eca779..c355cdc7 100644
--- a/pandas/core/reshape/merge.pyi
+++ b/pandas/core/reshape/merge.pyi
@@ -3,8 +3,8 @@ from pandas import DataFrame as DataFrame, Series as Series
 from pandas._typing import Label
 from typing import Optional, Sequence, Union
 
-def merge(left: Union[DataFrame, Series],
-          right: Union[DataFrame, Series],
+def merge(left: DataFrame,
+          right: DataFrame|Series,
           how: str = ...,
           on: Optional[Union[Label, Sequence]] = ...,
           left_on: Optional[Union[Label, Sequence]] = ...,
diff --git a/pandas/core/series.pyi b/pandas/core/series.pyi
index c0630bc5..bd5ab658 100644
--- a/pandas/core/series.pyi
+++ b/pandas/core/series.pyi
@@ -16,7 +16,7 @@ from pandas.core.window.rolling import Rolling, Window
 from pandas._typing import ArrayLike as ArrayLike, AxisType as AxisType, Dtype as Dtype, DtypeNp as DtypeNp, \
     FilePathOrBuffer as FilePathOrBuffer, Level as Level, MaskType as MaskType, S1 as S1, Scalar as Scalar, \
     SeriesAxisType as SeriesAxisType, num as num, Label
-from typing import Any, Callable, Dict, Generic, Hashable, Iterable, List, Optional, Sequence, Tuple, Type, Union, overload
+from typing import Any, Callable, Dict, Generic, Hashable, Iterable, List, Mapping, Optional, Sequence, Tuple, Type, Union, overload
 if sys.version_info >= (3, 8):
     from typing import Literal
 else:
@@ -162,9 +162,9 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
         encoding: Optional[_str] = ...,
     ) -> _str: ...
     @overload
-    def to_markdown(self, buf: Optional[FilePathOrBuffer], mode: Optional[_str] = ..., **kwargs) -> None: ...
+    def to_markdown(self, buf: Optional[FilePathOrBuffer], mode: Optional[_str] = ..., index: _bool = ..., storage_options: Optional[dict] = ..., **kwargs) -> None: ...
     @overload
-    def to_markdown(self, mode: Optional[_str] = ...,) -> _str: ...
+    def to_markdown(self, mode: Optional[_str] = ..., index: _bool = ..., storage_options: Optional[dict] = ...) -> _str: ...
     def items(self) -> Iterable[Tuple[Union[int, _str], S1]]: ...
     def iteritems(self) -> Iterable[Tuple[Label, S1]]: ...
     def keys(self) -> List: ...
@@ -180,6 +180,7 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
         group_keys: _bool = ...,
         squeeze: _bool = ...,
         observed: _bool = ...,
+        dropna: _bool = ...
     ) -> SeriesGroupBy: ...
     @overload
     def count(self, level: None = ...) -> int: ...
@@ -203,11 +204,13 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
     def corr(
         self, other: Series[S1], method: Literal["pearson", "kendall", "spearman"] = ..., min_periods: int = ...,
     ) -> float: ...
-    def cov(self, other: Series[S1], min_periods: Optional[int] = ...) -> float: ...
+    def cov(self, other: Series[S1], min_periods: Optional[int] = ..., ddof: int = ...) -> float: ...
     def diff(self, periods: int = ...) -> Series[S1]: ...
     def autocorr(self, lag: int = ...) -> float: ...
     @overload
-    def dot(self, other: Union[DataFrame, Series[S1]]) -> Series[S1]: ...
+    def dot(self, other: Series[S1]) -> Scalar: ...
+    @overload
+    def dot(self, other: DataFrame) -> Series[S1]: ...
     @overload
     def dot(self, other: _ListLike) -> np.ndarray: ...
     def __matmul__(self, other): ...
@@ -230,7 +233,7 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
         self, other: Series[S1], func: Callable, fill_value: Optional[Scalar] = ...
     ) -> Series[S1]: ...
     def combine_first(self, other: Series[S1]) -> Series[S1]: ...
-    def update(self, other: Series[S1]) -> None: ...
+    def update(self, other: Series[S1]|Sequence[S1]|Mapping[int, S1]) -> None: ...
     def sort_values(
         self,
         axis: SeriesAxisType = ...,
@@ -297,6 +300,18 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
         fill_axis: SeriesAxisType = ...,
         broadcast_axis: Optional[SeriesAxisType] = ...,
     ) -> Tuple[Series, Series]: ...
+    @overload
+    def rename(
+        self,
+        index = ...,
+        *,
+        inplace: Literal[True],
+        axis: Optional[SeriesAxisType] = ...,
+        copy: _bool = ...,
+        level: Optional[Level] = ...,
+        errors: _str|Literal["raise", "ignore"] = ...
+    ) -> None: ...
+    @overload
     def rename(
         self,
         index = ...,
@@ -305,22 +320,35 @@
         copy: _bool = ...,
         inplace: _bool = ...,
         level: Optional[Level] = ...,
-        errors: Union[_str, Literal["raise", "ignore"]] = ...
+        errors: _str|Literal["raise", "ignore"] = ...
     ) -> Series: ...
     def reindex_like(
         self,
         other: Series[S1],
-        method: Optional[Union[_str, Literal["backfill", "bfill", "pad", "ffill", "nearest"]]] = ...,
+        method: Optional[_str|Literal["backfill", "bfill", "pad", "ffill", "nearest"]] = ...,
         copy: _bool = ...,
         limit: Optional[int] = ...,
         tolerance: Optional[float] = ...,
     ) -> Series: ...
+    @overload
     def drop(
         self,
-        labels: Optional[Union[_str, List]] = ...,
+        labels: Optional[_str|int|List] = ...,
         axis: SeriesAxisType = ...,
-        index: Optional[Union[List[_str], List[int], Index]] = ...,
-        columns: Optional[Union[_str, List]] = ...,
+        index: Optional[List[_str]|List[int]|Index] = ...,
+        columns: Optional[_str|List] = ...,
+        level: Optional[Level] = ...,
+        errors: Literal["ignore", "raise"] = ...,
+        *,
+        inplace: Literal[True]
+    ) -> None: ...
+    @overload
+    def drop(
+        self,
+        labels: Optional[_str|int|List] = ...,
+        axis: SeriesAxisType = ...,
+        index: Optional[List[_str]|List[int]|Index] = ...,
+        columns: Optional[_str|List] = ...,
         level: Optional[Level] = ...,
         inplace: _bool = ...,
         errors: Literal["ignore", "raise"] = ...,
@@ -328,7 +356,7 @@
     @overload
     def fillna(
         self,
-        value: Union[Scalar, Dict, Series[S1], DataFrame],
+        value: Optional[Scalar|Dict|Series[S1]|DataFrame] = ...,
         method: Optional[Union[_str, Literal["backfill", "bfill", "pad", "ffill"]]] = ...,
         axis: SeriesAxisType = ...,
         limit: Optional[int] = ...,
@@ -339,7 +367,7 @@
     @overload
     def fillna(
         self,
-        value: Union[Scalar, Dict, Series[S1], DataFrame],
+        value: Optional[Scalar|Dict|Series[S1]|DataFrame] = ...,
         method: Optional[Union[_str, Literal["backfill", "bfill", "pad", "ffill"]]] = ...,
         axis: SeriesAxisType = ...,
         *,
@@ -349,7 +377,7 @@
     @overload
     def fillna(
         self,
-        value: Union[Scalar, Dict, Series[S1], DataFrame],
+        value: Optional[Scalar|Dict|Series[S1]|DataFrame] = ...,
         method: Optional[Union[_str, Literal["backfill", "bfill", "pad", "ffill"]]] = ...,
         axis: SeriesAxisType = ...,
         inplace: _bool = ...,
@@ -381,6 +409,11 @@
     def isnull(self) -> Series[_bool]: ...
     def notna(self) -> Series[_bool]: ...
     def notnull(self) -> Series[_bool]: ...
+    @overload
+    def dropna(
+        self, axis: SeriesAxisType = ..., how: Optional[_str] = ..., *, inplace: Literal[True]
+    ) -> None: ...
+    @overload
     def dropna(
         self, axis: SeriesAxisType = ..., inplace: _bool = ..., how: Optional[_str] = ...,
     ) -> Series[S1]: ...
@@ -453,33 +486,27 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
     @overload
     def ffill(
         self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
-        axis: SeriesAxisType,
-        inplace: Literal[True],
+        value: Union[S1, Dict, Series[S1], DataFrame] = ...,
+        axis: SeriesAxisType = ...,
         limit: Optional[int] = ...,
         downcast: Optional[Dict] = ...,
-    ) -> Series[S1]: ...
-    @overload
-    def ffill(
-        self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
+        *,
         inplace: Literal[True],
-        limit: Optional[int] = ...,
-        downcast: Optional[Dict] = ...,
     ) -> None: ...
     @overload
     def ffill(
         self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
+        value: Union[S1, Dict, Series[S1], DataFrame] = ...,
         axis: SeriesAxisType = ...,
-        *,
         limit: Optional[int] = ...,
         downcast: Optional[Dict] = ...,
-    ) -> Series[S1]: ...
+        *,
+        inplace: Literal[False],
+    ) -> Series[S1]: ...
     @overload
     def ffill(
         self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
+        value: Union[S1, Dict, Series[S1], DataFrame] = ...,
         axis: SeriesAxisType = ...,
         inplace: _bool = ...,
         limit: Optional[int] = ...,
@@ -488,7 +515,7 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
     @overload
     def bfill(
         self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
+        value: Union[S1, Dict, Series[S1], DataFrame] = ...,
         axis: SeriesAxisType = ...,
         limit: Optional[int] = ...,
         downcast: Optional[Dict] = ...,
@@ -498,12 +525,13 @@ class Series(IndexOpsMixin, NDFrame, Generic[S1]):
     @overload
     def bfill(
         self,
-        value: Union[S1, Dict, Series[S1], DataFrame],
+        value: Union[S1, Dict, Series[S1], DataFrame] = ...,
         axis: SeriesAxisType = ...,
-        *,
         limit: Optional[int] = ...,
         downcast: Optional[Dict] = ...,
-    ) -> Series[S1]: ...
+        *,
+        inplace: Literal[False]
+    ) -> Series[S1]: ...
     @overload
     def bfill(
         self,
diff --git a/pandas/core/strings.pyi b/pandas/core/strings.pyi
index 9241e2fe..35c66832 100644
--- a/pandas/core/strings.pyi
+++ b/pandas/core/strings.pyi
@@ -92,4 +92,3 @@ class StringMethods(NoNewAttributesMixin, Generic[RT]):
 
     isnumeric = ...
     isdecimal = ...
-
diff --git a/pandas/core/tools/datetimes.pyi b/pandas/core/tools/datetimes.pyi
index 5c5e59ba..ca25b87b 100644
--- a/pandas/core/tools/datetimes.pyi
+++ b/pandas/core/tools/datetimes.pyi
@@ -41,7 +41,7 @@ def to_datetime(
     unit: Optional[str] = ...,
     infer_datetime_format: bool = ...,
     origin: Scalar = ...,
-    cache: bool = ...) -> datetime: ...
+    cache: bool = ...) -> Timestamp: ...
 @overload
 def to_datetime(
     arg: Union[list, tuple, ArrayLike],
diff --git a/pandas/io/parsers.pyi b/pandas/io/parsers.pyi
index a0e172a9..a56d708b 100644
--- a/pandas/io/parsers.pyi
+++ b/pandas/io/parsers.pyi
@@ -12,7 +12,6 @@ else:
 @overload
 def read_csv(
     reader: FilePathOrBuffer,
-    *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
     header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
@@ -42,7 +41,7 @@
     date_parser: Optional[Callable] = ...,
     dayfirst: bool = ...,
     cache_dates: bool = ...,
-    iterator: Literal[True],
+    iterator: bool = ...,
     chunksize: Optional[int] = ...,
     compression: Optional[Union[str, Literal["infer", "gzip", "bz2", "zip", "xz"]]] = ...,
     thousands: Optional[str] = ...,
@@ -57,20 +56,19 @@
     dialect: Optional[str] = ...,
     error_bad_lines: bool = ...,
     warn_bad_lines: bool = ...,
-    on_bad_lines: Literal["error", "warn", "skip"] = "error",
     delim_whitespace: bool = ...,
     low_memory: bool = ...,
     memory_map: bool = ...,
     float_precision: Optional[str] = ...,
-    storage_options: Optional[Dict[str, Any]] = ...,
 ) -> TextFileReader: ...
+
 @overload
 def read_csv(
     filepath: FilePathOrBuffer,
     *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
-    header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
+    header: Optional[int|Sequence[int]|str|Literal["infer"]] = ...,
     names: Optional[Sequence[str]] = ...,
     index_col: Optional[Union[int, str, Sequence, Union[bool, Literal[False]]]] = ...,
     usecols: Optional[Union[int, str, Sequence]] = ...,
@@ -125,7 +123,7 @@
     *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
-    header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
+    header: Optional[int|Sequence[int]|str|Literal["infer"]] = ...,
     names: Optional[Sequence[str]] = ...,
     index_col: Optional[Union[int, str, Sequence, Union[bool, Literal[False]]]] = ...,
     usecols: Optional[Union[int, str, Sequence]] = ...,
@@ -180,7 +178,7 @@
     *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
-    header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
+    header: Optional[int|Sequence[int]|str|Literal["infer"]] = ...,
     names: Optional[Sequence[str]] = ...,
     index_col: Optional[Union[int, str, Sequence, Union[bool, Literal[False]]]] = ...,
     usecols: Optional[Union[int, str, Sequence]] = ...,
@@ -234,7 +232,7 @@ def read_csv(
     filepath: FilePathOrBuffer,
     sep: str = ...,
     delimiter: Optional[str] = ...,
-    header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
+    header: Optional[int|Sequence[int]|str|Literal["infer"]] = ...,
     names: Optional[Sequence[str]] = ...,
     index_col: Optional[Union[int, str, Sequence, Union[bool, Literal[False]]]] = ...,
     usecols: Optional[Union[int, str, Sequence]] = ...,
@@ -286,7 +284,6 @@
 @overload
 def read_table(
     reader: FilePathOrBuffer,
-    *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
     header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
@@ -316,7 +313,7 @@
     date_parser: Optional[Callable] = ...,
     dayfirst: bool = ...,
     cache_dates: bool = ...,
-    iterator: Literal[True],
+    iterator: bool = ...,
     chunksize: Optional[int] = ...,
     compression: Optional[Union[str, Literal["infer", "gzip", "bz2", "zip", "xz"]]] = ...,
     thousands: Optional[str] = ...,
@@ -342,7 +339,7 @@
     *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
-    header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
+    header: Optional[int|Sequence[int]|str|Literal["infer"]] = ...,
     names: Optional[Sequence[str]] = ...,
     index_col: Optional[Union[int, str, Sequence, bool, Literal[False]]] = ...,
     usecols: Optional[Union[int, str, Sequence]] = ...,
@@ -395,7 +392,7 @@
     *,
     sep: str = ...,
     delimiter: Optional[str] = ...,
-    header: Union[int, Sequence[int], str, Literal["infer"]] = ...,
+    header: Optional[int|Sequence[int]|str|Literal["infer"]] = ...,
     names: Optional[Sequence[str]] = ...,
     index_col: Optional[Union[int, str, Sequence, bool, Literal[False]]] = ...,
     usecols: Optional[Union[int, str, Sequence]] = ...,
@@ -495,6 +492,7 @@
     memory_map: bool = ...,
     float_precision: Optional[str] = ...,
 ) -> TextFileReader: ...
+
 @overload
 def read_table(
     filepath: FilePathOrBuffer,
diff --git a/pygame/README.md b/pygame/README.md
new file mode 100644
index 00000000..0f0edce4
--- /dev/null
+++ b/pygame/README.md
@@ -0,0 +1,2 @@
+These stubs are derived from the original PyGame package at https://github.com/pygame/pygame
+
diff --git a/tenacity/README.md b/tenacity/README.md
new file mode 100644
index 00000000..df5de930
--- /dev/null
+++ b/tenacity/README.md
@@ -0,0 +1,2 @@
+These stubs are derived from the original tenacity package at https://github.com/jd/tenacity
+
diff --git a/utils/stubsplit/ChangeLog b/utils/stubsplit/ChangeLog
new file mode 100644
index 00000000..2e35f5fc
--- /dev/null
+++ b/utils/stubsplit/ChangeLog
@@ -0,0 +1,170 @@
+CHANGES
+=======
+
+* pandas: handle setitem with Timedelta (#120)
+* fix NamedAgg aggfunc should be AggScalar (#128)
+* pandas: fix Series.between() (#121)
+* read\_csv on\_bad\_lines and storage\_options arguments (#119)
+* allow \`args\` and \`kwargs\` for pandas DataFrame.apply (#104)
+* pandas: add dropna argument to DataFrameGroupBy (#126)
+* pandas: fix Timedeltas constructor (#123)
+* pandas: add kwargs to to\_feather (#124)
+* pandas: fix series and using getitem (#122)
+* fix for missing types on NamedAgg (#111)
+* fix overloads for read\_csv and read\_table (#109)
+* add typing for pandas.isnull(), pandas.isna() (#106)
+* fix arguments to pandas.Series.where() (#108)
+* for pandas.loc\_\_setitem\_\_, use Sequence and allow boolean series (#107)
+* fix return types for pandas str accessors (#105)
+* pandas allow tuples to mix types in index in .loc and be of any length (#103)
+* fix pandas .loc with slice arguments (#98)
+* Allow Series as argument to pandas.to\_datetime (#102)
+* pandas Series.to\_dict() can return any key in Dict (#99)
+* Resync pandas stubs from pylance (#101)
+* allow Series.index to be set (#97)
+* pandas: allow tuples to get scalar values in .loc (#96)
+* Rename \_BaseExcelReader to BaseExcelReader
+* pandas: allow list of strings to index DataFrame (#94)
+* pandas: allow loc set with scalar arguments (#93)
+* allow boolean series as columns chooser in getitem (#92)
+* fix Series.mask argument (#89)
+* pandas: fix .loc setitem with a scalar (#90)
+* matplotlib Patch.\_\_init\_\_ accepts any arguments (#91)
+* Fix annotation for the \`parse\_dates\` argument of the \`pandas.read\_html\` function (#85)
+* initial opencv scraped stub (#72)
+* adding partial stubs for huggingface transformers due to poor perform… (#86)
+* allow Index in setitem on DataFrame (#84)
+* pandas: allow scalars as args to series.fillna (#83)
+* pandas: fix various arguments for Series.iloc and DataFrame.loc (#82)
+* fix return type of Series groupby aggregate (#81)
+* pandas: allow str as second item in setitem for loc on DF, allow Index in setitem for loc on Series (#79)
+* Update typing for ascending param for DataFrame.sort\_values and DataF… (#80)
+* Add type for pandas.\_\_version\_\_ (#78)
+* Remove upstreamed stubs (#77)
+* Allow a scalar argument for data in Series (#76)
+* fix axis arguments for concat (#68)
+* loc on DataFrame should accept str (#74)
+* Fix a syntax error in the matplotlib stubs (#75)
+* Added initial partial sympy-stubs
+* loc on just index returns DataFrame (#71)
+* setter for index.names (#70)
+* add gym-stubs used in pylance to stub repo
+* pandas: fix series name argument (#66)
+* fix series.astype dtype argument for pandas (#64)
+* fix loc to accept Index as arg (#63)
+* fix arguments for merge (#61)
+* fix drop columns argument (#59)
+* fix getitem issue (#56)
+* added pywin32 compiled module's stubs for longer name import usage cases (#57)
+* Pandas fixes. (#54)
+* Update PIL stubs (#53)
+* Add matplotlib and PIL stubs from Pylance (#51)
+* Pandas (#49)
+* push stubs in pylance to python-type-stubs repo
+* Fix mapper-based calling convention of DataFrame rename (#41)
+* Fix issues with rename\_axis overloads and arguments (microsoft/pylance-release#634), concat return type (microsoft/pylance-release#624), path-like arguments (microsoft/pylance-release#602) and DataFrame.astype (microsoft/python-type-stubs#32)
+* Fix pygame AbstractGroup attribute typehints (#34)
+* Incorporated changes from Pylance
+* Create README.md
+* Create README.md
+* Changed from copying original package licenses to READMEs that reference original package locations
+* Added Microsoft headers as we can't put the (c) line in the license file for PyGame
+* Add license and 3rd party notice file
+* Add stubsplit utility. Update pandas stubs
+* Update README.md
+* Create CONTRIBUTING.md
+* Update README.md
+* Reorg
+* Partial
+* Partial
+* Update README.md
+* Updated README
+* Partial
+* Partial
+* Partial
+* Fixups after code review by Pygame folks
+* Ran black and isort on pygame and fixed a few other errors caught by pyright
+* Removed stub files that have been submitted to typeshed and replaced with README.md files
+* Take 2
+* PR Feedback changes
+* removed an Any
+* Remove compat.pyi amd small fixes
+* PR feedback changes
+* added two more stubs
+* First commit of modified stubs. More celanup needed for typshed
+* black formatting messes up every alternate save
+* Clearn up commas, initializers
+* Fixed \_\_init\_\_.py and dangling commas
+* Completed Rect.pyi
+* Initial pass creating overloads & ensuring types
+* initial commit of pygame stubs
+* fixed \_\_init\_\_.py to export the classes
+* Fixed a couple of issues in deprecated stubs
+* Partial
+* Fixed a couple of bugs caught by stubtest
+* Small clean-up in retry stubs
+* Small clean-up within packaging stubs
+* Small clean-ups in markdown stubs
+* A few more small tweaks to deprecated
+* Small clean-up in deprecated stubs
+* Fix a few issues with the docs to merge and named them with the proper suffix
+* Added a few more typings, and crated a doc directory where we can keep the docstrings to be merged
+* More cleanup and fixes
+* Create README.md
+* Small clean-up pass on cachetools
+* Enabled black formatter
+* Removed uneeded imports. Removed use of Any where redundant and not explicitly intended. Fixed some more minor issues
+* fixed errors found with mypy tests removed sigs that were already typed in the base class
+* Added utility scripts. Renamed DType to Dtype. Made the iLocIndexerrFrame synthetic class a subclass of the concrete non-specialized iLocIndexer, and similar for loc indexers and for both of these as specialized for Series
+* Many fixes, added lots of missing Cython types, commented out unneeded imports (leaving them around for now though; we can strip all comments later)
+* Did a clean-up pass on freezegun (small changes for consistency with other type stubs)
+* Small clean-up pass on filelock stub
+* Minor tweaks
+* Add missing annotations
+* Minor corrections
+* Address more comments
+* Address comments
+* Formatting
+* Add stubs for freezegun
+* Formatting
+* Fixes
+* Stubs for filelock
+* fixed a \_Timer reference
+* Deleted multidict stubs. The latest version of multidict ships with inlined stubs
+* Did clean-up pass on slugify stubs
+* Did clean-up pass on packaging stubs
+* Started to clean up mutlidict stubs. More work is needed
+* Checked in .vscode/settings.json so everyone uses the same settings when working on stubs
+* Did a clean-up pass on markdown stubs
+* Did a clean-up pass on deprecated stubs
+* Did a clean-up pass on cachetools stubs
+* Did a clean-up pass on aiofiles stubs
+* Added Tuples Made \_timer to a Callable fixed popitem
+* Fixes
+* Whoops
+* Stubs for retry
+* Fixes
+* Fixes
+* Removed the remaining Anys,; Removed danglings commas Removed generics \_\_iter\_\_() returns \_KT
+* Add slugify stubs
+* New pandas staubs based off stubgen skeletons, with the old ones I did before merged in
+* Drop non-core API
+* PR feedback
+* added generics and removed almost all Anys
+* Add stubs for Extensions public API
+* Fix formatting
+* Remove overload with kwargs
+* Add stubs for core public API
+* Add overloads + keyword args
+* Add overloads
+* Add callable
+* Create pyproject.toml
+* Stubs for deprecated
+* stubs for cachetools
+* Added initial stubs for aiofiles package
+* Added some packages that had type hints in comments/docstrings
+* Initial SECURITY.md commit
+* Initial README.md commit
+* Initial CODE\_OF\_CONDUCT.md commit
+* Initial LICENSE commit
+* Initial commit