From b289d1a1762c8a8a982e2ef659520fa864410d3e Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 12:59:32 -0800
Subject: [PATCH 1/9] TST: un-xfail pyarrow verbose tests

---
 pandas/tests/io/parser/common/test_verbose.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/pandas/tests/io/parser/common/test_verbose.py b/pandas/tests/io/parser/common/test_verbose.py
index bcfb9cd4032ad..14deba8b40b22 100644
--- a/pandas/tests/io/parser/common/test_verbose.py
+++ b/pandas/tests/io/parser/common/test_verbose.py
@@ -6,10 +6,7 @@
 import pytest
 
-xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
-
 
-@xfail_pyarrow  # ValueError: The 'verbose' option is not supported
 def test_verbose_read(all_parsers, capsys):
     parser = all_parsers
     data = """a,b,c,d
@@ -22,6 +19,12 @@ def test_verbose_read(all_parsers, capsys):
 one,1,2,3
 two,1,2,3"""
 
+    if parser.engine == "pyarrow":
+        msg = "The 'verbose' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), verbose=True)
+        return
+
     # Engines are verbose in different ways.
     parser.read_csv(StringIO(data), verbose=True)
     captured = capsys.readouterr()
@@ -33,7 +36,6 @@ def test_verbose_read(all_parsers, capsys):
         assert captured.out == "Filled 3 NA values in column a\n"
 
 
-@xfail_pyarrow  # ValueError: The 'verbose' option is not supported
 def test_verbose_read2(all_parsers, capsys):
     parser = all_parsers
     data = """a,b,c,d
@@ -46,6 +48,12 @@ def test_verbose_read2(all_parsers, capsys):
 seven,1,2,3
 eight,1,2,3"""
 
+    if parser.engine == "pyarrow":
+        msg = "The 'verbose' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), verbose=True, index_col=0)
+        return
+
     parser.read_csv(StringIO(data), verbose=True, index_col=0)
     captured = capsys.readouterr()

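The hunk above sets the pattern that the rest of the series repeats: instead of xfail-ing the whole test under the pyarrow engine, each test asserts the specific ValueError and returns early, so the unsupported-option error path is itself exercised. A minimal sketch of the idea, with a hypothetical helper name (`assert_pyarrow_rejects`) and assuming, as the pandas test fixtures provide, a parser object exposing `.engine` and `.read_csv`:

    import pytest
    from io import StringIO


    def assert_pyarrow_rejects(parser, data, option, **kwargs):
        # Hypothetical helper, not part of the patch: check that the pyarrow
        # engine rejects an unsupported read_csv option with the expected message.
        msg = f"The '{option}' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), **kwargs)

The early `return` after the check leaves the remainder of each test, which exercises the real behavior on the c and python engines, untouched.
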
comment="#", + iterator=True, + chunksize=1, + skiprows=[2], + ) + return + msg = "Expected 3 fields in line 6, saw 5" with parser.read_csv( StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2] @@ -239,19 +255,21 @@ def test_null_byte_char(request, all_parsers): parser.read_csv(StringIO(data), names=names) -# ValueError: the 'pyarrow' engine does not support sep=None with delim_whitespace=False -@xfail_pyarrow @pytest.mark.filterwarnings("always::ResourceWarning") def test_open_file(request, all_parsers): # GH 39024 parser = all_parsers + + msg = "Could not determine delimiter" + err = csv.Error if parser.engine == "c": - request.applymarker( - pytest.mark.xfail( - reason=f"{parser.engine} engine does not support sep=None " - f"with delim_whitespace=False" - ) + msg = "the 'c' engine does not support sep=None with delim_whitespace=False" + err = ValueError + elif parser.engine == "pyarrow": + msg = ( + "the 'pyarrow' engine does not support sep=None with delim_whitespace=False" ) + err = ValueError with tm.ensure_clean() as path: file = Path(path) @@ -259,7 +277,7 @@ def test_open_file(request, all_parsers): with tm.assert_produces_warning(None): # should not trigger a ResourceWarning - with pytest.raises(csv.Error, match="Could not determine delimiter"): + with pytest.raises(err, match=msg): parser.read_csv(file, sep=None, encoding_errors="replace") From 28574e49a8724d09797073e8e22fd42c10ceebb6 Mon Sep 17 00:00:00 2001 From: Brock Date: Fri, 10 Nov 2023 13:10:33 -0800 Subject: [PATCH 3/9] de-xfail pyarrow tests --- .../tests/io/parser/common/test_iterator.py | 29 +++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/pandas/tests/io/parser/common/test_iterator.py b/pandas/tests/io/parser/common/test_iterator.py index 26619857bd231..a521c84aa007d 100644 --- a/pandas/tests/io/parser/common/test_iterator.py +++ b/pandas/tests/io/parser/common/test_iterator.py @@ -15,10 +15,8 @@ pytestmark = pytest.mark.filterwarnings( "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -@xfail_pyarrow # ValueError: The 'iterator' option is not supported def test_iterator(all_parsers): # see gh-6607 data = """index,A,B,C,D @@ -33,6 +31,13 @@ def test_iterator(all_parsers): kwargs = {"index_col": 0} expected = parser.read_csv(StringIO(data), **kwargs) + + if parser.engine == "pyarrow": + msg = "The 'iterator' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), iterator=True, **kwargs) + return + with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader: first_chunk = reader.read(3) tm.assert_frame_equal(first_chunk, expected[:3]) @@ -41,7 +46,6 @@ def test_iterator(all_parsers): tm.assert_frame_equal(last_chunk, expected[3:]) -@xfail_pyarrow # ValueError: The 'iterator' option is not supported def test_iterator2(all_parsers): parser = all_parsers data = """A,B,C @@ -50,6 +54,12 @@ def test_iterator2(all_parsers): baz,7,8,9 """ + if parser.engine == "pyarrow": + msg = "The 'iterator' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), iterator=True) + return + with parser.read_csv(StringIO(data), iterator=True) as reader: result = list(reader) @@ -61,7 +71,6 @@ def test_iterator2(all_parsers): tm.assert_frame_equal(result[0], expected) -@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def 
From 28574e49a8724d09797073e8e22fd42c10ceebb6 Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 13:10:33 -0800
Subject: [PATCH 3/9] de-xfail pyarrow tests

---
 .../tests/io/parser/common/test_iterator.py   | 29 +++++++++++++++----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/pandas/tests/io/parser/common/test_iterator.py b/pandas/tests/io/parser/common/test_iterator.py
index 26619857bd231..a521c84aa007d 100644
--- a/pandas/tests/io/parser/common/test_iterator.py
+++ b/pandas/tests/io/parser/common/test_iterator.py
@@ -15,10 +15,8 @@
 pytestmark = pytest.mark.filterwarnings(
     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
 )
-xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
 
 
-@xfail_pyarrow  # ValueError: The 'iterator' option is not supported
 def test_iterator(all_parsers):
     # see gh-6607
     data = """index,A,B,C,D
@@ -33,6 +31,13 @@ def test_iterator(all_parsers):
 
     kwargs = {"index_col": 0}
     expected = parser.read_csv(StringIO(data), **kwargs)
+
+    if parser.engine == "pyarrow":
+        msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), iterator=True, **kwargs)
+        return
+
     with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
         first_chunk = reader.read(3)
         tm.assert_frame_equal(first_chunk, expected[:3])
@@ -41,7 +46,6 @@ def test_iterator(all_parsers):
         tm.assert_frame_equal(last_chunk, expected[3:])
 
 
-@xfail_pyarrow  # ValueError: The 'iterator' option is not supported
 def test_iterator2(all_parsers):
     parser = all_parsers
     data = """A,B,C
@@ -50,6 +54,12 @@ def test_iterator2(all_parsers):
 baz,7,8,9
 """
 
+    if parser.engine == "pyarrow":
+        msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), iterator=True)
+        return
+
     with parser.read_csv(StringIO(data), iterator=True) as reader:
         result = list(reader)
 
@@ -61,7 +71,6 @@ def test_iterator2(all_parsers):
     tm.assert_frame_equal(result[0], expected)
 
 
-@xfail_pyarrow  # ValueError: The 'chunksize' option is not supported
 def test_iterator_stop_on_chunksize(all_parsers):
     # gh-3967: stopping iteration when chunksize is specified
     parser = all_parsers
@@ -70,6 +79,11 @@ def test_iterator_stop_on_chunksize(all_parsers):
 bar,4,5,6
 baz,7,8,9
 """
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), chunksize=1)
+        return
 
     with parser.read_csv(StringIO(data), chunksize=1) as reader:
         result = list(reader)
@@ -83,7 +97,6 @@ def test_iterator_stop_on_chunksize(all_parsers):
     tm.assert_frame_equal(concat(result), expected)
 
 
-@xfail_pyarrow  # AssertionError: Regex pattern did not match
 @pytest.mark.parametrize(
     "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
 )
@@ -92,6 +105,12 @@ def test_iterator_skipfooter_errors(all_parsers, kwargs):
     parser = all_parsers
     data = "a\n1\n2"
 
+    if parser.engine == "pyarrow":
+        msg = (
+            "The '(chunksize|iterator)' option is not supported with the "
+            "'pyarrow' engine"
+        )
+
     with pytest.raises(ValueError, match=msg):
         with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
             pass

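One detail in the skipfooter hunk above is worth calling out: `pytest.raises(..., match=...)` applies the pattern with `re.search`, so the parenthesized `(chunksize|iterator)` acts as regex alternation and accepts whichever option the engine complains about first. A quick standalone check of that assumption:

    import re

    msg = (
        "The '(chunksize|iterator)' option is not supported with the "
        "'pyarrow' engine"
    )
    # Both concrete messages satisfy the pattern.
    assert re.search(msg, "The 'chunksize' option is not supported with the 'pyarrow' engine")
    assert re.search(msg, "The 'iterator' option is not supported with the 'pyarrow' engine")

The plain messages used elsewhere in the series contain no regex metacharacters, so they can be passed to `match` as-is.
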
From 236c9cdfecf3956d7ebf4b9b880f579847a1e02d Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 13:34:46 -0800
Subject: [PATCH 4/9] de-xfail pyarrow tests

---
 .../io/parser/common/test_file_buffer_url.py  | 46 +++++++++++++++----
 1 file changed, 37 insertions(+), 9 deletions(-)

diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py
index 5d5814e880f8b..7fd86e956b543 100644
--- a/pandas/tests/io/parser/common/test_file_buffer_url.py
+++ b/pandas/tests/io/parser/common/test_file_buffer_url.py
@@ -214,8 +214,14 @@ def test_eof_states(all_parsers, data, kwargs, expected, msg, request):
     # see gh-10728, gh-10548
     parser = all_parsers
 
+    if parser.engine == "pyarrow" and "comment" in kwargs:
+        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), **kwargs)
+        return
+
     if parser.engine == "pyarrow" and "\r" not in data:
-        mark = pytest.mark.xfail(reason="The 'comment' option is not supported")
+        mark = pytest.mark.xfail(reason="Mismatched exception type/message")
         request.applymarker(mark)
 
     if expected is None:
@@ -356,7 +362,6 @@ def test_read_csv_file_handle(all_parsers, io_class, encoding):
     assert not handle.closed
 
 
-@xfail_pyarrow  # ValueError: The 'memory_map' option is not supported
 def test_memory_map_compression(all_parsers, compression):
     """
     Support memory map for compressed files.
@@ -369,19 +374,32 @@ def test_memory_map_compression(all_parsers, compression):
     with tm.ensure_clean() as path:
         expected.to_csv(path, index=False, compression=compression)
 
-        tm.assert_frame_equal(
-            parser.read_csv(path, memory_map=True, compression=compression),
-            expected,
-        )
+        if parser.engine == "pyarrow":
+            msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(path, memory_map=True, compression=compression)
+            return
+
+        result = parser.read_csv(path, memory_map=True, compression=compression)
+
+    tm.assert_frame_equal(
+        result,
+        expected,
+    )
 
 
-@xfail_pyarrow  # ValueError: The 'chunksize' option is not supported
 def test_context_manager(all_parsers, datapath):
     # make sure that opened files are closed
     parser = all_parsers
 
     path = datapath("io", "data", "csv", "iris.csv")
 
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(path, chunksize=1)
+        return
+
     reader = parser.read_csv(path, chunksize=1)
     assert not reader.handles.handle.closed
     try:
@@ -392,12 +410,17 @@ def test_context_manager(all_parsers, datapath):
         assert reader.handles.handle.closed
 
 
-@xfail_pyarrow  # ValueError: The 'chunksize' option is not supported
 def test_context_manageri_user_provided(all_parsers, datapath):
     # make sure that user-provided handles are not closed
     parser = all_parsers
 
     with open(datapath("io", "data", "csv", "iris.csv"), encoding="utf-8") as path:
+        if parser.engine == "pyarrow":
+            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(path, chunksize=1)
+            return
+
         reader = parser.read_csv(path, chunksize=1)
         assert not reader.handles.handle.closed
         try:
@@ -417,7 +440,6 @@ def test_file_descriptor_leak(all_parsers, using_copy_on_write):
             parser.read_csv(path)
 
 
-@xfail_pyarrow  # ValueError: The 'memory_map' option is not supported
 def test_memory_map(all_parsers, csv_dir_path):
     mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
     parser = all_parsers
@@ -426,5 +448,11 @@ def test_memory_map(all_parsers, csv_dir_path):
         {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
     )
 
+    if parser.engine == "pyarrow":
+        msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(mmap_file, memory_map=True)
+        return
+
     result = parser.read_csv(mmap_file, memory_map=True)
     tm.assert_frame_equal(result, expected)

From b2bd5c9f6031f5d4e67074b91b67fa670b88d3de Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 13:45:51 -0800
Subject: [PATCH 5/9] de-xfail pyarrow tests

---
 pandas/io/parsers/arrow_parser_wrapper.py     |  9 +++++++-
 .../io/parser/dtypes/test_dtypes_basic.py     | 23 ++++++++++++-------
 2 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/pandas/io/parsers/arrow_parser_wrapper.py b/pandas/io/parsers/arrow_parser_wrapper.py
index 35965c90ee7fb..a1d69deb6a21e 100644
--- a/pandas/io/parsers/arrow_parser_wrapper.py
+++ b/pandas/io/parsers/arrow_parser_wrapper.py
@@ -13,6 +13,7 @@
 )
 from pandas.util._exceptions import find_stack_level
 
+from pandas.core.dtypes.common import pandas_dtype
 from pandas.core.dtypes.inference import is_integer
 
 import pandas as pd
@@ -203,7 +204,13 @@ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
             # Ignore non-existent columns from dtype mapping
             # like other parsers do
             if isinstance(self.dtype, dict):
-                self.dtype = {k: v for k, v in self.dtype.items() if k in frame.columns}
+                self.dtype = {
+                    k: pandas_dtype(v)
+                    for k, v in self.dtype.items()
+                    if k in frame.columns
+                }
+            else:
+                self.dtype = pandas_dtype(self.dtype)
             try:
                 frame = frame.astype(self.dtype)
             except TypeError as e:
diff --git a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
index 3f3d340ab2e08..32b4b1dedc3cb 100644
--- a/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
+++ b/pandas/tests/io/parser/dtypes/test_dtypes_basic.py
@@ -73,7 +73,6 @@ def test_dtype_per_column(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-@pytest.mark.usefixtures("pyarrow_xfail")
 def test_invalid_dtype_per_column(all_parsers):
     parser = all_parsers
     data = """\
@@ -87,7 +86,6 @@ def test_invalid_dtype_per_column(all_parsers):
         parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
 
 
-@pytest.mark.usefixtures("pyarrow_xfail")
 def test_raise_on_passed_int_dtype_with_nas(all_parsers):
     # see gh-2631
     parser = all_parsers
@@ -96,22 +94,31 @@ def test_raise_on_passed_int_dtype_with_nas(all_parsers):
 2001,,11
 2001,106380451,67"""
 
-    msg = (
-        "Integer column has NA values"
-        if parser.engine == "c"
-        else "Unable to convert column DOY"
-    )
+    if parser.engine == "c":
+        msg = "Integer column has NA values"
+    elif parser.engine == "pyarrow":
+        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
+    else:
+        msg = "Unable to convert column DOY"
+
     with pytest.raises(ValueError, match=msg):
         parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
 
 
-@pytest.mark.usefixtures("pyarrow_xfail")
 def test_dtype_with_converters(all_parsers):
     parser = all_parsers
     data = """a,b
 1.1,2.2
 1.2,2.3"""
 
+    if parser.engine == "pyarrow":
+        msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
+            )
+        return
+
     # Dtype spec ignored if converted specified.
     result = parser.read_csv_check_warnings(

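Patch 5 carries the one library (non-test) change in the series: `_finalize_pandas_output` now passes every dtype spec through `pandas_dtype()` before `astype`, so string aliases are normalized to real dtype objects and an invalid spec fails up front with a TypeError, matching the c and python engines. A small illustration via the public alias of the same function (behavior as of pandas 2.x; a sketch, not part of the patch):

    from pandas.api.types import pandas_dtype

    print(pandas_dtype("i8"))         # int64
    print(pandas_dtype("category"))   # category (a CategoricalDtype)

    try:
        pandas_dtype("foo")           # invalid alias
    except TypeError as exc:
        print(exc)                    # e.g. data type 'foo' not understood

This normalization is what lets `test_invalid_dtype_per_column` run un-xfailed above: `dtype={"one": "foo", ...}` now raises the same TypeError under pyarrow as under the other engines.
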
From fa730e668e084752b4f37eb63ee7a4183b01a717 Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 13:47:10 -0800
Subject: [PATCH 6/9] de-xfail pyarrow tests

---
 .../tests/io/parser/dtypes/test_categorical.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/pandas/tests/io/parser/dtypes/test_categorical.py b/pandas/tests/io/parser/dtypes/test_categorical.py
index c7586bd9334ef..b1b35447b60c2 100644
--- a/pandas/tests/io/parser/dtypes/test_categorical.py
+++ b/pandas/tests/io/parser/dtypes/test_categorical.py
@@ -146,8 +146,6 @@ def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
     tm.assert_frame_equal(actual, expected)
 
 
-# ValueError: The 'chunksize' option is not supported with the 'pyarrow' engine
-@xfail_pyarrow
 def test_categorical_dtype_chunksize_infer_categories(all_parsers):
     # see gh-10153
     parser = all_parsers
@@ -160,6 +158,13 @@ def test_categorical_dtype_chunksize_infer_categories(all_parsers):
         DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
         DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
     ]
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
+        return
+
     with parser.read_csv(
         StringIO(data), dtype={"b": "category"}, chunksize=2
     ) as actuals:
@@ -167,8 +172,6 @@ def test_categorical_dtype_chunksize_infer_categories(all_parsers):
             tm.assert_frame_equal(actual, expected)
 
 
-# ValueError: The 'chunksize' option is not supported with the 'pyarrow' engine
-@xfail_pyarrow
 def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
     # see gh-10153
     parser = all_parsers
@@ -186,6 +189,13 @@ def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
         ),
     ]
     dtype = CategoricalDtype(cats)
+
+    if parser.engine == "pyarrow":
+        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
+        return
+
     with parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2) as actuals:
         for actual, expected in zip(actuals, expecteds):
             tm.assert_frame_equal(actual, expected)

From 9ca188608959e2c17342b2af868a99664c24a2a0 Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 13:50:16 -0800
Subject: [PATCH 7/9] de-xfail pyarrow tests

---
 pandas/tests/io/parser/common/test_ints.py | 24 ++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/pandas/tests/io/parser/common/test_ints.py b/pandas/tests/io/parser/common/test_ints.py
index 086b43be59823..41bfbb55d818f 100644
--- a/pandas/tests/io/parser/common/test_ints.py
+++ b/pandas/tests/io/parser/common/test_ints.py
@@ -126,10 +126,8 @@ def test_int64_min_issues(all_parsers):
     tm.assert_frame_equal(result, expected)
 
 
-# ValueError: The 'converters' option is not supported with the 'pyarrow' engine
-@xfail_pyarrow
 @pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
-def test_int64_overflow(all_parsers, conv):
+def test_int64_overflow(all_parsers, conv, request):
     data = """ID
 00013007854817840016671868
 00013007854817840016749251
@@ -143,6 +141,10 @@ def test_int64_overflow(all_parsers, conv):
     if conv is None:
         # 13007854817840016671868 > UINT64_MAX, so this
         # will overflow and return object as the dtype.
+        if parser.engine == "pyarrow":
+            mark = pytest.mark.xfail(reason="parses to float64")
+            request.applymarker(mark)
+
         result = parser.read_csv(StringIO(data))
         expected = DataFrame(
             [
@@ -161,13 +163,19 @@ def test_int64_overflow(all_parsers, conv):
         # 13007854817840016671868 > UINT64_MAX, so attempts
         # to cast to either int64 or uint64 will result in
         # an OverflowError being raised.
-        msg = (
-            "(Python int too large to convert to C long)|"
-            "(long too big to convert)|"
-            "(int too big to convert)"
+        msg = "|".join(
+            [
+                "Python int too large to convert to C long",
+                "long too big to convert",
+                "int too big to convert",
+            ]
         )
+        err = OverflowError
+        if parser.engine == "pyarrow":
+            err = ValueError
+            msg = "The 'converters' option is not supported with the 'pyarrow' engine"
 
-        with pytest.raises(OverflowError, match=msg):
+        with pytest.raises(err, match=msg):
             parser.read_csv(StringIO(data), converters={"ID": conv})

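The `"|".join` in patch 7 exists because the exact OverflowError wording varies across platforms and Python/NumPy versions, so the test accepts any of the known variants. The underlying arithmetic is easy to confirm: the 23-digit ID exceeds even the unsigned 64-bit range, so any fixed-width conversion must fail. A standalone check of that claim:

    import numpy as np

    val = 13007854817840016671868
    assert val > np.iinfo(np.uint64).max  # UINT64_MAX == 18446744073709551615

    try:
        np.uint64(val)
    except OverflowError as exc:
        print(exc)  # wording varies, e.g. "int too big to convert"

Under pyarrow the test never reaches the conversion at all: the `converters` option is rejected during argument validation, hence the swapped `err`/`msg` pair.
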
From 67d1be989d1d3c0e8ccb8ac1a86f31546ccc53c9 Mon Sep 17 00:00:00 2001
From: Brock
Date: Fri, 10 Nov 2023 13:56:39 -0800
Subject: [PATCH 8/9] de-xfail pyarrow test

---
 pandas/tests/io/parser/common/test_decimal.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/pandas/tests/io/parser/common/test_decimal.py b/pandas/tests/io/parser/common/test_decimal.py
index b8a68c138eeff..4ceca037f589a 100644
--- a/pandas/tests/io/parser/common/test_decimal.py
+++ b/pandas/tests/io/parser/common/test_decimal.py
@@ -13,10 +13,7 @@
     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
 )
 
-xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
-
 
-@xfail_pyarrow
 @pytest.mark.parametrize(
     "data,thousands,decimal",
     [
@@ -42,6 +39,14 @@ def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
     parser = all_parsers
     expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
 
+    if parser.engine == "pyarrow":
+        msg = "The 'thousands' option is not supported with the 'pyarrow' engine"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), sep="|", thousands=thousands, decimal=decimal
+            )
+        return
+
     result = parser.read_csv(
         StringIO(data), sep="|", thousands=thousands, decimal=decimal
     )

list("abab")}) + + if parser.engine == "pyarrow": + msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace + ) + return + result = parser.read_csv( StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace ) @@ -688,7 +729,6 @@ def test_first_row_bom_unquoted(all_parsers): tm.assert_frame_equal(result, expected) -@xfail_pyarrow @pytest.mark.parametrize("nrows", range(1, 6)) def test_blank_lines_between_header_and_data_rows(all_parsers, nrows): # GH 28071 @@ -698,6 +738,15 @@ def test_blank_lines_between_header_and_data_rows(all_parsers, nrows): ) csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4" parser = all_parsers + + if parser.engine == "pyarrow": + msg = "The 'nrows' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False + ) + return + df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False) tm.assert_frame_equal(df, ref[:nrows]) @@ -731,11 +780,16 @@ def test_read_csv_names_not_accepting_sets(all_parsers): parser.read_csv(StringIO(data), names=set("QAZ")) -@xfail_pyarrow def test_read_table_delim_whitespace_default_sep(all_parsers): # GH: 35958 f = StringIO("a b c\n1 -2 -3\n4 5 6") parser = all_parsers + + if parser.engine == "pyarrow": + msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_table(f, delim_whitespace=True) + return result = parser.read_table(f, delim_whitespace=True) expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]}) tm.assert_frame_equal(result, expected)