Commit 48c8038

add global FIRST_GCS_FILE_SUFFIX
1 parent b867d24 commit 48c8038

File tree

5 files changed (+26, -20 lines)
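
Context for the change: when BigQuery exports to a GCS URI containing a "*" wildcard, it writes numbered shards whose suffixes count up from 000000000000. These tests write through a wildcard path and read back only the first shard, which is why every call site substitutes the suffix into the path. A minimal sketch of the convention this commit centralizes (the bucket and object names below are hypothetical, not from the commit):

    # First shard suffix of a wildcard BigQuery export to GCS.
    FIRST_GCS_FILE_SUFFIX = "000000000000"

    # Hypothetical path for illustration; "*" marks where the shard number lands.
    write_path = "gs://example-bucket/test_output*.csv"
    read_path = write_path.replace("*", FIRST_GCS_FILE_SUFFIX)
    assert read_path == "gs://example-bucket/test_output000000000000.csv"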

tests/system/small/test_dataframe_io.py

Lines changed: 9 additions & 5 deletions
@@ -19,7 +19,11 @@
 import pyarrow as pa
 import pytest
 
-from tests.system.utils import assert_pandas_df_equal, convert_pandas_dtypes
+from tests.system.utils import (
+    assert_pandas_df_equal,
+    convert_pandas_dtypes,
+    FIRST_GCS_FILE_SUFFIX,
+)
 
 try:
     import pandas_gbq  # type: ignore
@@ -149,7 +153,7 @@ def test_to_csv_index(
     # read_csv will decode into bytes inproperly, convert_pandas_dtypes will encode properly from string
     dtype.pop("bytes_col")
     gcs_df = pd.read_csv(
-        path.replace("*", "000000000000"),
+        path.replace("*", FIRST_GCS_FILE_SUFFIX),
         dtype=dtype,
         date_format={"timestamp_col": "YYYY-MM-DD HH:MM:SS Z"},
         index_col=index_col,
@@ -187,7 +191,7 @@ def test_to_csv_tabs(
     # read_csv will decode into bytes inproperly, convert_pandas_dtypes will encode properly from string
     dtype.pop("bytes_col")
     gcs_df = pd.read_csv(
-        path.replace("*", "000000000000"),
+        path.replace("*", FIRST_GCS_FILE_SUFFIX),
         sep="\t",
         dtype=dtype,
         date_format={"timestamp_col": "YYYY-MM-DD HH:MM:SS Z"},
@@ -433,7 +437,7 @@ def test_to_json_index_records_orient(
     scalars_df.to_json(path, index=index, orient="records", lines=True)
 
     gcs_df = pd.read_json(
-        path.replace("*", "000000000000"),
+        path.replace("*", FIRST_GCS_FILE_SUFFIX),
         lines=True,
         convert_dates=["datetime_col"],
     )
@@ -475,7 +479,7 @@ def test_to_parquet_index(scalars_dfs, gcs_folder, index):
     # table.
     scalars_df.to_parquet(path, index=index)
 
-    gcs_df = pd.read_parquet(path.replace("*", "000000000000"))
+    gcs_df = pd.read_parquet(path.replace("*", FIRST_GCS_FILE_SUFFIX))
     convert_pandas_dtypes(gcs_df, bytes_col=False)
     if index and scalars_df.index.name is not None:
         gcs_df = gcs_df.set_index(scalars_df.index.name)

tests/system/small/test_encryption.py

Lines changed: 2 additions & 1 deletion
@@ -19,6 +19,7 @@
 
 import bigframes
 import bigframes.ml.linear_model
+from tests.system.utils import FIRST_GCS_FILE_SUFFIX
 
 
 @pytest.fixture(scope="module")
@@ -160,7 +161,7 @@ def test_read_csv_gcs(
     # Create a csv in gcs
     write_path = gcs_folder + "test_read_csv_gcs_bigquery_engine*.csv"
     read_path = (
-        write_path.replace("*", "000000000000") if engine is None else write_path
+        write_path.replace("*", FIRST_GCS_FILE_SUFFIX) if engine is None else write_path
     )
     scalars_df_index.to_csv(write_path)

tests/system/small/test_series.py

Lines changed: 3 additions & 2 deletions
@@ -27,6 +27,7 @@
 from tests.system.utils import (
     assert_pandas_df_equal,
     assert_series_equal,
+    FIRST_GCS_FILE_SUFFIX,
     skip_legacy_pandas,
 )
 
@@ -2393,7 +2394,7 @@ def test_to_frame(scalars_dfs):
 def test_to_json(gcs_folder, scalars_df_index, scalars_pandas_df_index):
     path = gcs_folder + "test_series_to_json*.jsonl"
     scalars_df_index["int64_col"].to_json(path, lines=True, orient="records")
-    gcs_df = pd.read_json(path.replace("*", "000000000000"), lines=True)
+    gcs_df = pd.read_json(path.replace("*", FIRST_GCS_FILE_SUFFIX), lines=True)
 
     pd.testing.assert_series_equal(
         gcs_df["int64_col"].astype(pd.Int64Dtype()),
@@ -2406,7 +2407,7 @@ def test_to_json(gcs_folder, scalars_df_index, scalars_pandas_df_index):
 def test_to_csv(gcs_folder, scalars_df_index, scalars_pandas_df_index):
     path = gcs_folder + "test_series_to_csv*.csv"
     scalars_df_index["int64_col"].to_csv(path)
-    gcs_df = pd.read_csv(path.replace("*", "000000000000"))
+    gcs_df = pd.read_csv(path.replace("*", FIRST_GCS_FILE_SUFFIX))
 
     pd.testing.assert_series_equal(
         gcs_df["int64_col"].astype(pd.Int64Dtype()),

tests/system/small/test_session.py

Lines changed: 10 additions & 12 deletions
@@ -30,9 +30,7 @@
 import bigframes.dataframe
 import bigframes.dtypes
 import bigframes.ml.linear_model
-from tests.system.utils import skip_legacy_pandas
-
-FIRST_FILE = "000000000000"
+from tests.system.utils import FIRST_GCS_FILE_SUFFIX, skip_legacy_pandas
 
 
 def test_read_gbq_tokyo(
@@ -442,7 +440,7 @@ def test_read_csv_gcs_default_engine(session, scalars_dfs, gcs_folder):
         path = gcs_folder + "test_read_csv_gcs_default_engine_w_index*.csv"
     else:
         path = gcs_folder + "test_read_csv_gcs_default_engine_wo_index*.csv"
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df.to_csv(path, index=False)
     dtype = scalars_df.dtypes.to_dict()
     dtype.pop("geography_col")
@@ -641,15 +639,15 @@ def test_read_csv_default_engine_throws_not_implemented_error(
         gcs_folder
         + "test_read_csv_gcs_default_engine_throws_not_implemented_error*.csv"
     )
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df_index.to_csv(path)
     with pytest.raises(NotImplementedError, match=match):
         session.read_csv(read_path, **kwargs)
 
 
 def test_read_csv_gcs_default_engine_w_header(session, scalars_df_index, gcs_folder):
     path = gcs_folder + "test_read_csv_gcs_default_engine_w_header*.csv"
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df_index.to_csv(path)
 
     # Skips header=N rows, normally considers the N+1th row as the header, but overridden by
@@ -716,7 +714,7 @@ def test_read_csv_gcs_default_engine_w_index_col_name(
     session, scalars_df_default_index, gcs_folder
 ):
     path = gcs_folder + "test_read_csv_gcs_default_engine_w_index_col_name*.csv"
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df_default_index.to_csv(path)
 
     df = session.read_csv(read_path, index_col="rowindex")
@@ -731,7 +729,7 @@ def test_read_csv_gcs_default_engine_w_index_col_index(
     session, scalars_df_default_index, gcs_folder
 ):
     path = gcs_folder + "test_read_csv_gcs_default_engine_w_index_col_index*.csv"
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df_default_index.to_csv(path)
 
     index_col = scalars_df_default_index.columns.to_list().index("rowindex")
@@ -790,7 +788,7 @@ def test_read_csv_local_default_engine_w_index_col_index(
 def test_read_csv_gcs_w_usecols(session, scalars_df_index, gcs_folder, engine):
     path = gcs_folder + "test_read_csv_gcs_w_usecols"
     path = path + "_default_engine*.csv" if engine is None else path + "_bq_engine*.csv"
-    read_path = path.replace("*", FIRST_FILE) if engine is None else path
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX) if engine is None else path
     scalars_df_index.to_csv(path)
 
     # df should only have 1 column which is bool_col.
@@ -902,7 +900,7 @@ def test_read_parquet_gcs(session: bigframes.Session, scalars_dfs, gcs_folder, e
 
     # Only bigquery engine for reads supports wildcards in path name.
     if engine != "bigquery":
-        path = path.replace("*", "000000000000")
+        path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
 
     df_out = (
         session.read_parquet(path, engine=engine)
@@ -1012,7 +1010,7 @@ def test_read_parquet_gcs_compression_not_supported(
 def test_read_json_gcs_bq_engine(session, scalars_dfs, gcs_folder):
     scalars_df, _ = scalars_dfs
     path = gcs_folder + "test_read_json_gcs_bq_engine_w_index*.json"
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df.to_json(path, index=False, lines=True, orient="records")
     df = session.read_json(read_path, lines=True, orient="records", engine="bigquery")
 
@@ -1036,7 +1034,7 @@ def test_read_json_gcs_bq_engine(session, scalars_dfs, gcs_folder):
 def test_read_json_gcs_default_engine(session, scalars_dfs, gcs_folder):
     scalars_df, _ = scalars_dfs
     path = gcs_folder + "test_read_json_gcs_default_engine_w_index*.json"
-    read_path = path.replace("*", FIRST_FILE)
+    read_path = path.replace("*", FIRST_GCS_FILE_SUFFIX)
     scalars_df.to_json(
         path,
         index=False,

tests/system/utils.py

Lines changed: 2 additions & 0 deletions
@@ -28,6 +28,8 @@
 
 from bigframes.functions import remote_function
 
+FIRST_GCS_FILE_SUFFIX = "000000000000"
+
 
 def skip_legacy_pandas(test):
     @functools.wraps(test)
Comments (0)