From 8d1646c36a289e3fe921aeeb5fd5ab60c7a10ae7 Mon Sep 17 00:00:00 2001 From: chris-b1 Date: Sun, 28 Aug 2016 08:14:55 -0500 Subject: [PATCH 1/3] COMPAT: int dtype in json tests (#14100) --- pandas/io/tests/json/test_ujson.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pandas/io/tests/json/test_ujson.py b/pandas/io/tests/json/test_ujson.py index 0dda6ead2a3b9..704023bd847b7 100644 --- a/pandas/io/tests/json/test_ujson.py +++ b/pandas/io/tests/json/test_ujson.py @@ -1321,20 +1321,22 @@ def testSeries(self): numpy=True)) outp = Series(**dec) + exp_np = Series(np.array([10, 20, 30, 40, 50, 60])) + exp_pd = Series([10, 20, 30, 40, 50, 60]) outp = Series(ujson.decode(ujson.encode(s, orient="records"), numpy=True)) - exp = Series([10, 20, 30, 40, 50, 60]) - tm.assert_series_equal(outp, exp) + tm.assert_series_equal(outp, exp_np) outp = Series(ujson.decode(ujson.encode(s, orient="records"))) - tm.assert_series_equal(outp, exp) + exp = Series([10, 20, 30, 40, 50, 60]) + tm.assert_series_equal(outp, exp_pd) outp = Series(ujson.decode(ujson.encode(s, orient="values"), numpy=True)) - tm.assert_series_equal(outp, exp) + tm.assert_series_equal(outp, exp_np) outp = Series(ujson.decode(ujson.encode(s, orient="values"))) - tm.assert_series_equal(outp, exp) + tm.assert_series_equal(outp, exp_pd) outp = Series(ujson.decode(ujson.encode( s, orient="index"))).sort_values() From c939ca5c1981fd12ab7beb57de706ebdb36bedd8 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Sat, 27 Aug 2016 11:25:52 -0400 Subject: [PATCH 2/3] Enable Google BigQuery (pandas.io.gbq) integration testing #11089 --- .travis.yml | 1 + ci/requirements-2.7.pip | 1 + pandas/io/tests/test_gbq.py | 288 +++++++++++++++++++++++------------- travis_gbq.json.enc | Bin 0 -> 2352 bytes 4 files changed, 190 insertions(+), 100 deletions(-) create mode 100644 travis_gbq.json.enc diff --git a/.travis.yml b/.travis.yml index 2716fa7628d61..aaf178a4b933f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -229,6 +229,7 @@ matrix: - USE_CACHE=true before_install: + - openssl aes-256-cbc -K $encrypted_4a66b2b60580_key -iv $encrypted_4a66b2b60580_iv -in travis_gbq.json.enc -out travis_gbq.json -d - echo "before_install" - echo $VIRTUAL_ENV - export PATH="$HOME/miniconda/bin:$PATH" diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip index cc3462dbf9ed0..d16b932c8be4f 100644 --- a/ci/requirements-2.7.pip +++ b/ci/requirements-2.7.pip @@ -6,3 +6,4 @@ oauth2client==1.5.0 pathlib backports.lzma py +PyCrypto diff --git a/pandas/io/tests/test_gbq.py b/pandas/io/tests/test_gbq.py index 4b71192c907f8..056622b671bd6 100644 --- a/pandas/io/tests/test_gbq.py +++ b/pandas/io/tests/test_gbq.py @@ -4,6 +4,8 @@ import pytz import platform from time import sleep +import os +import logging import numpy as np @@ -21,7 +23,11 @@ PRIVATE_KEY_JSON_PATH = None PRIVATE_KEY_JSON_CONTENTS = None -DATASET_ID = 'pydata_pandas_bq_testing' +if compat.PY3: + DATASET_ID = 'pydata_pandas_bq_testing_py3' +else: + DATASET_ID = 'pydata_pandas_bq_testing_py2' + TABLE_ID = 'new_test' DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID) @@ -35,25 +41,49 @@ def _skip_if_no_project_id(): - if not PROJECT_ID: + if not _get_project_id(): raise nose.SkipTest( "Cannot run integration tests without a project id") def _skip_if_no_private_key_path(): - if not PRIVATE_KEY_JSON_PATH: + if not _get_private_key_path(): raise nose.SkipTest("Cannot run integration tests without a " "private key json file path") def _skip_if_no_private_key_contents(): - 
if not PRIVATE_KEY_JSON_CONTENTS: + if not _get_private_key_contents(): raise nose.SkipTest("Cannot run integration tests without a " "private key json contents") - _skip_if_no_project_id() - _skip_if_no_private_key_path() - _skip_if_no_private_key_contents() + +def _in_travis_environment(): + return 'TRAVIS_BUILD_DIR' in os.environ + + +def _get_project_id(): + if _in_travis_environment(): + return 'pandas-travis' + else: + return PROJECT_ID + + +def _get_private_key_path(): + if _in_travis_environment(): + return os.path.join(os.environ.get('TRAVIS_BUILD_DIR'), + 'travis_gbq.json') + else: + return PRIVATE_KEY_JSON_PATH + + +def _get_private_key_contents(): + if _in_travis_environment(): + with open(os.path.join(os.environ.get('TRAVIS_BUILD_DIR'), + 'travis_gbq.json')) as f: + return f.read() + else: + return PRIVATE_KEY_JSON_CONTENTS def _test_imports(): @@ -144,18 +174,22 @@ def _test_imports(): "service account support") -def test_requirements(): +def _setup_common(): try: _test_imports() except (ImportError, NotImplementedError) as import_exception: raise nose.SkipTest(import_exception) + if _in_travis_environment(): + logging.getLogger('oauth2client').setLevel(logging.ERROR) + logging.getLogger('apiclient').setLevel(logging.ERROR) + def _check_if_can_get_correct_default_credentials(): # Checks if "Application Default Credentials" can be fetched # from the environment the tests are running in. # See Issue #13577 - test_requirements() + import httplib2 try: from googleapiclient.discovery import build @@ -169,19 +203,20 @@ def _check_if_can_get_correct_default_credentials(): bigquery_service = build('bigquery', 'v2', http=http) jobs = bigquery_service.jobs() job_data = {'configuration': {'query': {'query': 'SELECT 1'}}} - jobs.insert(projectId=PROJECT_ID, body=job_data).execute() + jobs.insert(projectId=_get_project_id(), body=job_data).execute() return True except: return False def clean_gbq_environment(private_key=None): - dataset = gbq._Dataset(PROJECT_ID, private_key=private_key) + dataset = gbq._Dataset(_get_project_id(), private_key=private_key) for i in range(1, 10): if DATASET_ID + str(i) in dataset.datasets(): dataset_id = DATASET_ID + str(i) - table = gbq._Table(PROJECT_ID, dataset_id, private_key=private_key) + table = gbq._Table(_get_project_id(), dataset_id, + private_key=private_key) for j in range(1, 20): if TABLE_ID + str(j) in dataset.tables(dataset_id): table.delete(TABLE_ID + str(j)) @@ -215,11 +250,11 @@ def test_generate_bq_schema_deprecated(): class TestGBQConnectorIntegration(tm.TestCase): def setUp(self): - test_requirements() - + _setup_common() _skip_if_no_project_id() - self.sut = gbq.GbqConnector(PROJECT_ID) + self.sut = gbq.GbqConnector(_get_project_id(), + private_key=_get_private_key_path()) def test_should_be_able_to_make_a_connector(self): self.assertTrue(self.sut is not None, @@ -259,13 +294,13 @@ def test_get_application_default_credentials_returns_credentials(self): class TestGBQConnectorServiceAccountKeyPathIntegration(tm.TestCase): def setUp(self): - test_requirements() + _setup_common() _skip_if_no_project_id() _skip_if_no_private_key_path() - self.sut = gbq.GbqConnector(PROJECT_ID, - private_key=PRIVATE_KEY_JSON_PATH) + self.sut = gbq.GbqConnector(_get_project_id(), + private_key=_get_private_key_path()) def test_should_be_able_to_make_a_connector(self): self.assertTrue(self.sut is not None, @@ -290,13 +325,13 @@ def test_should_be_able_to_get_results_from_query(self): class TestGBQConnectorServiceAccountKeyContentsIntegration(tm.TestCase): def 
setUp(self): - test_requirements() + _setup_common() _skip_if_no_project_id() - _skip_if_no_private_key_contents() + _skip_if_no_private_key_path() - self.sut = gbq.GbqConnector(PROJECT_ID, - private_key=PRIVATE_KEY_JSON_CONTENTS) + self.sut = gbq.GbqConnector(_get_project_id(), + private_key=_get_private_key_path()) def test_should_be_able_to_make_a_connector(self): self.assertTrue(self.sut is not None, @@ -321,7 +356,7 @@ def test_should_be_able_to_get_results_from_query(self): class GBQUnitTests(tm.TestCase): def setUp(self): - test_requirements() + _setup_common() def test_import_google_api_python_client(self): if compat.PY2: @@ -396,12 +431,12 @@ def test_read_gbq_with_empty_private_key_file_should_fail(self): private_key=empty_file_path) def test_read_gbq_with_corrupted_private_key_json_should_fail(self): - _skip_if_no_private_key_contents() + _skip_if_no_private_key_path() with tm.assertRaises(gbq.InvalidPrivateKeyFormat): gbq.read_gbq( 'SELECT 1', project_id='x', - private_key=re.sub('[a-z]', '9', PRIVATE_KEY_JSON_CONTENTS)) + private_key=re.sub('[a-z]', '9', _get_private_key_path())) class TestReadGBQIntegration(tm.TestCase): @@ -414,7 +449,7 @@ def setUpClass(cls): _skip_if_no_project_id() - test_requirements() + _setup_common() def setUp(self): # - PER-TEST FIXTURES - @@ -435,87 +470,108 @@ def tearDown(self): # executed. pass + def test_should_read_as_user_account(self): + if _in_travis_environment(): + raise nose.SkipTest("Cannot run local auth in travis environment") + + query = 'SELECT "PI" as VALID_STRING' + df = gbq.read_gbq(query, project_id=_get_project_id()) + tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']})) + def test_should_read_as_service_account_with_key_path(self): _skip_if_no_private_key_path() query = 'SELECT "PI" as VALID_STRING' - df = gbq.read_gbq(query, project_id=PROJECT_ID, - private_key=PRIVATE_KEY_JSON_PATH) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']})) def test_should_read_as_service_account_with_key_contents(self): _skip_if_no_private_key_contents() query = 'SELECT "PI" as VALID_STRING' - df = gbq.read_gbq(query, project_id=PROJECT_ID, - private_key=PRIVATE_KEY_JSON_CONTENTS) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_contents()) tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']})) def test_should_properly_handle_valid_strings(self): query = 'SELECT "PI" as VALID_STRING' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'VALID_STRING': ['PI']})) def test_should_properly_handle_empty_strings(self): query = 'SELECT "" as EMPTY_STRING' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'EMPTY_STRING': [""]})) def test_should_properly_handle_null_strings(self): query = 'SELECT STRING(NULL) as NULL_STRING' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'NULL_STRING': [None]})) def test_should_properly_handle_valid_integers(self): query = 'SELECT INTEGER(3) as VALID_INTEGER' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + 
private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'VALID_INTEGER': [3]})) def test_should_properly_handle_null_integers(self): query = 'SELECT INTEGER(NULL) as NULL_INTEGER' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'NULL_INTEGER': [np.nan]})) def test_should_properly_handle_valid_floats(self): query = 'SELECT PI() as VALID_FLOAT' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame( {'VALID_FLOAT': [3.141592653589793]})) def test_should_properly_handle_null_floats(self): query = 'SELECT FLOAT(NULL) as NULL_FLOAT' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'NULL_FLOAT': [np.nan]})) def test_should_properly_handle_timestamp_unix_epoch(self): query = 'SELECT TIMESTAMP("1970-01-01 00:00:00") as UNIX_EPOCH' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame( {'UNIX_EPOCH': [np.datetime64('1970-01-01T00:00:00.000000Z')]})) def test_should_properly_handle_arbitrary_timestamp(self): query = 'SELECT TIMESTAMP("2004-09-15 05:00:00") as VALID_TIMESTAMP' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({ 'VALID_TIMESTAMP': [np.datetime64('2004-09-15T05:00:00.000000Z')] })) def test_should_properly_handle_null_timestamp(self): query = 'SELECT TIMESTAMP(NULL) as NULL_TIMESTAMP' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'NULL_TIMESTAMP': [NaT]})) def test_should_properly_handle_true_boolean(self): query = 'SELECT BOOLEAN(TRUE) as TRUE_BOOLEAN' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'TRUE_BOOLEAN': [True]})) def test_should_properly_handle_false_boolean(self): query = 'SELECT BOOLEAN(FALSE) as FALSE_BOOLEAN' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'FALSE_BOOLEAN': [False]})) def test_should_properly_handle_null_boolean(self): query = 'SELECT BOOLEAN(NULL) as NULL_BOOLEAN' - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, DataFrame({'NULL_BOOLEAN': [None]})) def test_unicode_string_conversion_and_normalization(self): @@ -530,13 +586,15 @@ def test_unicode_string_conversion_and_normalization(self): query = 'SELECT "{0}" as UNICODE_STRING'.format(unicode_string) - df = gbq.read_gbq(query, project_id=PROJECT_ID) + df = gbq.read_gbq(query, project_id=_get_project_id(), + private_key=_get_private_key_path()) tm.assert_frame_equal(df, correct_test_datatype) def test_index_column(self): query = "SELECT 'a' as STRING_1, 'b' as STRING_2" - result_frame = gbq.read_gbq( - query, 
project_id=PROJECT_ID, index_col="STRING_1") + result_frame = gbq.read_gbq(query, project_id=_get_project_id(), + index_col="STRING_1", + private_key=_get_private_key_path()) correct_frame = DataFrame( {'STRING_1': ['a'], 'STRING_2': ['b']}).set_index("STRING_1") tm.assert_equal(result_frame.index.name, correct_frame.index.name) @@ -544,8 +602,9 @@ def test_index_column(self): def test_column_order(self): query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3" col_order = ['STRING_3', 'STRING_1', 'STRING_2'] - result_frame = gbq.read_gbq( - query, project_id=PROJECT_ID, col_order=col_order) + result_frame = gbq.read_gbq(query, project_id=_get_project_id(), + col_order=col_order, + private_key=_get_private_key_path()) correct_frame = DataFrame({'STRING_1': ['a'], 'STRING_2': [ 'b'], 'STRING_3': ['c']})[col_order] tm.assert_frame_equal(result_frame, correct_frame) @@ -553,8 +612,9 @@ def test_column_order(self): def test_column_order_plus_index(self): query = "SELECT 'a' as STRING_1, 'b' as STRING_2, 'c' as STRING_3" col_order = ['STRING_3', 'STRING_2'] - result_frame = gbq.read_gbq(query, project_id=PROJECT_ID, - index_col='STRING_1', col_order=col_order) + result_frame = gbq.read_gbq(query, project_id=_get_project_id(), + index_col='STRING_1', col_order=col_order, + private_key=_get_private_key_path()) correct_frame = DataFrame( {'STRING_1': ['a'], 'STRING_2': ['b'], 'STRING_3': ['c']}) correct_frame.set_index('STRING_1', inplace=True) @@ -564,16 +624,19 @@ def test_column_order_plus_index(self): def test_malformed_query(self): with tm.assertRaises(gbq.GenericGBQException): gbq.read_gbq("SELCET * FORM [publicdata:samples.shakespeare]", - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) def test_bad_project_id(self): with tm.assertRaises(gbq.GenericGBQException): - gbq.read_gbq("SELECT 1", project_id='001') + gbq.read_gbq("SELECT 1", project_id='001', + private_key=_get_private_key_path()) def test_bad_table_name(self): with tm.assertRaises(gbq.GenericGBQException): gbq.read_gbq("SELECT * FROM [publicdata:samples.nope]", - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) def test_download_dataset_larger_than_200k_rows(self): test_size = 200005 @@ -582,7 +645,8 @@ def test_download_dataset_larger_than_200k_rows(self): df = gbq.read_gbq("SELECT id FROM [publicdata:samples.wikipedia] " "GROUP EACH BY id ORDER BY id ASC LIMIT {0}" .format(test_size), - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) self.assertEqual(len(df.drop_duplicates()), test_size) def test_zero_rows(self): @@ -590,7 +654,8 @@ def test_zero_rows(self): df = gbq.read_gbq("SELECT title, id " "FROM [publicdata:samples.wikipedia] " "WHERE timestamp=-9999999", - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) page_array = np.zeros( (0,), dtype=[('title', object), ('id', np.dtype(float))]) expected_result = DataFrame(page_array, columns=['title', 'id']) @@ -602,13 +667,15 @@ def test_legacy_sql(self): # Test that a legacy sql statement fails when # setting dialect='standard' with tm.assertRaises(gbq.GenericGBQException): - gbq.read_gbq(legacy_sql, project_id=PROJECT_ID, - dialect='standard') + gbq.read_gbq(legacy_sql, project_id=_get_project_id(), + dialect='standard', + private_key=_get_private_key_path()) # Test that a legacy sql statement succeeds when # setting dialect='legacy' - df = gbq.read_gbq(legacy_sql, project_id=PROJECT_ID, - 
dialect='legacy') + df = gbq.read_gbq(legacy_sql, project_id=_get_project_id(), + dialect='legacy', + private_key=_get_private_key_path()) self.assertEqual(len(df.drop_duplicates()), 10) def test_standard_sql(self): @@ -618,12 +685,14 @@ def test_standard_sql(self): # Test that a standard sql statement fails when using # the legacy SQL dialect (default value) with tm.assertRaises(gbq.GenericGBQException): - gbq.read_gbq(standard_sql, project_id=PROJECT_ID) + gbq.read_gbq(standard_sql, project_id=_get_project_id(), + private_key=_get_private_key_path()) # Test that a standard sql statement succeeds when # setting dialect='standard' - df = gbq.read_gbq(standard_sql, project_id=PROJECT_ID, - dialect='standard') + df = gbq.read_gbq(standard_sql, project_id=_get_project_id(), + dialect='standard', + private_key=_get_private_key_path()) self.assertEqual(len(df.drop_duplicates()), 10) def test_invalid_option_for_sql_dialect(self): @@ -632,13 +701,14 @@ def test_invalid_option_for_sql_dialect(self): # Test that an invalid option for `dialect` raises ValueError with tm.assertRaises(ValueError): - gbq.read_gbq(sql_statement, project_id=PROJECT_ID, - dialect='invalid') + gbq.read_gbq(sql_statement, project_id=_get_project_id(), + dialect='invalid', + private_key=_get_private_key_path()) # Test that a correct option for dialect succeeds # to make sure ValueError was due to invalid dialect - gbq.read_gbq(sql_statement, project_id=PROJECT_ID, - dialect='standard') + gbq.read_gbq(sql_statement, project_id=_get_project_id(), + dialect='standard', private_key=_get_private_key_path()) class TestToGBQIntegration(tm.TestCase): @@ -656,18 +726,22 @@ def setUpClass(cls): _skip_if_no_project_id() - test_requirements() - clean_gbq_environment() + _setup_common() + clean_gbq_environment(_get_private_key_path()) - gbq._Dataset(PROJECT_ID).create(DATASET_ID + "1") + gbq._Dataset(_get_project_id(), + private_key=_get_private_key_path() + ).create(DATASET_ID + "1") def setUp(self): # - PER-TEST FIXTURES - # put here any instruction you want to be run *BEFORE* *EVERY* test is # executed. - self.dataset = gbq._Dataset(PROJECT_ID) - self.table = gbq._Table(PROJECT_ID, DATASET_ID + "1") + self.dataset = gbq._Dataset(_get_project_id(), + private_key=_get_private_key_path()) + self.table = gbq._Table(_get_project_id(), DATASET_ID + "1", + private_key=_get_private_key_path()) @classmethod def tearDownClass(cls): @@ -675,7 +749,7 @@ def tearDownClass(cls): # put here any instruction you want to execute only *ONCE* *AFTER* # executing all tests. - clean_gbq_environment() + clean_gbq_environment(_get_private_key_path()) def tearDown(self): # - PER-TEST FIXTURES - @@ -689,13 +763,15 @@ def test_upload_data(self): test_size = 20001 df = make_mixed_dataframe_v2(test_size) - gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000) + gbq.to_gbq(df, destination_table, _get_project_id(), chunksize=10000, + private_key=_get_private_key_path()) sleep(30) # <- Curses Google!!! 
result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM {0}" .format(destination_table), - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) self.assertEqual(result['NUM_ROWS'][0], test_size) def test_upload_data_if_table_exists_fail(self): @@ -707,11 +783,13 @@ def test_upload_data_if_table_exists_fail(self): # Test the default value of if_exists is 'fail' with tm.assertRaises(gbq.TableCreationError): - gbq.to_gbq(df, destination_table, PROJECT_ID) + gbq.to_gbq(df, destination_table, _get_project_id(), + private_key=_get_private_key_path()) # Test the if_exists parameter with value 'fail' with tm.assertRaises(gbq.TableCreationError): - gbq.to_gbq(df, destination_table, PROJECT_ID, if_exists='fail') + gbq.to_gbq(df, destination_table, _get_project_id(), + if_exists='fail', private_key=_get_private_key_path()) def test_upload_data_if_table_exists_append(self): destination_table = DESTINATION_TABLE + "3" @@ -721,22 +799,26 @@ def test_upload_data_if_table_exists_append(self): df_different_schema = tm.makeMixedDataFrame() # Initialize table with sample data - gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000) + gbq.to_gbq(df, destination_table, _get_project_id(), chunksize=10000, + private_key=_get_private_key_path()) # Test the if_exists parameter with value 'append' - gbq.to_gbq(df, destination_table, PROJECT_ID, if_exists='append') + gbq.to_gbq(df, destination_table, _get_project_id(), + if_exists='append', private_key=_get_private_key_path()) sleep(30) # <- Curses Google!!! result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM {0}" .format(destination_table), - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) self.assertEqual(result['NUM_ROWS'][0], test_size * 2) # Try inserting with a different schema, confirm failure with tm.assertRaises(gbq.InvalidSchema): gbq.to_gbq(df_different_schema, destination_table, - PROJECT_ID, if_exists='append') + _get_project_id(), if_exists='append', + private_key=_get_private_key_path()) def test_upload_data_if_table_exists_replace(self): destination_table = DESTINATION_TABLE + "4" @@ -746,17 +828,20 @@ def test_upload_data_if_table_exists_replace(self): df_different_schema = tm.makeMixedDataFrame() # Initialize table with sample data - gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000) + gbq.to_gbq(df, destination_table, _get_project_id(), chunksize=10000, + private_key=_get_private_key_path()) # Test the if_exists parameter with the value 'replace'. gbq.to_gbq(df_different_schema, destination_table, - PROJECT_ID, if_exists='replace') + _get_project_id(), if_exists='replace', + private_key=_get_private_key_path()) sleep(30) # <- Curses Google!!! 
result = gbq.read_gbq("SELECT COUNT(*) as NUM_ROWS FROM {0}" .format(destination_table), - project_id=PROJECT_ID) + project_id=_get_project_id(), + private_key=_get_private_key_path()) self.assertEqual(result['NUM_ROWS'][0], 5) def test_google_upload_errors_should_raise_exception(self): @@ -769,7 +854,8 @@ def test_google_upload_errors_should_raise_exception(self): index=range(2)) with tm.assertRaises(gbq.StreamingInsertError): - gbq.to_gbq(bad_df, destination_table, PROJECT_ID, verbose=True) + gbq.to_gbq(bad_df, destination_table, _get_project_id(), + verbose=True, private_key=_get_private_key_path()) def test_generate_schema(self): df = tm.makeMixedDataFrame() @@ -828,7 +914,9 @@ def test_list_dataset(self): def test_list_table_zero_results(self): dataset_id = DATASET_ID + "2" self.dataset.create(dataset_id) - table_list = gbq._Dataset(PROJECT_ID).tables(dataset_id) + table_list = gbq._Dataset(_get_project_id(), + private_key=_get_private_key_path() + ).tables(dataset_id) self.assertEqual(len(table_list), 0, 'Expected gbq.list_table() to return 0') @@ -854,7 +942,7 @@ def test_dataset_exists(self): def create_table_data_dataset_does_not_exist(self): dataset_id = DATASET_ID + "6" table_id = TABLE_ID + "1" - table_with_new_dataset = gbq._Table(PROJECT_ID, dataset_id) + table_with_new_dataset = gbq._Table(_get_project_id(), dataset_id) df = make_mixed_dataframe_v2(10) table_with_new_dataset.create(table_id, gbq._generate_bq_schema(df)) self.assertTrue(self.dataset.exists(dataset_id), @@ -884,8 +972,8 @@ def setUpClass(cls): _skip_if_no_project_id() _skip_if_no_private_key_path() - test_requirements() - clean_gbq_environment(PRIVATE_KEY_JSON_PATH) + _setup_common() + clean_gbq_environment(_get_private_key_path()) def setUp(self): # - PER-TEST FIXTURES - @@ -899,7 +987,7 @@ def tearDownClass(cls): # put here any instruction you want to execute only *ONCE* *AFTER* # executing all tests. - clean_gbq_environment(PRIVATE_KEY_JSON_PATH) + clean_gbq_environment(_get_private_key_path()) def tearDown(self): # - PER-TEST FIXTURES - @@ -913,15 +1001,15 @@ def test_upload_data_as_service_account_with_key_path(self): test_size = 10 df = make_mixed_dataframe_v2(test_size) - gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000, - private_key=PRIVATE_KEY_JSON_PATH) + gbq.to_gbq(df, destination_table, _get_project_id(), chunksize=10000, + private_key=_get_private_key_path()) sleep(30) # <- Curses Google!!! result = gbq.read_gbq( "SELECT COUNT(*) as NUM_ROWS FROM {0}".format(destination_table), - project_id=PROJECT_ID, - private_key=PRIVATE_KEY_JSON_PATH) + project_id=_get_project_id(), + private_key=_get_private_key_path()) self.assertEqual(result['NUM_ROWS'][0], test_size) @@ -940,11 +1028,11 @@ def setUpClass(cls): # put here any instruction you want to execute only *ONCE* *BEFORE* # executing *ALL* tests described below. + _setup_common() _skip_if_no_project_id() _skip_if_no_private_key_contents() - test_requirements() - clean_gbq_environment(PRIVATE_KEY_JSON_CONTENTS) + clean_gbq_environment(_get_private_key_contents()) def setUp(self): # - PER-TEST FIXTURES - @@ -958,7 +1046,7 @@ def tearDownClass(cls): # put here any instruction you want to execute only *ONCE* *AFTER* # executing all tests. 
- clean_gbq_environment(PRIVATE_KEY_JSON_CONTENTS) + clean_gbq_environment(_get_private_key_contents()) def tearDown(self): # - PER-TEST FIXTURES - @@ -972,15 +1060,15 @@ def test_upload_data_as_service_account_with_key_contents(self): test_size = 10 df = make_mixed_dataframe_v2(test_size) - gbq.to_gbq(df, destination_table, PROJECT_ID, chunksize=10000, - private_key=PRIVATE_KEY_JSON_CONTENTS) + gbq.to_gbq(df, destination_table, _get_project_id(), chunksize=10000, + private_key=_get_private_key_contents()) sleep(30) # <- Curses Google!!! result = gbq.read_gbq( "SELECT COUNT(*) as NUM_ROWS FROM {0}".format(destination_table), - project_id=PROJECT_ID, - private_key=PRIVATE_KEY_JSON_CONTENTS) + project_id=_get_project_id(), + private_key=_get_private_key_contents()) self.assertEqual(result['NUM_ROWS'][0], test_size) if __name__ == '__main__': diff --git a/travis_gbq.json.enc b/travis_gbq.json.enc new file mode 100644 index 0000000000000000000000000000000000000000..fd053738770a3e3985cb496037e02d2d00f02e3c GIT binary patch literal 2352 zcmV-03D5Q-+|}ycsTZQUw7-o6E+RWnnRSEh%OD#ByA#aFg#H>#ku10PXN_lg9dR$q z+Zbt`R*DCf1x_gcthW{@GAHn28n6XLm5s(^KCQO~wYar^GT~$o5-F%++Pg5^9%u2l zh0Ws%a;%CV#e9y9M$;Oca3on$pXdCcF19IJEzoPg92TUX{c+>H#Y54T@it%J$4Ap~H1A?}9F7or0AQdzK`6Fq!F$Z;q{uKi#t z@L(3HEMFjq@hKptv71}@Z#A-5hX9w_YUol~|C3K;UaRZSRQ%&gc=BAlC|_^`Dp6}woP@$p^kF5?YPSp@Re zu%{d>MuY?hI+<}eUkl0jmI!z@w5^riyzZoYx7?q1)DbI61c9!kGx8=r5Y<%-6BXB7{ne8+VRjfc2;WDE<_?IzRO z6hp>*b9>B76+@VXlV{QSETv@Tih8|p=1ZL#Q9VAph}GW8DTX}SRXNI(4mu?2eKeLU3h zoh<;0gB&y?P|646qj@@!kRRp!F{z0LD~{*>>}!K$id`jdj>6u3otVfC_$U_mfQNg@ zM649N^o3$L!}pw;%#h&prZ_jV)*d`oYnOFwMq=%;GP2;(O}FawNv58U{HpgphX|%c zWiLWmIB>C{@t|H7cWe7-Md$6;k#hzMPpD)`==HWJmc3|WgA*IQARd~*yHUABgF1G; z&WeF4c;M>OxqvEf7%yKAFF$x;l@<1I4O}`?TAg_ z98|QbWohm4PCG5UXd?rFe8Bj#(2_3X{sXywAmjLLnUmia7vpNC=BTHdah_xJ69F}E z2rxH-03(~!CHuzA*#C?-%R<+CNzYwv1f8~96mUFXQ&igqaP+3O46aSkGlDfcSUkK z>!#_a%sv?z5AT|{-j?TL&AX5!GJbZdHTSIRe}p%~50Ks%=w{@eD_=G-VyUEqOUu#V zb``U{jn2wcXY0GuuCMY*!;!?su*hju`4t94qW^y7Qw|?Wg#dG*HG8B4Gf*=NGKZ_x6Ab1f4e^s4M$ z7di28)NDZKSS26{Y~nfq(22m>VrzMCy*WWXtffhi{NjCti|j+wYi0rVghyGAi9-sr zlY&oDh($v-sE$tMI<)A?9@{xIyU+=5IRJeS0QX{TxN9Qk3W+NtVzud7E8HbMmGrYh zCq;N}zz=0E)VZ+y3a z!+tw?yQw;*ZE_K3J<`q95cf+s#QP8UFfIxFB`E{iHvy#&lhMP10Ibw)`&$!zO@4CE z{u=+smTR!^9qxb<4F>s@enL{faAm3;@-RnG5wV@U&~JctbZcRR!Cb?Fc4S}(YAoWo z&wK!7SZ4@(EVJmfl>{}&4ne)*MotDaZfazvRi?m;IhzmLSx&sk6PtZu1j-EuZC4!8 zZJZ25SW}t{Ks_o^1N}&_O0{K9<9#ts_aFFzUaK9qE~oQczxpkx;z3aepl9trWHVhTs^Qd=eX*uE1x@|2nUsUFgVo-m$dd2s)JaK zZr%y@qRNpDBpSCe9DlX*wy-^0Q+`lr2#n=QmEzOPskFJ>qA1@)bsR~AYYADk9avQy z_+DMzl5tt3+wP<)ihQsYTbBxI2wA)om&bk8JT;^le@mkDhoyly24Z-urVqpK9a?^_ z)CaQ@?z+)c|R_YXjB|22@)N3`_a zH)G7V&%TX}HC@g0U~4z|^G2+OY?lQt1C&w+QCpYX7#WS|x#j$-Va?5XI-kSEKwB$@ zmBVJo{0B*a>lZ)*p8f?l*DG?9b`+*rCN3VO@H@~xK_kY>gLV7bIUvq8m}W`36-g+K zL%Z+ECMf7hma!ZV}iTVS*+`_~mf3;jw!65Q9(z{c=Jm53LF&|I4GQ zd#l$=!IV*nv81Ct)*Wlx1B+ma-RtKG5f9XV2^PZzjBpAHvEDO`ofr?MTs^DYbhm=5 zmPpn6BvDv0ZHPHaT=Y#wg0$SZgiyN4re#Tsnsz( zh~zB)A7A&urlw9{@JO1dqJFaX$92GZ9 Date: Sun, 28 Aug 2016 10:18:32 -0400 Subject: [PATCH 3/3] re-encode travis_gbq.json for travis on pydata/pandas --- .travis.yml | 3 ++- ci/travis_gbq.json.enc | Bin 0 -> 2352 bytes travis_gbq.json.enc | Bin 2352 -> 0 bytes 3 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 ci/travis_gbq.json.enc delete mode 100644 travis_gbq.json.enc diff --git a/.travis.yml b/.travis.yml index aaf178a4b933f..741f0541827a8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -229,7 +229,8 @@ matrix: - USE_CACHE=true before_install: - - openssl aes-256-cbc -K 
$encrypted_4a66b2b60580_key -iv $encrypted_4a66b2b60580_iv -in travis_gbq.json.enc -out travis_gbq.json -d
+
+ - openssl aes-256-cbc -K $encrypted_1d9d7b1f171b_key -iv $encrypted_1d9d7b1f171b_iv -in ci/travis_gbq.json.enc -out ci/travis_gbq.json -d
 - echo "before_install"
 - echo $VIRTUAL_ENV
 - export PATH="$HOME/miniconda/bin:$PATH"
diff --git a/ci/travis_gbq.json.enc b/ci/travis_gbq.json.enc
new file mode 100644
index 0000000000000000000000000000000000000000..c2a33bbd6f26383bd7e8a7a504e626284efb5fd0
GIT binary patch
literal 2352
zcmV-03D5QoiY_vZjh&7QCFrhKcFBG@`zj6HxkUamBtL*$SOfIYLQAnP$$?HCW-UzE
zqY3S}bS_tytBr;XZgqTWlqlC0A?TtDDzJS4<-4yF+82AKZYaOSzyy
z)LIN&*Phn|s>u2rH)V_1hyj-xu@)mBOg%_tj5_Sz6kyK>B5Gj0bp;~khYB=Ul|&X?
zUFSM`<{}P#4_#PMfT#y?P!&Q=azAz#tG@DOU=aLF%RTb9pTg+mwrTZ+`_vBO5^xdb
zCk{k&n*k1|x?M-4M;q$_?J$Z=GMNDL*;ETHrT|OpFalF9aJ;1NN8;rz^YfzF2c#MtNZvI;NuIJQ-M<=GHh=X9{ian$nm(H@?nOf1bgG`&RpLSr<5g9xf
z2teKs?kATag6a+LsF}ejFjmcfSCRZKh(1~}uiJ(Qc@Q;)ValsMLtF!2X$O%Cb
z2KMdb?&ns7GPy+RSdg<1=+QLqzgq74x1J+)2!4_{d|gtTVv9I=qfT>YNLb!NjSeg=
zF|Qh88XA3rHR)>wth;QO_M(&hfA8)$QEpGgANx7DK|J`dW)T_`Xz_E!NK^R8RZg$y
zc5}UIuDBt}n1#0!5GPf8Jbgag71LqHsVxL^@1qNIX|Dy=0vXV0(4^j2t$?ktEZdd5
zu_ckdLNK1WUPlJaR4^MLsqCIlhr=wrO2O}*qt8Z*MskXFh93(O!7RnBrwEDnT<`it5D0Mb#*2bx#aqC@LEJC=x_>Rx<|ygktaBRpWD
z4#{MIj?XI%F|f1Z!qi;RP!vt6Ble@nmfAd}TzlXws1BJ)f5{5gri+aezIomN6ImrH
zx}$i#tM@W$hzh(j)Gt+D=6S|?h}()_-~|h%S3)QyM`7f{Yf{v>p$dbYb8XdaAwacm
zYIgF03~bBRJ?Q|Rm{AoSq^LSBkDa|`3tNoi02mXu+-Du+k_EUwoHMFk922)^pS;_D6#vtq~4S
z0+*&E9tblkhvce%@L*}odrsPg
ze1D(imA!lhnI7E+EDFG9720>Y4#l_d;0oNsr)BvjIN8`WGnc1$a?%?ycY8#Jhm$-C3s{t9ZH!5Tdr>`t41
zT)!t07R`S+w73>s@5X;v4d{Zrz<~%E?>$ry4A?zF{TOsf3y|_$p=_p^7
zyHtMEaO`#lEy8g>>v{%h!1*z-W`(rGI}x7M3P7v}4?u6$pF9q$Z>h4+;M|XMMXn-`
zt;L)h+N2X->u!;3$*+|@qIVFK-FHTOWzOKyOMLi?7uHQUumZzC>x@c?*cS{IeR9pz
z%j|yMgIP(6EQpB4%%ANMRmAGv^MZ8l-{UC8Un6k3C~MltE7?VC^N!9xT725P)|Gtf
z&Y(8ua0ZUJO(-Sc>1rq^R0ra;Wa5&>w$UCFV36KRm<$T^2(h&JMd-wYacGQvViWbN
z;Sj}nB6rj56!|*PGf00&z+`c`4W3nX4V>s9=aCW8AGAn)EiROzk#ku76;QET`eHgm
z(nw)$QzY5E$?_QwzB-{3OpF_c;7(A1@_v7pYaO5JgoY(y&*&O#VUKi8dkA)N#1BEo
z^s5wOm{@=f>c|t#|7>EeQqHh!uRXjICpE`%G!Z+Zt<^J-#-9iG(VG#%Nv?sI+
zbc`m4USJyzcgu?tl;%C}Ez6G@|f#&^hF+`g-yrj{hmY4yhlk+b#gV44cV?S5r%;?ge?g
z#lzI?kuY1oXLg&XxdkBG8g*9plC**(x1xRs!fCuZZfAb#o*pyTq1{n<-CM+4c6lHo
zqhwh;eK)Jl1X}YUP)?=oto!8X%qgNi1g>n7$x+*H3lrxcs&2-MENP(#=M;+oe_zRD
zmCP_qF1Fe;UFgs(|6U79ig}b`dz4{4Eh38)&RvnO=3V=+bB@oe8weiJM6CJ5c%GQ-iz&#q=Du>_LJKa?c5%>1J4;MeQNYk^_$~
;|WA1#Nz81yr8Jafys`4PisrSy?Jw~yQrKw#cLkq4Jq8We*d_mk#2#X^w3p=gJB>*
z#!GJ%sBPy+SR&x<$od^Zj0!
zidEfbN|w72WG4PR*<}{0X+HTW38KvQlnKe|LO@K*{nS!xOGu^})|VMf4R={d{^$ZY Wc%~RC+CiWM`BrrE1b(~# literal 0 HcmV?d00001 diff --git a/travis_gbq.json.enc b/travis_gbq.json.enc deleted file mode 100644 index fd053738770a3e3985cb496037e02d2d00f02e3c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2352 zcmV-03D5Q-+|}ycsTZQUw7-o6E+RWnnRSEh%OD#ByA#aFg#H>#ku10PXN_lg9dR$q z+Zbt`R*DCf1x_gcthW{@GAHn28n6XLm5s(^KCQO~wYar^GT~$o5-F%++Pg5^9%u2l zh0Ws%a;%CV#e9y9M$;Oca3on$pXdCcF19IJEzoPg92TUX{c+>H#Y54T@it%J$4Ap~H1A?}9F7or0AQdzK`6Fq!F$Z;q{uKi#t z@L(3HEMFjq@hKptv71}@Z#A-5hX9w_YUol~|C3K;UaRZSRQ%&gc=BAlC|_^`Dp6}woP@$p^kF5?YPSp@Re zu%{d>MuY?hI+<}eUkl0jmI!z@w5^riyzZoYx7?q1)DbI61c9!kGx8=r5Y<%-6BXB7{ne8+VRjfc2;WDE<_?IzRO z6hp>*b9>B76+@VXlV{QSETv@Tih8|p=1ZL#Q9VAph}GW8DTX}SRXNI(4mu?2eKeLU3h zoh<;0gB&y?P|646qj@@!kRRp!F{z0LD~{*>>}!K$id`jdj>6u3otVfC_$U_mfQNg@ zM649N^o3$L!}pw;%#h&prZ_jV)*d`oYnOFwMq=%;GP2;(O}FawNv58U{HpgphX|%c zWiLWmIB>C{@t|H7cWe7-Md$6;k#hzMPpD)`==HWJmc3|WgA*IQARd~*yHUABgF1G; z&WeF4c;M>OxqvEf7%yKAFF$x;l@<1I4O}`?TAg_ z98|QbWohm4PCG5UXd?rFe8Bj#(2_3X{sXywAmjLLnUmia7vpNC=BTHdah_xJ69F}E z2rxH-03(~!CHuzA*#C?-%R<+CNzYwv1f8~96mUFXQ&igqaP+3O46aSkGlDfcSUkK z>!#_a%sv?z5AT|{-j?TL&AX5!GJbZdHTSIRe}p%~50Ks%=w{@eD_=G-VyUEqOUu#V zb``U{jn2wcXY0GuuCMY*!;!?su*hju`4t94qW^y7Qw|?Wg#dG*HG8B4Gf*=NGKZ_x6Ab1f4e^s4M$ z7di28)NDZKSS26{Y~nfq(22m>VrzMCy*WWXtffhi{NjCti|j+wYi0rVghyGAi9-sr zlY&oDh($v-sE$tMI<)A?9@{xIyU+=5IRJeS0QX{TxN9Qk3W+NtVzud7E8HbMmGrYh zCq;N}zz=0E)VZ+y3a z!+tw?yQw;*ZE_K3J<`q95cf+s#QP8UFfIxFB`E{iHvy#&lhMP10Ibw)`&$!zO@4CE z{u=+smTR!^9qxb<4F>s@enL{faAm3;@-RnG5wV@U&~JctbZcRR!Cb?Fc4S}(YAoWo z&wK!7SZ4@(EVJmfl>{}&4ne)*MotDaZfazvRi?m;IhzmLSx&sk6PtZu1j-EuZC4!8 zZJZ25SW}t{Ks_o^1N}&_O0{K9<9#ts_aFFzUaK9qE~oQczxpkx;z3aepl9trWHVhTs^Qd=eX*uE1x@|2nUsUFgVo-m$dd2s)JaK zZr%y@qRNpDBpSCe9DlX*wy-^0Q+`lr2#n=QmEzOPskFJ>qA1@)bsR~AYYADk9avQy z_+DMzl5tt3+wP<)ihQsYTbBxI2wA)om&bk8JT;^le@mkDhoyly24Z-urVqpK9a?^_ z)CaQ@?z+)c|R_YXjB|22@)N3`_a zH)G7V&%TX}HC@g0U~4z|^G2+OY?lQt1C&w+QCpYX7#WS|x#j$-Va?5XI-kSEKwB$@ zmBVJo{0B*a>lZ)*p8f?l*DG?9b`+*rCN3VO@H@~xK_kY>gLV7bIUvq8m}W`36-g+K zL%Z+ECMf7hma!ZV}iTVS*+`_~mf3;jw!65Q9(z{c=Jm53LF&|I4GQ zd#l$=!IV*nv81Ct)*Wlx1B+ma-RtKG5f9XV2^PZzjBpAHvEDO`ofr?MTs^DYbhm=5 zmPpn6BvDv0ZHPHaT=Y#wg0$SZgiyN4re#Tsnsz( zh~zB)A7A&urlw9{@JO1dqJFaX$92GZ9
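
A note on the dtype split in PATCH 1/3 (an editorial sketch for context; the snippet below is not part of the patch series): ujson.decode(..., numpy=True) materializes values through np.array, whose default integer dtype is the platform C long (int32 on 64-bit Windows, int64 on typical 64-bit Linux and macOS builds), whereas a Series built from a plain Python list always defaults to int64. A single shared `exp` therefore cannot match both decode paths on every platform, which is why the test keeps separate exp_np and exp_pd expectations.

    import numpy as np
    from pandas import Series

    # np.array of Python ints uses the platform default integer (C long):
    # int32 on 64-bit Windows, int64 on typical 64-bit Linux/macOS.
    exp_np = Series(np.array([10, 20, 30, 40, 50, 60]))

    # A Series built directly from a Python list always defaults to int64.
    exp_pd = Series([10, 20, 30, 40, 50, 60])

    # On Windows these dtypes differ, so reusing one expected Series for
    # both the numpy=True and plain decode paths would fail the dtype
    # check in tm.assert_series_equal.
    print(exp_np.dtype, exp_pd.dtype)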