diff --git a/bigquery/samples/client_query.py b/bigquery/samples/client_query.py
index b2cf0c8637f2..5242c854e220 100644
--- a/bigquery/samples/client_query.py
+++ b/bigquery/samples/client_query.py
@@ -30,10 +30,7 @@ def client_query(client):
         ORDER BY total_people DESC
         LIMIT 20
     """
-    query_job = client.query(
-        query,
-        location="US",  # Must match the source and the destination dataset(s) location.
-    )  # Make an API request.
+    query_job = client.query(query)  # Make an API request.
 
     print("The query data:")
     for row in query_job:
diff --git a/bigquery/samples/client_query_add_column.py b/bigquery/samples/client_query_add_column.py
index c26cbe96622d..e2724eb300b1 100644
--- a/bigquery/samples/client_query_add_column.py
+++ b/bigquery/samples/client_query_add_column.py
@@ -43,7 +43,6 @@ def client_query_add_column(client, table_id):
         # 'age' columns, while the results of this query will contain an
         # additional 'favorite_color' column.
         'SELECT "Timmy" as full_name, 85 as age, "Blue" as favorite_color;',
-        location="US",  # Must match the source and the destination dataset(s) location.
         job_config=job_config,
     )  # Make an API request.
     query_job.result()  # Wait for the job to complete.
diff --git a/bigquery/samples/client_query_batch.py b/bigquery/samples/client_query_batch.py
index 9dab2b70a78e..73ddd4f83ae2 100644
--- a/bigquery/samples/client_query_batch.py
+++ b/bigquery/samples/client_query_batch.py
@@ -32,13 +32,13 @@ def client_query_batch(client):
     """
 
     # Start the query, passing in the extra configuration.
-    query_job = client.query(
-        sql, location="US", job_config=job_config
-    )  # Make an API request.
+    query_job = client.query(sql, job_config=job_config)  # Make an API request.
 
     # Check on the progress by getting the job's updated state. Once the state
     # is `DONE`, the results are ready.
-    query_job = client.get_job(query_job.job_id, location="US")  # Make an API request.
+    query_job = client.get_job(
+        query_job.job_id, location=query_job.location
+    )  # Make an API request.
 
     print("Job {} is currently in state {}".format(query_job.job_id, query_job.state))
     # [END bigquery_query_batch]
diff --git a/bigquery/samples/client_query_destination_table.py b/bigquery/samples/client_query_destination_table.py
index 471667780c41..f40ce3a5afc1 100644
--- a/bigquery/samples/client_query_destination_table.py
+++ b/bigquery/samples/client_query_destination_table.py
@@ -35,11 +35,7 @@ def client_query_destination_table(client, table_id):
     """
 
     # Start the query, passing in the extra configuration.
-    query_job = client.query(
-        sql,
-        location="US",  # Must match the source and the destination dataset(s) location.
-        job_config=job_config,
-    )  # Make an API request.
+    query_job = client.query(sql, job_config=job_config)  # Make an API request.
     query_job.result()  # Wait for the job to complete.
 
     print("Query results loaded to the table {}".format(table_id))
diff --git a/bigquery/samples/client_query_destination_table_cmek.py b/bigquery/samples/client_query_destination_table_cmek.py
index a8749ba35384..06c02038ba7a 100644
--- a/bigquery/samples/client_query_destination_table_cmek.py
+++ b/bigquery/samples/client_query_destination_table_cmek.py
@@ -38,9 +38,7 @@ def client_query_destination_table_cmek(client, table_id, kms_key_name):
 
     # Start the query, passing in the extra configuration.
     query_job = client.query(
-        "SELECT 17 AS my_col;",
-        location="US",  # Must match the source and the destination dataset(s) location.
-        job_config=job_config,
+        "SELECT 17 AS my_col;", job_config=job_config
     )  # Make an API request.
     query_job.result()  # Wait for the job to complete.
 
diff --git a/bigquery/samples/client_query_destination_table_legacy.py b/bigquery/samples/client_query_destination_table_legacy.py
index 7d30c20f1d6f..88c38b79e839 100644
--- a/bigquery/samples/client_query_destination_table_legacy.py
+++ b/bigquery/samples/client_query_destination_table_legacy.py
@@ -40,11 +40,7 @@ def client_query_destination_table_legacy(client, table_id):
     """
 
     # Start the query, passing in the extra configuration.
-    query_job = client.query(
-        sql,
-        location="US",  # Must match the source and the destination dataset(s) location.
-        job_config=job_config,
-    )  # Make an API request.
+    query_job = client.query(sql, job_config=job_config)  # Make an API request.
     query_job.result()  # Wait for the job to complete.
 
     print("Query results loaded to the table {}".format(table_id))
diff --git a/bigquery/samples/client_query_dry_run.py b/bigquery/samples/client_query_dry_run.py
index 4094d5de4dcf..8b6f018a5a90 100644
--- a/bigquery/samples/client_query_dry_run.py
+++ b/bigquery/samples/client_query_dry_run.py
@@ -33,7 +33,6 @@ def client_query_dry_run(client):
             "WHERE state = 'WA' "
            "GROUP BY name"
         ),
-        location="US",  # Must match the source and the destination dataset(s) location.
         job_config=job_config,
     )  # Make an API request.
 
diff --git a/bigquery/samples/client_query_legacy_sql.py b/bigquery/samples/client_query_legacy_sql.py
index 8400a9acc60a..f9e2c69b6ae4 100644
--- a/bigquery/samples/client_query_legacy_sql.py
+++ b/bigquery/samples/client_query_legacy_sql.py
@@ -32,11 +32,7 @@ def client_query_legacy_sql(client):
     job_config.use_legacy_sql = True
 
     # Start the query, passing in the extra configuration.
-    query_job = client.query(
-        query,
-        location="US",  # Must match the source and the destination dataset(s) location.
-        job_config=job_config,
-    )  # Make an API request.
+    query_job = client.query(query, job_config=job_config)  # Make an API request.
 
     print("The query data:")
     for row in query_job:
diff --git a/bigquery/samples/client_query_relax_column.py b/bigquery/samples/client_query_relax_column.py
index 4b3a5080df6c..48c264a16c68 100644
--- a/bigquery/samples/client_query_relax_column.py
+++ b/bigquery/samples/client_query_relax_column.py
@@ -45,7 +45,6 @@ def client_query_relax_column(client, table_id):
         # In this example, the existing table contains 'full_name' and 'age' as
         # required columns, but the query results will omit the second column.
         'SELECT "Beyonce" as full_name;',
-        location="US",  # Must match the source and the destination dataset(s) location.
         job_config=job_config,
     )  # Make an API request.
     query_job.result()  # Wait for the job to complete.
diff --git a/bigquery/samples/copy_table.py b/bigquery/samples/copy_table.py
index 28fb5ceb0cb8..20f6776cf87d 100644
--- a/bigquery/samples/copy_table.py
+++ b/bigquery/samples/copy_table.py
@@ -28,11 +28,7 @@ def copy_table(client, source_table_id, destination_table_id):
     # TODO(developer): Set destination_table_id to the ID of the destination table.
     # destination_table_id = "your-project.destination_dataset.destination_table"
 
-    job = client.copy_table(
-        source_table_id,
-        destination_table_id,
-        location="US",  # Must match the source and the destination dataset(s) location.
-    )
+    job = client.copy_table(source_table_id, destination_table_id)
     job.result()  # Wait for the job to complete.
 
     print("A copy of the table created.")
diff --git a/bigquery/samples/copy_table_cmek.py b/bigquery/samples/copy_table_cmek.py
index 49a98140c6ac..0aa299084d19 100644
--- a/bigquery/samples/copy_table_cmek.py
+++ b/bigquery/samples/copy_table_cmek.py
@@ -36,12 +36,7 @@ def copy_table_cmek(client, dest_table_id, orig_table_id, kms_key_name):
     encryption_config = bigquery.EncryptionConfiguration(kms_key_name=kms_key_name)
     job_config = bigquery.CopyJobConfig()
     job_config.destination_encryption_configuration = encryption_config
-    job = client.copy_table(
-        orig_table_id,
-        dest_table_id,
-        location="US",  # Must match the source and the destination dataset(s) location.
-        job_config=job_config,
-    )
+    job = client.copy_table(orig_table_id, dest_table_id, job_config=job_config)
     job.result()  # Wait for the job to complete.
 
     dest_table = client.get_table(dest_table_id)  # Make an API request.
diff --git a/bigquery/samples/copy_table_multiple_source.py b/bigquery/samples/copy_table_multiple_source.py
index 6f54d592b7ca..532ea0a0ab90 100644
--- a/bigquery/samples/copy_table_multiple_source.py
+++ b/bigquery/samples/copy_table_multiple_source.py
@@ -28,11 +28,7 @@ def copy_table_multiple_source(client, dest_table_id, table_ids):
     # TODO(developer): Set table_ids to the list of the IDs of the original tables.
     # table_ids = ["your-project.your_dataset.your_table_name", ...]
 
-    job = client.copy_table(
-        table_ids,
-        dest_table_id,
-        location="US",  # Must match the source and the destination dataset(s) location.
-    )  # Make an API request.
+    job = client.copy_table(table_ids, dest_table_id)  # Make an API request.
     job.result()  # Wait for the job to complete.
 
     print("The tables {} have been appended to {}".format(table_ids, dest_table_id))
diff --git a/bigquery/samples/load_table_dataframe.py b/bigquery/samples/load_table_dataframe.py
index 7133b0bfbcc1..8cfb34424457 100644
--- a/bigquery/samples/load_table_dataframe.py
+++ b/bigquery/samples/load_table_dataframe.py
@@ -61,10 +61,7 @@ def load_table_dataframe(client, table_id):
     )
 
     job = client.load_table_from_dataframe(
-        dataframe,
-        table_id,
-        job_config=job_config,
-        location="US",  # Must match the source and the destination dataset(s) location.
+        dataframe, table_id, job_config=job_config
     )  # Make an API request.
     job.result()  # Wait for the job to complete.
diff --git a/bigquery/samples/tests/test_create_job.py b/bigquery/samples/tests/test_create_job.py
index 5ead51156606..3cda34bf0848 100644
--- a/bigquery/samples/tests/test_create_job.py
+++ b/bigquery/samples/tests/test_create_job.py
@@ -19,6 +19,6 @@ def test_create_job(capsys, client):
 
     query_job = create_job.create_job(client)
-    client.cancel_job(query_job.job_id, location="US")
+    client.cancel_job(query_job.job_id, location=query_job.location)
 
     out, err = capsys.readouterr()
     assert "Started job: {}".format(query_job.job_id) in out