From a74215cbc88f70800f5a2716a630d25033daf5b8 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:23:28 -0700 Subject: [PATCH 01/20] changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. --- .kokoro/test-samples.sh | 8 ++++---- .kokoro/trampoline_v2.sh | 2 +- google/cloud/dataproc_v1/__init__.py | 4 ++-- google/cloud/dataproc_v1beta2/__init__.py | 4 ++-- synth.metadata | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index a18decba..1e12cb1e 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -87,11 +87,11 @@ for file in samples/**/requirements.txt; do python3.6 -m nox -s "$RUN_TESTS_SESSION" EXIT=$? - # If this is a periodic build, send the test log to the FlakyBot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. + # If this is a periodic build, send the test log to the Build Cop Bot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop fi if [[ $EXIT -ne 0 ]]; then diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh index 4af6cdc2..719bcd5b 100755 --- a/.kokoro/trampoline_v2.sh +++ b/.kokoro/trampoline_v2.sh @@ -159,7 +159,7 @@ if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then "KOKORO_GITHUB_COMMIT" "KOKORO_GITHUB_PULL_REQUEST_NUMBER" "KOKORO_GITHUB_PULL_REQUEST_COMMIT" - # For FlakyBot + # For Build Cop Bot "KOKORO_GITHUB_COMMIT_URL" "KOKORO_GITHUB_PULL_REQUEST_URL" ) diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py index d248a17a..b17ac4f8 100644 --- a/google/cloud/dataproc_v1/__init__.py +++ b/google/cloud/dataproc_v1/__init__.py @@ -109,12 +109,12 @@ "AcceleratorConfig", "AutoscalingConfig", "AutoscalingPolicy", + "AutoscalingPolicyServiceClient", "BasicAutoscalingAlgorithm", "BasicYarnAutoscalingConfig", "CancelJobRequest", "Cluster", "ClusterConfig", - "ClusterControllerClient", "ClusterMetrics", "ClusterOperation", "ClusterOperationMetadata", @@ -193,5 +193,5 @@ "WorkflowTemplatePlacement", "WorkflowTemplateServiceClient", "YarnApplication", - "AutoscalingPolicyServiceClient", + "ClusterControllerClient", ) diff --git a/google/cloud/dataproc_v1beta2/__init__.py b/google/cloud/dataproc_v1beta2/__init__.py index 7af81c6b..f5dee477 100644 --- a/google/cloud/dataproc_v1beta2/__init__.py +++ b/google/cloud/dataproc_v1beta2/__init__.py @@ -110,12 +110,12 @@ "AcceleratorConfig", "AutoscalingConfig", "AutoscalingPolicy", + "AutoscalingPolicyServiceClient", "BasicAutoscalingAlgorithm", "BasicYarnAutoscalingConfig", "CancelJobRequest", "Cluster", "ClusterConfig", - "ClusterControllerClient", "ClusterMetrics", "ClusterOperation", "ClusterOperationMetadata", @@ -195,5 +195,5 @@ "WorkflowTemplatePlacement", "WorkflowTemplateServiceClient", "YarnApplication", - "AutoscalingPolicyServiceClient", + "ClusterControllerClient", ) diff --git a/synth.metadata b/synth.metadata index cfb4b764..586f5415 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-dataproc.git", - "sha": "2b4f513b09497e047435e679223a8db5d228d845" + "sha": 
"1b1164217295fde7a25df720d4e79d975a0ce67a" } }, { From c8ea13e3c66b6ae7dfabd98e07fd46df97db2f56 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:25:59 -0700 Subject: [PATCH 02/20] chore(python): skip docfx in main presubmit * chore(python): skip docfx in main presubmit * fix: properly template the repo name Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Fri Jan 8 10:32:13 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483 Source-Link: https://github.com/googleapis/synthtool/commit/fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483 --- .kokoro/build.sh | 16 ++++++++++------ .kokoro/docs/docs-presubmit.cfg | 11 +++++++++++ .trampolinerc | 2 ++ noxfile.py | 11 +++++++++++ synth.metadata | 4 ++-- 5 files changed, 36 insertions(+), 8 deletions(-) diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 0cd63701..3949070c 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -15,7 +15,11 @@ set -eo pipefail -cd github/python-dataproc +if [[ -z "${PROJECT_ROOT:-}" ]]; then + PROJECT_ROOT="github/python-dataproc" +fi + +cd "${PROJECT_ROOT}" # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -30,16 +34,16 @@ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") # Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +python3 -m pip uninstall --yes --quiet nox-automation # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --upgrade --quiet nox +python3 -m nox --version # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then - python3.6 -m nox -s "${NOX_SESSION:-}" + python3 -m nox -s ${NOX_SESSION:-} else - python3.6 -m nox + python3 -m nox fi diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg index 11181078..d4dad0b3 100644 --- a/.kokoro/docs/docs-presubmit.cfg +++ b/.kokoro/docs/docs-presubmit.cfg @@ -15,3 +15,14 @@ env_vars: { key: "TRAMPOLINE_IMAGE_UPLOAD" value: "false" } + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: github/python-dataproc/.kokoro/build.sh" +} + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "docs docfx" +} diff --git a/.trampolinerc b/.trampolinerc index 995ee291..c7d663ae 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -18,12 +18,14 @@ required_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" + "NOX_SESSION" ) # Add env vars which are passed down into the container here. pass_down_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" + "NOX_SESSION" ) # Prevent unintentional override on the default image. 
diff --git a/noxfile.py b/noxfile.py index f230390f..f73702a9 100644 --- a/noxfile.py +++ b/noxfile.py @@ -30,6 +30,17 @@ SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit", + "system", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", +] + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): diff --git a/synth.metadata b/synth.metadata index 586f5415..32e056cd 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8" + "sha": "fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8" + "sha": "fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483" } } ], From c690d3a0f5db304e43f78e2e4905c357049d2393 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:25:59 -0700 Subject: [PATCH 03/20] chore: add missing quotation mark Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Mon Jan 11 09:43:06 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 16ec872dd898d7de6e1822badfac32484b5d9031 Source-Link: https://github.com/googleapis/synthtool/commit/16ec872dd898d7de6e1822badfac32484b5d9031 --- .kokoro/docs/docs-presubmit.cfg | 2 +- synth.metadata | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg index d4dad0b3..8cf1dcbf 100644 --- a/.kokoro/docs/docs-presubmit.cfg +++ b/.kokoro/docs/docs-presubmit.cfg @@ -18,7 +18,7 @@ env_vars: { env_vars: { key: "TRAMPOLINE_BUILD_FILE" - value: github/python-dataproc/.kokoro/build.sh" + value: "github/python-dataproc/.kokoro/build.sh" } # Only run this nox session. diff --git a/synth.metadata b/synth.metadata index 32e056cd..182b4e89 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483" + "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483" + "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" } } ], From 511e6527e1e826ae734eefef5fd292e72a1509a0 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:26:43 -0700 Subject: [PATCH 04/20] chore: add 3.9 to noxfile template Since the python-docs-samples noxfile-template doesn't sync with this, I wanted to make sure the noxfile template matched the most recent change [here](https://github.com/GoogleCloudPlatform/python-docs-samples/pull/4968/files) cc @tmatsuo Source-Author: Leah E. 
Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Fri Jan 15 17:24:05 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: 56ddc68f36b32341e9f22c2c59b4ce6aa3ba635f Source-Link: https://github.com/googleapis/synthtool/commit/56ddc68f36b32341e9f22c2c59b4ce6aa3ba635f --- samples/snippets/noxfile.py | 2 +- synth.metadata | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py index bca0522e..97bf7da8 100644 --- a/samples/snippets/noxfile.py +++ b/samples/snippets/noxfile.py @@ -85,7 +85,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/synth.metadata b/synth.metadata index 182b4e89..12b2f304 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" + "sha": "56ddc68f36b32341e9f22c2c59b4ce6aa3ba635f" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" + "sha": "56ddc68f36b32341e9f22c2c59b4ce6aa3ba635f" } } ], From 5d9688511f40d34f5ea3cfd155c6674816c71c56 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:26:43 -0700 Subject: [PATCH 05/20] build(python): make `NOX_SESSION` optional I added this accidentally in #889. `NOX_SESSION` should be passed down if it is set but not marked required. Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue Jan 19 09:38:04 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: ba960d730416fe05c50547e975ce79fcee52c671 Source-Link: https://github.com/googleapis/synthtool/commit/ba960d730416fe05c50547e975ce79fcee52c671 --- .trampolinerc | 1 - synth.metadata | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.trampolinerc b/.trampolinerc index c7d663ae..383b6ec8 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -18,7 +18,6 @@ required_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" - "NOX_SESSION" ) # Add env vars which are passed down into the container here. diff --git a/synth.metadata b/synth.metadata index 12b2f304..9035132d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "56ddc68f36b32341e9f22c2c59b4ce6aa3ba635f" + "sha": "ba960d730416fe05c50547e975ce79fcee52c671" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "56ddc68f36b32341e9f22c2c59b4ce6aa3ba635f" + "sha": "ba960d730416fe05c50547e975ce79fcee52c671" } } ], From d3fc93b7a06c0a706d66b86fd402c75bd79a75b5 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:27:47 -0700 Subject: [PATCH 06/20] chore: Add header checker config to python library synth MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have it working in [python-docs-samples](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/.github/header-checker-lint.yml) we should consider adding it to the 🐍 libraries :) Source-Author: Leah E. 
Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Mon Jan 25 13:24:08 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: 573f7655311b553a937f9123bee17bf78497db95 Source-Link: https://github.com/googleapis/synthtool/commit/573f7655311b553a937f9123bee17bf78497db95 --- .github/header-checker-lint.yml | 15 +++++++++++++++ synth.metadata | 5 +++-- 2 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 .github/header-checker-lint.yml diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml new file mode 100644 index 00000000..fc281c05 --- /dev/null +++ b/.github/header-checker-lint.yml @@ -0,0 +1,15 @@ +{"allowedCopyrightHolders": ["Google LLC"], + "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"], + "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"], + "sourceFileExtensions": [ + "ts", + "js", + "java", + "sh", + "Dockerfile", + "yaml", + "py", + "html", + "txt" + ] +} \ No newline at end of file diff --git a/synth.metadata b/synth.metadata index 9035132d..c23457bb 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ba960d730416fe05c50547e975ce79fcee52c671" + "sha": "573f7655311b553a937f9123bee17bf78497db95" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ba960d730416fe05c50547e975ce79fcee52c671" + "sha": "573f7655311b553a937f9123bee17bf78497db95" } } ], @@ -57,6 +57,7 @@ ".github/ISSUE_TEMPLATE/feature_request.md", ".github/ISSUE_TEMPLATE/support_request.md", ".github/PULL_REQUEST_TEMPLATE.md", + ".github/header-checker-lint.yml", ".github/release-please.yml", ".github/snippet-bot.yml", ".gitignore", From 826763bf81c4d6cfd6c99545ca0886c836fde01e Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:28:31 -0700 Subject: [PATCH 07/20] chore: add noxfile parameters for extra dependencies Also, add tests for some noxfile parameters for assurance that the template generates valid Python. Co-authored-by: Jeffrey Rennie Source-Author: Tim Swast Source-Date: Tue Jan 26 12:26:57 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: 778d8beae28d6d87eb01fdc839a4b4d966ed2ebe Source-Link: https://github.com/googleapis/synthtool/commit/778d8beae28d6d87eb01fdc839a4b4d966ed2ebe --- noxfile.py | 1 + synth.metadata | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/noxfile.py b/noxfile.py index f73702a9..ee6aa2ab 100644 --- a/noxfile.py +++ b/noxfile.py @@ -86,6 +86,7 @@ def default(session): session.install( "mock", "pytest", "pytest-cov", ) + session.install("-e", ".") # Run py.test against the unit tests. 
diff --git a/synth.metadata b/synth.metadata index c23457bb..0e54751c 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "573f7655311b553a937f9123bee17bf78497db95" + "sha": "778d8beae28d6d87eb01fdc839a4b4d966ed2ebe" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "573f7655311b553a937f9123bee17bf78497db95" + "sha": "778d8beae28d6d87eb01fdc839a4b4d966ed2ebe" } } ], From 0c9f5b306e8453d7ec559ec34936c18c55989d91 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:29:36 -0700 Subject: [PATCH 08/20] build: migrate to flakybot Source-Author: Justin Beckwith Source-Date: Thu Jan 28 22:22:38 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: d1bb9173100f62c0cfc8f3138b62241e7f47ca6a Source-Link: https://github.com/googleapis/synthtool/commit/d1bb9173100f62c0cfc8f3138b62241e7f47ca6a --- .kokoro/test-samples.sh | 8 ++++---- .kokoro/trampoline_v2.sh | 2 +- synth.metadata | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index 1e12cb1e..a18decba 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -87,11 +87,11 @@ for file in samples/**/requirements.txt; do python3.6 -m nox -s "$RUN_TESTS_SESSION" EXIT=$? - # If this is a periodic build, send the test log to the Build Cop Bot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. + # If this is a periodic build, send the test log to the FlakyBot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop - $KOKORO_GFILE_DIR/linux_amd64/buildcop + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot fi if [[ $EXIT -ne 0 ]]; then diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh index 719bcd5b..4af6cdc2 100755 --- a/.kokoro/trampoline_v2.sh +++ b/.kokoro/trampoline_v2.sh @@ -159,7 +159,7 @@ if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then "KOKORO_GITHUB_COMMIT" "KOKORO_GITHUB_PULL_REQUEST_NUMBER" "KOKORO_GITHUB_PULL_REQUEST_COMMIT" - # For Build Cop Bot + # For FlakyBot "KOKORO_GITHUB_COMMIT_URL" "KOKORO_GITHUB_PULL_REQUEST_URL" ) diff --git a/synth.metadata b/synth.metadata index 0e54751c..2de8ce09 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "778d8beae28d6d87eb01fdc839a4b4d966ed2ebe" + "sha": "d1bb9173100f62c0cfc8f3138b62241e7f47ca6a" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "778d8beae28d6d87eb01fdc839a4b4d966ed2ebe" + "sha": "d1bb9173100f62c0cfc8f3138b62241e7f47ca6a" } } ], From c465f2ad4178f976952820445cd3e83f8131a307 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:31:25 -0700 Subject: [PATCH 09/20] chore(python): include py.typed files in release A py.typed file must be included in the released package for it to be considered typed by type checkers. https://www.python.org/dev/peps/pep-0561/#packaging-type-information. 
See https://github.com/googleapis/python-secret-manager/issues/79 Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Fri Feb 5 17:32:06 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 33366574ffb9e11737b3547eb6f020ecae0536e8 Source-Link: https://github.com/googleapis/synthtool/commit/33366574ffb9e11737b3547eb6f020ecae0536e8 --- MANIFEST.in | 4 ++-- synth.metadata | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index e9e29d12..e783f4c6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -16,10 +16,10 @@ # Generated by synthtool. DO NOT EDIT! include README.rst LICENSE -recursive-include google *.json *.proto +recursive-include google *.json *.proto py.typed recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ # Exclude scripts for samples readmegen -prune scripts/readme-gen \ No newline at end of file +prune scripts/readme-gen diff --git a/synth.metadata b/synth.metadata index 2de8ce09..e9f4bb20 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d1bb9173100f62c0cfc8f3138b62241e7f47ca6a" + "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d1bb9173100f62c0cfc8f3138b62241e7f47ca6a" + "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" } } ], From 8b80ab0d0f69547cdc7c432fb50d9e6044ccdf46 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:32:09 -0700 Subject: [PATCH 10/20] docs: update python contributing guide Adds details about blacken, updates version for system tests, and shows how to pass through pytest arguments. Source-Author: Chris Cotter Source-Date: Mon Feb 8 17:13:36 2021 -0500 Source-Repo: googleapis/synthtool Source-Sha: 4679e7e415221f03ff2a71e3ffad75b9ec41d87e Source-Link: https://github.com/googleapis/synthtool/commit/4679e7e415221f03ff2a71e3ffad75b9ec41d87e --- CONTRIBUTING.rst | 22 ++++++++++++++++++---- synth.metadata | 4 ++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index ecad28ae..be20ff0b 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -70,9 +70,14 @@ We use `nox `__ to instrument our tests. - To test your changes, run unit tests with ``nox``:: $ nox -s unit-2.7 - $ nox -s unit-3.7 + $ nox -s unit-3.8 $ ... +- Args to pytest can be passed through the nox command separated by a `--`. For + example, to run a single test:: + + $ nox -s unit-3.8 -- -k + .. note:: The unit tests and system tests are described in the @@ -93,8 +98,12 @@ On Debian/Ubuntu:: ************ Coding Style ************ +- We use the automatic code formatter ``black``. You can run it using + the nox session ``blacken``. This will eliminate many lint errors. Run via:: + + $ nox -s blacken -- PEP8 compliance, with exceptions defined in the linter configuration. +- PEP8 compliance is required, with exceptions defined in the linter configuration. If you have ``nox`` installed, you can test that you have not introduced any non-compliant code via:: @@ -133,13 +142,18 @@ Running System Tests - To run system tests, you can execute:: - $ nox -s system-3.7 + # Run all system tests + $ nox -s system-3.8 $ nox -s system-2.7 + # Run a single system test + $ nox -s system-3.8 -- -k + + .. note:: System tests are only configured to run under Python 2.7 and - Python 3.7. 
For expediency, we do not run them in older versions + Python 3.8. For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local diff --git a/synth.metadata b/synth.metadata index e9f4bb20..045e0fb5 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" + "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" + "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" } } ], From af552c86520bdfb689626f9a20eb3bba78da31f0 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:33:14 -0700 Subject: [PATCH 11/20] build(python): enable flakybot on library unit and system tests Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Feb 17 14:10:46 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: d17674372e27fb8f23013935e794aa37502071aa Source-Link: https://github.com/googleapis/synthtool/commit/d17674372e27fb8f23013935e794aa37502071aa --- .gitignore | 4 +++- .kokoro/build.sh | 10 ++++++++++ noxfile.py | 17 +++++++++++++++-- synth.metadata | 4 ++-- 4 files changed, 30 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index b9daa52f..b4243ced 100644 --- a/.gitignore +++ b/.gitignore @@ -50,8 +50,10 @@ docs.metadata # Virtual environment env/ + +# Test logs coverage.xml -sponge_log.xml +*sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 3949070c..ef511aab 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -40,6 +40,16 @@ python3 -m pip uninstall --yes --quiet nox-automation python3 -m pip install --upgrade --quiet nox python3 -m nox --version +# If this is a continuous build, send the test log to the FlakyBot. +# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi + # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then diff --git a/noxfile.py b/noxfile.py index ee6aa2ab..c4c085c5 100644 --- a/noxfile.py +++ b/noxfile.py @@ -93,6 +93,7 @@ def default(session): session.run( "py.test", "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", "--cov=google/cloud", "--cov=tests/unit", "--cov-append", @@ -141,9 +142,21 @@ def system(session): # Run py.test against the system tests. 
if system_test_exists: - session.run("py.test", "--quiet", system_test_path, *session.posargs) + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) if system_test_folder_exists: - session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/synth.metadata b/synth.metadata index 045e0fb5..7413221f 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" + "sha": "d17674372e27fb8f23013935e794aa37502071aa" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" + "sha": "d17674372e27fb8f23013935e794aa37502071aa" } } ], From 228414b6de8bb7c76c3195bd125f0bd82b2a629b Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:34:19 -0700 Subject: [PATCH 12/20] test: install pyopenssl for mtls testing Source-Author: arithmetic1728 <58957152+arithmetic1728@users.noreply.github.com> Source-Date: Tue Mar 2 12:27:56 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: 0780323da96d5a53925fe0547757181fe76e8f1e Source-Link: https://github.com/googleapis/synthtool/commit/0780323da96d5a53925fe0547757181fe76e8f1e --- noxfile.py | 3 +++ synth.metadata | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/noxfile.py b/noxfile.py index c4c085c5..311af4e0 100644 --- a/noxfile.py +++ b/noxfile.py @@ -123,6 +123,9 @@ def system(session): # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") + # Install pyopenssl for mTLS testing. + if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": + session.install("pyopenssl") system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) diff --git a/synth.metadata b/synth.metadata index 7413221f..1cfdb17d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d17674372e27fb8f23013935e794aa37502071aa" + "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d17674372e27fb8f23013935e794aa37502071aa" + "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" } } ], From 5cf694f977d52bd14bf30083932d936c72a36b2f Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:36:29 -0700 Subject: [PATCH 13/20] chore: add pre-commit-config to renovate ignore paths Disable renovate PRs on the .pre-commit-config.yaml which is templated from synthtool. 
https://docs.renovatebot.com/configuration-options/#ignorepaths Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Mon Mar 15 09:05:39 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: 2c54c473779ea731128cea61a3a6c975a08a5378 Source-Link: https://github.com/googleapis/synthtool/commit/2c54c473779ea731128cea61a3a6c975a08a5378 --- renovate.json | 3 ++- synth.metadata | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/renovate.json b/renovate.json index 4fa94931..f08bc22c 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,6 @@ { "extends": [ "config:base", ":preserveSemverRanges" - ] + ], + "ignorePaths": [".pre-commit-config.yaml"] } diff --git a/synth.metadata b/synth.metadata index 1cfdb17d..a431b3b9 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" + "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" + "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" } } ], From a3789109f64070a6215fd91a3835ec5de0df8091 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:37:13 -0700 Subject: [PATCH 14/20] build(python): fail nox sessions if a python version is missing Nox's default behavior is to quietly skip if a python interpreter is missing. https://nox.thea.codes/en/stable/usage.html#failing-sessions-when-the-interpreter-is-missing Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue Mar 16 13:38:02 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: eda422b90c3dde4a872a13e6b78a8f802c40d0db Source-Link: https://github.com/googleapis/synthtool/commit/eda422b90c3dde4a872a13e6b78a8f802c40d0db --- noxfile.py | 3 +++ synth.metadata | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/noxfile.py b/noxfile.py index 311af4e0..e91dc8e2 100644 --- a/noxfile.py +++ b/noxfile.py @@ -41,6 +41,9 @@ "docs", ] +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): diff --git a/synth.metadata b/synth.metadata index a431b3b9..7fb0e07d 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" + "sha": "eda422b90c3dde4a872a13e6b78a8f802c40d0db" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" + "sha": "eda422b90c3dde4a872a13e6b78a8f802c40d0db" } } ], From a7c52d818c88e8f9b4e7b6d3d5a75938aa864985 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:37:57 -0700 Subject: [PATCH 15/20] chore(python): add kokoro configs for periodic builds against head This change should be non-destructive. Note for library repo maintainers: After applying this change, you can easily add (or change) periodic builds against head by adding config files in google3. See python-pubsub repo for example. 
Source-Author: Takashi Matsuo Source-Date: Fri Mar 19 11:17:59 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 79c8dd7ee768292f933012d3a69a5b4676404cda Source-Link: https://github.com/googleapis/synthtool/commit/79c8dd7ee768292f933012d3a69a5b4676404cda --- .kokoro/samples/python3.6/periodic-head.cfg | 11 +++ .kokoro/samples/python3.7/periodic-head.cfg | 11 +++ .kokoro/samples/python3.8/periodic-head.cfg | 11 +++ .kokoro/test-samples-against-head.sh | 28 ++++++ .kokoro/test-samples-impl.sh | 102 ++++++++++++++++++++ .kokoro/test-samples.sh | 96 +++--------------- synth.metadata | 9 +- 7 files changed, 186 insertions(+), 82 deletions(-) create mode 100644 .kokoro/samples/python3.6/periodic-head.cfg create mode 100644 .kokoro/samples/python3.7/periodic-head.cfg create mode 100644 .kokoro/samples/python3.8/periodic-head.cfg create mode 100755 .kokoro/test-samples-against-head.sh create mode 100755 .kokoro/test-samples-impl.sh diff --git a/.kokoro/samples/python3.6/periodic-head.cfg b/.kokoro/samples/python3.6/periodic-head.cfg new file mode 100644 index 00000000..f9cfcd33 --- /dev/null +++ b/.kokoro/samples/python3.6/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.7/periodic-head.cfg b/.kokoro/samples/python3.7/periodic-head.cfg new file mode 100644 index 00000000..f9cfcd33 --- /dev/null +++ b/.kokoro/samples/python3.7/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.8/periodic-head.cfg b/.kokoro/samples/python3.8/periodic-head.cfg new file mode 100644 index 00000000..f9cfcd33 --- /dev/null +++ b/.kokoro/samples/python3.8/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh new file mode 100755 index 00000000..68eddb57 --- /dev/null +++ b/.kokoro/test-samples-against-head.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A customized test runner for samples. +# +# For periodic builds, you can specify this file for testing against head. 
+ +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-dataproc + +exec .kokoro/test-samples-impl.sh diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh new file mode 100755 index 00000000..cf5de74c --- /dev/null +++ b/.kokoro/test-samples-impl.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +# Exit early if samples directory doesn't exist +if [ ! -d "./samples" ]; then + echo "No tests run. `./samples` not found" + exit 0 +fi + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets acessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. +gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. + file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the FlakyBot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. 
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index a18decba..fbde31d4 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +# The default test runner for samples. +# +# For periodic builds, we rewinds the repo to the latest release, and +# run test-samples-impl.sh. # `-e` enables the script to automatically fail when a command fails # `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero @@ -24,87 +28,19 @@ cd github/python-dataproc # Run periodic samples tests at latest release if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + # preserving the test runner implementation. + cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh" + echo "--- IMPORTANT IMPORTANT IMPORTANT ---" + echo "Now we rewind the repo back to the latest release..." LATEST_RELEASE=$(git describe --abbrev=0 --tags) git checkout $LATEST_RELEASE -fi - -# Exit early if samples directory doesn't exist -if [ ! -d "./samples" ]; then - echo "No tests run. `./samples` not found" - exit 0 -fi - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Debug: show build environment -env | grep KOKORO - -# Install nox -python3.6 -m pip install --upgrade --quiet nox - -# Use secrets acessor service account to get secrets -if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then - gcloud auth activate-service-account \ - --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ - --project="cloud-devrel-kokoro-resources" -fi - -# This script will create 3 files: -# - testing/test-env.sh -# - testing/service-account.json -# - testing/client-secrets.json -./scripts/decrypt-secrets.sh - -source ./testing/test-env.sh -export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json - -# For cloud-run session, we activate the service account for gcloud sdk. -gcloud auth activate-service-account \ - --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" - -export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json - -echo -e "\n******************** TESTING PROJECTS ********************" - -# Switch to 'fail at end' to allow all tests to complete before exiting. -set +e -# Use RTN to return a non-zero value if the test fails. -RTN=0 -ROOT=$(pwd) -# Find all requirements.txt in the samples directory (may break on whitespace). -for file in samples/**/requirements.txt; do - cd "$ROOT" - # Navigate to the project folder. - file=$(dirname "$file") - cd "$file" - - echo "------------------------------------------------------------" - echo "- testing $file" - echo "------------------------------------------------------------" - - # Use nox to execute the tests for the project. - python3.6 -m nox -s "$RUN_TESTS_SESSION" - EXIT=$? - - # If this is a periodic build, send the test log to the FlakyBot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. 
- if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot + echo "The current head is: " + echo $(git rev-parse --verify HEAD) + echo "--- IMPORTANT IMPORTANT IMPORTANT ---" + # move back the test runner implementation if there's no file. + if [ ! -f .kokoro/test-samples-impl.sh ]; then + cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh fi +fi - if [[ $EXIT -ne 0 ]]; then - RTN=1 - echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" - else - echo -e "\n Testing completed.\n" - fi - -done -cd "$ROOT" - -# Workaround for Kokoro permissions issue: delete secrets -rm testing/{test-env.sh,client-secrets.json,service-account.json} - -exit "$RTN" +exec .kokoro/test-samples-impl.sh diff --git a/synth.metadata b/synth.metadata index 7fb0e07d..8e379504 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "eda422b90c3dde4a872a13e6b78a8f802c40d0db" + "sha": "79c8dd7ee768292f933012d3a69a5b4676404cda" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "eda422b90c3dde4a872a13e6b78a8f802c40d0db" + "sha": "79c8dd7ee768292f933012d3a69a5b4676404cda" } } ], @@ -82,16 +82,21 @@ ".kokoro/samples/lint/presubmit.cfg", ".kokoro/samples/python3.6/common.cfg", ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic-head.cfg", ".kokoro/samples/python3.6/periodic.cfg", ".kokoro/samples/python3.6/presubmit.cfg", ".kokoro/samples/python3.7/common.cfg", ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic-head.cfg", ".kokoro/samples/python3.7/periodic.cfg", ".kokoro/samples/python3.7/presubmit.cfg", ".kokoro/samples/python3.8/common.cfg", ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic-head.cfg", ".kokoro/samples/python3.8/periodic.cfg", ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples-against-head.sh", + ".kokoro/test-samples-impl.sh", ".kokoro/test-samples.sh", ".kokoro/trampoline.sh", ".kokoro/trampoline_v2.sh", From 6927edf9f7ed29e68219416216dc74cc874fa1a2 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:39:23 -0700 Subject: [PATCH 16/20] chore(deps): update precommit hook pycqa/flake8 to v3.9.0 [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [pycqa/flake8](https://gitlab.com/pycqa/flake8) | repository | minor | `3.8.4` -> `3.9.0` | --- ### Release Notes
pycqa/flake8 ### [`v3.9.0`](https://gitlab.com/pycqa/flake8/compare/3.8.4...3.9.0) [Compare Source](https://gitlab.com/pycqa/flake8/compare/3.8.4...3.9.0)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/synthtool). Source-Author: WhiteSource Renovate Source-Date: Tue Mar 23 17:38:03 2021 +0100 Source-Repo: googleapis/synthtool Source-Sha: f5c5904fb0c6aa3b3730eadf4e5a4485afc65726 Source-Link: https://github.com/googleapis/synthtool/commit/f5c5904fb0c6aa3b3730eadf4e5a4485afc65726 --- .pre-commit-config.yaml | 2 +- synth.metadata | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a9024b15..32302e48 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,6 +12,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.4 + rev: 3.9.0 hooks: - id: flake8 diff --git a/synth.metadata b/synth.metadata index 8e379504..33c77dbe 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "79c8dd7ee768292f933012d3a69a5b4676404cda" + "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "79c8dd7ee768292f933012d3a69a5b4676404cda" + "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" } } ], From 02ca9227addf4f568d8e8c3c48d84624b53a4925 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:39:44 -0700 Subject: [PATCH 17/20] test(python): use constraints files to check dependency lower bounds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use a constraints file when installing dependencies for system and unit tests nox sessions. https://pip.pypa.io/en/stable/user_guide/#constraints-files > Constraints files are requirements files that **only control which version of a requirement is installed, not whether it is installed or not**. Their syntax and contents is nearly identical to Requirements Files. There is one key difference: Including a package in a constraints file does not trigger installation of the package. ``` testing ├── constraints-3.10.txt ├── constraints-3.11.txt ├── constraints-3.6.txt ├── constraints-3.7.txt ├── constraints-3.8.txt └── constraints-3.9.txt ``` Going forward, one constraints file (currently 3.6) will be populated with every library requirement and extra listed in the `setup.py`. The constraints file will pin each requirement to the lower bound. This ensures that library maintainers will see test failures if they forget to update a lower bound on a dependency. 
See https://github.com/googleapis/python-bigquery/pull/263 for an example Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue Mar 23 10:52:02 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: 86ed43d4f56e6404d068e62e497029018879c771 Source-Link: https://github.com/googleapis/synthtool/commit/86ed43d4f56e6404d068e62e497029018879c771 --- noxfile.py | 22 ++++++++++++++-------- synth.metadata | 4 ++-- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/noxfile.py b/noxfile.py index e91dc8e2..479f81a1 100644 --- a/noxfile.py +++ b/noxfile.py @@ -18,6 +18,7 @@ from __future__ import absolute_import import os +import pathlib import shutil import nox @@ -30,6 +31,8 @@ SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + # 'docfx' is excluded since it only needs to run in 'docs-presubmit' nox.options.sessions = [ "unit", @@ -84,13 +87,15 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("asyncmock", "pytest-asyncio") - session.install( - "mock", "pytest", "pytest-cov", + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) + session.install("asyncmock", "pytest-asyncio", "-c", constraints_path) - session.install("-e", ".") + session.install("mock", "pytest", "pytest-cov", "-c", constraints_path) + + session.install("-e", ".", "-c", constraints_path) # Run py.test against the unit tests. session.run( @@ -117,6 +122,9 @@ def unit(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") @@ -141,10 +149,8 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install( - "mock", "pytest", "google-cloud-testutils", - ) - session.install("-e", ".") + session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) + session.install("-e", ".", "-c", constraints_path) # Run py.test against the system tests. 
if system_test_exists: diff --git a/synth.metadata b/synth.metadata index 33c77dbe..b986eb0f 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" + "sha": "86ed43d4f56e6404d068e62e497029018879c771" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" + "sha": "86ed43d4f56e6404d068e62e497029018879c771" } } ], From 818f840fb35163a2850cfc505e2698e1eb2c0f94 Mon Sep 17 00:00:00 2001 From: yoshi-automation Date: Sat, 3 Apr 2021 04:40:49 -0700 Subject: [PATCH 18/20] build(python): update docfx job to use new plugin Source-Author: Dan Lee <71398022+dandhlee@users.noreply.github.com> Source-Date: Tue Mar 30 19:36:37 2021 -0400 Source-Repo: googleapis/synthtool Source-Sha: 4501974ad08b5d693311457e2ea4ce845676e329 Source-Link: https://github.com/googleapis/synthtool/commit/4501974ad08b5d693311457e2ea4ce845676e329 --- noxfile.py | 6 +++--- synth.metadata | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/noxfile.py b/noxfile.py index 479f81a1..70927e9d 100644 --- a/noxfile.py +++ b/noxfile.py @@ -211,9 +211,9 @@ def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - # sphinx-docfx-yaml supports up to sphinx version 1.5.5. - # https://github.com/docascode/sphinx-docfx-yaml/issues/97 - session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") + session.install( + "sphinx<3.0.0", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml" + ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/synth.metadata b/synth.metadata index b986eb0f..2dec65f8 100644 --- a/synth.metadata +++ b/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "86ed43d4f56e6404d068e62e497029018879c771" + "sha": "4501974ad08b5d693311457e2ea4ce845676e329" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "86ed43d4f56e6404d068e62e497029018879c771" + "sha": "4501974ad08b5d693311457e2ea4ce845676e329" } } ], From 600793190cd1a69eeceb96a6444ad9de73f382d8 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Mon, 5 Apr 2021 19:57:36 +0000 Subject: [PATCH 19/20] chore: regen --- .coveragerc | 22 +- .../autoscaling_policy_service.rst | 11 + docs/dataproc_v1/cluster_controller.rst | 11 + docs/dataproc_v1/job_controller.rst | 11 + docs/dataproc_v1/services.rst | 18 +- docs/dataproc_v1/types.rst | 1 + .../dataproc_v1/workflow_template_service.rst | 11 + .../autoscaling_policy_service.rst | 11 + docs/dataproc_v1beta2/cluster_controller.rst | 11 + docs/dataproc_v1beta2/job_controller.rst | 11 + docs/dataproc_v1beta2/services.rst | 18 +- docs/dataproc_v1beta2/types.rst | 1 + .../workflow_template_service.rst | 11 + google/cloud/dataproc_v1/__init__.py | 4 +- .../async_client.py | 82 +++-- .../autoscaling_policy_service/client.py | 96 +++-- .../autoscaling_policy_service/pagers.py | 27 +- .../transports/base.py | 21 +- .../transports/grpc.py | 112 +++--- .../transports/grpc_asyncio.py | 120 +++--- .../cluster_controller/async_client.py | 119 ++++-- .../services/cluster_controller/client.py | 156 ++++---- .../services/cluster_controller/pagers.py | 27 +- .../cluster_controller/transports/base.py | 24 +- .../cluster_controller/transports/grpc.py | 114 +++--- 
.../transports/grpc_asyncio.py | 122 +++---- .../services/job_controller/async_client.py | 84 ++++- .../services/job_controller/client.py | 118 +++--- .../services/job_controller/pagers.py | 27 +- .../job_controller/transports/base.py | 25 +- .../job_controller/transports/grpc.py | 114 +++--- .../job_controller/transports/grpc_asyncio.py | 122 +++---- .../workflow_template_service/async_client.py | 126 ++++--- .../workflow_template_service/client.py | 145 ++++---- .../workflow_template_service/pagers.py | 27 +- .../transports/base.py | 25 +- .../transports/grpc.py | 114 +++--- .../transports/grpc_asyncio.py | 122 +++---- google/cloud/dataproc_v1/types/__init__.py | 206 +++++------ .../dataproc_v1/types/autoscaling_policies.py | 22 +- google/cloud/dataproc_v1/types/clusters.py | 84 ++--- google/cloud/dataproc_v1/types/jobs.py | 98 ++--- google/cloud/dataproc_v1/types/operations.py | 10 +- .../dataproc_v1/types/workflow_templates.py | 76 ++-- google/cloud/dataproc_v1beta2/__init__.py | 4 +- .../async_client.py | 82 +++-- .../autoscaling_policy_service/client.py | 96 +++-- .../autoscaling_policy_service/pagers.py | 27 +- .../transports/base.py | 21 +- .../transports/grpc.py | 112 +++--- .../transports/grpc_asyncio.py | 120 +++--- .../cluster_controller/async_client.py | 137 ++++--- .../services/cluster_controller/client.py | 174 +++++---- .../services/cluster_controller/pagers.py | 27 +- .../cluster_controller/transports/base.py | 24 +- .../cluster_controller/transports/grpc.py | 114 +++--- .../transports/grpc_asyncio.py | 122 +++---- .../services/job_controller/async_client.py | 84 ++++- .../services/job_controller/client.py | 118 +++--- .../services/job_controller/pagers.py | 27 +- .../job_controller/transports/base.py | 25 +- .../job_controller/transports/grpc.py | 114 +++--- .../job_controller/transports/grpc_asyncio.py | 122 +++---- .../workflow_template_service/async_client.py | 126 ++++--- .../workflow_template_service/client.py | 145 ++++---- .../workflow_template_service/pagers.py | 27 +- .../transports/base.py | 25 +- .../transports/grpc.py | 114 +++--- .../transports/grpc_asyncio.py | 122 +++---- .../cloud/dataproc_v1beta2/types/__init__.py | 214 +++++------ .../types/autoscaling_policies.py | 22 +- .../cloud/dataproc_v1beta2/types/clusters.py | 88 ++--- google/cloud/dataproc_v1beta2/types/jobs.py | 98 ++--- .../dataproc_v1beta2/types/operations.py | 10 +- .../types/workflow_templates.py | 84 ++--- scripts/fixup_dataproc_v1_keywords.py | 4 +- scripts/fixup_dataproc_v1beta2_keywords.py | 4 +- synth.metadata | 202 +--------- tests/unit/gapic/dataproc_v1/__init__.py | 15 + .../test_autoscaling_policy_service.py | 333 ++++++++++++----- .../dataproc_v1/test_cluster_controller.py | 312 +++++++++++----- .../gapic/dataproc_v1/test_job_controller.py | 330 ++++++++++++----- .../test_workflow_template_service.py | 344 +++++++++++++----- tests/unit/gapic/dataproc_v1beta2/__init__.py | 15 + .../test_autoscaling_policy_service.py | 333 ++++++++++++----- .../test_cluster_controller.py | 312 +++++++++++----- .../dataproc_v1beta2/test_job_controller.py | 330 ++++++++++++----- .../test_workflow_template_service.py | 344 +++++++++++++----- 88 files changed, 4891 insertions(+), 3329 deletions(-) create mode 100644 docs/dataproc_v1/autoscaling_policy_service.rst create mode 100644 docs/dataproc_v1/cluster_controller.rst create mode 100644 docs/dataproc_v1/job_controller.rst create mode 100644 docs/dataproc_v1/workflow_template_service.rst create mode 100644 
docs/dataproc_v1beta2/autoscaling_policy_service.rst create mode 100644 docs/dataproc_v1beta2/cluster_controller.rst create mode 100644 docs/dataproc_v1beta2/job_controller.rst create mode 100644 docs/dataproc_v1beta2/workflow_template_service.rst diff --git a/.coveragerc b/.coveragerc index 218f2ddf..d6dce8f1 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,27 +1,11 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! [run] branch = True [report] fail_under = 100 show_missing = True -omit = google/cloud/dataproc/__init__.py +omit = + google/cloud/dataproc/__init__.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER @@ -31,4 +15,4 @@ exclude_lines = # This is added at the module level as a safeguard for if someone # generates the code and tries to run it without pip installing. This # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound \ No newline at end of file + except pkg_resources.DistributionNotFound diff --git a/docs/dataproc_v1/autoscaling_policy_service.rst b/docs/dataproc_v1/autoscaling_policy_service.rst new file mode 100644 index 00000000..cbb62baa --- /dev/null +++ b/docs/dataproc_v1/autoscaling_policy_service.rst @@ -0,0 +1,11 @@ +AutoscalingPolicyService +------------------------------------------ + +.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1/cluster_controller.rst b/docs/dataproc_v1/cluster_controller.rst new file mode 100644 index 00000000..1c4e398b --- /dev/null +++ b/docs/dataproc_v1/cluster_controller.rst @@ -0,0 +1,11 @@ +ClusterController +----------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.cluster_controller + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1.services.cluster_controller.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1/job_controller.rst b/docs/dataproc_v1/job_controller.rst new file mode 100644 index 00000000..e73db167 --- /dev/null +++ b/docs/dataproc_v1/job_controller.rst @@ -0,0 +1,11 @@ +JobController +------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.job_controller + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1.services.job_controller.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1/services.rst b/docs/dataproc_v1/services.rst index 8c7fc841..9d91e7ce 100644 --- a/docs/dataproc_v1/services.rst +++ b/docs/dataproc_v1/services.rst @@ -1,15 +1,9 @@ Services for Google Cloud Dataproc v1 API ========================================= +.. toctree:: + :maxdepth: 2 -.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service - :members: - :inherited-members: -.. 
automodule:: google.cloud.dataproc_v1.services.cluster_controller - :members: - :inherited-members: -.. automodule:: google.cloud.dataproc_v1.services.job_controller - :members: - :inherited-members: -.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service - :members: - :inherited-members: + autoscaling_policy_service + cluster_controller + job_controller + workflow_template_service diff --git a/docs/dataproc_v1/types.rst b/docs/dataproc_v1/types.rst index 5dde0cd6..bc1a0a30 100644 --- a/docs/dataproc_v1/types.rst +++ b/docs/dataproc_v1/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Dataproc v1 API .. automodule:: google.cloud.dataproc_v1.types :members: + :undoc-members: :show-inheritance: diff --git a/docs/dataproc_v1/workflow_template_service.rst b/docs/dataproc_v1/workflow_template_service.rst new file mode 100644 index 00000000..154de462 --- /dev/null +++ b/docs/dataproc_v1/workflow_template_service.rst @@ -0,0 +1,11 @@ +WorkflowTemplateService +----------------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1beta2/autoscaling_policy_service.rst b/docs/dataproc_v1beta2/autoscaling_policy_service.rst new file mode 100644 index 00000000..3a411371 --- /dev/null +++ b/docs/dataproc_v1beta2/autoscaling_policy_service.rst @@ -0,0 +1,11 @@ +AutoscalingPolicyService +------------------------------------------ + +.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1beta2/cluster_controller.rst b/docs/dataproc_v1beta2/cluster_controller.rst new file mode 100644 index 00000000..c10e78c7 --- /dev/null +++ b/docs/dataproc_v1beta2/cluster_controller.rst @@ -0,0 +1,11 @@ +ClusterController +----------------------------------- + +.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1beta2/job_controller.rst b/docs/dataproc_v1beta2/job_controller.rst new file mode 100644 index 00000000..3f5d74e1 --- /dev/null +++ b/docs/dataproc_v1beta2/job_controller.rst @@ -0,0 +1,11 @@ +JobController +------------------------------- + +.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller.pagers + :members: + :inherited-members: diff --git a/docs/dataproc_v1beta2/services.rst b/docs/dataproc_v1beta2/services.rst index 273b2def..23c2d640 100644 --- a/docs/dataproc_v1beta2/services.rst +++ b/docs/dataproc_v1beta2/services.rst @@ -1,15 +1,9 @@ Services for Google Cloud Dataproc v1beta2 API ============================================== +.. toctree:: + :maxdepth: 2 -.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service - :members: - :inherited-members: -.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller - :members: - :inherited-members: -.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller - :members: - :inherited-members: -.. 
automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service - :members: - :inherited-members: + autoscaling_policy_service + cluster_controller + job_controller + workflow_template_service diff --git a/docs/dataproc_v1beta2/types.rst b/docs/dataproc_v1beta2/types.rst index e3dba489..1358e4c1 100644 --- a/docs/dataproc_v1beta2/types.rst +++ b/docs/dataproc_v1beta2/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Dataproc v1beta2 API .. automodule:: google.cloud.dataproc_v1beta2.types :members: + :undoc-members: :show-inheritance: diff --git a/docs/dataproc_v1beta2/workflow_template_service.rst b/docs/dataproc_v1beta2/workflow_template_service.rst new file mode 100644 index 00000000..10a2826c --- /dev/null +++ b/docs/dataproc_v1beta2/workflow_template_service.rst @@ -0,0 +1,11 @@ +WorkflowTemplateService +----------------------------------------- + +.. automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service + :members: + :inherited-members: + + +.. automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers + :members: + :inherited-members: diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py index b17ac4f8..82d780ab 100644 --- a/google/cloud/dataproc_v1/__init__.py +++ b/google/cloud/dataproc_v1/__init__.py @@ -115,6 +115,7 @@ "CancelJobRequest", "Cluster", "ClusterConfig", + "ClusterControllerClient", "ClusterMetrics", "ClusterOperation", "ClusterOperationMetadata", @@ -191,7 +192,6 @@ "WorkflowNode", "WorkflowTemplate", "WorkflowTemplatePlacement", - "WorkflowTemplateServiceClient", "YarnApplication", - "ClusterControllerClient", + "WorkflowTemplateServiceClient", ) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py index fa91a7e7..5508ec26 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py @@ -86,7 +86,36 @@ class AutoscalingPolicyServiceAsyncClient: AutoscalingPolicyServiceClient.parse_common_location_path ) - from_service_account_file = AutoscalingPolicyServiceClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. + """ + return AutoscalingPolicyServiceClient.from_service_account_info.__func__(AutoscalingPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. 
+ """ + return AutoscalingPolicyServiceClient.from_service_account_file.__func__(AutoscalingPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -156,7 +185,7 @@ async def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -164,7 +193,7 @@ async def create_autoscaling_policy( r"""Creates new autoscaling policy. Args: - request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest`): The request object. A request to create an autoscaling policy. parent (:class:`str`): @@ -181,13 +210,15 @@ async def create_autoscaling_policy( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): Required. The autoscaling policy to create. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -198,7 +229,7 @@ async def create_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -206,7 +237,7 @@ async def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) + has_flattened_params = any([parent, policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -220,8 +251,8 @@ async def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -247,7 +278,7 @@ async def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -258,13 +289,14 @@ async def update_autoscaling_policy( replacements. Args: - request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest`): The request object. A request to update an autoscaling policy. - policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): Required. The updated autoscaling policy. 
- This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -275,7 +307,7 @@ async def update_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -283,7 +315,7 @@ async def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) + has_flattened_params = any([policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -295,8 +327,8 @@ async def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -309,6 +341,7 @@ async def update_autoscaling_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -340,7 +373,7 @@ async def get_autoscaling_policy( r"""Retrieves autoscaling policy. Args: - request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest`): The request object. A request to fetch an autoscaling policy. name (:class:`str`): @@ -356,6 +389,7 @@ async def get_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -367,7 +401,7 @@ async def get_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -401,6 +435,7 @@ async def get_autoscaling_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -430,7 +465,7 @@ async def list_autoscaling_policies( r"""Lists autoscaling policies in the project. Args: - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (:class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest`): The request object. A request to list autoscaling policies in a project. parent (:class:`str`): @@ -446,6 +481,7 @@ async def list_autoscaling_policies( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -457,7 +493,7 @@ async def list_autoscaling_policies( sent along with the request as metadata. Returns: - ~.pagers.ListAutoscalingPoliciesAsyncPager: + google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesAsyncPager: A response to a request to list autoscaling policies in a project. 
Iterating over this object will yield @@ -494,6 +530,7 @@ async def list_autoscaling_policies( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -531,7 +568,7 @@ async def delete_autoscaling_policy( more clusters. Args: - request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest`): The request object. A request to delete an autoscaling policy. Autoscaling policies in use by one or more clusters will @@ -551,6 +588,7 @@ async def delete_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py index 1551d1f4..5a6d1151 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py @@ -114,6 +114,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -126,7 +142,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + AutoscalingPolicyServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -236,10 +252,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport (Union[str, AutoscalingPolicyServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT @@ -275,21 +291,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -332,7 +344,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -342,7 +354,7 @@ def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -350,10 +362,10 @@ def create_autoscaling_policy( r"""Creates new autoscaling policy. Args: - request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest): The request object. A request to create an autoscaling policy. - parent (:class:`str`): + parent (str): Required. The "resource name" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -367,13 +379,15 @@ def create_autoscaling_policy( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The autoscaling policy to create. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -384,7 +398,7 @@ def create_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -392,7 +406,7 @@ def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, policy]) + has_flattened_params = any([parent, policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -411,8 +425,8 @@ def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -436,7 +450,7 @@ def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -447,13 +461,14 @@ def update_autoscaling_policy( replacements. Args: - request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest): The request object. A request to update an autoscaling policy. - policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The updated autoscaling policy. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -464,7 +479,7 @@ def update_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -472,7 +487,7 @@ def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) + has_flattened_params = any([policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -489,8 +504,8 @@ def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -524,10 +539,10 @@ def get_autoscaling_policy( r"""Retrieves autoscaling policy. Args: - request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest): The request object. A request to fetch an autoscaling policy. - name (:class:`str`): + name (str): Required. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. @@ -540,6 +555,7 @@ def get_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -551,7 +567,7 @@ def get_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. 
@@ -607,10 +623,10 @@ def list_autoscaling_policies( r"""Lists autoscaling policies in the project. Args: - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): The request object. A request to list autoscaling policies in a project. - parent (:class:`str`): + parent (str): Required. The "resource name" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -623,6 +639,7 @@ def list_autoscaling_policies( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -634,7 +651,7 @@ def list_autoscaling_policies( sent along with the request as metadata. Returns: - ~.pagers.ListAutoscalingPoliciesPager: + google.cloud.dataproc_v1.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesPager: A response to a request to list autoscaling policies in a project. Iterating over this object will yield @@ -703,12 +720,12 @@ def delete_autoscaling_policy( more clusters. Args: - request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest): The request object. A request to delete an autoscaling policy. Autoscaling policies in use by one or more clusters will not be deleted. - name (:class:`str`): + name (str): Required. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. @@ -723,6 +740,7 @@ def delete_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py index a246d08f..85deb317 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1.types import autoscaling_policies @@ -24,7 +33,7 @@ class ListAutoscalingPoliciesPager: """A pager for iterating through ``list_autoscaling_policies`` requests. This class thinly wraps an initial - :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and provides an ``__iter__`` method to iterate through its ``policies`` field. @@ -33,7 +42,7 @@ class ListAutoscalingPoliciesPager: through the ``policies`` field on the corresponding responses. - All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. 
""" @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): The initial request object. - response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListAutoscalingPoliciesAsyncPager: """A pager for iterating through ``list_autoscaling_policies`` requests. This class thinly wraps an initial - :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` object, and provides an ``__aiter__`` method to iterate through its ``policies`` field. @@ -95,7 +104,7 @@ class ListAutoscalingPoliciesAsyncPager: through the ``policies`` field on the corresponding responses. - All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -115,9 +124,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest): The initial request object. - response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + response (google.cloud.dataproc_v1.types.ListAutoscalingPoliciesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py index 46201f4b..6fcb1442 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py @@ -68,10 +68,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -79,6 +79,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -88,20 +91,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -119,6 +119,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -132,6 +133,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -145,6 +147,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py index 736d0870..e1df740b 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py @@ -59,6 +59,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -89,6 +90,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -103,72 +108,60 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials - - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -176,17 +169,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -200,7 +184,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. 
If diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py index dc52d94d..f098fe12 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -63,7 +63,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -103,6 +103,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -134,12 +135,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -148,72 +153,60 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials - - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -221,17 +214,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py index 75a59d2d..924c35f0 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py @@ -80,7 +80,36 @@ class ClusterControllerAsyncClient: ClusterControllerClient.parse_common_location_path ) - from_service_account_file = ClusterControllerClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. + """ + return ClusterControllerClient.from_service_account_info.__func__(ClusterControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. + """ + return ClusterControllerClient.from_service_account_file.__func__(ClusterControllerAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -161,22 +190,24 @@ async def create_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.CreateClusterRequest`): + request (:class:`google.cloud.dataproc_v1.types.CreateClusterRequest`): The request object. A request to create a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (:class:`google.cloud.dataproc_v1.types.Cluster`): Required. The cluster to create. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this @@ -189,13 +220,11 @@ async def create_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -229,6 +258,7 @@ async def create_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -267,17 +297,19 @@ async def update_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.UpdateClusterRequest`): + request (:class:`google.cloud.dataproc_v1.types.UpdateClusterRequest`): The request object. A request to update a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
@@ -286,12 +318,12 @@ async def update_cluster( This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (:class:`google.cloud.dataproc_v1.types.Cluster`): Required. The changes to the cluster. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers in a cluster to 5, the ``update_mask`` @@ -354,6 +386,7 @@ async def update_cluster( + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -365,13 +398,11 @@ async def update_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -411,6 +442,7 @@ async def update_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -447,18 +479,20 @@ async def delete_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.DeleteClusterRequest`): + request (:class:`google.cloud.dataproc_v1.types.DeleteClusterRequest`): The request object. A request to delete a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -475,24 +509,22 @@ async def delete_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. 
+ The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -526,6 +558,7 @@ async def delete_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -560,19 +593,21 @@ async def get_cluster( project. Args: - request (:class:`~.clusters.GetClusterRequest`): + request (:class:`google.cloud.dataproc_v1.types.GetClusterRequest`): The request object. Request to get the resource representation for a cluster in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -589,7 +624,7 @@ async def get_cluster( sent along with the request as metadata. Returns: - ~.clusters.Cluster: + google.cloud.dataproc_v1.types.Cluster: Describes the identifying information, config, and status of a cluster of Compute Engine instances. @@ -630,6 +665,7 @@ async def get_cluster( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -656,19 +692,21 @@ async def list_clusters( alphabetically. Args: - request (:class:`~.clusters.ListClustersRequest`): + request (:class:`google.cloud.dataproc_v1.types.ListClustersRequest`): The request object. A request to list the clusters in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -696,6 +734,7 @@ async def list_clusters( status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -707,7 +746,7 @@ async def list_clusters( sent along with the request as metadata. Returns: - ~.pagers.ListClustersAsyncPager: + google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersAsyncPager: The list of all clusters in a project. Iterating over this object will yield @@ -750,6 +789,7 @@ async def list_clusters( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -788,19 +828,21 @@ async def diagnose_cluster( `DiagnoseClusterResults `__. Args: - request (:class:`~.clusters.DiagnoseClusterRequest`): + request (:class:`google.cloud.dataproc_v1.types.DiagnoseClusterRequest`): The request object. A request to collect cluster diagnostic information. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. 
The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -817,12 +859,12 @@ async def diagnose_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be - :class:``~.clusters.DiagnoseClusterResults``: The - location of diagnostic output. + :class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults` + The location of diagnostic output. """ # Create or coerce a protobuf request object. @@ -856,6 +898,7 @@ async def diagnose_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/dataproc_v1/services/cluster_controller/client.py b/google/cloud/dataproc_v1/services/cluster_controller/client.py index 42594c47..daef141a 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/client.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/client.py @@ -119,6 +119,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -131,7 +147,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + ClusterControllerClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -223,10 +239,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.ClusterControllerTransport]): The + transport (Union[str, ClusterControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT @@ -262,21 +278,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -319,7 +331,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -341,22 +353,24 @@ def create_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.CreateClusterRequest`): + request (google.cloud.dataproc_v1.types.CreateClusterRequest): The request object. A request to create a cluster. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (google.cloud.dataproc_v1.types.Cluster): Required. The cluster to create. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this @@ -369,13 +383,11 @@ def create_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -442,31 +454,33 @@ def update_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.UpdateClusterRequest`): + request (google.cloud.dataproc_v1.types.UpdateClusterRequest): The request object. A request to update a cluster. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (google.cloud.dataproc_v1.types.Cluster): Required. The changes to the cluster. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers in a cluster to 5, the ``update_mask`` @@ -529,6 +543,7 @@ def update_cluster( + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -540,13 +555,11 @@ def update_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -617,22 +630,24 @@ def delete_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.DeleteClusterRequest`): + request (google.cloud.dataproc_v1.types.DeleteClusterRequest): The request object. A request to delete a cluster. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -645,24 +660,22 @@ def delete_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. 
""" # Create or coerce a protobuf request object. @@ -725,23 +738,25 @@ def get_cluster( project. Args: - request (:class:`~.clusters.GetClusterRequest`): + request (google.cloud.dataproc_v1.types.GetClusterRequest): The request object. Request to get the resource representation for a cluster in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -754,7 +769,7 @@ def get_cluster( sent along with the request as metadata. Returns: - ~.clusters.Cluster: + google.cloud.dataproc_v1.types.Cluster: Describes the identifying information, config, and status of a cluster of Compute Engine instances. @@ -812,23 +827,25 @@ def list_clusters( alphabetically. Args: - request (:class:`~.clusters.ListClustersRequest`): + request (google.cloud.dataproc_v1.types.ListClustersRequest): The request object. A request to list the clusters in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - filter (:class:`str`): + filter (str): Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax: @@ -852,6 +869,7 @@ def list_clusters( status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -863,7 +881,7 @@ def list_clusters( sent along with the request as metadata. Returns: - ~.pagers.ListClustersPager: + google.cloud.dataproc_v1.services.cluster_controller.pagers.ListClustersPager: The list of all clusters in a project. Iterating over this object will yield @@ -935,23 +953,25 @@ def diagnose_cluster( `DiagnoseClusterResults `__. Args: - request (:class:`~.clusters.DiagnoseClusterRequest`): + request (google.cloud.dataproc_v1.types.DiagnoseClusterRequest): The request object. A request to collect cluster diagnostic information. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. 
This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -964,12 +984,12 @@ def diagnose_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be - :class:``~.clusters.DiagnoseClusterResults``: The - location of diagnostic output. + :class:`google.cloud.dataproc_v1.types.DiagnoseClusterResults` + The location of diagnostic output. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/dataproc_v1/services/cluster_controller/pagers.py b/google/cloud/dataproc_v1/services/cluster_controller/pagers.py index c5f0fbdf..418c92b1 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/pagers.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1.types import clusters @@ -24,7 +33,7 @@ class ListClustersPager: """A pager for iterating through ``list_clusters`` requests. This class thinly wraps an initial - :class:`~.clusters.ListClustersResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListClustersResponse` object, and provides an ``__iter__`` method to iterate through its ``clusters`` field. @@ -33,7 +42,7 @@ class ListClustersPager: through the ``clusters`` field on the corresponding responses. - All the usual :class:`~.clusters.ListClustersResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListClustersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.clusters.ListClustersRequest`): + request (google.cloud.dataproc_v1.types.ListClustersRequest): The initial request object. - response (:class:`~.clusters.ListClustersResponse`): + response (google.cloud.dataproc_v1.types.ListClustersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListClustersAsyncPager: """A pager for iterating through ``list_clusters`` requests. This class thinly wraps an initial - :class:`~.clusters.ListClustersResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListClustersResponse` object, and provides an ``__aiter__`` method to iterate through its ``clusters`` field. @@ -95,7 +104,7 @@ class ListClustersAsyncPager: through the ``clusters`` field on the corresponding responses. - All the usual :class:`~.clusters.ListClustersResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListClustersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -113,9 +122,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.clusters.ListClustersRequest`): + request (google.cloud.dataproc_v1.types.ListClustersRequest): The initial request object. 
- response (:class:`~.clusters.ListClustersResponse`): + response (google.cloud.dataproc_v1.types.ListClustersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py index caccd04e..3583488a 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -80,6 +80,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -89,20 +92,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -113,6 +113,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -124,6 +125,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -135,6 +137,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -150,6 +153,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -165,6 +169,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -176,6 +181,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py index ae1b8d78..f8c3e60a 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -90,6 +91,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -104,72 +109,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -177,18 +171,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -202,7 +186,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. 
These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py index b3b50cf4..e27a348b 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py @@ -64,7 +64,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -104,6 +104,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -135,12 +136,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -149,72 +154,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -222,18 +216,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1/services/job_controller/async_client.py b/google/cloud/dataproc_v1/services/job_controller/async_client.py index 8eaf753e..cc5d6522 100644 --- a/google/cloud/dataproc_v1/services/job_controller/async_client.py +++ b/google/cloud/dataproc_v1/services/job_controller/async_client.py @@ -75,7 +75,36 @@ class JobControllerAsyncClient: JobControllerClient.parse_common_location_path ) - from_service_account_file = JobControllerClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_info.__func__(JobControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_file.__func__(JobControllerAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -153,22 +182,24 @@ async def submit_job( r"""Submits a job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (:class:`google.cloud.dataproc_v1.types.SubmitJobRequest`): The request object. A request to submit a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (:class:`google.cloud.dataproc_v1.types.Job`): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -181,7 +212,7 @@ async def submit_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -215,6 +246,7 @@ async def submit_job( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -240,22 +272,24 @@ async def submit_job_as_operation( r"""Submits job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (:class:`google.cloud.dataproc_v1.types.SubmitJobRequest`): The request object. A request to submit a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (:class:`google.cloud.dataproc_v1.types.Job`): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -268,11 +302,12 @@ async def submit_job_as_operation( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be - :class:``~.jobs.Job``: A Dataproc job resource. + :class:`google.cloud.dataproc_v1.types.Job` A Dataproc + job resource. 
""" # Create or coerce a protobuf request object. @@ -306,6 +341,7 @@ async def submit_job_as_operation( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -340,19 +376,21 @@ async def get_job( project. Args: - request (:class:`~.jobs.GetJobRequest`): + request (:class:`google.cloud.dataproc_v1.types.GetJobRequest`): The request object. A request to get the resource representation for a job in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -369,7 +407,7 @@ async def get_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -407,6 +445,7 @@ async def get_job( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -432,18 +471,20 @@ async def list_jobs( r"""Lists regions/{region}/jobs in a project. Args: - request (:class:`~.jobs.ListJobsRequest`): + request (:class:`google.cloud.dataproc_v1.types.ListJobsRequest`): The request object. A request to list jobs in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -465,6 +506,7 @@ async def list_jobs( status.state = ACTIVE AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -476,7 +518,7 @@ async def list_jobs( sent along with the request as metadata. Returns: - ~.pagers.ListJobsAsyncPager: + google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsAsyncPager: A list of jobs in a project. Iterating over this object will yield results and resolve additional pages @@ -518,6 +560,7 @@ async def list_jobs( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -546,7 +589,7 @@ async def update_job( r"""Updates a job in a project. Args: - request (:class:`~.jobs.UpdateJobRequest`): + request (:class:`google.cloud.dataproc_v1.types.UpdateJobRequest`): The request object. A request to update a job. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -556,7 +599,7 @@ async def update_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. 
@@ -572,6 +615,7 @@ async def update_job( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -601,18 +645,20 @@ async def cancel_job( `regions/{region}/jobs.get `__. Args: - request (:class:`~.jobs.CancelJobRequest`): + request (:class:`google.cloud.dataproc_v1.types.CancelJobRequest`): The request object. A request to cancel a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -629,7 +675,7 @@ async def cancel_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -667,6 +713,7 @@ async def cancel_job( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -693,18 +740,20 @@ async def delete_job( delete fails, and the response returns ``FAILED_PRECONDITION``. Args: - request (:class:`~.jobs.DeleteJobRequest`): + request (:class:`google.cloud.dataproc_v1.types.DeleteJobRequest`): The request object. A request to delete a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -751,6 +800,7 @@ async def delete_job( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/dataproc_v1/services/job_controller/client.py b/google/cloud/dataproc_v1/services/job_controller/client.py index d101e833..92d3a67e 100644 --- a/google/cloud/dataproc_v1/services/job_controller/client.py +++ b/google/cloud/dataproc_v1/services/job_controller/client.py @@ -110,6 +110,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -122,7 +138,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. 
+ JobControllerClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -214,10 +230,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.JobControllerTransport]): The + transport (Union[str, JobControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -253,21 +269,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -310,7 +322,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -329,22 +341,24 @@ def submit_job( r"""Submits a job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (google.cloud.dataproc_v1.types.SubmitJobRequest): The request object. A request to submit a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (google.cloud.dataproc_v1.types.Job): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -357,7 +371,7 @@ def submit_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -411,22 +425,24 @@ def submit_job_as_operation( r"""Submits job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (google.cloud.dataproc_v1.types.SubmitJobRequest): The request object. A request to submit a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. 
+ This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (google.cloud.dataproc_v1.types.Job): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -439,11 +455,12 @@ def submit_job_as_operation( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be - :class:``~.jobs.Job``: A Dataproc job resource. + :class:`google.cloud.dataproc_v1.types.Job` A Dataproc + job resource. """ # Create or coerce a protobuf request object. @@ -506,23 +523,25 @@ def get_job( project. Args: - request (:class:`~.jobs.GetJobRequest`): + request (google.cloud.dataproc_v1.types.GetJobRequest): The request object. A request to get the resource representation for a job in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job_id (:class:`str`): + job_id (str): Required. The job ID. This corresponds to the ``job_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -535,7 +554,7 @@ def get_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -589,22 +608,24 @@ def list_jobs( r"""Lists regions/{region}/jobs in a project. Args: - request (:class:`~.jobs.ListJobsRequest`): + request (google.cloud.dataproc_v1.types.ListJobsRequest): The request object. A request to list jobs in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - filter (:class:`str`): + filter (str): Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax: @@ -622,6 +643,7 @@ def list_jobs( status.state = ACTIVE AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -633,7 +655,7 @@ def list_jobs( sent along with the request as metadata. Returns: - ~.pagers.ListJobsPager: + google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsPager: A list of jobs in a project. Iterating over this object will yield results and resolve additional pages @@ -694,7 +716,7 @@ def update_job( r"""Updates a job in a project. 
Args: - request (:class:`~.jobs.UpdateJobRequest`): + request (google.cloud.dataproc_v1.types.UpdateJobRequest): The request object. A request to update a job. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -704,7 +726,7 @@ def update_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -744,22 +766,24 @@ def cancel_job( `regions/{region}/jobs.get `__. Args: - request (:class:`~.jobs.CancelJobRequest`): + request (google.cloud.dataproc_v1.types.CancelJobRequest): The request object. A request to cancel a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job_id (:class:`str`): + job_id (str): Required. The job ID. This corresponds to the ``job_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -772,7 +796,7 @@ def cancel_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -827,22 +851,24 @@ def delete_job( delete fails, and the response returns ``FAILED_PRECONDITION``. Args: - request (:class:`~.jobs.DeleteJobRequest`): + request (google.cloud.dataproc_v1.types.DeleteJobRequest): The request object. A request to delete a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job_id (:class:`str`): + job_id (str): Required. The job ID. This corresponds to the ``job_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/dataproc_v1/services/job_controller/pagers.py b/google/cloud/dataproc_v1/services/job_controller/pagers.py index 185f0ace..77ae8be8 100644 --- a/google/cloud/dataproc_v1/services/job_controller/pagers.py +++ b/google/cloud/dataproc_v1/services/job_controller/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1.types import jobs @@ -24,7 +33,7 @@ class ListJobsPager: """A pager for iterating through ``list_jobs`` requests. This class thinly wraps an initial - :class:`~.jobs.ListJobsResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListJobsResponse` object, and provides an ``__iter__`` method to iterate through its ``jobs`` field. @@ -33,7 +42,7 @@ class ListJobsPager: through the ``jobs`` field on the corresponding responses. 
- All the usual :class:`~.jobs.ListJobsResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListJobsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.jobs.ListJobsRequest`): + request (google.cloud.dataproc_v1.types.ListJobsRequest): The initial request object. - response (:class:`~.jobs.ListJobsResponse`): + response (google.cloud.dataproc_v1.types.ListJobsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListJobsAsyncPager: """A pager for iterating through ``list_jobs`` requests. This class thinly wraps an initial - :class:`~.jobs.ListJobsResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListJobsResponse` object, and provides an ``__aiter__`` method to iterate through its ``jobs`` field. @@ -95,7 +104,7 @@ class ListJobsAsyncPager: through the ``jobs`` field on the corresponding responses. - All the usual :class:`~.jobs.ListJobsResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListJobsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -113,9 +122,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.jobs.ListJobsRequest`): + request (google.cloud.dataproc_v1.types.ListJobsRequest): The initial request object. - response (:class:`~.jobs.ListJobsResponse`): + response (google.cloud.dataproc_v1.types.ListJobsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/google/cloud/dataproc_v1/services/job_controller/transports/base.py index c8538dd1..15bf4766 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/base.py @@ -70,10 +70,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -81,6 +81,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -90,20 +93,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -114,6 +114,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -125,6 +126,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -140,6 +142,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -155,6 +158,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -166,6 +170,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -181,6 +186,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -192,6 +198,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py index 5802abf7..9842af0e 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -90,6 +91,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
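The ``client_cert_source_for_mtls`` argument added here supersedes the deprecated ``api_mtls_endpoint``/``client_cert_source`` pair. A hedged sketch of wiring a certificate callback directly into the transport; the ``load_client_cert`` helper, the file paths, and the mTLS hostname are illustrative assumptions, not values taken from this patch:

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.job_controller.transports import (
        JobControllerGrpcTransport,
    )

    def load_client_cert():
        # Hypothetical helper: return (certificate_chain, private_key) as PEM bytes.
        with open("client.crt", "rb") as crt, open("client.key", "rb") as key:
            return crt.read(), key.read()

    transport = JobControllerGrpcTransport(
        host="dataproc.mtls.googleapis.com",
        client_cert_source_for_mtls=load_client_cert,
    )
    client = dataproc_v1.JobControllerClient(transport=transport)
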
quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -104,72 +109,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -177,18 +171,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -202,7 +186,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py index 04011df0..e9b2d197 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py @@ -64,7 +64,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -104,6 +104,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -135,12 +136,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
Raises: @@ -149,72 +154,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -222,18 +216,8 @@ def __init__( ], ) - # Run the base constructor. 
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py index 44cb69b3..cbfafedc 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py @@ -90,7 +90,36 @@ class WorkflowTemplateServiceAsyncClient: WorkflowTemplateServiceClient.parse_common_location_path ) - from_service_account_file = WorkflowTemplateServiceClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_info.__func__(WorkflowTemplateServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_file.__func__(WorkflowTemplateServiceAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -168,7 +197,7 @@ async def create_workflow_template( r"""Creates new workflow template. Args: - request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest`): The request object. A request to create a workflow template. parent (:class:`str`): @@ -184,12 +213,14 @@ async def create_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): Required. The Dataproc workflow template to create. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -201,7 +232,7 @@ async def create_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1.types.WorkflowTemplate: A Dataproc workflow template resource. 
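With this change the async client gets real ``from_service_account_info``/``from_service_account_file`` classmethods instead of aliasing the sync ones; calling code is unchanged. A small sketch (the key-file path and template name are placeholders):

    import asyncio

    from google.cloud.dataproc_v1.services.workflow_template_service import (
        WorkflowTemplateServiceAsyncClient,
    )

    async def main():
        client = WorkflowTemplateServiceAsyncClient.from_service_account_file(
            "service-account.json"  # placeholder path
        )
        template = await client.get_workflow_template(
            name="projects/my-project/regions/us-central1/workflowTemplates/my-template"
        )
        print(template.id, template.version)

    asyncio.run(main())
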
@@ -235,6 +266,7 @@ async def create_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -266,7 +298,7 @@ async def get_workflow_template( specifying optional version parameter. Args: - request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest`): The request object. A request to fetch a workflow template. name (:class:`str`): @@ -283,6 +315,7 @@ async def get_workflow_template( resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -294,7 +327,7 @@ async def get_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -330,6 +363,7 @@ async def get_workflow_template( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -382,7 +416,7 @@ async def instantiate_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest`): The request object. A request to instantiate a workflow template. name (:class:`str`): @@ -401,14 +435,16 @@ async def instantiate_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + parameters (:class:`Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]`): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. + This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -420,24 +456,22 @@ async def instantiate_workflow_template( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. 
+ The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -470,6 +504,7 @@ async def instantiate_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -533,7 +568,7 @@ async def instantiate_inline_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest`): The request object. A request to instantiate an inline workflow template. parent (:class:`str`): @@ -551,12 +586,14 @@ async def instantiate_inline_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): Required. The workflow template to instantiate. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -568,24 +605,22 @@ async def instantiate_inline_workflow_template( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -617,6 +652,7 @@ async def instantiate_inline_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -656,14 +692,15 @@ async def update_workflow_template( server version. Args: - request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest`): The request object. A request to update a workflow template. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): Required. The updated workflow template. The ``template.version`` field must match the current version. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -675,7 +712,7 @@ async def update_workflow_template( sent along with the request as metadata. 
Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -707,6 +744,7 @@ async def update_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -739,7 +777,7 @@ async def list_workflow_templates( the request. Args: - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (:class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest`): The request object. A request to list workflow templates in a project. parent (:class:`str`): @@ -755,6 +793,7 @@ async def list_workflow_templates( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -766,7 +805,7 @@ async def list_workflow_templates( sent along with the request as metadata. Returns: - ~.pagers.ListWorkflowTemplatesAsyncPager: + google.cloud.dataproc_v1.services.workflow_template_service.pagers.ListWorkflowTemplatesAsyncPager: A response to a request to list workflow templates in a project. Iterating over this object will yield @@ -805,6 +844,7 @@ async def list_workflow_templates( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -841,7 +881,7 @@ async def delete_workflow_template( rogress workflows. Args: - request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest`): The request object. A request to delete a workflow template. Currently started workflows will remain running. @@ -860,6 +900,7 @@ async def delete_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -897,6 +938,7 @@ async def delete_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/google/cloud/dataproc_v1/services/workflow_template_service/client.py index 73a5626b..bb0be312 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/client.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/client.py @@ -118,6 +118,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -130,7 +146,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + WorkflowTemplateServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -240,10 +256,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport (Union[str, WorkflowTemplateServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -279,21 +295,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -336,7 +348,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -354,10 +366,10 @@ def create_workflow_template( r"""Creates new workflow template. Args: - request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest): The request object. A request to create a workflow template. - parent (:class:`str`): + parent (str): Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -370,12 +382,14 @@ def create_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (google.cloud.dataproc_v1.types.WorkflowTemplate): Required. The Dataproc workflow template to create. 
+ This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -387,7 +401,7 @@ def create_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -447,10 +461,10 @@ def get_workflow_template( specifying optional version parameter. Args: - request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest): The request object. A request to fetch a workflow template. - name (:class:`str`): + name (str): Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. @@ -464,6 +478,7 @@ def get_workflow_template( resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -475,7 +490,7 @@ def get_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -554,10 +569,10 @@ def instantiate_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest): The request object. A request to instantiate a workflow template. - name (:class:`str`): + name (str): Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. @@ -573,14 +588,16 @@ def instantiate_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + parameters (Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. + This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -592,24 +609,22 @@ def instantiate_workflow_template( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -636,9 +651,8 @@ def instantiate_workflow_template( if name is not None: request.name = name - - if parameters: - request.parameters.update(parameters) + if parameters is not None: + request.parameters = parameters # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -704,10 +718,10 @@ def instantiate_inline_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest): The request object. A request to instantiate an inline workflow template. - parent (:class:`str`): + parent (str): Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -722,12 +736,14 @@ def instantiate_inline_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (google.cloud.dataproc_v1.types.WorkflowTemplate): Required. The workflow template to instantiate. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -739,24 +755,22 @@ def instantiate_inline_workflow_template( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -828,14 +842,15 @@ def update_workflow_template( server version. Args: - request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest): The request object. A request to update a workflow template. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (google.cloud.dataproc_v1.types.WorkflowTemplate): Required. The updated workflow template. The ``template.version`` field must match the current version. 
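As the rewritten return docs say, ``instantiate_workflow_template`` yields a long-running operation whose result payload is ``google.protobuf.empty_pb2.Empty``, so success is signalled simply by ``result()`` returning. A blocking sketch with placeholder names:

    from google.cloud import dataproc_v1

    client = dataproc_v1.WorkflowTemplateServiceClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )

    operation = client.instantiate_workflow_template(
        name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
        parameters={"CLUSTER_NAME": "demo-cluster"},  # optional template parameters
    )

    # The Empty result carries no data; an exception here means the workflow failed.
    operation.result()
    print("workflow completed")
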
+ This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -847,7 +862,7 @@ def update_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -906,10 +921,10 @@ def list_workflow_templates( the request. Args: - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): The request object. A request to list workflow templates in a project. - parent (:class:`str`): + parent (str): Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -922,6 +937,7 @@ def list_workflow_templates( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -933,7 +949,7 @@ def list_workflow_templates( sent along with the request as metadata. Returns: - ~.pagers.ListWorkflowTemplatesPager: + google.cloud.dataproc_v1.services.workflow_template_service.pagers.ListWorkflowTemplatesPager: A response to a request to list workflow templates in a project. Iterating over this object will yield @@ -999,11 +1015,11 @@ def delete_workflow_template( rogress workflows. Args: - request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest): The request object. A request to delete a workflow template. Currently started workflows will remain running. - name (:class:`str`): + name (str): Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. @@ -1018,6 +1034,7 @@ def delete_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py b/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py index 86a35f48..90fa03f2 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1.types import workflow_templates @@ -24,7 +33,7 @@ class ListWorkflowTemplatesPager: """A pager for iterating through ``list_workflow_templates`` requests. This class thinly wraps an initial - :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` object, and provides an ``__iter__`` method to iterate through its ``templates`` field. @@ -33,7 +42,7 @@ class ListWorkflowTemplatesPager: through the ``templates`` field on the corresponding responses. 
- All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): The initial request object. - response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + response (google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListWorkflowTemplatesAsyncPager: """A pager for iterating through ``list_workflow_templates`` requests. This class thinly wraps an initial - :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` object, and provides an ``__aiter__`` method to iterate through its ``templates`` field. @@ -95,7 +104,7 @@ class ListWorkflowTemplatesAsyncPager: through the ``templates`` field on the corresponding responses. - All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + All the usual :class:`google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -115,9 +124,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest): The initial request object. - response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + response (google.cloud.dataproc_v1.types.ListWorkflowTemplatesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py index 967002f5..bded001b 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py @@ -70,10 +70,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -81,6 +81,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. 
+ self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -90,20 +93,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -114,6 +114,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -129,6 +130,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -140,6 +142,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -151,6 +154,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -162,6 +166,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -177,6 +182,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -188,6 +194,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py index 98d84293..e2bbf535 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py @@ -61,6 +61,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -91,6 +92,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. 
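The hunks above also attach an explicit ``deadline`` to each method's default retry, matching its overall timeout. Callers can pass an equivalent override per call; a sketch that roughly mirrors those defaults (the template name is a placeholder and the retried exception set is abbreviated):

    from google.api_core import exceptions, retry
    from google.cloud import dataproc_v1

    client = dataproc_v1.WorkflowTemplateServiceClient()

    # Roughly the generated default: exponential backoff capped at 60s per attempt,
    # giving up once 600 seconds have elapsed overall.
    custom_retry = retry.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retry.if_exception_type(exceptions.ServiceUnavailable),
        deadline=600.0,
    )

    template = client.get_workflow_template(
        name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
        retry=custom_retry,
        timeout=600.0,
    )
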
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -105,72 +110,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. 
+ if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -178,18 +172,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -203,7 +187,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py index 1024ab1b..1f93da89 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py @@ -65,7 +65,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -105,6 +105,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -136,12 +137,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
Raises: @@ -150,72 +155,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -223,18 +217,8 @@ def __init__( ], ) - # Run the base constructor. 
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1/types/__init__.py b/google/cloud/dataproc_v1/types/__init__.py index 3c02d690..d79923a4 100644 --- a/google/cloud/dataproc_v1/types/__init__.py +++ b/google/cloud/dataproc_v1/types/__init__.py @@ -19,179 +19,179 @@ AutoscalingPolicy, BasicAutoscalingAlgorithm, BasicYarnAutoscalingConfig, - InstanceGroupAutoscalingPolicyConfig, CreateAutoscalingPolicyRequest, - GetAutoscalingPolicyRequest, - UpdateAutoscalingPolicyRequest, DeleteAutoscalingPolicyRequest, + GetAutoscalingPolicyRequest, + InstanceGroupAutoscalingPolicyConfig, ListAutoscalingPoliciesRequest, ListAutoscalingPoliciesResponse, + UpdateAutoscalingPolicyRequest, ) from .clusters import ( + AcceleratorConfig, + AutoscalingConfig, Cluster, ClusterConfig, - EndpointConfig, - AutoscalingConfig, + ClusterMetrics, + ClusterStatus, + CreateClusterRequest, + DeleteClusterRequest, + DiagnoseClusterRequest, + DiagnoseClusterResults, + DiskConfig, EncryptionConfig, + EndpointConfig, GceClusterConfig, + GetClusterRequest, InstanceGroupConfig, - ManagedGroupConfig, - AcceleratorConfig, - DiskConfig, - NodeInitializationAction, - ClusterStatus, - SecurityConfig, KerberosConfig, - SoftwareConfig, LifecycleConfig, - ClusterMetrics, - CreateClusterRequest, - UpdateClusterRequest, - DeleteClusterRequest, - GetClusterRequest, ListClustersRequest, ListClustersResponse, - DiagnoseClusterRequest, - DiagnoseClusterResults, + ManagedGroupConfig, + NodeInitializationAction, ReservationAffinity, + SecurityConfig, + SoftwareConfig, + UpdateClusterRequest, ) from .jobs import ( - LoggingConfig, + CancelJobRequest, + DeleteJobRequest, + GetJobRequest, HadoopJob, - SparkJob, - PySparkJob, - QueryList, HiveJob, - SparkSqlJob, - PigJob, - SparkRJob, - PrestoJob, + Job, + JobMetadata, JobPlacement, - JobStatus, JobReference, - YarnApplication, - Job, JobScheduling, - SubmitJobRequest, - JobMetadata, - GetJobRequest, + JobStatus, ListJobsRequest, - UpdateJobRequest, ListJobsResponse, - CancelJobRequest, - DeleteJobRequest, + LoggingConfig, + PigJob, + PrestoJob, + PySparkJob, + QueryList, + SparkJob, + SparkRJob, + SparkSqlJob, + SubmitJobRequest, + UpdateJobRequest, + YarnApplication, ) from .operations import ( - ClusterOperationStatus, ClusterOperationMetadata, + ClusterOperationStatus, ) from .workflow_templates import ( - WorkflowTemplate, - WorkflowTemplatePlacement, - ManagedCluster, + ClusterOperation, ClusterSelector, + CreateWorkflowTemplateRequest, + DeleteWorkflowTemplateRequest, + GetWorkflowTemplateRequest, + InstantiateInlineWorkflowTemplateRequest, + InstantiateWorkflowTemplateRequest, + ListWorkflowTemplatesRequest, + ListWorkflowTemplatesResponse, + ManagedCluster, OrderedJob, - TemplateParameter, ParameterValidation, RegexValidation, + TemplateParameter, + UpdateWorkflowTemplateRequest, ValueValidation, - WorkflowMetadata, - ClusterOperation, WorkflowGraph, + WorkflowMetadata, WorkflowNode, - CreateWorkflowTemplateRequest, - GetWorkflowTemplateRequest, - InstantiateWorkflowTemplateRequest, - InstantiateInlineWorkflowTemplateRequest, - UpdateWorkflowTemplateRequest, - ListWorkflowTemplatesRequest, - 
ListWorkflowTemplatesResponse, - DeleteWorkflowTemplateRequest, + WorkflowTemplate, + WorkflowTemplatePlacement, ) __all__ = ( "AutoscalingPolicy", "BasicAutoscalingAlgorithm", "BasicYarnAutoscalingConfig", - "InstanceGroupAutoscalingPolicyConfig", "CreateAutoscalingPolicyRequest", - "GetAutoscalingPolicyRequest", - "UpdateAutoscalingPolicyRequest", "DeleteAutoscalingPolicyRequest", + "GetAutoscalingPolicyRequest", + "InstanceGroupAutoscalingPolicyConfig", "ListAutoscalingPoliciesRequest", "ListAutoscalingPoliciesResponse", - "Component", + "UpdateAutoscalingPolicyRequest", + "AcceleratorConfig", + "AutoscalingConfig", "Cluster", "ClusterConfig", - "EndpointConfig", - "AutoscalingConfig", + "ClusterMetrics", + "ClusterStatus", + "CreateClusterRequest", + "DeleteClusterRequest", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "DiskConfig", "EncryptionConfig", + "EndpointConfig", "GceClusterConfig", + "GetClusterRequest", "InstanceGroupConfig", - "ManagedGroupConfig", - "AcceleratorConfig", - "DiskConfig", - "NodeInitializationAction", - "ClusterStatus", - "SecurityConfig", "KerberosConfig", - "SoftwareConfig", "LifecycleConfig", - "ClusterMetrics", - "CreateClusterRequest", - "UpdateClusterRequest", - "DeleteClusterRequest", - "GetClusterRequest", "ListClustersRequest", "ListClustersResponse", - "DiagnoseClusterRequest", - "DiagnoseClusterResults", + "ManagedGroupConfig", + "NodeInitializationAction", "ReservationAffinity", - "LoggingConfig", + "SecurityConfig", + "SoftwareConfig", + "UpdateClusterRequest", + "CancelJobRequest", + "DeleteJobRequest", + "GetJobRequest", "HadoopJob", - "SparkJob", - "PySparkJob", - "QueryList", "HiveJob", - "SparkSqlJob", - "PigJob", - "SparkRJob", - "PrestoJob", + "Job", + "JobMetadata", "JobPlacement", - "JobStatus", "JobReference", - "YarnApplication", - "Job", "JobScheduling", - "SubmitJobRequest", - "JobMetadata", - "GetJobRequest", + "JobStatus", "ListJobsRequest", - "UpdateJobRequest", "ListJobsResponse", - "CancelJobRequest", - "DeleteJobRequest", - "ClusterOperationStatus", + "LoggingConfig", + "PigJob", + "PrestoJob", + "PySparkJob", + "QueryList", + "SparkJob", + "SparkRJob", + "SparkSqlJob", + "SubmitJobRequest", + "UpdateJobRequest", + "YarnApplication", "ClusterOperationMetadata", - "WorkflowTemplate", - "WorkflowTemplatePlacement", - "ManagedCluster", + "ClusterOperationStatus", + "Component", + "ClusterOperation", "ClusterSelector", + "CreateWorkflowTemplateRequest", + "DeleteWorkflowTemplateRequest", + "GetWorkflowTemplateRequest", + "InstantiateInlineWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "ManagedCluster", "OrderedJob", - "TemplateParameter", "ParameterValidation", "RegexValidation", + "TemplateParameter", + "UpdateWorkflowTemplateRequest", "ValueValidation", - "WorkflowMetadata", - "ClusterOperation", "WorkflowGraph", + "WorkflowMetadata", "WorkflowNode", - "CreateWorkflowTemplateRequest", - "GetWorkflowTemplateRequest", - "InstantiateWorkflowTemplateRequest", - "InstantiateInlineWorkflowTemplateRequest", - "UpdateWorkflowTemplateRequest", - "ListWorkflowTemplatesRequest", - "ListWorkflowTemplatesResponse", - "DeleteWorkflowTemplateRequest", + "WorkflowTemplate", + "WorkflowTemplatePlacement", ) diff --git a/google/cloud/dataproc_v1/types/autoscaling_policies.py b/google/cloud/dataproc_v1/types/autoscaling_policies.py index edd3806a..84259027 100644 --- a/google/cloud/dataproc_v1/types/autoscaling_policies.py +++ 
b/google/cloud/dataproc_v1/types/autoscaling_policies.py @@ -62,12 +62,12 @@ class AutoscalingPolicy(proto.Message): - For ``projects.locations.autoscalingPolicies``, the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - basic_algorithm (~.autoscaling_policies.BasicAutoscalingAlgorithm): + basic_algorithm (google.cloud.dataproc_v1.types.BasicAutoscalingAlgorithm): - worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): Required. Describes how the autoscaler will operate for primary workers. - secondary_worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): Optional. Describes how the autoscaler will operate for secondary workers. """ @@ -93,9 +93,9 @@ class BasicAutoscalingAlgorithm(proto.Message): r"""Basic algorithm for autoscaling. Attributes: - yarn_config (~.autoscaling_policies.BasicYarnAutoscalingConfig): + yarn_config (google.cloud.dataproc_v1.types.BasicYarnAutoscalingConfig): Required. YARN autoscaling configuration. - cooldown_period (~.duration.Duration): + cooldown_period (google.protobuf.duration_pb2.Duration): Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. @@ -114,7 +114,7 @@ class BasicYarnAutoscalingConfig(proto.Message): r"""Basic autoscaling configurations for YARN. Attributes: - graceful_decommission_timeout (~.duration.Duration): + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially @@ -244,13 +244,13 @@ class CreateAutoscalingPolicyRequest(proto.Message): - For ``projects.locations.autoscalingPolicies.create``, the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` - policy (~.autoscaling_policies.AutoscalingPolicy): + policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The autoscaling policy to create. """ parent = proto.Field(proto.STRING, number=1) - policy = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) + policy_ = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) class GetAutoscalingPolicyRequest(proto.Message): @@ -278,11 +278,11 @@ class UpdateAutoscalingPolicyRequest(proto.Message): r"""A request to update an autoscaling policy. Attributes: - policy (~.autoscaling_policies.AutoscalingPolicy): + policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The updated autoscaling policy. """ - policy = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) + policy_ = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) class DeleteAutoscalingPolicyRequest(proto.Message): @@ -346,7 +346,7 @@ class ListAutoscalingPoliciesResponse(proto.Message): project. Attributes: - policies (Sequence[~.autoscaling_policies.AutoscalingPolicy]): + policies (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy]): Output only. Autoscaling policies list. next_page_token (str): Output only. 
This token is included in the diff --git a/google/cloud/dataproc_v1/types/clusters.py b/google/cloud/dataproc_v1/types/clusters.py index 9cb83872..008b4866 100644 --- a/google/cloud/dataproc_v1/types/clusters.py +++ b/google/cloud/dataproc_v1/types/clusters.py @@ -69,11 +69,11 @@ class Cluster(proto.Message): Required. The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused. - config (~.gcd_clusters.ClusterConfig): + config (google.cloud.dataproc_v1.types.ClusterConfig): Required. The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated. - labels (Sequence[~.gcd_clusters.Cluster.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1.types.Cluster.LabelsEntry]): Optional. The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -81,15 +81,15 @@ class Cluster(proto.Message): 1 to 63 characters, and must conform to `RFC 1035 `__. No more than 32 labels can be associated with a cluster. - status (~.gcd_clusters.ClusterStatus): + status (google.cloud.dataproc_v1.types.ClusterStatus): Output only. Cluster status. - status_history (Sequence[~.gcd_clusters.ClusterStatus]): + status_history (Sequence[google.cloud.dataproc_v1.types.ClusterStatus]): Output only. The previous cluster status. cluster_uuid (str): Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster. - metrics (~.gcd_clusters.ClusterMetrics): + metrics (google.cloud.dataproc_v1.types.ClusterMetrics): Output only. Contains cluster daemon metrics such as HDFS and YARN stats. @@ -142,22 +142,22 @@ class ClusterConfig(proto.Message): this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. - gce_cluster_config (~.gcd_clusters.GceClusterConfig): + gce_cluster_config (google.cloud.dataproc_v1.types.GceClusterConfig): Optional. The shared Compute Engine config settings for all instances in a cluster. - master_config (~.gcd_clusters.InstanceGroupConfig): + master_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): Optional. The Compute Engine config settings for the master instance in a cluster. - worker_config (~.gcd_clusters.InstanceGroupConfig): + worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): Optional. The Compute Engine config settings for worker instances in a cluster. - secondary_worker_config (~.gcd_clusters.InstanceGroupConfig): + secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): Optional. The Compute Engine config settings for additional worker instances in a cluster. - software_config (~.gcd_clusters.SoftwareConfig): + software_config (google.cloud.dataproc_v1.types.SoftwareConfig): Optional. The config settings for software inside the cluster. - initialization_actions (Sequence[~.gcd_clusters.NodeInitializationAction]): + initialization_actions (Sequence[google.cloud.dataproc_v1.types.NodeInitializationAction]): Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's ``role`` metadata to run @@ -173,18 +173,18 @@ class ClusterConfig(proto.Message): else ... worker specific actions ... fi - encryption_config (~.gcd_clusters.EncryptionConfig): + encryption_config (google.cloud.dataproc_v1.types.EncryptionConfig): Optional. Encryption settings for the cluster. 
- autoscaling_config (~.gcd_clusters.AutoscalingConfig): + autoscaling_config (google.cloud.dataproc_v1.types.AutoscalingConfig): Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. - security_config (~.gcd_clusters.SecurityConfig): + security_config (google.cloud.dataproc_v1.types.SecurityConfig): Optional. Security settings for the cluster. - lifecycle_config (~.gcd_clusters.LifecycleConfig): + lifecycle_config (google.cloud.dataproc_v1.types.LifecycleConfig): Optional. Lifecycle setting for the cluster. - endpoint_config (~.gcd_clusters.EndpointConfig): + endpoint_config (google.cloud.dataproc_v1.types.EndpointConfig): Optional. Port/endpoint configuration for this cluster """ @@ -232,7 +232,7 @@ class EndpointConfig(proto.Message): r"""Endpoint config for this cluster Attributes: - http_ports (Sequence[~.gcd_clusters.EndpointConfig.HttpPortsEntry]): + http_ports (Sequence[google.cloud.dataproc_v1.types.EndpointConfig.HttpPortsEntry]): Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. enable_http_port_access (bool): @@ -360,11 +360,11 @@ class GceClusterConfig(proto.Message): The Compute Engine tags to add to all instances (see `Tagging instances `__). - metadata (Sequence[~.gcd_clusters.GceClusterConfig.MetadataEntry]): + metadata (Sequence[google.cloud.dataproc_v1.types.GceClusterConfig.MetadataEntry]): The Compute Engine metadata entries to add to all instances (see `Project and instance metadata `__). - reservation_affinity (~.gcd_clusters.ReservationAffinity): + reservation_affinity (google.cloud.dataproc_v1.types.ReservationAffinity): Optional. Reservation Affinity for consuming Zonal reservation. """ @@ -438,12 +438,12 @@ class InstanceGroupConfig(proto.Message): Placement `__ feature, you must use the short name of the machine type resource, for example, ``n1-standard-2``. - disk_config (~.gcd_clusters.DiskConfig): + disk_config (google.cloud.dataproc_v1.types.DiskConfig): Optional. Disk option config settings. is_preemptible (bool): Output only. Specifies that this instance group contains preemptible instances. - preemptibility (~.gcd_clusters.InstanceGroupConfig.Preemptibility): + preemptibility (google.cloud.dataproc_v1.types.InstanceGroupConfig.Preemptibility): Optional. Specifies the preemptibility of the instance group. @@ -452,12 +452,12 @@ class InstanceGroupConfig(proto.Message): The default value for secondary instances is ``PREEMPTIBLE``. - managed_group_config (~.gcd_clusters.ManagedGroupConfig): + managed_group_config (google.cloud.dataproc_v1.types.ManagedGroupConfig): Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. - accelerators (Sequence[~.gcd_clusters.AcceleratorConfig]): + accelerators (Sequence[google.cloud.dataproc_v1.types.AcceleratorConfig]): Optional. The Compute Engine accelerator configuration for these instances. min_cpu_platform (str): @@ -588,7 +588,7 @@ class NodeInitializationAction(proto.Message): executable_file (str): Required. Cloud Storage URI of executable file. - execution_timeout (~.duration.Duration): + execution_timeout (google.protobuf.duration_pb2.Duration): Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of `Duration `__). @@ -608,16 +608,16 @@ class ClusterStatus(proto.Message): r"""The status of a cluster and its instances. 
Attributes: - state (~.gcd_clusters.ClusterStatus.State): + state (google.cloud.dataproc_v1.types.ClusterStatus.State): Output only. The cluster's state. detail (str): Optional. Output only. Details of cluster's state. - state_start_time (~.timestamp.Timestamp): + state_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when this state was entered (see JSON representation of `Timestamp `__). - substate (~.gcd_clusters.ClusterStatus.Substate): + substate (google.cloud.dataproc_v1.types.ClusterStatus.Substate): Output only. Additional state information that includes status reported by the agent. """ @@ -652,7 +652,7 @@ class SecurityConfig(proto.Message): r"""Security related configuration, including Kerberos. Attributes: - kerberos_config (~.gcd_clusters.KerberosConfig): + kerberos_config (google.cloud.dataproc_v1.types.KerberosConfig): Kerberos related configuration. """ @@ -778,7 +778,7 @@ class SoftwareConfig(proto.Message): "1.2.29"), or the `"preview" version `__. If unspecified, it defaults to the latest Debian version. - properties (Sequence[~.gcd_clusters.SoftwareConfig.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.SoftwareConfig.PropertiesEntry]): Optional. The properties to set on daemon config files. Property keys are specified in ``prefix:property`` format, @@ -797,7 +797,7 @@ class SoftwareConfig(proto.Message): For more information, see `Cluster properties `__. - optional_components (Sequence[~.shared.Component]): + optional_components (Sequence[google.cloud.dataproc_v1.types.Component]): Optional. The set of components to activate on the cluster. """ @@ -815,24 +815,24 @@ class LifecycleConfig(proto.Message): r"""Specifies the cluster auto-delete schedule configuration. Attributes: - idle_delete_ttl (~.duration.Duration): + idle_delete_ttl (google.protobuf.duration_pb2.Duration): Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of `Duration `__. - auto_delete_time (~.timestamp.Timestamp): + auto_delete_time (google.protobuf.timestamp_pb2.Timestamp): Optional. The time when cluster will be auto-deleted (see JSON representation of `Timestamp `__). - auto_delete_ttl (~.duration.Duration): + auto_delete_ttl (google.protobuf.duration_pb2.Duration): Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of `Duration `__). - idle_start_time (~.timestamp.Timestamp): + idle_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of @@ -859,9 +859,9 @@ class ClusterMetrics(proto.Message): only. It may be changed before final release. Attributes: - hdfs_metrics (Sequence[~.gcd_clusters.ClusterMetrics.HdfsMetricsEntry]): + hdfs_metrics (Sequence[google.cloud.dataproc_v1.types.ClusterMetrics.HdfsMetricsEntry]): The HDFS metrics. - yarn_metrics (Sequence[~.gcd_clusters.ClusterMetrics.YarnMetricsEntry]): + yarn_metrics (Sequence[google.cloud.dataproc_v1.types.ClusterMetrics.YarnMetricsEntry]): The YARN metrics. """ @@ -880,7 +880,7 @@ class CreateClusterRequest(proto.Message): region (str): Required. The Dataproc region in which to handle the request. 
- cluster (~.gcd_clusters.Cluster): + cluster (google.cloud.dataproc_v1.types.Cluster): Required. The cluster to create. request_id (str): Optional. A unique id used to identify the request. If the @@ -920,9 +920,9 @@ class UpdateClusterRequest(proto.Message): handle the request. cluster_name (str): Required. The cluster name. - cluster (~.gcd_clusters.Cluster): + cluster (google.cloud.dataproc_v1.types.Cluster): Required. The changes to the cluster. - graceful_decommission_timeout (~.duration.Duration): + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how @@ -934,7 +934,7 @@ class UpdateClusterRequest(proto.Message): `Duration `__). Only supported on Dataproc image versions 1.2 and higher. - update_mask (~.field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers in a cluster to 5, the ``update_mask`` parameter @@ -1148,7 +1148,7 @@ class ListClustersResponse(proto.Message): r"""The list of all clusters in a project. Attributes: - clusters (Sequence[~.gcd_clusters.Cluster]): + clusters (Sequence[google.cloud.dataproc_v1.types.Cluster]): Output only. The clusters in the project. next_page_token (str): Output only. This token is included in the response if there @@ -1205,7 +1205,7 @@ class ReservationAffinity(proto.Message): r"""Reservation Affinity for consuming Zonal reservation. Attributes: - consume_reservation_type (~.gcd_clusters.ReservationAffinity.Type): + consume_reservation_type (google.cloud.dataproc_v1.types.ReservationAffinity.Type): Optional. Type of reservation to consume key (str): Optional. Corresponds to the label key of diff --git a/google/cloud/dataproc_v1/types/jobs.py b/google/cloud/dataproc_v1/types/jobs.py index 84c0e3f6..cfb19555 100644 --- a/google/cloud/dataproc_v1/types/jobs.py +++ b/google/cloud/dataproc_v1/types/jobs.py @@ -57,7 +57,7 @@ class LoggingConfig(proto.Message): r"""The runtime logging config of the job. Attributes: - driver_log_levels (Sequence[~.gcd_jobs.LoggingConfig.DriverLogLevelsEntry]): + driver_log_levels (Sequence[google.cloud.dataproc_v1.types.LoggingConfig.DriverLogLevelsEntry]): The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: @@ -120,13 +120,13 @@ class HadoopJob(proto.Message): extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. - properties (Sequence[~.gcd_jobs.HadoopJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.HadoopJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -177,14 +177,14 @@ class SparkJob(proto.Message): extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. 
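# A minimal sketch of the ``update_mask`` usage described in the UpdateClusterRequest
# hunk above: resize primary workers to 5. Project, region, and cluster names are
# placeholders, and the request types are assumed to be re-exported at the package root.
from google.cloud import dataproc_v1
from google.protobuf import field_mask_pb2

update_request = dataproc_v1.UpdateClusterRequest(
    project_id="my-project",
    region="us-central1",
    cluster_name="my-cluster",
    cluster=dataproc_v1.Cluster(
        config=dataproc_v1.ClusterConfig(
            worker_config=dataproc_v1.InstanceGroupConfig(num_instances=5),
        ),
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["config.worker_config.num_instances"]),
)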
- properties (Sequence[~.gcd_jobs.SparkJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.SparkJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -236,7 +236,7 @@ class PySparkJob(proto.Message): extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Sequence[~.gcd_jobs.PySparkJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.PySparkJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc @@ -244,7 +244,7 @@ class PySparkJob(proto.Message): set in /etc/spark/conf/spark-defaults.conf and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -301,16 +301,16 @@ class HiveJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains Hive queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1.types.QueryList): A list of queries. continue_on_failure (bool): Optional. Whether to continue executing queries if a query fails. The default value is ``false``. Setting to ``true`` can be useful when executing independent parallel queries. - script_variables (Sequence[~.gcd_jobs.HiveJob.ScriptVariablesEntry]): + script_variables (Sequence[google.cloud.dataproc_v1.types.HiveJob.ScriptVariablesEntry]): Optional. Mapping of query variable names to values (equivalent to the Hive command: ``SET name="value";``). - properties (Sequence[~.gcd_jobs.HiveJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.HiveJob.PropertiesEntry]): Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties @@ -346,13 +346,13 @@ class SparkSqlJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains SQL queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1.types.QueryList): A list of queries. - script_variables (Sequence[~.gcd_jobs.SparkSqlJob.ScriptVariablesEntry]): + script_variables (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.ScriptVariablesEntry]): Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET ``name="value";``). - properties (Sequence[~.gcd_jobs.SparkSqlJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.SparkSqlJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the @@ -360,7 +360,7 @@ class SparkSqlJob(proto.Message): jar_file_uris (Sequence[str]): Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. 
""" @@ -388,16 +388,16 @@ class PigJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains the Pig queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1.types.QueryList): A list of queries. continue_on_failure (bool): Optional. Whether to continue executing queries if a query fails. The default value is ``false``. Setting to ``true`` can be useful when executing independent parallel queries. - script_variables (Sequence[~.gcd_jobs.PigJob.ScriptVariablesEntry]): + script_variables (Sequence[google.cloud.dataproc_v1.types.PigJob.ScriptVariablesEntry]): Optional. Mapping of query variable names to values (equivalent to the Pig command: ``name=[value]``). - properties (Sequence[~.gcd_jobs.PigJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.PigJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties @@ -407,7 +407,7 @@ class PigJob(proto.Message): Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -452,7 +452,7 @@ class SparkRJob(proto.Message): extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Sequence[~.gcd_jobs.SparkRJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.SparkRJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc @@ -460,7 +460,7 @@ class SparkRJob(proto.Message): set in /etc/spark/conf/spark-defaults.conf and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -489,7 +489,7 @@ class PrestoJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains SQL queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1.types.QueryList): A list of queries. continue_on_failure (bool): Optional. Whether to continue executing queries if a query @@ -502,12 +502,12 @@ class PrestoJob(proto.Message): client_tags (Sequence[str]): Optional. Presto client tags to attach to this query - properties (Sequence[~.gcd_jobs.PrestoJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1.types.PrestoJob.PropertiesEntry]): Optional. A mapping of property names to values. Used to set Presto `session properties `__ Equivalent to using the --session flag in the Presto CLI - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -550,17 +550,17 @@ class JobStatus(proto.Message): r"""Dataproc job status. Attributes: - state (~.gcd_jobs.JobStatus.State): + state (google.cloud.dataproc_v1.types.JobStatus.State): Output only. A state message specifying the overall job state. details (str): Optional. Output only. Job state details, such as an error description if the state is ERROR. - state_start_time (~.timestamp.Timestamp): + state_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time when this state was entered. 
- substate (~.gcd_jobs.JobStatus.Substate): + substate (google.cloud.dataproc_v1.types.JobStatus.Substate): Output only. Additional state information, which includes status reported by the agent. """ @@ -632,7 +632,7 @@ class YarnApplication(proto.Message): Attributes: name (str): Required. The application name. - state (~.gcd_jobs.YarnApplication.State): + state (google.cloud.dataproc_v1.types.YarnApplication.State): Required. The application state. progress (float): Required. The numerical progress of the @@ -673,37 +673,37 @@ class Job(proto.Message): r"""A Dataproc job resource. Attributes: - reference (~.gcd_jobs.JobReference): + reference (google.cloud.dataproc_v1.types.JobReference): Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id. - placement (~.gcd_jobs.JobPlacement): + placement (google.cloud.dataproc_v1.types.JobPlacement): Required. Job information, including how, when, and where to run the job. - hadoop_job (~.gcd_jobs.HadoopJob): + hadoop_job (google.cloud.dataproc_v1.types.HadoopJob): Optional. Job is a Hadoop job. - spark_job (~.gcd_jobs.SparkJob): + spark_job (google.cloud.dataproc_v1.types.SparkJob): Optional. Job is a Spark job. - pyspark_job (~.gcd_jobs.PySparkJob): + pyspark_job (google.cloud.dataproc_v1.types.PySparkJob): Optional. Job is a PySpark job. - hive_job (~.gcd_jobs.HiveJob): + hive_job (google.cloud.dataproc_v1.types.HiveJob): Optional. Job is a Hive job. - pig_job (~.gcd_jobs.PigJob): + pig_job (google.cloud.dataproc_v1.types.PigJob): Optional. Job is a Pig job. - spark_r_job (~.gcd_jobs.SparkRJob): + spark_r_job (google.cloud.dataproc_v1.types.SparkRJob): Optional. Job is a SparkR job. - spark_sql_job (~.gcd_jobs.SparkSqlJob): + spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob): Optional. Job is a SparkSql job. - presto_job (~.gcd_jobs.PrestoJob): + presto_job (google.cloud.dataproc_v1.types.PrestoJob): Optional. Job is a Presto job. - status (~.gcd_jobs.JobStatus): + status (google.cloud.dataproc_v1.types.JobStatus): Output only. The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields. - status_history (Sequence[~.gcd_jobs.JobStatus]): + status_history (Sequence[google.cloud.dataproc_v1.types.JobStatus]): Output only. The previous job status. - yarn_applications (Sequence[~.gcd_jobs.YarnApplication]): + yarn_applications (Sequence[google.cloud.dataproc_v1.types.YarnApplication]): Output only. The collection of YARN applications spun up by this job. @@ -717,7 +717,7 @@ class Job(proto.Message): control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as ``driver_output_uri``. - labels (Sequence[~.gcd_jobs.Job.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1.types.Job.LabelsEntry]): Optional. The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -725,7 +725,7 @@ class Job(proto.Message): 1 to 63 characters, and must conform to `RFC 1035 `__. No more than 32 labels can be associated with a job. - scheduling (~.gcd_jobs.JobScheduling): + scheduling (google.cloud.dataproc_v1.types.JobScheduling): Optional. Job scheduling configuration. job_uuid (str): Output only. 
A UUID that uniquely identifies a job within @@ -823,7 +823,7 @@ class SubmitJobRequest(proto.Message): region (str): Required. The Dataproc region in which to handle the request. - job (~.gcd_jobs.Job): + job (google.cloud.dataproc_v1.types.Job): Required. The job resource. request_id (str): Optional. A unique id used to identify the request. If the @@ -856,11 +856,11 @@ class JobMetadata(proto.Message): Attributes: job_id (str): Output only. The job id. - status (~.gcd_jobs.JobStatus): + status (google.cloud.dataproc_v1.types.JobStatus): Output only. Most recent job status. operation_type (str): Output only. Operation type. - start_time (~.timestamp.Timestamp): + start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Job submission time. """ @@ -916,7 +916,7 @@ class ListJobsRequest(proto.Message): Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster. - job_state_matcher (~.gcd_jobs.ListJobsRequest.JobStateMatcher): + job_state_matcher (google.cloud.dataproc_v1.types.ListJobsRequest.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. (default = match ALL jobs). @@ -974,9 +974,9 @@ class UpdateJobRequest(proto.Message): handle the request. job_id (str): Required. The job ID. - job (~.gcd_jobs.Job): + job (google.cloud.dataproc_v1.types.Job): Required. The changes to the job. - update_mask (~.field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the @@ -999,7 +999,7 @@ class ListJobsResponse(proto.Message): r"""A list of jobs in a project. Attributes: - jobs (Sequence[~.gcd_jobs.Job]): + jobs (Sequence[google.cloud.dataproc_v1.types.Job]): Output only. Jobs list. next_page_token (str): Optional. This token is included in the response if there diff --git a/google/cloud/dataproc_v1/types/operations.py b/google/cloud/dataproc_v1/types/operations.py index 042e8c77..4584b2ab 100644 --- a/google/cloud/dataproc_v1/types/operations.py +++ b/google/cloud/dataproc_v1/types/operations.py @@ -31,7 +31,7 @@ class ClusterOperationStatus(proto.Message): r"""The status of the operation. Attributes: - state (~.operations.ClusterOperationStatus.State): + state (google.cloud.dataproc_v1.types.ClusterOperationStatus.State): Output only. A message containing the operation state. inner_state (str): @@ -40,7 +40,7 @@ class ClusterOperationStatus(proto.Message): details (str): Output only. A message containing any operation metadata details. - state_start_time (~.timestamp.Timestamp): + state_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time this state was entered. """ @@ -71,15 +71,15 @@ class ClusterOperationMetadata(proto.Message): operation. cluster_uuid (str): Output only. Cluster UUID for the operation. - status (~.operations.ClusterOperationStatus): + status (google.cloud.dataproc_v1.types.ClusterOperationStatus): Output only. Current operation status. - status_history (Sequence[~.operations.ClusterOperationStatus]): + status_history (Sequence[google.cloud.dataproc_v1.types.ClusterOperationStatus]): Output only. The previous operation status. operation_type (str): Output only. The operation type. description (str): Output only. Short description of operation. 
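# A minimal sketch of the Job and SubmitJobRequest messages from the jobs.py hunks
# above: exactly one job-type field (here ``pyspark_job``) is set. The bucket,
# cluster, project, and region values are placeholders.
from google.cloud import dataproc_v1

job = dataproc_v1.Job(
    placement=dataproc_v1.JobPlacement(cluster_name="my-cluster"),
    pyspark_job=dataproc_v1.PySparkJob(main_python_file_uri="gs://my-bucket/wordcount.py"),
)

submit_request = dataproc_v1.SubmitJobRequest(
    project_id="my-project",
    region="us-central1",
    job=job,
)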
- labels (Sequence[~.operations.ClusterOperationMetadata.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1.types.ClusterOperationMetadata.LabelsEntry]): Output only. Labels associated with the operation warnings (Sequence[str]): diff --git a/google/cloud/dataproc_v1/types/workflow_templates.py b/google/cloud/dataproc_v1/types/workflow_templates.py index 50e8a469..027fbc74 100644 --- a/google/cloud/dataproc_v1/types/workflow_templates.py +++ b/google/cloud/dataproc_v1/types/workflow_templates.py @@ -81,12 +81,12 @@ class WorkflowTemplate(proto.Message): ``version`` field filled in with the current server version. The user updates other fields in the template, then returns it as part of the ``UpdateWorkflowTemplate`` request. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time template was created. - update_time (~.timestamp.Timestamp): + update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time template was last updated. - labels (Sequence[~.workflow_templates.WorkflowTemplate.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate.LabelsEntry]): Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. @@ -100,13 +100,13 @@ class WorkflowTemplate(proto.Message): 1035 `__. No more than 32 labels can be associated with a template. - placement (~.workflow_templates.WorkflowTemplatePlacement): + placement (google.cloud.dataproc_v1.types.WorkflowTemplatePlacement): Required. WorkflowTemplate scheduling information. - jobs (Sequence[~.workflow_templates.OrderedJob]): + jobs (Sequence[google.cloud.dataproc_v1.types.OrderedJob]): Required. The Directed Acyclic Graph of Jobs to submit. - parameters (Sequence[~.workflow_templates.TemplateParameter]): + parameters (Sequence[google.cloud.dataproc_v1.types.TemplateParameter]): Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is @@ -142,9 +142,9 @@ class WorkflowTemplatePlacement(proto.Message): Either ``managed_cluster`` or ``cluster_selector`` is required. Attributes: - managed_cluster (~.workflow_templates.ManagedCluster): + managed_cluster (google.cloud.dataproc_v1.types.ManagedCluster): A cluster that is managed by the workflow. - cluster_selector (~.workflow_templates.ClusterSelector): + cluster_selector (google.cloud.dataproc_v1.types.ClusterSelector): Optional. A selector that chooses target cluster for jobs based on metadata. @@ -174,9 +174,9 @@ class ManagedCluster(proto.Message): begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. - config (~.clusters.ClusterConfig): + config (google.cloud.dataproc_v1.types.ClusterConfig): Required. The cluster configuration. - labels (Sequence[~.workflow_templates.ManagedCluster.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1.types.ManagedCluster.LabelsEntry]): Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and @@ -209,7 +209,7 @@ class ClusterSelector(proto.Message): selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used. - cluster_labels (Sequence[~.workflow_templates.ClusterSelector.ClusterLabelsEntry]): + cluster_labels (Sequence[google.cloud.dataproc_v1.types.ClusterSelector.ClusterLabelsEntry]): Required. The cluster labels. 
Cluster must have all labels to match. """ @@ -236,23 +236,23 @@ class OrderedJob(proto.Message): underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - hadoop_job (~.gcd_jobs.HadoopJob): + hadoop_job (google.cloud.dataproc_v1.types.HadoopJob): Optional. Job is a Hadoop job. - spark_job (~.gcd_jobs.SparkJob): + spark_job (google.cloud.dataproc_v1.types.SparkJob): Optional. Job is a Spark job. - pyspark_job (~.gcd_jobs.PySparkJob): + pyspark_job (google.cloud.dataproc_v1.types.PySparkJob): Optional. Job is a PySpark job. - hive_job (~.gcd_jobs.HiveJob): + hive_job (google.cloud.dataproc_v1.types.HiveJob): Optional. Job is a Hive job. - pig_job (~.gcd_jobs.PigJob): + pig_job (google.cloud.dataproc_v1.types.PigJob): Optional. Job is a Pig job. - spark_r_job (~.gcd_jobs.SparkRJob): + spark_r_job (google.cloud.dataproc_v1.types.SparkRJob): Optional. Job is a SparkR job. - spark_sql_job (~.gcd_jobs.SparkSqlJob): + spark_sql_job (google.cloud.dataproc_v1.types.SparkSqlJob): Optional. Job is a SparkSql job. - presto_job (~.gcd_jobs.PrestoJob): + presto_job (google.cloud.dataproc_v1.types.PrestoJob): Optional. Job is a Presto job. - labels (Sequence[~.workflow_templates.OrderedJob.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1.types.OrderedJob.LabelsEntry]): Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and @@ -264,7 +264,7 @@ class OrderedJob(proto.Message): [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 32 labels can be associated with a given job. - scheduling (~.gcd_jobs.JobScheduling): + scheduling (google.cloud.dataproc_v1.types.JobScheduling): Optional. Job scheduling configuration. prerequisite_step_ids (Sequence[str]): Optional. The optional list of prerequisite job step_ids. If @@ -387,7 +387,7 @@ class TemplateParameter(proto.Message): description (str): Optional. Brief description of the parameter. Must not exceed 1024 characters. - validation (~.workflow_templates.ParameterValidation): + validation (google.cloud.dataproc_v1.types.ParameterValidation): Optional. Validation rules to be applied to this parameter's value. """ @@ -405,9 +405,9 @@ class ParameterValidation(proto.Message): r"""Configuration for parameter validation. Attributes: - regex (~.workflow_templates.RegexValidation): + regex (google.cloud.dataproc_v1.types.RegexValidation): Validation based on regular expressions. - values (~.workflow_templates.ValueValidation): + values (google.cloud.dataproc_v1.types.ValueValidation): Validation based on a list of allowed values. """ @@ -465,24 +465,24 @@ class WorkflowMetadata(proto.Message): version (int): Output only. The version of template at the time of workflow instantiation. - create_cluster (~.workflow_templates.ClusterOperation): + create_cluster (google.cloud.dataproc_v1.types.ClusterOperation): Output only. The create cluster operation metadata. - graph (~.workflow_templates.WorkflowGraph): + graph (google.cloud.dataproc_v1.types.WorkflowGraph): Output only. The workflow graph. - delete_cluster (~.workflow_templates.ClusterOperation): + delete_cluster (google.cloud.dataproc_v1.types.ClusterOperation): Output only. The delete cluster operation metadata. - state (~.workflow_templates.WorkflowMetadata.State): + state (google.cloud.dataproc_v1.types.WorkflowMetadata.State): Output only. The workflow state. cluster_name (str): Output only. The name of the target cluster. 
- parameters (Sequence[~.workflow_templates.WorkflowMetadata.ParametersEntry]): + parameters (Sequence[google.cloud.dataproc_v1.types.WorkflowMetadata.ParametersEntry]): Map from parameter names to values that were used for those parameters. - start_time (~.timestamp.Timestamp): + start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Workflow start time. - end_time (~.timestamp.Timestamp): + end_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Workflow end time. cluster_uuid (str): Output only. The UUID of target cluster. @@ -541,7 +541,7 @@ class WorkflowGraph(proto.Message): r"""The workflow graph. Attributes: - nodes (Sequence[~.workflow_templates.WorkflowNode]): + nodes (Sequence[google.cloud.dataproc_v1.types.WorkflowNode]): Output only. The workflow nodes. """ @@ -559,7 +559,7 @@ class WorkflowNode(proto.Message): job_id (str): Output only. The job id; populated after the node enters RUNNING state. - state (~.workflow_templates.WorkflowNode.NodeState): + state (google.cloud.dataproc_v1.types.WorkflowNode.NodeState): Output only. The node state. error (str): Output only. The error detail. @@ -601,7 +601,7 @@ class CreateWorkflowTemplateRequest(proto.Message): - For ``projects.locations.workflowTemplates.create``, the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` - template (~.workflow_templates.WorkflowTemplate): + template (google.cloud.dataproc_v1.types.WorkflowTemplate): Required. The Dataproc workflow template to create. """ @@ -675,7 +675,7 @@ class InstantiateWorkflowTemplateRequest(proto.Message): The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. - parameters (Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]): + parameters (Sequence[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. @@ -708,7 +708,7 @@ class InstantiateInlineWorkflowTemplateRequest(proto.Message): ``projects.locations.workflowTemplates.instantiateinline``, the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` - template (~.workflow_templates.WorkflowTemplate): + template (google.cloud.dataproc_v1.types.WorkflowTemplate): Required. The workflow template to instantiate. request_id (str): @@ -735,7 +735,7 @@ class UpdateWorkflowTemplateRequest(proto.Message): r"""A request to update a workflow template. Attributes: - template (~.workflow_templates.WorkflowTemplate): + template (google.cloud.dataproc_v1.types.WorkflowTemplate): Required. The updated workflow template. The ``template.version`` field must match the current @@ -782,7 +782,7 @@ class ListWorkflowTemplatesResponse(proto.Message): project. Attributes: - templates (Sequence[~.workflow_templates.WorkflowTemplate]): + templates (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate]): Output only. WorkflowTemplates list. next_page_token (str): Output only. 
This token is included in the response if there diff --git a/google/cloud/dataproc_v1beta2/__init__.py b/google/cloud/dataproc_v1beta2/__init__.py index f5dee477..1a0d3c1a 100644 --- a/google/cloud/dataproc_v1beta2/__init__.py +++ b/google/cloud/dataproc_v1beta2/__init__.py @@ -116,6 +116,7 @@ "CancelJobRequest", "Cluster", "ClusterConfig", + "ClusterControllerClient", "ClusterMetrics", "ClusterOperation", "ClusterOperationMetadata", @@ -193,7 +194,6 @@ "WorkflowNode", "WorkflowTemplate", "WorkflowTemplatePlacement", - "WorkflowTemplateServiceClient", "YarnApplication", - "ClusterControllerClient", + "WorkflowTemplateServiceClient", ) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py index d3f3c9c9..cfc8452e 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py @@ -86,7 +86,36 @@ class AutoscalingPolicyServiceAsyncClient: AutoscalingPolicyServiceClient.parse_common_location_path ) - from_service_account_file = AutoscalingPolicyServiceClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. + """ + return AutoscalingPolicyServiceClient.from_service_account_info.__func__(AutoscalingPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceAsyncClient: The constructed client. + """ + return AutoscalingPolicyServiceClient.from_service_account_file.__func__(AutoscalingPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -156,7 +185,7 @@ async def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -164,7 +193,7 @@ async def create_autoscaling_policy( r"""Creates new autoscaling policy. Args: - request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.CreateAutoscalingPolicyRequest`): The request object. A request to create an autoscaling policy. parent (:class:`str`): @@ -180,13 +209,15 @@ async def create_autoscaling_policy( ``projects.locations.autoscalingPolicies.create``, the resource name has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
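# A minimal sketch of the ``from_service_account_info`` entry point added above,
# which accepts an in-memory service-account mapping instead of a key file path;
# ``service-account.json`` is a placeholder.
import json

from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import (
    AutoscalingPolicyServiceAsyncClient,
)

with open("service-account.json") as fp:
    info = json.load(fp)

client = AutoscalingPolicyServiceAsyncClient.from_service_account_info(info)
# The file-based variant remains available:
# AutoscalingPolicyServiceAsyncClient.from_service_account_file("service-account.json")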
- policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): Required. The autoscaling policy to create. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -197,7 +228,7 @@ async def create_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -205,7 +236,7 @@ async def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) + has_flattened_params = any([parent, policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -219,8 +250,8 @@ async def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -246,7 +277,7 @@ async def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -257,13 +288,14 @@ async def update_autoscaling_policy( replacements. Args: - request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest`): The request object. A request to update an autoscaling policy. - policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): Required. The updated autoscaling policy. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -274,7 +306,7 @@ async def update_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -282,7 +314,7 @@ async def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) + has_flattened_params = any([policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -294,8 +326,8 @@ async def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
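# A minimal sketch of the renamed flattened keyword shown above: callers pass
# ``policy_`` rather than ``policy``. Project and region are placeholders, and
# credentials are assumed to come from the environment.
from google.cloud import dataproc_v1beta2
from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import (
    AutoscalingPolicyServiceAsyncClient,
)

async def create_policy() -> dataproc_v1beta2.AutoscalingPolicy:
    client = AutoscalingPolicyServiceAsyncClient()
    return await client.create_autoscaling_policy(
        parent="projects/my-project/regions/us-central1",
        policy_=dataproc_v1beta2.AutoscalingPolicy(id="example-policy"),
    )

# Run with: asyncio.run(create_policy())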
@@ -308,6 +340,7 @@ async def update_autoscaling_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -339,7 +372,7 @@ async def get_autoscaling_policy( r"""Retrieves autoscaling policy. Args: - request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.GetAutoscalingPolicyRequest`): The request object. A request to fetch an autoscaling policy. name (:class:`str`): @@ -355,6 +388,7 @@ async def get_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -366,7 +400,7 @@ async def get_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -400,6 +434,7 @@ async def get_autoscaling_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -429,7 +464,7 @@ async def list_autoscaling_policies( r"""Lists autoscaling policies in the project. Args: - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest`): The request object. A request to list autoscaling policies in a project. parent (:class:`str`): @@ -445,6 +480,7 @@ async def list_autoscaling_policies( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -456,7 +492,7 @@ async def list_autoscaling_policies( sent along with the request as metadata. Returns: - ~.pagers.ListAutoscalingPoliciesAsyncPager: + google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesAsyncPager: A response to a request to list autoscaling policies in a project. Iterating over this object will yield @@ -493,6 +529,7 @@ async def list_autoscaling_policies( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -530,7 +567,7 @@ async def delete_autoscaling_policy( more clusters. Args: - request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteAutoscalingPolicyRequest`): The request object. A request to delete an autoscaling policy. Autoscaling policies in use by one or more clusters will @@ -550,6 +587,7 @@ async def delete_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
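# A minimal sketch of what the ``deadline=600.0`` added in the hunks above amounts
# to: a total time budget across retry attempts, on top of the exception predicate.
# The backoff numbers below are illustrative, not taken from this patch.
from google.api_core import exceptions
from google.api_core import retry as retries

default_retry = retries.Retry(
    initial=0.1,
    maximum=60.0,
    multiplier=1.3,
    predicate=retries.if_exception_type(
        exceptions.DeadlineExceeded,
        exceptions.ServiceUnavailable,
    ),
    deadline=600.0,  # stop retrying once 10 minutes have elapsed in total
)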
diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py index bc80019f..aad6370c 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py @@ -114,6 +114,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -126,7 +142,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + AutoscalingPolicyServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -236,10 +252,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport (Union[str, AutoscalingPolicyServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -275,21 +291,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. 
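The new ``from_service_account_info`` classmethod accepts an already-parsed service account mapping rather than a filename. A usage sketch, not part of this patch (the key file path is a placeholder):

    import json

    from google.cloud import dataproc_v1beta2

    with open("service-account.json") as fp:  # placeholder path
        info = json.load(fp)

    client = dataproc_v1beta2.AutoscalingPolicyServiceClient.from_service_account_info(info)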
if client_options.api_endpoint is not None: @@ -332,7 +344,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -342,7 +354,7 @@ def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -350,10 +362,10 @@ def create_autoscaling_policy( r"""Creates new autoscaling policy. Args: - request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1beta2.types.CreateAutoscalingPolicyRequest): The request object. A request to create an autoscaling policy. - parent (:class:`str`): + parent (str): Required. The "resource name" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -366,13 +378,15 @@ def create_autoscaling_policy( ``projects.locations.autoscalingPolicies.create``, the resource name has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The autoscaling policy to create. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -383,7 +397,7 @@ def create_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -391,7 +405,7 @@ def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy]) + has_flattened_params = any([parent, policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -410,8 +424,8 @@ def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -435,7 +449,7 @@ def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy_: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -446,13 +460,14 @@ def update_autoscaling_policy( replacements. Args: - request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest): The request object. A request to update an autoscaling policy. 
- policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The updated autoscaling policy. - This corresponds to the ``policy`` field + + This corresponds to the ``policy_`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -463,7 +478,7 @@ def update_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -471,7 +486,7 @@ def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy]) + has_flattened_params = any([policy_]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -488,8 +503,8 @@ def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy is not None: - request.policy = policy + if policy_ is not None: + request.policy_ = policy_ # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -523,10 +538,10 @@ def get_autoscaling_policy( r"""Retrieves autoscaling policy. Args: - request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1beta2.types.GetAutoscalingPolicyRequest): The request object. A request to fetch an autoscaling policy. - name (:class:`str`): + name (str): Required. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. @@ -539,6 +554,7 @@ def get_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -550,7 +566,7 @@ def get_autoscaling_policy( sent along with the request as metadata. Returns: - ~.autoscaling_policies.AutoscalingPolicy: + google.cloud.dataproc_v1beta2.types.AutoscalingPolicy: Describes an autoscaling policy for Dataproc cluster autoscaler. @@ -606,10 +622,10 @@ def list_autoscaling_policies( r"""Lists autoscaling policies in the project. Args: - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): The request object. A request to list autoscaling policies in a project. - parent (:class:`str`): + parent (str): Required. The "resource name" of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -622,6 +638,7 @@ def list_autoscaling_policies( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -633,7 +650,7 @@ def list_autoscaling_policies( sent along with the request as metadata. Returns: - ~.pagers.ListAutoscalingPoliciesPager: + google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.pagers.ListAutoscalingPoliciesPager: A response to a request to list autoscaling policies in a project. 
Iterating over this object will yield @@ -702,12 +719,12 @@ def delete_autoscaling_policy( more clusters. Args: - request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + request (google.cloud.dataproc_v1beta2.types.DeleteAutoscalingPolicyRequest): The request object. A request to delete an autoscaling policy. Autoscaling policies in use by one or more clusters will not be deleted. - name (:class:`str`): + name (str): Required. The "resource name" of the autoscaling policy, as described in https://cloud.google.com/apis/design/resource_names. @@ -722,6 +739,7 @@ def delete_autoscaling_policy( the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py index 4a9a6942..b74bf96a 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1beta2.types import autoscaling_policies @@ -24,7 +33,7 @@ class ListAutoscalingPoliciesPager: """A pager for iterating through ``list_autoscaling_policies`` requests. This class thinly wraps an initial - :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` object, and provides an ``__iter__`` method to iterate through its ``policies`` field. @@ -33,7 +42,7 @@ class ListAutoscalingPoliciesPager: through the ``policies`` field on the corresponding responses. - All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): The initial request object. - response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + response (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListAutoscalingPoliciesAsyncPager: """A pager for iterating through ``list_autoscaling_policies`` requests. This class thinly wraps an initial - :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` object, and provides an ``__aiter__`` method to iterate through its ``policies`` field. 
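The pager classes updated above hide page tokens from the caller; iterating the async pager transparently issues follow-up ``ListAutoscalingPolicies`` calls. A minimal sketch (the parent value is a placeholder):

    import asyncio

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import (
        AutoscalingPolicyServiceAsyncClient,
    )

    async def show_policies():
        client = AutoscalingPolicyServiceAsyncClient()
        pager = await client.list_autoscaling_policies(
            parent="projects/my-project/locations/global"  # placeholder
        )
        async for policy in pager:  # additional pages are fetched lazily
            print(policy.id)

    asyncio.run(show_policies())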
@@ -95,7 +104,7 @@ class ListAutoscalingPoliciesAsyncPager: through the ``policies`` field on the corresponding responses. - All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -115,9 +124,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + request (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesRequest): The initial request object. - response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + response (google.cloud.dataproc_v1beta2.types.ListAutoscalingPoliciesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py index bc039c5e..64375cee 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py @@ -68,10 +68,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -79,6 +79,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -88,20 +91,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -119,6 +119,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -132,6 +133,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -145,6 +147,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py index 01896af1..28018b7c 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py @@ -59,6 +59,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -89,6 +90,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -103,72 +108,60 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
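The new ``client_cert_source_for_mtls`` transport argument documented above is normally fed from ``client_options.client_cert_source``, as the client constructor changes earlier in this patch show, and only takes effect when ``GOOGLE_API_USE_CLIENT_CERTIFICATE`` is ``true``. A sketch of that user-facing path (certificate file names are placeholders):

    import os

    from google.api_core.client_options import ClientOptions
    from google.cloud import dataproc_v1beta2

    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"

    def client_cert_source():
        # Return (certificate_chain, private_key) as PEM-encoded bytes.
        with open("client-cert.pem", "rb") as cert, open("client-key.pem", "rb") as key:
            return cert.read(), key.read()

    options = ClientOptions(client_cert_source=client_cert_source)
    client = dataproc_v1beta2.AutoscalingPolicyServiceClient(client_options=options)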
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials - - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -176,17 +169,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -200,7 +184,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. 
If diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py index cf4811e1..53d446d8 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -63,7 +63,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -103,6 +103,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -134,12 +135,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -148,72 +153,60 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials - - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -221,17 +214,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py index 9ed3eff3..a9f5f7e7 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py @@ -83,7 +83,36 @@ class ClusterControllerAsyncClient: ClusterControllerClient.parse_common_location_path ) - from_service_account_file = ClusterControllerClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. + """ + return ClusterControllerClient.from_service_account_info.__func__(ClusterControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerAsyncClient: The constructed client. + """ + return ClusterControllerClient.from_service_account_file.__func__(ClusterControllerAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -164,22 +193,24 @@ async def create_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.CreateClusterRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.CreateClusterRequest`): The request object. A request to create a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (:class:`google.cloud.dataproc_v1beta2.types.Cluster`): Required. The cluster to create. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this @@ -192,13 +223,11 @@ async def create_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -232,6 +261,7 @@ async def create_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -270,17 +300,19 @@ async def update_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.UpdateClusterRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateClusterRequest`): The request object. A request to update a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
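The async client now exposes real ``from_service_account_info`` / ``from_service_account_file`` classmethods (delegating to the sync client via ``__func__``) instead of a bare alias. A usage sketch, not part of this patch (the key path and resource names are placeholders):

    import asyncio

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerAsyncClient,
    )

    async def describe_cluster():
        client = ClusterControllerAsyncClient.from_service_account_file(
            "service-account.json"  # placeholder path
        )
        cluster = await client.get_cluster(
            project_id="my-project",
            region="us-central1",
            cluster_name="my-cluster",
        )
        print(cluster.status.state)

    asyncio.run(describe_cluster())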
@@ -289,12 +321,12 @@ async def update_cluster( This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (:class:`google.cloud.dataproc_v1beta2.types.Cluster`): Required. The changes to the cluster. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers in a cluster to 5, the ``update_mask`` @@ -365,6 +397,7 @@ async def update_cluster( autoscaling policies + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -376,13 +409,11 @@ async def update_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -422,6 +453,7 @@ async def update_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -458,18 +490,20 @@ async def delete_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.DeleteClusterRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteClusterRequest`): The request object. A request to delete a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -486,24 +520,22 @@ async def delete_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -537,6 +569,7 @@ async def delete_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -571,19 +604,21 @@ async def get_cluster( project. Args: - request (:class:`~.clusters.GetClusterRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.GetClusterRequest`): The request object. Request to get the resource representation for a cluster in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -600,7 +635,7 @@ async def get_cluster( sent along with the request as metadata. Returns: - ~.clusters.Cluster: + google.cloud.dataproc_v1beta2.types.Cluster: Describes the identifying information, config, and status of a cluster of Compute Engine instances. @@ -641,6 +676,7 @@ async def get_cluster( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -667,19 +703,21 @@ async def list_clusters( alphabetically. Args: - request (:class:`~.clusters.ListClustersRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.ListClustersRequest`): The request object. A request to list the clusters in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -707,6 +745,7 @@ async def list_clusters( status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -718,7 +757,7 @@ async def list_clusters( sent along with the request as metadata. Returns: - ~.pagers.ListClustersAsyncPager: + google.cloud.dataproc_v1beta2.services.cluster_controller.pagers.ListClustersAsyncPager: The list of all clusters in a project. Iterating over this object will yield @@ -761,6 +800,7 @@ async def list_clusters( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, @@ -798,19 +838,21 @@ async def diagnose_cluster( contains [Empty][google.protobuf.Empty]. Args: - request (:class:`~.clusters.DiagnoseClusterRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.DiagnoseClusterRequest`): The request object. A request to collect cluster diagnostic information. 
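``update_cluster`` and ``delete_cluster`` above both return long-running operations. A sketch of driving an update through a ``FieldMask``, following the ``update_mask`` documentation earlier in this file (identifiers and the chosen path are illustrative assumptions):

    from google.cloud import dataproc_v1beta2
    from google.protobuf import field_mask_pb2

    client = dataproc_v1beta2.ClusterControllerClient()

    operation = client.update_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        cluster=dataproc_v1beta2.Cluster(
            config=dataproc_v1beta2.ClusterConfig(
                secondary_worker_config=dataproc_v1beta2.InstanceGroupConfig(
                    num_instances=5
                )
            )
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.secondary_worker_config.num_instances"]
        ),
    )
    cluster = operation.result()  # blocks until the long-running operation completes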
project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -827,24 +869,22 @@ async def diagnose_cluster( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -878,6 +918,7 @@ async def diagnose_cluster( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py index 4be2492d..167dfd57 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py @@ -119,6 +119,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -131,7 +147,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + ClusterControllerClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -239,10 +255,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. 
- transport (Union[str, ~.ClusterControllerTransport]): The + transport (Union[str, ClusterControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -278,21 +294,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -335,7 +347,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -357,22 +369,24 @@ def create_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.CreateClusterRequest`): + request (google.cloud.dataproc_v1beta2.types.CreateClusterRequest): The request object. A request to create a cluster. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (google.cloud.dataproc_v1beta2.types.Cluster): Required. The cluster to create. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this @@ -385,13 +399,11 @@ def create_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -458,31 +470,33 @@ def update_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.UpdateClusterRequest`): + request (google.cloud.dataproc_v1beta2.types.UpdateClusterRequest): The request object. 
A request to update a cluster. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster (:class:`~.clusters.Cluster`): + cluster (google.cloud.dataproc_v1beta2.types.Cluster): Required. The changes to the cluster. This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers in a cluster to 5, the ``update_mask`` @@ -553,6 +567,7 @@ def update_cluster( autoscaling policies + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -564,13 +579,11 @@ def update_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.clusters.Cluster``: Describes the identifying - information, config, and status of a cluster of Compute - Engine instances. + The result type for the operation will be :class:`google.cloud.dataproc_v1beta2.types.Cluster` Describes the identifying information, config, and status of + a cluster of Compute Engine instances. """ # Create or coerce a protobuf request object. @@ -641,22 +654,24 @@ def delete_cluster( `ClusterOperationMetadata `__. Args: - request (:class:`~.clusters.DeleteClusterRequest`): + request (google.cloud.dataproc_v1beta2.types.DeleteClusterRequest): The request object. A request to delete a cluster. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -669,24 +684,22 @@ def delete_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. 
For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -749,23 +762,25 @@ def get_cluster( project. Args: - request (:class:`~.clusters.GetClusterRequest`): + request (google.cloud.dataproc_v1beta2.types.GetClusterRequest): The request object. Request to get the resource representation for a cluster in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -778,7 +793,7 @@ def get_cluster( sent along with the request as metadata. Returns: - ~.clusters.Cluster: + google.cloud.dataproc_v1beta2.types.Cluster: Describes the identifying information, config, and status of a cluster of Compute Engine instances. @@ -836,23 +851,25 @@ def list_clusters( alphabetically. Args: - request (:class:`~.clusters.ListClustersRequest`): + request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): The request object. A request to list the clusters in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - filter (:class:`str`): + filter (str): Optional. A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax: @@ -876,6 +893,7 @@ def list_clusters( status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -887,7 +905,7 @@ def list_clusters( sent along with the request as metadata. Returns: - ~.pagers.ListClustersPager: + google.cloud.dataproc_v1beta2.services.cluster_controller.pagers.ListClustersPager: The list of all clusters in a project. Iterating over this object will yield @@ -958,23 +976,25 @@ def diagnose_cluster( contains [Empty][google.protobuf.Empty]. Args: - request (:class:`~.clusters.DiagnoseClusterRequest`): + request (google.cloud.dataproc_v1beta2.types.DiagnoseClusterRequest): The request object. 
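A short sketch of the filter syntax documented above, using the synchronous client and iterating the pager directly (project, region, and label values are placeholders):

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.ClusterControllerClient()

    pager = client.list_clusters(
        project_id="my-project",
        region="us-central1",
        filter="status.state = ACTIVE AND labels.env = staging",
    )
    for cluster in pager:  # the pager pulls further pages as needed
        print(cluster.cluster_name)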
A request to collect cluster diagnostic information. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - cluster_name (:class:`str`): + cluster_name (str): Required. The cluster name. This corresponds to the ``cluster_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -987,24 +1007,22 @@ def diagnose_cluster( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py index d7c6c416..84576e5f 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1beta2.types import clusters @@ -24,7 +33,7 @@ class ListClustersPager: """A pager for iterating through ``list_clusters`` requests. This class thinly wraps an initial - :class:`~.clusters.ListClustersResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` object, and provides an ``__iter__`` method to iterate through its ``clusters`` field. @@ -33,7 +42,7 @@ class ListClustersPager: through the ``clusters`` field on the corresponding responses. - All the usual :class:`~.clusters.ListClustersResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.clusters.ListClustersRequest`): + request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): The initial request object. 
- response (:class:`~.clusters.ListClustersResponse`): + response (google.cloud.dataproc_v1beta2.types.ListClustersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListClustersAsyncPager: """A pager for iterating through ``list_clusters`` requests. This class thinly wraps an initial - :class:`~.clusters.ListClustersResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` object, and provides an ``__aiter__`` method to iterate through its ``clusters`` field. @@ -95,7 +104,7 @@ class ListClustersAsyncPager: through the ``clusters`` field on the corresponding responses. - All the usual :class:`~.clusters.ListClustersResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListClustersResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -113,9 +122,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.clusters.ListClustersRequest`): + request (google.cloud.dataproc_v1beta2.types.ListClustersRequest): The initial request object. - response (:class:`~.clusters.ListClustersResponse`): + response (google.cloud.dataproc_v1beta2.types.ListClustersResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py index 5e0d3298..10250808 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -80,6 +80,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -89,20 +92,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -113,6 +113,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -124,6 +125,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -135,6 +137,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -150,6 +153,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -165,6 +169,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, @@ -176,6 +181,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=300.0, ), default_timeout=300.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py index 2af74b62..cb2b0558 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -90,6 +91,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -104,72 +109,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. 
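The ``deadline=300.0`` values added to the precomputed retries cap the total time spent across retry attempts, alongside each method's ``default_timeout``. Callers can still pass their own policy per call; a sketch against hypothetical resources::

    from google.api_core import exceptions
    from google.api_core import retry as retries
    from google.cloud import dataproc_v1beta2

    custom_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(exceptions.ServiceUnavailable),
        deadline=300.0,  # overall budget across attempts, mirroring the generated default
    )
    client = dataproc_v1beta2.ClusterControllerClient()
    cluster = client.get_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        retry=custom_retry,
        timeout=300.0,
    )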
credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -177,18 +171,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -202,7 +186,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. 
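With the restructured constructor, an explicitly supplied ``channel`` short-circuits credential handling (``credentials`` is forced to ``False``). A sketch of wiring a pre-built channel through the transport, for example toward a local test double; the endpoint is purely illustrative::

    import grpc

    from google.cloud import dataproc_v1beta2
    from google.cloud.dataproc_v1beta2.services.cluster_controller.transports import (
        ClusterControllerGrpcTransport,
    )

    channel = grpc.insecure_channel("localhost:8080")  # hypothetical local endpoint
    transport = ClusterControllerGrpcTransport(channel=channel)
    client = dataproc_v1beta2.ClusterControllerClient(transport=transport)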
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py index 186cc414..80c4e84d 100644 --- a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py @@ -64,7 +64,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -104,6 +104,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -135,12 +136,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -149,72 +154,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -222,18 +216,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py b/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py index 57234d85..38af5a95 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py @@ -75,7 +75,36 @@ class JobControllerAsyncClient: JobControllerClient.parse_common_location_path ) - from_service_account_file = JobControllerClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_info.__func__(JobControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerAsyncClient: The constructed client. + """ + return JobControllerClient.from_service_account_file.__func__(JobControllerAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -153,22 +182,24 @@ async def submit_job( r"""Submits a job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.SubmitJobRequest`): The request object. A request to submit a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (:class:`google.cloud.dataproc_v1beta2.types.Job`): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -181,7 +212,7 @@ async def submit_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -215,6 +246,7 @@ async def submit_job( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -240,22 +272,24 @@ async def submit_job_as_operation( r"""Submits job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.SubmitJobRequest`): The request object. A request to submit a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. 
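The new ``from_service_account_info`` classmethod complements ``from_service_account_file``; both return a fully constructed client, and the async client now forwards to the sync implementations. A sketch with a hypothetical key file::

    import json

    from google.cloud import dataproc_v1beta2
    from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerAsyncClient

    # From a key file on disk.
    client = dataproc_v1beta2.JobControllerClient.from_service_account_file("service-account.json")

    # From already-parsed key material, with no file on disk.
    with open("service-account.json") as fh:
        info = json.load(fh)
    async_client = JobControllerAsyncClient.from_service_account_info(info)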
+ This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (:class:`google.cloud.dataproc_v1beta2.types.Job`): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -268,11 +302,12 @@ async def submit_job_as_operation( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be - :class:``~.jobs.Job``: A Dataproc job resource. + :class:`google.cloud.dataproc_v1beta2.types.Job` A + Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -306,6 +341,7 @@ async def submit_job_as_operation( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -340,19 +376,21 @@ async def get_job( project. Args: - request (:class:`~.jobs.GetJobRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.GetJobRequest`): The request object. A request to get the resource representation for a job in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -369,7 +407,7 @@ async def get_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -407,6 +445,7 @@ async def get_job( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -432,18 +471,20 @@ async def list_jobs( r"""Lists regions/{region}/jobs in a project. Args: - request (:class:`~.jobs.ListJobsRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.ListJobsRequest`): The request object. A request to list jobs in a project. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -465,6 +506,7 @@ async def list_jobs( status.state = ACTIVE AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -476,7 +518,7 @@ async def list_jobs( sent along with the request as metadata. Returns: - ~.pagers.ListJobsAsyncPager: + google.cloud.dataproc_v1beta2.services.job_controller.pagers.ListJobsAsyncPager: A list of jobs in a project. 
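``submit_job_as_operation`` wraps submission in a long-running operation whose result resolves to the submitted ``Job`` once it reaches a terminal state. An async sketch with a hypothetical PySpark job::

    import asyncio

    from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerAsyncClient

    async def run_job():
        client = JobControllerAsyncClient()
        job = {
            "placement": {"cluster_name": "my-cluster"},
            "pyspark_job": {"main_python_file_uri": "gs://my-bucket/job.py"},
        }
        operation = await client.submit_job_as_operation(
            project_id="my-project", region="us-central1", job=job
        )
        finished = await operation.result()  # a Job resource in a terminal state
        print(finished.status.state)

    asyncio.run(run_job())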
Iterating over this object will yield results and resolve additional pages @@ -518,6 +560,7 @@ async def list_jobs( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -546,7 +589,7 @@ async def update_job( r"""Updates a job in a project. Args: - request (:class:`~.jobs.UpdateJobRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateJobRequest`): The request object. A request to update a job. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -556,7 +599,7 @@ async def update_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -572,6 +615,7 @@ async def update_job( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -601,18 +645,20 @@ async def cancel_job( `regions/{region}/jobs.get `__. Args: - request (:class:`~.jobs.CancelJobRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.CancelJobRequest`): The request object. A request to cancel a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -629,7 +675,7 @@ async def cancel_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -667,6 +713,7 @@ async def cancel_job( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, @@ -693,18 +740,20 @@ async def delete_job( delete fails, and the response returns ``FAILED_PRECONDITION``. Args: - request (:class:`~.jobs.DeleteJobRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteJobRequest`): The request object. A request to delete a job. project_id (:class:`str`): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. region (:class:`str`): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
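The jobs ``filter`` uses the same expression syntax as the clusters filter; together with ``cancel_job`` it gives a compact clean-up loop (identifiers hypothetical)::

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.JobControllerClient()
    for job in client.list_jobs(
        project_id="my-project",
        region="us-central1",
        filter="status.state = ACTIVE AND labels.env = staging",
    ):
        client.cancel_job(
            project_id="my-project",
            region="us-central1",
            job_id=job.reference.job_id,
        )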
@@ -751,6 +800,7 @@ async def delete_job( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/client.py b/google/cloud/dataproc_v1beta2/services/job_controller/client.py index 0989f37a..b487648e 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/client.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/client.py @@ -110,6 +110,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobControllerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -122,7 +138,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + JobControllerClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -214,10 +230,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.JobControllerTransport]): The + transport (Union[str, JobControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -253,21 +269,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. 
if client_options.api_endpoint is not None: @@ -310,7 +322,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -329,22 +341,24 @@ def submit_job( r"""Submits a job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (google.cloud.dataproc_v1beta2.types.SubmitJobRequest): The request object. A request to submit a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (google.cloud.dataproc_v1beta2.types.Job): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -357,7 +371,7 @@ def submit_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -411,22 +425,24 @@ def submit_job_as_operation( r"""Submits job to a cluster. Args: - request (:class:`~.jobs.SubmitJobRequest`): + request (google.cloud.dataproc_v1beta2.types.SubmitJobRequest): The request object. A request to submit a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job (:class:`~.jobs.Job`): + job (google.cloud.dataproc_v1beta2.types.Job): Required. The job resource. This corresponds to the ``job`` field on the ``request`` instance; if ``request`` is provided, this @@ -439,11 +455,12 @@ def submit_job_as_operation( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be - :class:``~.jobs.Job``: A Dataproc job resource. + :class:`google.cloud.dataproc_v1beta2.types.Job` A + Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -506,23 +523,25 @@ def get_job( project. Args: - request (:class:`~.jobs.GetJobRequest`): + request (google.cloud.dataproc_v1beta2.types.GetJobRequest): The request object. A request to get the resource representation for a job in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. 
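Client-side mTLS is now driven by ``client_cert_source_for_mtls`` on the transport, populated from ``client_options.client_cert_source`` only when ``GOOGLE_API_USE_CLIENT_CERTIFICATE`` is set to ``true``. A sketch with hypothetical certificate files::

    import os

    from google.api_core.client_options import ClientOptions
    from google.cloud import dataproc_v1beta2

    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"

    def load_client_cert():
        # Hypothetical PEM files; the callback returns (certificate_bytes, private_key_bytes).
        with open("client_cert.pem", "rb") as cert, open("client_key.pem", "rb") as key:
            return cert.read(), key.read()

    options = ClientOptions(client_cert_source=load_client_cert)
    client = dataproc_v1beta2.JobControllerClient(client_options=options)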
+ This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job_id (:class:`str`): + job_id (str): Required. The job ID. This corresponds to the ``job_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -535,7 +554,7 @@ def get_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -589,22 +608,24 @@ def list_jobs( r"""Lists regions/{region}/jobs in a project. Args: - request (:class:`~.jobs.ListJobsRequest`): + request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): The request object. A request to list jobs in a project. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - filter (:class:`str`): + filter (str): Optional. A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax: @@ -622,6 +643,7 @@ def list_jobs( status.state = ACTIVE AND labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -633,7 +655,7 @@ def list_jobs( sent along with the request as metadata. Returns: - ~.pagers.ListJobsPager: + google.cloud.dataproc_v1beta2.services.job_controller.pagers.ListJobsPager: A list of jobs in a project. Iterating over this object will yield results and resolve additional pages @@ -694,7 +716,7 @@ def update_job( r"""Updates a job in a project. Args: - request (:class:`~.jobs.UpdateJobRequest`): + request (google.cloud.dataproc_v1beta2.types.UpdateJobRequest): The request object. A request to update a job. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -704,7 +726,7 @@ def update_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. @@ -744,22 +766,24 @@ def cancel_job( `regions/{region}/jobs.get `__. Args: - request (:class:`~.jobs.CancelJobRequest`): + request (google.cloud.dataproc_v1beta2.types.CancelJobRequest): The request object. A request to cancel a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job_id (:class:`str`): + job_id (str): Required. The job ID. This corresponds to the ``job_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -772,7 +796,7 @@ def cancel_job( sent along with the request as metadata. Returns: - ~.jobs.Job: + google.cloud.dataproc_v1beta2.types.Job: A Dataproc job resource. """ # Create or coerce a protobuf request object. 
@@ -827,22 +851,24 @@ def delete_job( delete fails, and the response returns ``FAILED_PRECONDITION``. Args: - request (:class:`~.jobs.DeleteJobRequest`): + request (google.cloud.dataproc_v1beta2.types.DeleteJobRequest): The request object. A request to delete a job. - project_id (:class:`str`): + project_id (str): Required. The ID of the Google Cloud Platform project that the job belongs to. + This corresponds to the ``project_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - region (:class:`str`): + region (str): Required. The Dataproc region in which to handle the request. + This corresponds to the ``region`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - job_id (:class:`str`): + job_id (str): Required. The job ID. This corresponds to the ``job_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py b/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py index 98cd30cb..1e104382 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1beta2.types import jobs @@ -24,7 +33,7 @@ class ListJobsPager: """A pager for iterating through ``list_jobs`` requests. This class thinly wraps an initial - :class:`~.jobs.ListJobsResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` object, and provides an ``__iter__`` method to iterate through its ``jobs`` field. @@ -33,7 +42,7 @@ class ListJobsPager: through the ``jobs`` field on the corresponding responses. - All the usual :class:`~.jobs.ListJobsResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.jobs.ListJobsRequest`): + request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): The initial request object. - response (:class:`~.jobs.ListJobsResponse`): + response (google.cloud.dataproc_v1beta2.types.ListJobsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListJobsAsyncPager: """A pager for iterating through ``list_jobs`` requests. This class thinly wraps an initial - :class:`~.jobs.ListJobsResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` object, and provides an ``__aiter__`` method to iterate through its ``jobs`` field. @@ -95,7 +104,7 @@ class ListJobsAsyncPager: through the ``jobs`` field on the corresponding responses. - All the usual :class:`~.jobs.ListJobsResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListJobsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. 
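``ListJobsAsyncPager`` mirrors the sync pager but is consumed with ``async for``, fetching further ``ListJobsResponse`` pages on demand. A sketch::

    import asyncio

    from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerAsyncClient

    async def show_jobs():
        client = JobControllerAsyncClient()
        pager = await client.list_jobs(project_id="my-project", region="us-central1")
        async for job in pager:
            print(job.reference.job_id, job.status.state)

    asyncio.run(show_jobs())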
""" @@ -113,9 +122,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.jobs.ListJobsRequest`): + request (google.cloud.dataproc_v1beta2.types.ListJobsRequest): The initial request object. - response (:class:`~.jobs.ListJobsResponse`): + response (google.cloud.dataproc_v1beta2.types.ListJobsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py index deea5d1c..7b47cb8f 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py @@ -70,10 +70,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -81,6 +81,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -90,20 +93,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -114,6 +114,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -125,6 +126,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -140,6 +142,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -155,6 +158,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -166,6 +170,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -181,6 +186,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, @@ -192,6 +198,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=900.0, ), default_timeout=900.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py index 4eb0020e..78a1139b 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py @@ -60,6 +60,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -90,6 +91,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -104,72 +109,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. 
+ # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -177,18 +171,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -202,7 +186,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. 
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py index 1be3cb35..28be8d15 100644 --- a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py @@ -64,7 +64,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -104,6 +104,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -135,12 +136,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -149,72 +154,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -222,18 +216,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py index 71993784..5f2f82c1 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py @@ -93,7 +93,36 @@ class WorkflowTemplateServiceAsyncClient: WorkflowTemplateServiceClient.parse_common_location_path ) - from_service_account_file = WorkflowTemplateServiceClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. 
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_info.__func__(WorkflowTemplateServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceAsyncClient: The constructed client. + """ + return WorkflowTemplateServiceClient.from_service_account_file.__func__(WorkflowTemplateServiceAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -171,7 +200,7 @@ async def create_workflow_template( r"""Creates new workflow template. Args: - request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.CreateWorkflowTemplateRequest`): The request object. A request to create a workflow template. parent (:class:`str`): @@ -187,12 +216,14 @@ async def create_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): Required. The Dataproc workflow template to create. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -204,7 +235,7 @@ async def create_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -238,6 +269,7 @@ async def create_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -269,7 +301,7 @@ async def get_workflow_template( specifying optional version parameter. Args: - request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.GetWorkflowTemplateRequest`): The request object. A request to fetch a workflow template. name (:class:`str`): @@ -286,6 +318,7 @@ async def get_workflow_template( resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -297,7 +330,7 @@ async def get_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: A Dataproc workflow template resource. 
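# A minimal usage sketch (not part of the patch) for the classmethods added
# above; the key file path and the loaded dict are placeholders.
import json

from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceAsyncClient,
)

# Construct the async client from a service account key file on disk.
client = WorkflowTemplateServiceAsyncClient.from_service_account_file(
    "service-account.json"
)

# Or from key material that is already loaded as a dict.
with open("service-account.json") as fh:
    client = WorkflowTemplateServiceAsyncClient.from_service_account_info(
        json.load(fh)
    )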
@@ -333,6 +366,7 @@ async def get_workflow_template( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -385,7 +419,7 @@ async def instantiate_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest`): The request object. A request to instantiate a workflow template. name (:class:`str`): @@ -404,14 +438,16 @@ async def instantiate_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + parameters (:class:`Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]`): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. + This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -423,24 +459,22 @@ async def instantiate_workflow_template( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -473,6 +507,7 @@ async def instantiate_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -536,7 +571,7 @@ async def instantiate_inline_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.InstantiateInlineWorkflowTemplateRequest`): The request object. A request to instantiate an inline workflow template. parent (:class:`str`): @@ -554,12 +589,14 @@ async def instantiate_inline_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- template (:class:`~.workflow_templates.WorkflowTemplate`): + template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): Required. The workflow template to instantiate. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -571,24 +608,22 @@ async def instantiate_inline_workflow_template( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -620,6 +655,7 @@ async def instantiate_inline_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -659,14 +695,15 @@ async def update_workflow_template( server version. Args: - request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.UpdateWorkflowTemplateRequest`): The request object. A request to update a workflow template. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (:class:`google.cloud.dataproc_v1beta2.types.WorkflowTemplate`): Required. The updated workflow template. The ``template.version`` field must match the current version. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -678,7 +715,7 @@ async def update_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -710,6 +747,7 @@ async def update_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -742,7 +780,7 @@ async def list_workflow_templates( the request. Args: - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest`): The request object. A request to list workflow templates in a project. parent (:class:`str`): @@ -758,6 +796,7 @@ async def list_workflow_templates( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
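# A usage sketch (not part of the patch) for the long-running instantiate calls
# described above: the returned AsyncOperation resolves to Empty, so waiting on
# it simply blocks until the workflow finishes. The template name is a placeholder.
import asyncio

from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceAsyncClient,
)


async def run_template() -> None:
    client = WorkflowTemplateServiceAsyncClient()
    operation = await client.instantiate_workflow_template(
        name="projects/my-project/locations/us-central1/workflowTemplates/my-template"
    )
    await operation.result()  # raises if the workflow fails


asyncio.run(run_template())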
@@ -769,7 +808,7 @@ async def list_workflow_templates( sent along with the request as metadata. Returns: - ~.pagers.ListWorkflowTemplatesAsyncPager: + google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers.ListWorkflowTemplatesAsyncPager: A response to a request to list workflow templates in a project. Iterating over this object will yield @@ -808,6 +847,7 @@ async def list_workflow_templates( exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -844,7 +884,7 @@ async def delete_workflow_template( rogress workflows. Args: - request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + request (:class:`google.cloud.dataproc_v1beta2.types.DeleteWorkflowTemplateRequest`): The request object. A request to delete a workflow template. Currently started workflows will remain running. @@ -863,6 +903,7 @@ async def delete_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -900,6 +941,7 @@ async def delete_workflow_template( maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py index 5c473496..3c23cd89 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py @@ -119,6 +119,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + WorkflowTemplateServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -131,7 +147,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + WorkflowTemplateServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -257,10 +273,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport (Union[str, WorkflowTemplateServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. 
It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -296,21 +312,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -353,7 +365,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -371,10 +383,10 @@ def create_workflow_template( r"""Creates new workflow template. Args: - request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1beta2.types.CreateWorkflowTemplateRequest): The request object. A request to create a workflow template. - parent (:class:`str`): + parent (str): Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -387,12 +399,14 @@ def create_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): Required. The Dataproc workflow template to create. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -404,7 +418,7 @@ def create_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -464,10 +478,10 @@ def get_workflow_template( specifying optional version parameter. Args: - request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1beta2.types.GetWorkflowTemplateRequest): The request object. A request to fetch a workflow template. - name (:class:`str`): + name (str): Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. @@ -481,6 +495,7 @@ def get_workflow_template( resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -492,7 +507,7 @@ def get_workflow_template( sent along with the request as metadata. 
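# A sketch (not part of the patch) of opting in to mutual TLS at the client
# level, which is what the rewritten logic above inspects: the
# GOOGLE_API_USE_CLIENT_CERTIFICATE env var plus a client_cert_source callback
# returning PEM bytes. Certificate and key paths are placeholders.
import os

from google.api_core.client_options import ClientOptions
from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceClient,
)

os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"


def client_cert_source():
    # Return (certificate_chain, private_key), both as PEM-encoded bytes.
    with open("client-cert.pem", "rb") as cert, open("client-key.pem", "rb") as key:
        return cert.read(), key.read()


client = WorkflowTemplateServiceClient(
    client_options=ClientOptions(client_cert_source=client_cert_source)
)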
Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -571,10 +586,10 @@ def instantiate_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest): The request object. A request to instantiate a workflow template. - name (:class:`str`): + name (str): Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. @@ -590,14 +605,16 @@ def instantiate_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + parameters (Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. + This corresponds to the ``parameters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -609,24 +626,22 @@ def instantiate_workflow_template( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -653,9 +668,8 @@ def instantiate_workflow_template( if name is not None: request.name = name - - if parameters: - request.parameters.update(parameters) + if parameters is not None: + request.parameters = parameters # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -721,10 +735,10 @@ def instantiate_inline_workflow_template( be [Empty][google.protobuf.Empty]. Args: - request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1beta2.types.InstantiateInlineWorkflowTemplateRequest): The request object. A request to instantiate an inline workflow template. - parent (:class:`str`): + parent (str): Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. 
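# A sketch (not part of the patch) of the synchronous create/get calls
# documented above. The parent, template id, and the otherwise-empty template
# body are placeholders; a real template also needs placement and jobs.
from google.cloud import dataproc_v1beta2

client = dataproc_v1beta2.WorkflowTemplateServiceClient()
created = client.create_workflow_template(
    parent="projects/my-project/locations/us-central1",
    template=dataproc_v1beta2.WorkflowTemplate(id="my-template"),
)
fetched = client.get_workflow_template(name=created.name)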
@@ -739,12 +753,14 @@ def instantiate_inline_workflow_template( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): Required. The workflow template to instantiate. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -756,24 +772,22 @@ def instantiate_inline_workflow_template( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -845,14 +859,15 @@ def update_workflow_template( server version. Args: - request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1beta2.types.UpdateWorkflowTemplateRequest): The request object. A request to update a workflow template. - template (:class:`~.workflow_templates.WorkflowTemplate`): + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): Required. The updated workflow template. The ``template.version`` field must match the current version. + This corresponds to the ``template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -864,7 +879,7 @@ def update_workflow_template( sent along with the request as metadata. Returns: - ~.workflow_templates.WorkflowTemplate: + google.cloud.dataproc_v1beta2.types.WorkflowTemplate: A Dataproc workflow template resource. @@ -923,10 +938,10 @@ def list_workflow_templates( the request. Args: - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): The request object. A request to list workflow templates in a project. - parent (:class:`str`): + parent (str): Required. The resource name of the region or location, as described in https://cloud.google.com/apis/design/resource_names. @@ -939,6 +954,7 @@ def list_workflow_templates( the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -950,7 +966,7 @@ def list_workflow_templates( sent along with the request as metadata. 
Returns: - ~.pagers.ListWorkflowTemplatesPager: + google.cloud.dataproc_v1beta2.services.workflow_template_service.pagers.ListWorkflowTemplatesPager: A response to a request to list workflow templates in a project. Iterating over this object will yield @@ -1016,11 +1032,11 @@ def delete_workflow_template( rogress workflows. Args: - request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + request (google.cloud.dataproc_v1beta2.types.DeleteWorkflowTemplateRequest): The request object. A request to delete a workflow template. Currently started workflows will remain running. - name (:class:`str`): + name (str): Required. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. @@ -1035,6 +1051,7 @@ def delete_workflow_template( the resource name of the template has the following format: ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py index 205f2657..83bfd1f9 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.dataproc_v1beta2.types import workflow_templates @@ -24,7 +33,7 @@ class ListWorkflowTemplatesPager: """A pager for iterating through ``list_workflow_templates`` requests. This class thinly wraps an initial - :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` object, and provides an ``__iter__`` method to iterate through its ``templates`` field. @@ -33,7 +42,7 @@ class ListWorkflowTemplatesPager: through the ``templates`` field on the corresponding responses. - All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -51,9 +60,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): The initial request object. - response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + response (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -86,7 +95,7 @@ class ListWorkflowTemplatesAsyncPager: """A pager for iterating through ``list_workflow_templates`` requests. 
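# A sketch (not part of the patch) of iterating the pager described above: the
# pager lazily issues further ListWorkflowTemplates calls while yielding
# individual templates. The parent value is a placeholder.
from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceClient,
)

client = WorkflowTemplateServiceClient()
pager = client.list_workflow_templates(
    parent="projects/my-project/locations/us-central1"
)
for template in pager:
    print(template.id, template.version)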
This class thinly wraps an initial - :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` object, and provides an ``__aiter__`` method to iterate through its ``templates`` field. @@ -95,7 +104,7 @@ class ListWorkflowTemplatesAsyncPager: through the ``templates`` field on the corresponding responses. - All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + All the usual :class:`google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -115,9 +124,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + request (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesRequest): The initial request object. - response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + response (google.cloud.dataproc_v1beta2.types.ListWorkflowTemplatesResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py index 2495d556..40ff6b61 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py @@ -70,10 +70,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -81,6 +81,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -90,20 +93,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -114,6 +114,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -129,6 +130,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -140,6 +142,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -151,6 +154,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -162,6 +166,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -177,6 +182,7 @@ def _prep_wrapped_messages(self, client_info): exceptions.InternalServerError, exceptions.ServiceUnavailable, ), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -188,6 +194,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py index 8de2cce0..4d514e2d 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py @@ -61,6 +61,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -91,6 +92,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -105,72 +110,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
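# A sketch (not part of the patch) showing an equivalent retry policy passed
# per call; it mirrors the defaults above, including the new overall deadline.
# The template name is a placeholder.
from google.api_core import exceptions
from google.api_core import retry as retries
from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceClient,
)

client = WorkflowTemplateServiceClient()
custom_retry = retries.Retry(
    initial=0.1,
    maximum=60.0,
    multiplier=1.3,
    predicate=retries.if_exception_type(
        exceptions.DeadlineExceeded,
        exceptions.InternalServerError,
        exceptions.ServiceUnavailable,
    ),
    deadline=600.0,  # overall time budget for retries
)
template = client.get_workflow_template(
    name="projects/my-project/locations/us-central1/workflowTemplates/my-template",
    retry=custom_retry,
)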
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -178,18 +172,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. 
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -203,7 +187,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py index 81ffd56b..b02a4e15 100644 --- a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py @@ -65,7 +65,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -105,6 +105,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -136,12 +137,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -150,72 +155,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -223,18 +217,8 @@ def __init__( ], ) - # Run the base constructor. 
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/dataproc_v1beta2/types/__init__.py b/google/cloud/dataproc_v1beta2/types/__init__.py index 23523cce..c640c043 100644 --- a/google/cloud/dataproc_v1beta2/types/__init__.py +++ b/google/cloud/dataproc_v1beta2/types/__init__.py @@ -19,181 +19,181 @@ AutoscalingPolicy, BasicAutoscalingAlgorithm, BasicYarnAutoscalingConfig, - InstanceGroupAutoscalingPolicyConfig, CreateAutoscalingPolicyRequest, - GetAutoscalingPolicyRequest, - UpdateAutoscalingPolicyRequest, DeleteAutoscalingPolicyRequest, + GetAutoscalingPolicyRequest, + InstanceGroupAutoscalingPolicyConfig, ListAutoscalingPoliciesRequest, ListAutoscalingPoliciesResponse, + UpdateAutoscalingPolicyRequest, ) from .clusters import ( + AcceleratorConfig, + AutoscalingConfig, Cluster, ClusterConfig, - GkeClusterConfig, - EndpointConfig, - AutoscalingConfig, - EncryptionConfig, - GceClusterConfig, - InstanceGroupConfig, - ManagedGroupConfig, - AcceleratorConfig, - DiskConfig, - LifecycleConfig, - SecurityConfig, - KerberosConfig, - NodeInitializationAction, - ClusterStatus, - SoftwareConfig, ClusterMetrics, + ClusterStatus, CreateClusterRequest, - UpdateClusterRequest, DeleteClusterRequest, + DiagnoseClusterRequest, + DiagnoseClusterResults, + DiskConfig, + EncryptionConfig, + EndpointConfig, + GceClusterConfig, GetClusterRequest, + GkeClusterConfig, + InstanceGroupConfig, + KerberosConfig, + LifecycleConfig, ListClustersRequest, ListClustersResponse, - DiagnoseClusterRequest, - DiagnoseClusterResults, + ManagedGroupConfig, + NodeInitializationAction, ReservationAffinity, + SecurityConfig, + SoftwareConfig, + UpdateClusterRequest, ) from .jobs import ( - LoggingConfig, + CancelJobRequest, + DeleteJobRequest, + GetJobRequest, HadoopJob, - SparkJob, - PySparkJob, - QueryList, HiveJob, - SparkSqlJob, - PigJob, - SparkRJob, - PrestoJob, + Job, + JobMetadata, JobPlacement, - JobStatus, JobReference, - YarnApplication, - Job, JobScheduling, - JobMetadata, - SubmitJobRequest, - GetJobRequest, + JobStatus, ListJobsRequest, - UpdateJobRequest, ListJobsResponse, - CancelJobRequest, - DeleteJobRequest, + LoggingConfig, + PigJob, + PrestoJob, + PySparkJob, + QueryList, + SparkJob, + SparkRJob, + SparkSqlJob, + SubmitJobRequest, + UpdateJobRequest, + YarnApplication, ) from .operations import ( - ClusterOperationStatus, ClusterOperationMetadata, + ClusterOperationStatus, ) from .workflow_templates import ( - WorkflowTemplate, - WorkflowTemplatePlacement, - ManagedCluster, + ClusterOperation, ClusterSelector, + CreateWorkflowTemplateRequest, + DeleteWorkflowTemplateRequest, + GetWorkflowTemplateRequest, + InstantiateInlineWorkflowTemplateRequest, + InstantiateWorkflowTemplateRequest, + ListWorkflowTemplatesRequest, + ListWorkflowTemplatesResponse, + ManagedCluster, OrderedJob, - TemplateParameter, ParameterValidation, RegexValidation, + TemplateParameter, + UpdateWorkflowTemplateRequest, ValueValidation, - WorkflowMetadata, - ClusterOperation, WorkflowGraph, + WorkflowMetadata, WorkflowNode, - CreateWorkflowTemplateRequest, - GetWorkflowTemplateRequest, - InstantiateWorkflowTemplateRequest, - 
InstantiateInlineWorkflowTemplateRequest, - UpdateWorkflowTemplateRequest, - ListWorkflowTemplatesRequest, - ListWorkflowTemplatesResponse, - DeleteWorkflowTemplateRequest, + WorkflowTemplate, + WorkflowTemplatePlacement, ) __all__ = ( "AutoscalingPolicy", "BasicAutoscalingAlgorithm", "BasicYarnAutoscalingConfig", - "InstanceGroupAutoscalingPolicyConfig", "CreateAutoscalingPolicyRequest", - "GetAutoscalingPolicyRequest", - "UpdateAutoscalingPolicyRequest", "DeleteAutoscalingPolicyRequest", + "GetAutoscalingPolicyRequest", + "InstanceGroupAutoscalingPolicyConfig", "ListAutoscalingPoliciesRequest", "ListAutoscalingPoliciesResponse", - "Component", + "UpdateAutoscalingPolicyRequest", + "AcceleratorConfig", + "AutoscalingConfig", "Cluster", "ClusterConfig", - "GkeClusterConfig", - "EndpointConfig", - "AutoscalingConfig", - "EncryptionConfig", - "GceClusterConfig", - "InstanceGroupConfig", - "ManagedGroupConfig", - "AcceleratorConfig", - "DiskConfig", - "LifecycleConfig", - "SecurityConfig", - "KerberosConfig", - "NodeInitializationAction", - "ClusterStatus", - "SoftwareConfig", "ClusterMetrics", + "ClusterStatus", "CreateClusterRequest", - "UpdateClusterRequest", "DeleteClusterRequest", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "DiskConfig", + "EncryptionConfig", + "EndpointConfig", + "GceClusterConfig", "GetClusterRequest", + "GkeClusterConfig", + "InstanceGroupConfig", + "KerberosConfig", + "LifecycleConfig", "ListClustersRequest", "ListClustersResponse", - "DiagnoseClusterRequest", - "DiagnoseClusterResults", + "ManagedGroupConfig", + "NodeInitializationAction", "ReservationAffinity", - "LoggingConfig", + "SecurityConfig", + "SoftwareConfig", + "UpdateClusterRequest", + "CancelJobRequest", + "DeleteJobRequest", + "GetJobRequest", "HadoopJob", - "SparkJob", - "PySparkJob", - "QueryList", "HiveJob", - "SparkSqlJob", - "PigJob", - "SparkRJob", - "PrestoJob", + "Job", + "JobMetadata", "JobPlacement", - "JobStatus", "JobReference", - "YarnApplication", - "Job", "JobScheduling", - "JobMetadata", - "SubmitJobRequest", - "GetJobRequest", + "JobStatus", "ListJobsRequest", - "UpdateJobRequest", "ListJobsResponse", - "CancelJobRequest", - "DeleteJobRequest", - "ClusterOperationStatus", + "LoggingConfig", + "PigJob", + "PrestoJob", + "PySparkJob", + "QueryList", + "SparkJob", + "SparkRJob", + "SparkSqlJob", + "SubmitJobRequest", + "UpdateJobRequest", + "YarnApplication", "ClusterOperationMetadata", - "WorkflowTemplate", - "WorkflowTemplatePlacement", - "ManagedCluster", + "ClusterOperationStatus", + "Component", + "ClusterOperation", "ClusterSelector", + "CreateWorkflowTemplateRequest", + "DeleteWorkflowTemplateRequest", + "GetWorkflowTemplateRequest", + "InstantiateInlineWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "ManagedCluster", "OrderedJob", - "TemplateParameter", "ParameterValidation", "RegexValidation", + "TemplateParameter", + "UpdateWorkflowTemplateRequest", "ValueValidation", - "WorkflowMetadata", - "ClusterOperation", "WorkflowGraph", + "WorkflowMetadata", "WorkflowNode", - "CreateWorkflowTemplateRequest", - "GetWorkflowTemplateRequest", - "InstantiateWorkflowTemplateRequest", - "InstantiateInlineWorkflowTemplateRequest", - "UpdateWorkflowTemplateRequest", - "ListWorkflowTemplatesRequest", - "ListWorkflowTemplatesResponse", - "DeleteWorkflowTemplateRequest", + "WorkflowTemplate", + "WorkflowTemplatePlacement", ) diff --git a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py 
b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py index 1a3c408f..b52a838f 100644 --- a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py +++ b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py @@ -62,12 +62,12 @@ class AutoscalingPolicy(proto.Message): - For ``projects.locations.autoscalingPolicies``, the resource name of the policy has the following format: ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - basic_algorithm (~.autoscaling_policies.BasicAutoscalingAlgorithm): + basic_algorithm (google.cloud.dataproc_v1beta2.types.BasicAutoscalingAlgorithm): - worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupAutoscalingPolicyConfig): Required. Describes how the autoscaler will operate for primary workers. - secondary_worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + secondary_worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupAutoscalingPolicyConfig): Optional. Describes how the autoscaler will operate for secondary workers. """ @@ -93,9 +93,9 @@ class BasicAutoscalingAlgorithm(proto.Message): r"""Basic algorithm for autoscaling. Attributes: - yarn_config (~.autoscaling_policies.BasicYarnAutoscalingConfig): + yarn_config (google.cloud.dataproc_v1beta2.types.BasicYarnAutoscalingConfig): Required. YARN autoscaling configuration. - cooldown_period (~.duration.Duration): + cooldown_period (google.protobuf.duration_pb2.Duration): Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. @@ -114,7 +114,7 @@ class BasicYarnAutoscalingConfig(proto.Message): r"""Basic autoscaling configurations for YARN. Attributes: - graceful_decommission_timeout (~.duration.Duration): + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially @@ -244,13 +244,13 @@ class CreateAutoscalingPolicyRequest(proto.Message): - For ``projects.locations.autoscalingPolicies.create``, the resource name has the following format: ``projects/{project_id}/locations/{location}`` - policy (~.autoscaling_policies.AutoscalingPolicy): + policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The autoscaling policy to create. """ parent = proto.Field(proto.STRING, number=1) - policy = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) + policy_ = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) class GetAutoscalingPolicyRequest(proto.Message): @@ -278,11 +278,11 @@ class UpdateAutoscalingPolicyRequest(proto.Message): r"""A request to update an autoscaling policy. Attributes: - policy (~.autoscaling_policies.AutoscalingPolicy): + policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The updated autoscaling policy. """ - policy = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) + policy_ = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) class DeleteAutoscalingPolicyRequest(proto.Message): @@ -346,7 +346,7 @@ class ListAutoscalingPoliciesResponse(proto.Message): project. Attributes: - policies (Sequence[~.autoscaling_policies.AutoscalingPolicy]): + policies (Sequence[google.cloud.dataproc_v1beta2.types.AutoscalingPolicy]): Output only. Autoscaling policies list. 
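# A construction sketch (not part of the patch) using the autoscaling policy
# fields documented above. Values are placeholders, and scale_up_factor,
# scale_down_factor and max_instances are assumed field names not shown in
# this hunk.
from google.protobuf import duration_pb2

from google.cloud import dataproc_v1beta2

policy = dataproc_v1beta2.AutoscalingPolicy(
    id="my-policy",
    basic_algorithm=dataproc_v1beta2.BasicAutoscalingAlgorithm(
        yarn_config=dataproc_v1beta2.BasicYarnAutoscalingConfig(
            graceful_decommission_timeout=duration_pb2.Duration(seconds=3600),
            scale_up_factor=0.5,
            scale_down_factor=0.5,
        ),
        cooldown_period=duration_pb2.Duration(seconds=120),
    ),
    worker_config=dataproc_v1beta2.InstanceGroupAutoscalingPolicyConfig(
        max_instances=10,
    ),
)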
next_page_token (str): Output only. This token is included in the diff --git a/google/cloud/dataproc_v1beta2/types/clusters.py b/google/cloud/dataproc_v1beta2/types/clusters.py index 97903e6f..71047f01 100644 --- a/google/cloud/dataproc_v1beta2/types/clusters.py +++ b/google/cloud/dataproc_v1beta2/types/clusters.py @@ -70,11 +70,11 @@ class Cluster(proto.Message): Required. The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused. - config (~.gcd_clusters.ClusterConfig): + config (google.cloud.dataproc_v1beta2.types.ClusterConfig): Required. The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated. - labels (Sequence[~.gcd_clusters.Cluster.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1beta2.types.Cluster.LabelsEntry]): Optional. The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -82,15 +82,15 @@ class Cluster(proto.Message): 1 to 63 characters, and must conform to `RFC 1035 `__. No more than 32 labels can be associated with a cluster. - status (~.gcd_clusters.ClusterStatus): + status (google.cloud.dataproc_v1beta2.types.ClusterStatus): Output only. Cluster status. - status_history (Sequence[~.gcd_clusters.ClusterStatus]): + status_history (Sequence[google.cloud.dataproc_v1beta2.types.ClusterStatus]): Output only. The previous cluster status. cluster_uuid (str): Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster. - metrics (~.gcd_clusters.ClusterMetrics): + metrics (google.cloud.dataproc_v1beta2.types.ClusterMetrics): Output only. Contains cluster daemon metrics such as HDFS and YARN stats. @@ -143,25 +143,25 @@ class ClusterConfig(proto.Message): project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. - gce_cluster_config (~.gcd_clusters.GceClusterConfig): + gce_cluster_config (google.cloud.dataproc_v1beta2.types.GceClusterConfig): Optional. The shared Compute Engine config settings for all instances in a cluster. - master_config (~.gcd_clusters.InstanceGroupConfig): + master_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): Optional. The Compute Engine config settings for the master instance in a cluster. - worker_config (~.gcd_clusters.InstanceGroupConfig): + worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): Optional. The Compute Engine config settings for worker instances in a cluster. - secondary_worker_config (~.gcd_clusters.InstanceGroupConfig): + secondary_worker_config (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig): Optional. The Compute Engine config settings for additional worker instances in a cluster. - software_config (~.gcd_clusters.SoftwareConfig): + software_config (google.cloud.dataproc_v1beta2.types.SoftwareConfig): Optional. The config settings for software inside the cluster. - lifecycle_config (~.gcd_clusters.LifecycleConfig): + lifecycle_config (google.cloud.dataproc_v1beta2.types.LifecycleConfig): Optional. The config setting for auto delete cluster schedule. - initialization_actions (Sequence[~.gcd_clusters.NodeInitializationAction]): + initialization_actions (Sequence[google.cloud.dataproc_v1beta2.types.NodeInitializationAction]): Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's role metadata to run an @@ -177,19 +177,19 @@ class ClusterConfig(proto.Message): else ... worker specific actions ... fi - encryption_config (~.gcd_clusters.EncryptionConfig): + encryption_config (google.cloud.dataproc_v1beta2.types.EncryptionConfig): Optional. Encryption settings for the cluster. - autoscaling_config (~.gcd_clusters.AutoscalingConfig): + autoscaling_config (google.cloud.dataproc_v1beta2.types.AutoscalingConfig): Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. - endpoint_config (~.gcd_clusters.EndpointConfig): + endpoint_config (google.cloud.dataproc_v1beta2.types.EndpointConfig): Optional. Port/endpoint configuration for this cluster - security_config (~.gcd_clusters.SecurityConfig): + security_config (google.cloud.dataproc_v1beta2.types.SecurityConfig): Optional. Security related configuration. - gke_cluster_config (~.gcd_clusters.GkeClusterConfig): + gke_cluster_config (google.cloud.dataproc_v1beta2.types.GkeClusterConfig): Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as @@ -245,7 +245,7 @@ class GkeClusterConfig(proto.Message): r"""The GKE config for this cluster. Attributes: - namespaced_gke_deployment_target (~.gcd_clusters.GkeClusterConfig.NamespacedGkeDeploymentTarget): + namespaced_gke_deployment_target (google.cloud.dataproc_v1beta2.types.GkeClusterConfig.NamespacedGkeDeploymentTarget): Optional. A target for the deployment. """ @@ -275,7 +275,7 @@ class EndpointConfig(proto.Message): r"""Endpoint config for this cluster Attributes: - http_ports (Sequence[~.gcd_clusters.EndpointConfig.HttpPortsEntry]): + http_ports (Sequence[google.cloud.dataproc_v1beta2.types.EndpointConfig.HttpPortsEntry]): Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. enable_http_port_access (bool): @@ -403,11 +403,11 @@ class GceClusterConfig(proto.Message): The Compute Engine tags to add to all instances (see `Tagging instances `__). - metadata (Sequence[~.gcd_clusters.GceClusterConfig.MetadataEntry]): + metadata (Sequence[google.cloud.dataproc_v1beta2.types.GceClusterConfig.MetadataEntry]): The Compute Engine metadata entries to add to all instances (see `Project and instance metadata `__). - reservation_affinity (~.gcd_clusters.ReservationAffinity): + reservation_affinity (google.cloud.dataproc_v1beta2.types.ReservationAffinity): Optional. Reservation Affinity for consuming Zonal reservation. """ @@ -481,12 +481,12 @@ class InstanceGroupConfig(proto.Message): Placement `__ feature, you must use the short name of the machine type resource, for example, ``n1-standard-2``. - disk_config (~.gcd_clusters.DiskConfig): + disk_config (google.cloud.dataproc_v1beta2.types.DiskConfig): Optional. Disk option config settings. is_preemptible (bool): Output only. Specifies that this instance group contains preemptible instances. - preemptibility (~.gcd_clusters.InstanceGroupConfig.Preemptibility): + preemptibility (google.cloud.dataproc_v1beta2.types.InstanceGroupConfig.Preemptibility): Optional. Specifies the preemptibility of the instance group. @@ -495,12 +495,12 @@ class InstanceGroupConfig(proto.Message): The default value for secondary instances is ``PREEMPTIBLE``. - managed_group_config (~.gcd_clusters.ManagedGroupConfig): + managed_group_config (google.cloud.dataproc_v1beta2.types.ManagedGroupConfig): Output only. 
The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. - accelerators (Sequence[~.gcd_clusters.AcceleratorConfig]): + accelerators (Sequence[google.cloud.dataproc_v1beta2.types.AcceleratorConfig]): Optional. The Compute Engine accelerator configuration for these instances. min_cpu_platform (str): @@ -627,24 +627,24 @@ class LifecycleConfig(proto.Message): r"""Specifies the cluster auto-delete schedule configuration. Attributes: - idle_delete_ttl (~.duration.Duration): + idle_delete_ttl (google.protobuf.duration_pb2.Duration): Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of `Duration `__. - auto_delete_time (~.timestamp.Timestamp): + auto_delete_time (google.protobuf.timestamp_pb2.Timestamp): Optional. The time when cluster will be auto-deleted. (see JSON representation of `Timestamp `__). - auto_delete_ttl (~.duration.Duration): + auto_delete_ttl (google.protobuf.duration_pb2.Duration): Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of `Duration `__). - idle_start_time (~.timestamp.Timestamp): + idle_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of @@ -669,7 +669,7 @@ class SecurityConfig(proto.Message): Kerberos, etc. Attributes: - kerberos_config (~.gcd_clusters.KerberosConfig): + kerberos_config (google.cloud.dataproc_v1beta2.types.KerberosConfig): Kerberos related configuration. """ @@ -790,7 +790,7 @@ class NodeInitializationAction(proto.Message): executable_file (str): Required. Cloud Storage URI of executable file. - execution_timeout (~.duration.Duration): + execution_timeout (google.protobuf.duration_pb2.Duration): Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of `Duration `__). @@ -810,16 +810,16 @@ class ClusterStatus(proto.Message): r"""The status of a cluster and its instances. Attributes: - state (~.gcd_clusters.ClusterStatus.State): + state (google.cloud.dataproc_v1beta2.types.ClusterStatus.State): Output only. The cluster's state. detail (str): Output only. Optional details of cluster's state. - state_start_time (~.timestamp.Timestamp): + state_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when this state was entered (see JSON representation of `Timestamp `__). - substate (~.gcd_clusters.ClusterStatus.Substate): + substate (google.cloud.dataproc_v1beta2.types.ClusterStatus.Substate): Output only. Additional state information that includes status reported by the agent. """ @@ -866,7 +866,7 @@ class SoftwareConfig(proto.Message): "1.2.29"), or the `"preview" version `__. If unspecified, it defaults to the latest Debian version. - properties (Sequence[~.gcd_clusters.SoftwareConfig.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.SoftwareConfig.PropertiesEntry]): Optional. The properties to set on daemon config files. Property keys are specified in ``prefix:property`` format, @@ -885,7 +885,7 @@ class SoftwareConfig(proto.Message): For more information, see `Cluster properties `__. 
- optional_components (Sequence[~.shared.Component]): + optional_components (Sequence[google.cloud.dataproc_v1beta2.types.Component]): The set of optional components to activate on the cluster. """ @@ -906,9 +906,9 @@ class ClusterMetrics(proto.Message): only. It may be changed before final release. Attributes: - hdfs_metrics (Sequence[~.gcd_clusters.ClusterMetrics.HdfsMetricsEntry]): + hdfs_metrics (Sequence[google.cloud.dataproc_v1beta2.types.ClusterMetrics.HdfsMetricsEntry]): The HDFS metrics. - yarn_metrics (Sequence[~.gcd_clusters.ClusterMetrics.YarnMetricsEntry]): + yarn_metrics (Sequence[google.cloud.dataproc_v1beta2.types.ClusterMetrics.YarnMetricsEntry]): The YARN metrics. """ @@ -927,7 +927,7 @@ class CreateClusterRequest(proto.Message): region (str): Required. The Dataproc region in which to handle the request. - cluster (~.gcd_clusters.Cluster): + cluster (google.cloud.dataproc_v1beta2.types.Cluster): Required. The cluster to create. request_id (str): Optional. A unique id used to identify the request. If the @@ -967,9 +967,9 @@ class UpdateClusterRequest(proto.Message): handle the request. cluster_name (str): Required. The cluster name. - cluster (~.gcd_clusters.Cluster): + cluster (google.cloud.dataproc_v1beta2.types.Cluster): Required. The changes to the cluster. - graceful_decommission_timeout (~.duration.Duration): + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): Optional. Timeout for graceful YARN decomissioning. Graceful decommissioning allows removing nodes from the cluster without interrupting jobs in progress. Timeout specifies how @@ -981,7 +981,7 @@ class UpdateClusterRequest(proto.Message): `Duration `__). Only supported on Dataproc image versions 1.2 and higher. - update_mask (~.field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. Specifies the path, relative to ``Cluster``, of the field to update. For example, to change the number of workers in a cluster to 5, the ``update_mask`` parameter @@ -1203,7 +1203,7 @@ class ListClustersResponse(proto.Message): r"""The list of all clusters in a project. Attributes: - clusters (Sequence[~.gcd_clusters.Cluster]): + clusters (Sequence[google.cloud.dataproc_v1beta2.types.Cluster]): Output only. The clusters in the project. next_page_token (str): Output only. This token is included in the response if there @@ -1260,7 +1260,7 @@ class ReservationAffinity(proto.Message): r"""Reservation Affinity for consuming Zonal reservation. Attributes: - consume_reservation_type (~.gcd_clusters.ReservationAffinity.Type): + consume_reservation_type (google.cloud.dataproc_v1beta2.types.ReservationAffinity.Type): Optional. Type of reservation to consume key (str): Optional. Corresponds to the label key of diff --git a/google/cloud/dataproc_v1beta2/types/jobs.py b/google/cloud/dataproc_v1beta2/types/jobs.py index c3b57d43..3b1f50b5 100644 --- a/google/cloud/dataproc_v1beta2/types/jobs.py +++ b/google/cloud/dataproc_v1beta2/types/jobs.py @@ -57,7 +57,7 @@ class LoggingConfig(proto.Message): r"""The runtime logging config of the job. Attributes: - driver_log_levels (Sequence[~.gcd_jobs.LoggingConfig.DriverLogLevelsEntry]): + driver_log_levels (Sequence[google.cloud.dataproc_v1beta2.types.LoggingConfig.DriverLogLevelsEntry]): The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: @@ -120,13 +120,13 @@ class HadoopJob(proto.Message): extracted in the working directory of Hadoop drivers and tasks. 
Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. - properties (Sequence[~.gcd_jobs.HadoopJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.HadoopJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -182,14 +182,14 @@ class SparkJob(proto.Message): extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Sequence[~.gcd_jobs.SparkJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -241,7 +241,7 @@ class PySparkJob(proto.Message): extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Sequence[~.gcd_jobs.PySparkJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.PySparkJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc @@ -249,7 +249,7 @@ class PySparkJob(proto.Message): set in /etc/spark/conf/spark-defaults.conf and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -306,16 +306,16 @@ class HiveJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains Hive queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1beta2.types.QueryList): A list of queries. continue_on_failure (bool): Optional. Whether to continue executing queries if a query fails. The default value is ``false``. Setting to ``true`` can be useful when executing independent parallel queries. - script_variables (Sequence[~.gcd_jobs.HiveJob.ScriptVariablesEntry]): + script_variables (Sequence[google.cloud.dataproc_v1beta2.types.HiveJob.ScriptVariablesEntry]): Optional. Mapping of query variable names to values (equivalent to the Hive command: ``SET name="value";``). - properties (Sequence[~.gcd_jobs.HiveJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.HiveJob.PropertiesEntry]): Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties @@ -351,13 +351,13 @@ class SparkSqlJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains SQL queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1beta2.types.QueryList): A list of queries. 
- script_variables (Sequence[~.gcd_jobs.SparkSqlJob.ScriptVariablesEntry]): + script_variables (Sequence[google.cloud.dataproc_v1beta2.types.SparkSqlJob.ScriptVariablesEntry]): Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET ``name="value";``). - properties (Sequence[~.gcd_jobs.SparkSqlJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkSqlJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the @@ -365,7 +365,7 @@ class SparkSqlJob(proto.Message): jar_file_uris (Sequence[str]): Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -393,16 +393,16 @@ class PigJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains the Pig queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1beta2.types.QueryList): A list of queries. continue_on_failure (bool): Optional. Whether to continue executing queries if a query fails. The default value is ``false``. Setting to ``true`` can be useful when executing independent parallel queries. - script_variables (Sequence[~.gcd_jobs.PigJob.ScriptVariablesEntry]): + script_variables (Sequence[google.cloud.dataproc_v1beta2.types.PigJob.ScriptVariablesEntry]): Optional. Mapping of query variable names to values (equivalent to the Pig command: ``name=[value]``). - properties (Sequence[~.gcd_jobs.PigJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.PigJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties @@ -412,7 +412,7 @@ class PigJob(proto.Message): Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -457,7 +457,7 @@ class SparkRJob(proto.Message): extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Sequence[~.gcd_jobs.SparkRJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.SparkRJob.PropertiesEntry]): Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc @@ -465,7 +465,7 @@ class SparkRJob(proto.Message): set in /etc/spark/conf/spark-defaults.conf and classes in user code. - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -494,7 +494,7 @@ class PrestoJob(proto.Message): query_file_uri (str): The HCFS URI of the script that contains SQL queries. - query_list (~.gcd_jobs.QueryList): + query_list (google.cloud.dataproc_v1beta2.types.QueryList): A list of queries. continue_on_failure (bool): Optional. Whether to continue executing queries if a query @@ -507,12 +507,12 @@ class PrestoJob(proto.Message): client_tags (Sequence[str]): Optional. 
Presto client tags to attach to this query - properties (Sequence[~.gcd_jobs.PrestoJob.PropertiesEntry]): + properties (Sequence[google.cloud.dataproc_v1beta2.types.PrestoJob.PropertiesEntry]): Optional. A mapping of property names to values. Used to set Presto `session properties `__ Equivalent to using the --session flag in the Presto CLI - logging_config (~.gcd_jobs.LoggingConfig): + logging_config (google.cloud.dataproc_v1beta2.types.LoggingConfig): Optional. The runtime log config for job execution. """ @@ -555,17 +555,17 @@ class JobStatus(proto.Message): r"""Dataproc job status. Attributes: - state (~.gcd_jobs.JobStatus.State): + state (google.cloud.dataproc_v1beta2.types.JobStatus.State): Output only. A state message specifying the overall job state. details (str): Output only. Optional Job state details, such as an error description if the state is ERROR. - state_start_time (~.timestamp.Timestamp): + state_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time when this state was entered. - substate (~.gcd_jobs.JobStatus.Substate): + substate (google.cloud.dataproc_v1beta2.types.JobStatus.Substate): Output only. Additional state information, which includes status reported by the agent. """ @@ -635,7 +635,7 @@ class YarnApplication(proto.Message): Attributes: name (str): Output only. The application name. - state (~.gcd_jobs.YarnApplication.State): + state (google.cloud.dataproc_v1beta2.types.YarnApplication.State): Output only. The application state. progress (float): Output only. The numerical progress of the @@ -676,37 +676,37 @@ class Job(proto.Message): r"""A Dataproc job resource. Attributes: - reference (~.gcd_jobs.JobReference): + reference (google.cloud.dataproc_v1beta2.types.JobReference): Optional. The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id. - placement (~.gcd_jobs.JobPlacement): + placement (google.cloud.dataproc_v1beta2.types.JobPlacement): Required. Job information, including how, when, and where to run the job. - hadoop_job (~.gcd_jobs.HadoopJob): + hadoop_job (google.cloud.dataproc_v1beta2.types.HadoopJob): Optional. Job is a Hadoop job. - spark_job (~.gcd_jobs.SparkJob): + spark_job (google.cloud.dataproc_v1beta2.types.SparkJob): Optional. Job is a Spark job. - pyspark_job (~.gcd_jobs.PySparkJob): + pyspark_job (google.cloud.dataproc_v1beta2.types.PySparkJob): Optional. Job is a PySpark job. - hive_job (~.gcd_jobs.HiveJob): + hive_job (google.cloud.dataproc_v1beta2.types.HiveJob): Optional. Job is a Hive job. - pig_job (~.gcd_jobs.PigJob): + pig_job (google.cloud.dataproc_v1beta2.types.PigJob): Optional. Job is a Pig job. - spark_r_job (~.gcd_jobs.SparkRJob): + spark_r_job (google.cloud.dataproc_v1beta2.types.SparkRJob): Optional. Job is a SparkR job. - spark_sql_job (~.gcd_jobs.SparkSqlJob): + spark_sql_job (google.cloud.dataproc_v1beta2.types.SparkSqlJob): Optional. Job is a SparkSql job. - presto_job (~.gcd_jobs.PrestoJob): + presto_job (google.cloud.dataproc_v1beta2.types.PrestoJob): Optional. Job is a Presto job. - status (~.gcd_jobs.JobStatus): + status (google.cloud.dataproc_v1beta2.types.JobStatus): Output only. The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields. - status_history (Sequence[~.gcd_jobs.JobStatus]): + status_history (Sequence[google.cloud.dataproc_v1beta2.types.JobStatus]): Output only. 
The previous job status. - yarn_applications (Sequence[~.gcd_jobs.YarnApplication]): + yarn_applications (Sequence[google.cloud.dataproc_v1beta2.types.YarnApplication]): Output only. The collection of YARN applications spun up by this job. @@ -725,7 +725,7 @@ class Job(proto.Message): control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as ``driver_output_uri``. - labels (Sequence[~.gcd_jobs.Job.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1beta2.types.Job.LabelsEntry]): Optional. The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -733,7 +733,7 @@ class Job(proto.Message): 1 to 63 characters, and must conform to `RFC 1035 `__. No more than 32 labels can be associated with a job. - scheduling (~.gcd_jobs.JobScheduling): + scheduling (google.cloud.dataproc_v1beta2.types.JobScheduling): Optional. Job scheduling configuration. job_uuid (str): Output only. A UUID that uniquely identifies a job within @@ -829,11 +829,11 @@ class JobMetadata(proto.Message): Attributes: job_id (str): Output only. The job id. - status (~.gcd_jobs.JobStatus): + status (google.cloud.dataproc_v1beta2.types.JobStatus): Output only. Most recent job status. operation_type (str): Output only. Operation type. - start_time (~.timestamp.Timestamp): + start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Job submission time. """ @@ -856,7 +856,7 @@ class SubmitJobRequest(proto.Message): region (str): Required. The Dataproc region in which to handle the request. - job (~.gcd_jobs.Job): + job (google.cloud.dataproc_v1beta2.types.Job): Required. The job resource. request_id (str): Optional. A unique id used to identify the request. If the @@ -927,7 +927,7 @@ class ListJobsRequest(proto.Message): Optional. If set, the returned jobs list includes only jobs that were submitted to the named cluster. - job_state_matcher (~.gcd_jobs.ListJobsRequest.JobStateMatcher): + job_state_matcher (google.cloud.dataproc_v1beta2.types.ListJobsRequest.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. (default = match ALL jobs). @@ -985,9 +985,9 @@ class UpdateJobRequest(proto.Message): handle the request. job_id (str): Required. The job ID. - job (~.gcd_jobs.Job): + job (google.cloud.dataproc_v1beta2.types.Job): Required. The changes to the job. - update_mask (~.field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. Specifies the path, relative to Job, of the field to update. For example, to update the labels of a Job the update_mask parameter would be specified as labels, and the @@ -1010,7 +1010,7 @@ class ListJobsResponse(proto.Message): r"""A list of jobs in a project. Attributes: - jobs (Sequence[~.gcd_jobs.Job]): + jobs (Sequence[google.cloud.dataproc_v1beta2.types.Job]): Output only. Jobs list. next_page_token (str): Optional. This token is included in the response if there diff --git a/google/cloud/dataproc_v1beta2/types/operations.py b/google/cloud/dataproc_v1beta2/types/operations.py index 13baf0df..469cc92f 100644 --- a/google/cloud/dataproc_v1beta2/types/operations.py +++ b/google/cloud/dataproc_v1beta2/types/operations.py @@ -31,7 +31,7 @@ class ClusterOperationStatus(proto.Message): r"""The status of the operation. Attributes: - state (~.operations.ClusterOperationStatus.State): + state (google.cloud.dataproc_v1beta2.types.ClusterOperationStatus.State): Output only. 
A message containing the operation state. inner_state (str): @@ -40,7 +40,7 @@ class ClusterOperationStatus(proto.Message): details (str): Output only. A message containing any operation metadata details. - state_start_time (~.timestamp.Timestamp): + state_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time this state was entered. """ @@ -71,15 +71,15 @@ class ClusterOperationMetadata(proto.Message): operation. cluster_uuid (str): Output only. Cluster UUID for the operation. - status (~.operations.ClusterOperationStatus): + status (google.cloud.dataproc_v1beta2.types.ClusterOperationStatus): Output only. Current operation status. - status_history (Sequence[~.operations.ClusterOperationStatus]): + status_history (Sequence[google.cloud.dataproc_v1beta2.types.ClusterOperationStatus]): Output only. The previous operation status. operation_type (str): Output only. The operation type. description (str): Output only. Short description of operation. - labels (Sequence[~.operations.ClusterOperationMetadata.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1beta2.types.ClusterOperationMetadata.LabelsEntry]): Output only. Labels associated with the operation warnings (Sequence[str]): diff --git a/google/cloud/dataproc_v1beta2/types/workflow_templates.py b/google/cloud/dataproc_v1beta2/types/workflow_templates.py index 22b8d11d..6704df9e 100644 --- a/google/cloud/dataproc_v1beta2/types/workflow_templates.py +++ b/google/cloud/dataproc_v1beta2/types/workflow_templates.py @@ -89,12 +89,12 @@ class WorkflowTemplate(proto.Message): ``version`` field filled in with the current server version. The user updates other fields in the template, then returns it as part of the ``UpdateWorkflowTemplate`` request. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time template was created. - update_time (~.timestamp.Timestamp): + update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time template was last updated. - labels (Sequence[~.workflow_templates.WorkflowTemplate.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowTemplate.LabelsEntry]): Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. @@ -108,18 +108,18 @@ class WorkflowTemplate(proto.Message): 1035 `__. No more than 32 labels can be associated with a template. - placement (~.workflow_templates.WorkflowTemplatePlacement): + placement (google.cloud.dataproc_v1beta2.types.WorkflowTemplatePlacement): Required. WorkflowTemplate scheduling information. - jobs (Sequence[~.workflow_templates.OrderedJob]): + jobs (Sequence[google.cloud.dataproc_v1beta2.types.OrderedJob]): Required. The Directed Acyclic Graph of Jobs to submit. - parameters (Sequence[~.workflow_templates.TemplateParameter]): + parameters (Sequence[google.cloud.dataproc_v1beta2.types.TemplateParameter]): Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. - dag_timeout (~.duration.Duration): + dag_timeout (google.protobuf.duration_pb2.Duration): Optional. Timeout duration for the DAG of jobs. You can use "s", "m", "h", and "d" suffixes for second, minute, hour, and day duration values, respectively. The timeout duration @@ -163,10 +163,10 @@ class WorkflowTemplatePlacement(proto.Message): Either ``managed_cluster`` or ``cluster_selector`` is required. 
Attributes: - managed_cluster (~.workflow_templates.ManagedCluster): + managed_cluster (google.cloud.dataproc_v1beta2.types.ManagedCluster): Optional. A cluster that is managed by the workflow. - cluster_selector (~.workflow_templates.ClusterSelector): + cluster_selector (google.cloud.dataproc_v1beta2.types.ClusterSelector): Optional. A selector that chooses target cluster for jobs based on metadata. @@ -196,9 +196,9 @@ class ManagedCluster(proto.Message): begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. - config (~.clusters.ClusterConfig): + config (google.cloud.dataproc_v1beta2.types.ClusterConfig): Required. The cluster configuration. - labels (Sequence[~.workflow_templates.ManagedCluster.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1beta2.types.ManagedCluster.LabelsEntry]): Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and @@ -231,7 +231,7 @@ class ClusterSelector(proto.Message): selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used. - cluster_labels (Sequence[~.workflow_templates.ClusterSelector.ClusterLabelsEntry]): + cluster_labels (Sequence[google.cloud.dataproc_v1beta2.types.ClusterSelector.ClusterLabelsEntry]): Required. The cluster labels. Cluster must have all labels to match. """ @@ -258,23 +258,23 @@ class OrderedJob(proto.Message): underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. - hadoop_job (~.gcd_jobs.HadoopJob): + hadoop_job (google.cloud.dataproc_v1beta2.types.HadoopJob): Optional. Job is a Hadoop job. - spark_job (~.gcd_jobs.SparkJob): + spark_job (google.cloud.dataproc_v1beta2.types.SparkJob): Optional. Job is a Spark job. - pyspark_job (~.gcd_jobs.PySparkJob): + pyspark_job (google.cloud.dataproc_v1beta2.types.PySparkJob): Optional. Job is a PySpark job. - hive_job (~.gcd_jobs.HiveJob): + hive_job (google.cloud.dataproc_v1beta2.types.HiveJob): Optional. Job is a Hive job. - pig_job (~.gcd_jobs.PigJob): + pig_job (google.cloud.dataproc_v1beta2.types.PigJob): Optional. Job is a Pig job. - spark_r_job (~.gcd_jobs.SparkRJob): + spark_r_job (google.cloud.dataproc_v1beta2.types.SparkRJob): Optional. Job is a SparkR job. - spark_sql_job (~.gcd_jobs.SparkSqlJob): + spark_sql_job (google.cloud.dataproc_v1beta2.types.SparkSqlJob): Optional. Job is a SparkSql job. - presto_job (~.gcd_jobs.PrestoJob): + presto_job (google.cloud.dataproc_v1beta2.types.PrestoJob): Optional. Job is a Presto job. - labels (Sequence[~.workflow_templates.OrderedJob.LabelsEntry]): + labels (Sequence[google.cloud.dataproc_v1beta2.types.OrderedJob.LabelsEntry]): Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and @@ -286,7 +286,7 @@ class OrderedJob(proto.Message): [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 32 labels can be associated with a given job. - scheduling (~.gcd_jobs.JobScheduling): + scheduling (google.cloud.dataproc_v1beta2.types.JobScheduling): Optional. Job scheduling configuration. prerequisite_step_ids (Sequence[str]): Optional. The optional list of prerequisite job step_ids. If @@ -409,7 +409,7 @@ class TemplateParameter(proto.Message): description (str): Optional. Brief description of the parameter. Must not exceed 1024 characters. - validation (~.workflow_templates.ParameterValidation): + validation (google.cloud.dataproc_v1beta2.types.ParameterValidation): Optional. 
Validation rules to be applied to this parameter's value. """ @@ -427,9 +427,9 @@ class ParameterValidation(proto.Message): r"""Configuration for parameter validation. Attributes: - regex (~.workflow_templates.RegexValidation): + regex (google.cloud.dataproc_v1beta2.types.RegexValidation): Validation based on regular expressions. - values (~.workflow_templates.ValueValidation): + values (google.cloud.dataproc_v1beta2.types.ValueValidation): Validation based on a list of allowed values. """ @@ -487,39 +487,39 @@ class WorkflowMetadata(proto.Message): version (int): Output only. The version of template at the time of workflow instantiation. - create_cluster (~.workflow_templates.ClusterOperation): + create_cluster (google.cloud.dataproc_v1beta2.types.ClusterOperation): Output only. The create cluster operation metadata. - graph (~.workflow_templates.WorkflowGraph): + graph (google.cloud.dataproc_v1beta2.types.WorkflowGraph): Output only. The workflow graph. - delete_cluster (~.workflow_templates.ClusterOperation): + delete_cluster (google.cloud.dataproc_v1beta2.types.ClusterOperation): Output only. The delete cluster operation metadata. - state (~.workflow_templates.WorkflowMetadata.State): + state (google.cloud.dataproc_v1beta2.types.WorkflowMetadata.State): Output only. The workflow state. cluster_name (str): Output only. The name of the target cluster. - parameters (Sequence[~.workflow_templates.WorkflowMetadata.ParametersEntry]): + parameters (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowMetadata.ParametersEntry]): Map from parameter names to values that were used for those parameters. - start_time (~.timestamp.Timestamp): + start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Workflow start time. - end_time (~.timestamp.Timestamp): + end_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Workflow end time. cluster_uuid (str): Output only. The UUID of target cluster. - dag_timeout (~.duration.Duration): + dag_timeout (google.protobuf.duration_pb2.Duration): Output only. The timeout duration for the DAG of jobs. Minimum timeout duration is 10 minutes and maximum is 24 hours, expressed as a [google.protobuf.Duration][https://developers.google.com/protocol-buffers/docs/proto3#json_mapping]. For example, "1800" = 1800 seconds/30 minutes duration. - dag_start_time (~.timestamp.Timestamp): + dag_start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. DAG start time, which is only set for workflows with [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] when the DAG begins. - dag_end_time (~.timestamp.Timestamp): + dag_end_time (google.protobuf.timestamp_pb2.Timestamp): Output only. DAG end time, which is only set for workflows with [dag_timeout][google.cloud.dataproc.v1beta2.WorkflowMetadata.dag_timeout] @@ -585,7 +585,7 @@ class WorkflowGraph(proto.Message): r"""The workflow graph. Attributes: - nodes (Sequence[~.workflow_templates.WorkflowNode]): + nodes (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowNode]): Output only. The workflow nodes. """ @@ -603,7 +603,7 @@ class WorkflowNode(proto.Message): job_id (str): Output only. The job id; populated after the node enters RUNNING state. - state (~.workflow_templates.WorkflowNode.NodeState): + state (google.cloud.dataproc_v1beta2.types.WorkflowNode.NodeState): Output only. The node state. error (str): Output only. The error detail. 
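The docstring changes across these v1beta2 type modules all follow one pattern: the relative ``~.module.Type`` cross-references are replaced with fully qualified ``google.cloud.dataproc_v1beta2.types.*`` paths. As a minimal sketch (not part of the patch, assuming only the public surface already exported by the package ``__init__``), those paths correspond to names that are importable from the library:

    from google.cloud.dataproc_v1beta2 import types

    # The rewritten docstring references spell out the public types module,
    # e.g. google.cloud.dataproc_v1beta2.types.WorkflowTemplate and
    # google.cloud.dataproc_v1beta2.types.ClusterConfig.
    template = types.WorkflowTemplate(id="example-template")  # illustrative id, not from the patch
    config = types.ClusterConfig()
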
@@ -645,7 +645,7 @@ class CreateWorkflowTemplateRequest(proto.Message): - For ``projects.locations.workflowTemplates.create``, the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` - template (~.workflow_templates.WorkflowTemplate): + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): Required. The Dataproc workflow template to create. """ @@ -721,7 +721,7 @@ class InstantiateWorkflowTemplateRequest(proto.Message): The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. - parameters (Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]): + parameters (Sequence[google.cloud.dataproc_v1beta2.types.InstantiateWorkflowTemplateRequest.ParametersEntry]): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 100 characters. @@ -756,7 +756,7 @@ class InstantiateInlineWorkflowTemplateRequest(proto.Message): ``projects.locations.workflowTemplates.instantiateinline``, the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` - template (~.workflow_templates.WorkflowTemplate): + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): Required. The workflow template to instantiate. instance_id (str): @@ -787,7 +787,7 @@ class UpdateWorkflowTemplateRequest(proto.Message): r"""A request to update a workflow template. Attributes: - template (~.workflow_templates.WorkflowTemplate): + template (google.cloud.dataproc_v1beta2.types.WorkflowTemplate): Required. The updated workflow template. The ``template.version`` field must match the current @@ -834,7 +834,7 @@ class ListWorkflowTemplatesResponse(proto.Message): project. Attributes: - templates (Sequence[~.workflow_templates.WorkflowTemplate]): + templates (Sequence[google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Output only. WorkflowTemplates list. next_page_token (str): Output only. 
This token is included in the response if there diff --git a/scripts/fixup_dataproc_v1_keywords.py b/scripts/fixup_dataproc_v1_keywords.py index 92228e53..3c2a901b 100644 --- a/scripts/fixup_dataproc_v1_keywords.py +++ b/scripts/fixup_dataproc_v1_keywords.py @@ -42,7 +42,7 @@ class dataprocCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'cancel_job': ('project_id', 'region', 'job_id', ), - 'create_autoscaling_policy': ('parent', 'policy', ), + 'create_autoscaling_policy': ('parent', 'policy_', ), 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), 'create_workflow_template': ('parent', 'template', ), 'delete_autoscaling_policy': ('name', ), @@ -62,7 +62,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), 'submit_job': ('project_id', 'region', 'job', 'request_id', ), 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), - 'update_autoscaling_policy': ('policy', ), + 'update_autoscaling_policy': ('policy_', ), 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), 'update_workflow_template': ('template', ), diff --git a/scripts/fixup_dataproc_v1beta2_keywords.py b/scripts/fixup_dataproc_v1beta2_keywords.py index 11f2e445..ca3056b5 100644 --- a/scripts/fixup_dataproc_v1beta2_keywords.py +++ b/scripts/fixup_dataproc_v1beta2_keywords.py @@ -42,7 +42,7 @@ class dataprocCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'cancel_job': ('project_id', 'region', 'job_id', ), - 'create_autoscaling_policy': ('parent', 'policy', ), + 'create_autoscaling_policy': ('parent', 'policy_', ), 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), 'create_workflow_template': ('parent', 'template', ), 'delete_autoscaling_policy': ('name', ), @@ -62,7 +62,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), 'submit_job': ('project_id', 'region', 'job', 'request_id', ), 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), - 'update_autoscaling_policy': ('policy', ), + 'update_autoscaling_policy': ('policy_', ), 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), 'update_workflow_template': ('template', ), diff --git a/synth.metadata b/synth.metadata index 2dec65f8..c5ca6a7e 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,30 +3,30 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-dataproc.git", - "sha": "1b1164217295fde7a25df720d4e79d975a0ce67a" + "remote": "git@github.com:googleapis/python-dataproc.git", + "sha": "818f840fb35163a2850cfc505e2698e1eb2c0f94" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "69697504d9eba1d064820c3085b4750767be6d08", - "internalRef": "348952930" + "sha": "d78dc2e0cb627d3e48e910abf4b991264affcb56", + "internalRef": "366838867" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4501974ad08b5d693311457e2ea4ce845676e329" + "sha": 
"bf04d88353da12e9326236164dcb3150dfbfcff8" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4501974ad08b5d693311457e2ea4ce845676e329" + "sha": "bf04d88353da12e9326236164dcb3150dfbfcff8" } } ], @@ -49,195 +49,5 @@ "generator": "bazel" } } - ], - "generatedFiles": [ - ".flake8", - ".github/CONTRIBUTING.md", - ".github/ISSUE_TEMPLATE/bug_report.md", - ".github/ISSUE_TEMPLATE/feature_request.md", - ".github/ISSUE_TEMPLATE/support_request.md", - ".github/PULL_REQUEST_TEMPLATE.md", - ".github/header-checker-lint.yml", - ".github/release-please.yml", - ".github/snippet-bot.yml", - ".gitignore", - ".kokoro/build.sh", - ".kokoro/continuous/common.cfg", - ".kokoro/continuous/continuous.cfg", - ".kokoro/docker/docs/Dockerfile", - ".kokoro/docker/docs/fetch_gpg_keys.sh", - ".kokoro/docs/common.cfg", - ".kokoro/docs/docs-presubmit.cfg", - ".kokoro/docs/docs.cfg", - ".kokoro/populate-secrets.sh", - ".kokoro/presubmit/common.cfg", - ".kokoro/presubmit/presubmit.cfg", - ".kokoro/publish-docs.sh", - ".kokoro/release.sh", - ".kokoro/release/common.cfg", - ".kokoro/release/release.cfg", - ".kokoro/samples/lint/common.cfg", - ".kokoro/samples/lint/continuous.cfg", - ".kokoro/samples/lint/periodic.cfg", - ".kokoro/samples/lint/presubmit.cfg", - ".kokoro/samples/python3.6/common.cfg", - ".kokoro/samples/python3.6/continuous.cfg", - ".kokoro/samples/python3.6/periodic-head.cfg", - ".kokoro/samples/python3.6/periodic.cfg", - ".kokoro/samples/python3.6/presubmit.cfg", - ".kokoro/samples/python3.7/common.cfg", - ".kokoro/samples/python3.7/continuous.cfg", - ".kokoro/samples/python3.7/periodic-head.cfg", - ".kokoro/samples/python3.7/periodic.cfg", - ".kokoro/samples/python3.7/presubmit.cfg", - ".kokoro/samples/python3.8/common.cfg", - ".kokoro/samples/python3.8/continuous.cfg", - ".kokoro/samples/python3.8/periodic-head.cfg", - ".kokoro/samples/python3.8/periodic.cfg", - ".kokoro/samples/python3.8/presubmit.cfg", - ".kokoro/test-samples-against-head.sh", - ".kokoro/test-samples-impl.sh", - ".kokoro/test-samples.sh", - ".kokoro/trampoline.sh", - ".kokoro/trampoline_v2.sh", - ".pre-commit-config.yaml", - ".trampolinerc", - "CODE_OF_CONDUCT.md", - "CONTRIBUTING.rst", - "LICENSE", - "MANIFEST.in", - "docs/_static/custom.css", - "docs/_templates/layout.html", - "docs/conf.py", - "docs/dataproc_v1/services.rst", - "docs/dataproc_v1/types.rst", - "docs/dataproc_v1beta2/services.rst", - "docs/dataproc_v1beta2/types.rst", - "docs/multiprocessing.rst", - "google/cloud/dataproc/__init__.py", - "google/cloud/dataproc/py.typed", - "google/cloud/dataproc_v1/__init__.py", - "google/cloud/dataproc_v1/proto/autoscaling_policies.proto", - "google/cloud/dataproc_v1/proto/clusters.proto", - "google/cloud/dataproc_v1/proto/jobs.proto", - "google/cloud/dataproc_v1/proto/operations.proto", - "google/cloud/dataproc_v1/proto/shared.proto", - "google/cloud/dataproc_v1/proto/workflow_templates.proto", - "google/cloud/dataproc_v1/py.typed", - "google/cloud/dataproc_v1/services/__init__.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/__init__.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py", - 
"google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py", - "google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1/services/cluster_controller/__init__.py", - "google/cloud/dataproc_v1/services/cluster_controller/async_client.py", - "google/cloud/dataproc_v1/services/cluster_controller/client.py", - "google/cloud/dataproc_v1/services/cluster_controller/pagers.py", - "google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py", - "google/cloud/dataproc_v1/services/cluster_controller/transports/base.py", - "google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py", - "google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1/services/job_controller/__init__.py", - "google/cloud/dataproc_v1/services/job_controller/async_client.py", - "google/cloud/dataproc_v1/services/job_controller/client.py", - "google/cloud/dataproc_v1/services/job_controller/pagers.py", - "google/cloud/dataproc_v1/services/job_controller/transports/__init__.py", - "google/cloud/dataproc_v1/services/job_controller/transports/base.py", - "google/cloud/dataproc_v1/services/job_controller/transports/grpc.py", - "google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1/services/workflow_template_service/__init__.py", - "google/cloud/dataproc_v1/services/workflow_template_service/async_client.py", - "google/cloud/dataproc_v1/services/workflow_template_service/client.py", - "google/cloud/dataproc_v1/services/workflow_template_service/pagers.py", - "google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py", - "google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py", - "google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py", - "google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1/types/__init__.py", - "google/cloud/dataproc_v1/types/autoscaling_policies.py", - "google/cloud/dataproc_v1/types/clusters.py", - "google/cloud/dataproc_v1/types/jobs.py", - "google/cloud/dataproc_v1/types/operations.py", - "google/cloud/dataproc_v1/types/shared.py", - "google/cloud/dataproc_v1/types/workflow_templates.py", - "google/cloud/dataproc_v1beta2/__init__.py", - "google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto", - "google/cloud/dataproc_v1beta2/proto/clusters.proto", - "google/cloud/dataproc_v1beta2/proto/jobs.proto", - "google/cloud/dataproc_v1beta2/proto/operations.proto", - "google/cloud/dataproc_v1beta2/proto/shared.proto", - "google/cloud/dataproc_v1beta2/proto/workflow_templates.proto", - "google/cloud/dataproc_v1beta2/py.typed", - "google/cloud/dataproc_v1beta2/services/__init__.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py", - "google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py", - 
"google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/client.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py", - "google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1beta2/services/job_controller/__init__.py", - "google/cloud/dataproc_v1beta2/services/job_controller/async_client.py", - "google/cloud/dataproc_v1beta2/services/job_controller/client.py", - "google/cloud/dataproc_v1beta2/services/job_controller/pagers.py", - "google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py", - "google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py", - "google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py", - "google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py", - "google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py", - "google/cloud/dataproc_v1beta2/types/__init__.py", - "google/cloud/dataproc_v1beta2/types/autoscaling_policies.py", - "google/cloud/dataproc_v1beta2/types/clusters.py", - "google/cloud/dataproc_v1beta2/types/jobs.py", - "google/cloud/dataproc_v1beta2/types/operations.py", - "google/cloud/dataproc_v1beta2/types/shared.py", - "google/cloud/dataproc_v1beta2/types/workflow_templates.py", - "mypy.ini", - "noxfile.py", - "renovate.json", - "samples/AUTHORING_GUIDE.md", - "samples/CONTRIBUTING.md", - "samples/snippets/noxfile.py", - "scripts/decrypt-secrets.sh", - "scripts/fixup_dataproc_v1_keywords.py", - "scripts/fixup_dataproc_v1beta2_keywords.py", - "scripts/readme-gen/readme_gen.py", - "scripts/readme-gen/templates/README.tmpl.rst", - "scripts/readme-gen/templates/auth.tmpl.rst", - "scripts/readme-gen/templates/auth_api_key.tmpl.rst", - "scripts/readme-gen/templates/install_deps.tmpl.rst", - "scripts/readme-gen/templates/install_portaudio.tmpl.rst", - "setup.cfg", - "testing/.gitignore", - "tests/unit/gapic/dataproc_v1/__init__.py", - "tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py", - "tests/unit/gapic/dataproc_v1/test_cluster_controller.py", - "tests/unit/gapic/dataproc_v1/test_job_controller.py", - "tests/unit/gapic/dataproc_v1/test_workflow_template_service.py", - "tests/unit/gapic/dataproc_v1beta2/__init__.py", - "tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py", - "tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py", - 
"tests/unit/gapic/dataproc_v1beta2/test_job_controller.py", - "tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py" ] } \ No newline at end of file diff --git a/tests/unit/gapic/dataproc_v1/__init__.py b/tests/unit/gapic/dataproc_v1/__init__.py index 8b137891..42ffdf2b 100644 --- a/tests/unit/gapic/dataproc_v1/__init__.py +++ b/tests/unit/gapic/dataproc_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py index 5fabfeb8..10d4f3d2 100644 --- a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py @@ -92,7 +92,25 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( "client_class", - [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient], + [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient,], +) +def test_autoscaling_policy_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", + [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient,], ) def test_autoscaling_policy_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -102,16 +120,21 @@ def test_autoscaling_policy_service_client_from_service_account_file(client_clas factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_autoscaling_policy_service_client_get_transport_class(): transport = AutoscalingPolicyServiceClient.get_transport_class() - assert transport == transports.AutoscalingPolicyServiceGrpcTransport + available_transports = [ + transports.AutoscalingPolicyServiceGrpcTransport, + ] + assert transport in available_transports transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") assert transport == transports.AutoscalingPolicyServiceGrpcTransport @@ -170,7 +193,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, 
client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -186,7 +209,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -202,7 +225,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -230,7 +253,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -291,29 +314,25 @@ def test_autoscaling_policy_service_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
@@ -322,66 +341,53 @@ def test_autoscaling_policy_service_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -411,7 +417,7 @@ def test_autoscaling_policy_service_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -445,7 +451,7 @@ def test_autoscaling_policy_service_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -464,7 +470,7 @@ def test_autoscaling_policy_service_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -518,6 +524,24 @@ def test_create_autoscaling_policy_from_dict(): test_create_autoscaling_policy(request_type=dict) +def test_create_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), "__call__" + ) as call: + client.create_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_create_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -636,7 +660,7 @@ def test_create_autoscaling_policy_flattened(): # using the keyword arguments to the method. 
client.create_autoscaling_policy( parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -646,7 +670,7 @@ def test_create_autoscaling_policy_flattened(): assert args[0].parent == "parent_value" - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") def test_create_autoscaling_policy_flattened_error(): @@ -660,7 +684,7 @@ def test_create_autoscaling_policy_flattened_error(): client.create_autoscaling_policy( autoscaling_policies.CreateAutoscalingPolicyRequest(), parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -684,7 +708,7 @@ async def test_create_autoscaling_policy_flattened_async(): # using the keyword arguments to the method. response = await client.create_autoscaling_policy( parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -694,7 +718,7 @@ async def test_create_autoscaling_policy_flattened_async(): assert args[0].parent == "parent_value" - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") @pytest.mark.asyncio @@ -709,7 +733,7 @@ async def test_create_autoscaling_policy_flattened_error_async(): await client.create_autoscaling_policy( autoscaling_policies.CreateAutoscalingPolicyRequest(), parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -761,6 +785,24 @@ def test_update_autoscaling_policy_from_dict(): test_update_autoscaling_policy(request_type=dict) +def test_update_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), "__call__" + ) as call: + client.update_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_update_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -878,7 +920,7 @@ def test_update_autoscaling_policy_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -886,7 +928,7 @@ def test_update_autoscaling_policy_flattened(): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") def test_update_autoscaling_policy_flattened_error(): @@ -899,7 +941,7 @@ def test_update_autoscaling_policy_flattened_error(): with pytest.raises(ValueError): client.update_autoscaling_policy( autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -922,7 +964,7 @@ async def test_update_autoscaling_policy_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -930,7 +972,7 @@ async def test_update_autoscaling_policy_flattened_async(): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") @pytest.mark.asyncio @@ -944,7 +986,7 @@ async def test_update_autoscaling_policy_flattened_error_async(): with pytest.raises(ValueError): await client.update_autoscaling_policy( autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -996,6 +1038,24 @@ def test_get_autoscaling_policy_from_dict(): test_get_autoscaling_policy(request_type=dict) +def test_get_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), "__call__" + ) as call: + client.get_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_get_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -1217,6 +1277,24 @@ def test_list_autoscaling_policies_from_dict(): test_list_autoscaling_policies(request_type=dict) +def test_list_autoscaling_policies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_autoscaling_policies), "__call__" + ) as call: + client.list_autoscaling_policies() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + @pytest.mark.asyncio async def test_list_autoscaling_policies_async( transport: str = "grpc_asyncio", @@ -1617,6 +1695,24 @@ def test_delete_autoscaling_policy_from_dict(): test_delete_autoscaling_policy(request_type=dict) +def test_delete_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), "__call__" + ) as call: + client.delete_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_delete_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -1955,6 +2051,53 @@ def test_autoscaling_policy_service_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + ], +) +def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_autoscaling_policy_service_host_no_port(): client = AutoscalingPolicyServiceClient( credentials=credentials.AnonymousCredentials(), @@ -1976,7 +2119,7 @@ def test_autoscaling_policy_service_host_with_port(): def test_autoscaling_policy_service_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.AutoscalingPolicyServiceGrpcTransport( @@ -1988,7 +2131,7 @@ def test_autoscaling_policy_service_grpc_transport_channel(): def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( @@ -1999,6 +2142,8 @@ def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -2013,7 +2158,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -2051,6 +2196,8 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -2066,7 +2213,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_cl ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py index 6d36f46b..e12e0109 100644 --- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -97,7 +97,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [ClusterControllerClient, ClusterControllerAsyncClient] + "client_class", [ClusterControllerClient, ClusterControllerAsyncClient,] +) +def test_cluster_controller_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [ClusterControllerClient, ClusterControllerAsyncClient,] ) def test_cluster_controller_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -107,16 +124,21 @@ def test_cluster_controller_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert 
client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_cluster_controller_client_get_transport_class(): transport = ClusterControllerClient.get_transport_class() - assert transport == transports.ClusterControllerGrpcTransport + available_transports = [ + transports.ClusterControllerGrpcTransport, + ] + assert transport in available_transports transport = ClusterControllerClient.get_transport_class("grpc") assert transport == transports.ClusterControllerGrpcTransport @@ -167,7 +189,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -183,7 +205,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -199,7 +221,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -227,7 +249,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -288,29 +310,25 @@ def test_cluster_controller_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
@@ -319,66 +337,53 @@ def test_cluster_controller_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -404,7 +409,7 @@ def test_cluster_controller_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -434,7 +439,7 @@ def test_cluster_controller_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -453,7 +458,7 @@ def test_cluster_controller_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -491,6 +496,22 @@ def test_create_cluster_from_dict(): test_create_cluster(request_type=dict) +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.CreateClusterRequest() + + @pytest.mark.asyncio async def test_create_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.CreateClusterRequest @@ -652,6 +673,22 @@ def test_update_cluster_from_dict(): test_update_cluster(request_type=dict) +def test_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.UpdateClusterRequest() + + @pytest.mark.asyncio async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.UpdateClusterRequest @@ -829,6 +866,22 @@ def test_delete_cluster_from_dict(): test_delete_cluster(request_type=dict) +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DeleteClusterRequest() + + @pytest.mark.asyncio async def test_delete_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.DeleteClusterRequest @@ -999,6 +1052,22 @@ def test_get_cluster_from_dict(): test_get_cluster(request_type=dict) +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.GetClusterRequest() + + @pytest.mark.asyncio async def test_get_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.GetClusterRequest @@ -1173,6 +1242,22 @@ def test_list_clusters_from_dict(): test_list_clusters(request_type=dict) +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.ListClustersRequest() + + @pytest.mark.asyncio async def test_list_clusters_async( transport: str = "grpc_asyncio", request_type=clusters.ListClustersRequest @@ -1451,6 +1536,22 @@ def test_diagnose_cluster_from_dict(): test_diagnose_cluster(request_type=dict) +def test_diagnose_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.diagnose_cluster), "__call__") as call: + client.diagnose_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DiagnoseClusterRequest() + + @pytest.mark.asyncio async def test_diagnose_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.DiagnoseClusterRequest @@ -1749,6 +1850,51 @@ def test_cluster_controller_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, + ], +) +def test_cluster_controller_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_cluster_controller_host_no_port(): client = ClusterControllerClient( credentials=credentials.AnonymousCredentials(), @@ -1770,7 +1916,7 @@ def test_cluster_controller_host_with_port(): def test_cluster_controller_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.ClusterControllerGrpcTransport( @@ -1782,7 +1928,7 @@ def test_cluster_controller_grpc_transport_channel(): def test_cluster_controller_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.ClusterControllerGrpcAsyncIOTransport( @@ -1793,6 +1939,8 @@ def test_cluster_controller_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1807,7 +1955,7 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -1845,6 +1993,8 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ @@ -1860,7 +2010,7 @@ def test_cluster_controller_transport_channel_mtls_with_adc(transport_class): ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1/test_job_controller.py b/tests/unit/gapic/dataproc_v1/test_job_controller.py index 5529d6fa..8ba7c041 100644 --- a/tests/unit/gapic/dataproc_v1/test_job_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_job_controller.py @@ -91,7 +91,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [JobControllerClient, JobControllerAsyncClient] + "client_class", [JobControllerClient, JobControllerAsyncClient,] +) +def test_job_controller_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [JobControllerClient, JobControllerAsyncClient,] ) def test_job_controller_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -101,16 +118,21 @@ def test_job_controller_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_job_controller_client_get_transport_class(): transport = JobControllerClient.get_transport_class() - assert transport == transports.JobControllerGrpcTransport + available_transports = [ + transports.JobControllerGrpcTransport, + ] + assert transport in available_transports transport = JobControllerClient.get_transport_class("grpc") assert transport == transports.JobControllerGrpcTransport @@ -161,7 +183,7 @@ def test_job_controller_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -177,7 +199,7 @@ def test_job_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -193,7 +215,7 @@ def test_job_controller_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -221,7 +243,7 @@ def test_job_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + 
client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -272,29 +294,25 @@ def test_job_controller_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. @@ -303,66 +321,53 @@ def test_job_controller_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -388,7 +393,7 @@ def test_job_controller_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -418,7 +423,7 @@ def test_job_controller_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -437,7 +442,7 @@ def test_job_controller_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -488,6 +493,22 @@ def test_submit_job_from_dict(): test_submit_job(request_type=dict) +def test_submit_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.submit_job), "__call__") as call: + client.submit_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + @pytest.mark.asyncio async def test_submit_job_async( transport: str = "grpc_asyncio", request_type=jobs.SubmitJobRequest @@ -662,6 +683,24 @@ def test_submit_job_as_operation_from_dict(): test_submit_job_as_operation(request_type=dict) +def test_submit_job_as_operation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), "__call__" + ) as call: + client.submit_job_as_operation() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + @pytest.mark.asyncio async def test_submit_job_as_operation_async( transport: str = "grpc_asyncio", request_type=jobs.SubmitJobRequest @@ -842,6 +881,22 @@ def test_get_job_from_dict(): test_get_job(request_type=dict) +def test_get_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_job), "__call__") as call: + client.get_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.GetJobRequest() + + @pytest.mark.asyncio async def test_get_job_async( transport: str = "grpc_asyncio", request_type=jobs.GetJobRequest @@ -1009,6 +1064,22 @@ def test_list_jobs_from_dict(): test_list_jobs(request_type=dict) +def test_list_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_jobs), "__call__") as call: + client.list_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.ListJobsRequest() + + @pytest.mark.asyncio async def test_list_jobs_async( transport: str = "grpc_asyncio", request_type=jobs.ListJobsRequest @@ -1276,6 +1347,22 @@ def test_update_job_from_dict(): test_update_job(request_type=dict) +def test_update_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_job), "__call__") as call: + client.update_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.UpdateJobRequest() + + @pytest.mark.asyncio async def test_update_job_async( transport: str = "grpc_asyncio", request_type=jobs.UpdateJobRequest @@ -1370,6 +1457,22 @@ def test_cancel_job_from_dict(): test_cancel_job(request_type=dict) +def test_cancel_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_job), "__call__") as call: + client.cancel_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.CancelJobRequest() + + @pytest.mark.asyncio async def test_cancel_job_async( transport: str = "grpc_asyncio", request_type=jobs.CancelJobRequest @@ -1532,6 +1635,22 @@ def test_delete_job_from_dict(): test_delete_job(request_type=dict) +def test_delete_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_job), "__call__") as call: + client.delete_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.DeleteJobRequest() + + @pytest.mark.asyncio async def test_delete_job_async( transport: str = "grpc_asyncio", request_type=jobs.DeleteJobRequest @@ -1819,6 +1938,51 @@ def test_job_controller_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, + ], +) +def test_job_controller_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_job_controller_host_no_port(): client = JobControllerClient( credentials=credentials.AnonymousCredentials(), @@ -1840,7 +2004,7 @@ def test_job_controller_host_with_port(): def test_job_controller_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.JobControllerGrpcTransport( @@ -1852,7 +2016,7 @@ def test_job_controller_grpc_transport_channel(): def test_job_controller_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.JobControllerGrpcAsyncIOTransport( @@ -1863,6 +2027,8 @@ def test_job_controller_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1875,7 +2041,7 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -1913,6 +2079,8 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1928,7 +2096,7 @@ def test_job_controller_transport_channel_mtls_with_adc(transport_class): ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py index 2f171a18..379887a9 100644 --- a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -99,7 +99,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient] + "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient,] +) +def test_workflow_template_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient,] ) def test_workflow_template_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -109,16 +126,21 @@ def test_workflow_template_service_client_from_service_account_file(client_class factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_workflow_template_service_client_get_transport_class(): transport = WorkflowTemplateServiceClient.get_transport_class() - assert transport == transports.WorkflowTemplateServiceGrpcTransport + 
available_transports = [ + transports.WorkflowTemplateServiceGrpcTransport, + ] + assert transport in available_transports transport = WorkflowTemplateServiceClient.get_transport_class("grpc") assert transport == transports.WorkflowTemplateServiceGrpcTransport @@ -173,7 +195,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -189,7 +211,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -205,7 +227,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -233,7 +255,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -294,29 +316,25 @@ def test_workflow_template_service_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
@@ -325,66 +343,53 @@ def test_workflow_template_service_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -414,7 +419,7 @@ def test_workflow_template_service_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -448,7 +453,7 @@ def test_workflow_template_service_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -467,7 +472,7 @@ def test_workflow_template_service_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -517,6 +522,24 @@ def test_create_workflow_template_from_dict(): test_create_workflow_template(request_type=dict) +def test_create_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), "__call__" + ) as call: + client.create_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_create_workflow_template_async( transport: str = "grpc_asyncio", @@ -759,6 +782,24 @@ def test_get_workflow_template_from_dict(): test_get_workflow_template(request_type=dict) +def test_get_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), "__call__" + ) as call: + client.get_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_get_workflow_template_async( transport: str = "grpc_asyncio", @@ -979,6 +1020,24 @@ def test_instantiate_workflow_template_from_dict(): test_instantiate_workflow_template(request_type=dict) +def test_instantiate_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), "__call__" + ) as call: + client.instantiate_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_instantiate_workflow_template_async( transport: str = "grpc_asyncio", @@ -1203,6 +1262,24 @@ def test_instantiate_inline_workflow_template_from_dict(): test_instantiate_inline_workflow_template(request_type=dict) +def test_instantiate_inline_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), "__call__" + ) as call: + client.instantiate_inline_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_instantiate_inline_workflow_template_async( transport: str = "grpc_asyncio", @@ -1438,6 +1515,24 @@ def test_update_workflow_template_from_dict(): test_update_workflow_template(request_type=dict) +def test_update_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), "__call__" + ) as call: + client.update_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_update_workflow_template_async( transport: str = "grpc_asyncio", @@ -1673,6 +1768,24 @@ def test_list_workflow_templates_from_dict(): test_list_workflow_templates(request_type=dict) +def test_list_workflow_templates_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), "__call__" + ) as call: + client.list_workflow_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + @pytest.mark.asyncio async def test_list_workflow_templates_async( transport: str = "grpc_asyncio", @@ -2069,6 +2182,24 @@ def test_delete_workflow_template_from_dict(): test_delete_workflow_template(request_type=dict) +def test_delete_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), "__call__" + ) as call: + client.delete_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_delete_workflow_template_async( transport: str = "grpc_asyncio", @@ -2414,6 +2545,53 @@ def test_workflow_template_service_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + ], +) +def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_workflow_template_service_host_no_port(): client = WorkflowTemplateServiceClient( credentials=credentials.AnonymousCredentials(), @@ -2435,7 +2613,7 @@ def test_workflow_template_service_host_with_port(): def test_workflow_template_service_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.WorkflowTemplateServiceGrpcTransport( @@ -2447,7 +2625,7 @@ def test_workflow_template_service_grpc_transport_channel(): def test_workflow_template_service_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( @@ -2458,6 +2636,8 @@ def test_workflow_template_service_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ @@ -2472,7 +2652,7 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -2510,6 +2690,8 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -2525,7 +2707,7 @@ def test_workflow_template_service_transport_channel_mtls_with_adc(transport_cla ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/__init__.py b/tests/unit/gapic/dataproc_v1beta2/__init__.py index 8b137891..42ffdf2b 100644 --- a/tests/unit/gapic/dataproc_v1beta2/__init__.py +++ b/tests/unit/gapic/dataproc_v1beta2/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py index 1f24d213..87752c94 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py @@ -92,7 +92,25 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( "client_class", - [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient], + [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient,], +) +def test_autoscaling_policy_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", + [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient,], ) def test_autoscaling_policy_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -102,16 +120,21 @@ def test_autoscaling_policy_service_client_from_service_account_file(client_clas factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_autoscaling_policy_service_client_get_transport_class(): transport = AutoscalingPolicyServiceClient.get_transport_class() - assert transport == transports.AutoscalingPolicyServiceGrpcTransport + available_transports = [ + transports.AutoscalingPolicyServiceGrpcTransport, + ] + assert transport in available_transports transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") assert transport == transports.AutoscalingPolicyServiceGrpcTransport @@ -170,7 +193,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -186,7 +209,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -202,7 +225,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -230,7 +253,7 @@ def test_autoscaling_policy_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -291,29 +314,25 @@ def 
test_autoscaling_policy_service_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. @@ -322,66 +341,53 @@ def test_autoscaling_policy_service_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -411,7 +417,7 @@ def test_autoscaling_policy_service_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -445,7 +451,7 @@ def test_autoscaling_policy_service_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -464,7 +470,7 @@ def test_autoscaling_policy_service_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -518,6 +524,24 @@ def test_create_autoscaling_policy_from_dict(): test_create_autoscaling_policy(request_type=dict) +def test_create_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_autoscaling_policy), "__call__" + ) as call: + client.create_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_create_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -636,7 +660,7 @@ def test_create_autoscaling_policy_flattened(): # using the keyword arguments to the method. 
client.create_autoscaling_policy( parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -646,7 +670,7 @@ def test_create_autoscaling_policy_flattened(): assert args[0].parent == "parent_value" - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") def test_create_autoscaling_policy_flattened_error(): @@ -660,7 +684,7 @@ def test_create_autoscaling_policy_flattened_error(): client.create_autoscaling_policy( autoscaling_policies.CreateAutoscalingPolicyRequest(), parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -684,7 +708,7 @@ async def test_create_autoscaling_policy_flattened_async(): # using the keyword arguments to the method. response = await client.create_autoscaling_policy( parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -694,7 +718,7 @@ async def test_create_autoscaling_policy_flattened_async(): assert args[0].parent == "parent_value" - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") @pytest.mark.asyncio @@ -709,7 +733,7 @@ async def test_create_autoscaling_policy_flattened_error_async(): await client.create_autoscaling_policy( autoscaling_policies.CreateAutoscalingPolicyRequest(), parent="parent_value", - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -761,6 +785,24 @@ def test_update_autoscaling_policy_from_dict(): test_update_autoscaling_policy(request_type=dict) +def test_update_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_autoscaling_policy), "__call__" + ) as call: + client.update_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_update_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -878,7 +920,7 @@ def test_update_autoscaling_policy_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -886,7 +928,7 @@ def test_update_autoscaling_policy_flattened(): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") def test_update_autoscaling_policy_flattened_error(): @@ -899,7 +941,7 @@ def test_update_autoscaling_policy_flattened_error(): with pytest.raises(ValueError): client.update_autoscaling_policy( autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -922,7 +964,7 @@ async def test_update_autoscaling_policy_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_autoscaling_policy( - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -930,7 +972,7 @@ async def test_update_autoscaling_policy_flattened_async(): assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") @pytest.mark.asyncio @@ -944,7 +986,7 @@ async def test_update_autoscaling_policy_flattened_error_async(): with pytest.raises(ValueError): await client.update_autoscaling_policy( autoscaling_policies.UpdateAutoscalingPolicyRequest(), - policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -996,6 +1038,24 @@ def test_get_autoscaling_policy_from_dict(): test_get_autoscaling_policy(request_type=dict) +def test_get_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_autoscaling_policy), "__call__" + ) as call: + client.get_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_get_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -1217,6 +1277,24 @@ def test_list_autoscaling_policies_from_dict(): test_list_autoscaling_policies(request_type=dict) +def test_list_autoscaling_policies_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_autoscaling_policies), "__call__" + ) as call: + client.list_autoscaling_policies() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + @pytest.mark.asyncio async def test_list_autoscaling_policies_async( transport: str = "grpc_asyncio", @@ -1617,6 +1695,24 @@ def test_delete_autoscaling_policy_from_dict(): test_delete_autoscaling_policy(request_type=dict) +def test_delete_autoscaling_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_autoscaling_policy), "__call__" + ) as call: + client.delete_autoscaling_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + @pytest.mark.asyncio async def test_delete_autoscaling_policy_async( transport: str = "grpc_asyncio", @@ -1955,6 +2051,53 @@ def test_autoscaling_policy_service_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.AutoscalingPolicyServiceGrpcTransport, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + ], +) +def test_autoscaling_policy_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_autoscaling_policy_service_host_no_port(): client = AutoscalingPolicyServiceClient( credentials=credentials.AnonymousCredentials(), @@ -1976,7 +2119,7 @@ def test_autoscaling_policy_service_host_with_port(): def test_autoscaling_policy_service_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.AutoscalingPolicyServiceGrpcTransport( @@ -1988,7 +2131,7 @@ def test_autoscaling_policy_service_grpc_transport_channel(): def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( @@ -1999,6 +2142,8 @@ def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -2013,7 +2158,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -2051,6 +2196,8 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_client_cert_sour assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -2066,7 +2213,7 @@ def test_autoscaling_policy_service_transport_channel_mtls_with_adc(transport_cl ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py index 863598c8..91f7f5a7 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py @@ -99,7 +99,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [ClusterControllerClient, ClusterControllerAsyncClient] + "client_class", [ClusterControllerClient, ClusterControllerAsyncClient,] +) +def test_cluster_controller_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [ClusterControllerClient, ClusterControllerAsyncClient,] ) def test_cluster_controller_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -109,16 +126,21 @@ def test_cluster_controller_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = 
client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_cluster_controller_client_get_transport_class(): transport = ClusterControllerClient.get_transport_class() - assert transport == transports.ClusterControllerGrpcTransport + available_transports = [ + transports.ClusterControllerGrpcTransport, + ] + assert transport in available_transports transport = ClusterControllerClient.get_transport_class("grpc") assert transport == transports.ClusterControllerGrpcTransport @@ -169,7 +191,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -185,7 +207,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -201,7 +223,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -229,7 +251,7 @@ def test_cluster_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -290,29 +312,25 @@ def test_cluster_controller_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
@@ -321,66 +339,53 @@ def test_cluster_controller_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -406,7 +411,7 @@ def test_cluster_controller_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -436,7 +441,7 @@ def test_cluster_controller_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -455,7 +460,7 @@ def test_cluster_controller_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -493,6 +498,22 @@ def test_create_cluster_from_dict(): test_create_cluster(request_type=dict) +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.CreateClusterRequest() + + @pytest.mark.asyncio async def test_create_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.CreateClusterRequest @@ -654,6 +675,22 @@ def test_update_cluster_from_dict(): test_update_cluster(request_type=dict) +def test_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.UpdateClusterRequest() + + @pytest.mark.asyncio async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.UpdateClusterRequest @@ -831,6 +868,22 @@ def test_delete_cluster_from_dict(): test_delete_cluster(request_type=dict) +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DeleteClusterRequest() + + @pytest.mark.asyncio async def test_delete_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.DeleteClusterRequest @@ -1001,6 +1054,22 @@ def test_get_cluster_from_dict(): test_get_cluster(request_type=dict) +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.GetClusterRequest() + + @pytest.mark.asyncio async def test_get_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.GetClusterRequest @@ -1175,6 +1244,22 @@ def test_list_clusters_from_dict(): test_list_clusters(request_type=dict) +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.ListClustersRequest() + + @pytest.mark.asyncio async def test_list_clusters_async( transport: str = "grpc_asyncio", request_type=clusters.ListClustersRequest @@ -1453,6 +1538,22 @@ def test_diagnose_cluster_from_dict(): test_diagnose_cluster(request_type=dict) +def test_diagnose_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.diagnose_cluster), "__call__") as call: + client.diagnose_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DiagnoseClusterRequest() + + @pytest.mark.asyncio async def test_diagnose_cluster_async( transport: str = "grpc_asyncio", request_type=clusters.DiagnoseClusterRequest @@ -1751,6 +1852,51 @@ def test_cluster_controller_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterControllerGrpcTransport, + transports.ClusterControllerGrpcAsyncIOTransport, + ], +) +def test_cluster_controller_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_cluster_controller_host_no_port(): client = ClusterControllerClient( credentials=credentials.AnonymousCredentials(), @@ -1772,7 +1918,7 @@ def test_cluster_controller_host_with_port(): def test_cluster_controller_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.ClusterControllerGrpcTransport( @@ -1784,7 +1930,7 @@ def test_cluster_controller_grpc_transport_channel(): def test_cluster_controller_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.ClusterControllerGrpcAsyncIOTransport( @@ -1795,6 +1941,8 @@ def test_cluster_controller_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1809,7 +1957,7 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -1847,6 +1995,8 @@ def test_cluster_controller_transport_channel_mtls_with_client_cert_source( assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ @@ -1862,7 +2012,7 @@ def test_cluster_controller_transport_channel_mtls_with_adc(transport_class): ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py index d9b0e661..fa894d54 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py @@ -93,7 +93,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [JobControllerClient, JobControllerAsyncClient] + "client_class", [JobControllerClient, JobControllerAsyncClient,] +) +def test_job_controller_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [JobControllerClient, JobControllerAsyncClient,] ) def test_job_controller_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -103,16 +120,21 @@ def test_job_controller_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_job_controller_client_get_transport_class(): transport = JobControllerClient.get_transport_class() - assert transport == transports.JobControllerGrpcTransport + available_transports = [ + transports.JobControllerGrpcTransport, + ] + assert transport in available_transports transport = JobControllerClient.get_transport_class("grpc") assert transport == transports.JobControllerGrpcTransport @@ -163,7 +185,7 @@ def test_job_controller_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -179,7 +201,7 @@ def test_job_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -195,7 +217,7 @@ def test_job_controller_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -223,7 +245,7 @@ def test_job_controller_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + 
client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -274,29 +296,25 @@ def test_job_controller_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. @@ -305,66 +323,53 @@ def test_job_controller_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -390,7 +395,7 @@ def test_job_controller_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -420,7 +425,7 @@ def test_job_controller_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -439,7 +444,7 @@ def test_job_controller_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -493,6 +498,22 @@ def test_submit_job_from_dict(): test_submit_job(request_type=dict) +def test_submit_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.submit_job), "__call__") as call: + client.submit_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + @pytest.mark.asyncio async def test_submit_job_async( transport: str = "grpc_asyncio", request_type=jobs.SubmitJobRequest @@ -670,6 +691,24 @@ def test_submit_job_as_operation_from_dict(): test_submit_job_as_operation(request_type=dict) +def test_submit_job_as_operation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.submit_job_as_operation), "__call__" + ) as call: + client.submit_job_as_operation() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + @pytest.mark.asyncio async def test_submit_job_as_operation_async( transport: str = "grpc_asyncio", request_type=jobs.SubmitJobRequest @@ -853,6 +892,22 @@ def test_get_job_from_dict(): test_get_job(request_type=dict) +def test_get_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_job), "__call__") as call: + client.get_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.GetJobRequest() + + @pytest.mark.asyncio async def test_get_job_async( transport: str = "grpc_asyncio", request_type=jobs.GetJobRequest @@ -1023,6 +1078,22 @@ def test_list_jobs_from_dict(): test_list_jobs(request_type=dict) +def test_list_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_jobs), "__call__") as call: + client.list_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.ListJobsRequest() + + @pytest.mark.asyncio async def test_list_jobs_async( transport: str = "grpc_asyncio", request_type=jobs.ListJobsRequest @@ -1293,6 +1364,22 @@ def test_update_job_from_dict(): test_update_job(request_type=dict) +def test_update_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_job), "__call__") as call: + client.update_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.UpdateJobRequest() + + @pytest.mark.asyncio async def test_update_job_async( transport: str = "grpc_asyncio", request_type=jobs.UpdateJobRequest @@ -1393,6 +1480,22 @@ def test_cancel_job_from_dict(): test_cancel_job(request_type=dict) +def test_cancel_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_job), "__call__") as call: + client.cancel_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.CancelJobRequest() + + @pytest.mark.asyncio async def test_cancel_job_async( transport: str = "grpc_asyncio", request_type=jobs.CancelJobRequest @@ -1558,6 +1661,22 @@ def test_delete_job_from_dict(): test_delete_job(request_type=dict) +def test_delete_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_job), "__call__") as call: + client.delete_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.DeleteJobRequest() + + @pytest.mark.asyncio async def test_delete_job_async( transport: str = "grpc_asyncio", request_type=jobs.DeleteJobRequest @@ -1845,6 +1964,51 @@ def test_job_controller_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobControllerGrpcTransport, + transports.JobControllerGrpcAsyncIOTransport, + ], +) +def test_job_controller_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_job_controller_host_no_port(): client = JobControllerClient( credentials=credentials.AnonymousCredentials(), @@ -1866,7 +2030,7 @@ def test_job_controller_host_with_port(): def test_job_controller_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.JobControllerGrpcTransport( @@ -1878,7 +2042,7 @@ def test_job_controller_grpc_transport_channel(): def test_job_controller_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.JobControllerGrpcAsyncIOTransport( @@ -1889,6 +2053,8 @@ def test_job_controller_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1901,7 +2067,7 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -1939,6 +2105,8 @@ def test_job_controller_transport_channel_mtls_with_client_cert_source(transport assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1954,7 +2122,7 @@ def test_job_controller_transport_channel_mtls_with_adc(transport_class): ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py index 00e5a115..4d466aa5 100644 --- a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py @@ -99,7 +99,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient] + "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient,] +) +def test_workflow_template_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "dataproc.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient,] ) def test_workflow_template_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -109,16 +126,21 @@ def test_workflow_template_service_client_from_service_account_file(client_class factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "dataproc.googleapis.com:443" def test_workflow_template_service_client_get_transport_class(): transport = WorkflowTemplateServiceClient.get_transport_class() - assert transport == 
transports.WorkflowTemplateServiceGrpcTransport + available_transports = [ + transports.WorkflowTemplateServiceGrpcTransport, + ] + assert transport in available_transports transport = WorkflowTemplateServiceClient.get_transport_class("grpc") assert transport == transports.WorkflowTemplateServiceGrpcTransport @@ -173,7 +195,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -189,7 +211,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -205,7 +227,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -233,7 +255,7 @@ def test_workflow_template_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -294,29 +316,25 @@ def test_workflow_template_service_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
@@ -325,66 +343,53 @@ def test_workflow_template_service_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -414,7 +419,7 @@ def test_workflow_template_service_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -448,7 +453,7 @@ def test_workflow_template_service_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -467,7 +472,7 @@ def test_workflow_template_service_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -517,6 +522,24 @@ def test_create_workflow_template_from_dict(): test_create_workflow_template(request_type=dict) +def test_create_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_workflow_template), "__call__" + ) as call: + client.create_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_create_workflow_template_async( transport: str = "grpc_asyncio", @@ -759,6 +782,24 @@ def test_get_workflow_template_from_dict(): test_get_workflow_template(request_type=dict) +def test_get_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_workflow_template), "__call__" + ) as call: + client.get_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_get_workflow_template_async( transport: str = "grpc_asyncio", @@ -979,6 +1020,24 @@ def test_instantiate_workflow_template_from_dict(): test_instantiate_workflow_template(request_type=dict) +def test_instantiate_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_workflow_template), "__call__" + ) as call: + client.instantiate_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_instantiate_workflow_template_async( transport: str = "grpc_asyncio", @@ -1203,6 +1262,24 @@ def test_instantiate_inline_workflow_template_from_dict(): test_instantiate_inline_workflow_template(request_type=dict) +def test_instantiate_inline_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.instantiate_inline_workflow_template), "__call__" + ) as call: + client.instantiate_inline_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_instantiate_inline_workflow_template_async( transport: str = "grpc_asyncio", @@ -1438,6 +1515,24 @@ def test_update_workflow_template_from_dict(): test_update_workflow_template(request_type=dict) +def test_update_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_workflow_template), "__call__" + ) as call: + client.update_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_update_workflow_template_async( transport: str = "grpc_asyncio", @@ -1673,6 +1768,24 @@ def test_list_workflow_templates_from_dict(): test_list_workflow_templates(request_type=dict) +def test_list_workflow_templates_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_workflow_templates), "__call__" + ) as call: + client.list_workflow_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + @pytest.mark.asyncio async def test_list_workflow_templates_async( transport: str = "grpc_asyncio", @@ -2069,6 +2182,24 @@ def test_delete_workflow_template_from_dict(): test_delete_workflow_template(request_type=dict) +def test_delete_workflow_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_workflow_template), "__call__" + ) as call: + client.delete_workflow_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + @pytest.mark.asyncio async def test_delete_workflow_template_async( transport: str = "grpc_asyncio", @@ -2414,6 +2545,53 @@ def test_workflow_template_service_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.WorkflowTemplateServiceGrpcTransport, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + ], +) +def test_workflow_template_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_workflow_template_service_host_no_port(): client = WorkflowTemplateServiceClient( credentials=credentials.AnonymousCredentials(), @@ -2435,7 +2613,7 @@ def test_workflow_template_service_host_with_port(): def test_workflow_template_service_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.WorkflowTemplateServiceGrpcTransport( @@ -2447,7 +2625,7 @@ def test_workflow_template_service_grpc_transport_channel(): def test_workflow_template_service_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( @@ -2458,6 +2636,8 @@ def test_workflow_template_service_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ @@ -2472,7 +2652,7 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -2510,6 +2690,8 @@ def test_workflow_template_service_transport_channel_mtls_with_client_cert_sourc assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -2525,7 +2707,7 @@ def test_workflow_template_service_transport_channel_mtls_with_adc(transport_cla ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel From 0526f48a7804e774ea09f39223941b9daa811e06 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Mon, 5 Apr 2021 21:35:43 +0000 Subject: [PATCH 20/20] fix: rename `policy_` to `policy` --- .../async_client.py | 24 +++++++++---------- .../autoscaling_policy_service/client.py | 24 +++++++++---------- .../dataproc_v1/types/autoscaling_policies.py | 8 +++---- .../async_client.py | 24 +++++++++---------- .../autoscaling_policy_service/client.py | 24 +++++++++---------- .../types/autoscaling_policies.py | 8 +++---- noxfile.py | 8 +++---- scripts/fixup_dataproc_v1_keywords.py | 4 ++-- scripts/fixup_dataproc_v1beta2_keywords.py | 4 ++-- synth.metadata | 6 ++--- synth.py | 6 ++--- .../test_autoscaling_policy_service.py | 24 +++++++++---------- .../test_autoscaling_policy_service.py | 24 +++++++++---------- 13 files changed, 93 insertions(+), 95 deletions(-) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py index 5508ec26..52d43220 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py @@ -185,7 +185,7 @@ async def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -214,11 +214,11 @@ async def create_autoscaling_policy( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy_ (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): + policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): Required. The autoscaling policy to create. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -237,7 +237,7 @@ async def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, policy_]) + has_flattened_params = any([parent, policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -251,8 +251,8 @@ async def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -278,7 +278,7 @@ async def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -292,11 +292,11 @@ async def update_autoscaling_policy( request (:class:`google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest`): The request object. A request to update an autoscaling policy. - policy_ (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): + policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): Required. The updated autoscaling policy. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -315,7 +315,7 @@ async def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy_]) + has_flattened_params = any([policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -327,8 +327,8 @@ async def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py index 5a6d1151..83bce7d3 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py @@ -354,7 +354,7 @@ def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -383,11 +383,11 @@ def create_autoscaling_policy( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The autoscaling policy to create. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -406,7 +406,7 @@ def create_autoscaling_policy( # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy_]) + has_flattened_params = any([parent, policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -425,8 +425,8 @@ def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -450,7 +450,7 @@ def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -464,11 +464,11 @@ def update_autoscaling_policy( request (google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest): The request object. A request to update an autoscaling policy. - policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The updated autoscaling policy. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -487,7 +487,7 @@ def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy_]) + has_flattened_params = any([policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -504,8 +504,8 @@ def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/dataproc_v1/types/autoscaling_policies.py b/google/cloud/dataproc_v1/types/autoscaling_policies.py index 84259027..7fa0779f 100644 --- a/google/cloud/dataproc_v1/types/autoscaling_policies.py +++ b/google/cloud/dataproc_v1/types/autoscaling_policies.py @@ -244,13 +244,13 @@ class CreateAutoscalingPolicyRequest(proto.Message): - For ``projects.locations.autoscalingPolicies.create``, the resource name of the location has the following format: ``projects/{project_id}/locations/{location}`` - policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The autoscaling policy to create. """ parent = proto.Field(proto.STRING, number=1) - policy_ = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) + policy = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) class GetAutoscalingPolicyRequest(proto.Message): @@ -278,11 +278,11 @@ class UpdateAutoscalingPolicyRequest(proto.Message): r"""A request to update an autoscaling policy. Attributes: - policy_ (google.cloud.dataproc_v1.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1.types.AutoscalingPolicy): Required. The updated autoscaling policy. 
""" - policy_ = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) + policy = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) class DeleteAutoscalingPolicyRequest(proto.Message): diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py index cfc8452e..5b254961 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py @@ -185,7 +185,7 @@ async def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -213,11 +213,11 @@ async def create_autoscaling_policy( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy_ (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): + policy (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): Required. The autoscaling policy to create. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -236,7 +236,7 @@ async def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy_]) + has_flattened_params = any([parent, policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -250,8 +250,8 @@ async def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -277,7 +277,7 @@ async def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -291,11 +291,11 @@ async def update_autoscaling_policy( request (:class:`google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest`): The request object. A request to update an autoscaling policy. - policy_ (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): + policy (:class:`google.cloud.dataproc_v1beta2.types.AutoscalingPolicy`): Required. The updated autoscaling policy. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -314,7 +314,7 @@ async def update_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([policy_]) + has_flattened_params = any([policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -326,8 +326,8 @@ async def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py index aad6370c..d7ac0e31 100644 --- a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py @@ -354,7 +354,7 @@ def create_autoscaling_policy( request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, *, parent: str = None, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -382,11 +382,11 @@ def create_autoscaling_policy( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The autoscaling policy to create. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -405,7 +405,7 @@ def create_autoscaling_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, policy_]) + has_flattened_params = any([parent, policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -424,8 +424,8 @@ def create_autoscaling_policy( if parent is not None: request.parent = parent - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -449,7 +449,7 @@ def update_autoscaling_policy( self, request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, *, - policy_: autoscaling_policies.AutoscalingPolicy = None, + policy: autoscaling_policies.AutoscalingPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -463,11 +463,11 @@ def update_autoscaling_policy( request (google.cloud.dataproc_v1beta2.types.UpdateAutoscalingPolicyRequest): The request object. A request to update an autoscaling policy. - policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The updated autoscaling policy. - This corresponds to the ``policy_`` field + This corresponds to the ``policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -486,7 +486,7 @@ def update_autoscaling_policy( # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([policy_]) + has_flattened_params = any([policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -503,8 +503,8 @@ def update_autoscaling_policy( # If we have keyword arguments corresponding to fields on the # request, apply these. - if policy_ is not None: - request.policy_ = policy_ + if policy is not None: + request.policy = policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py index b52a838f..ccc13b13 100644 --- a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py +++ b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py @@ -244,13 +244,13 @@ class CreateAutoscalingPolicyRequest(proto.Message): - For ``projects.locations.autoscalingPolicies.create``, the resource name has the following format: ``projects/{project_id}/locations/{location}`` - policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The autoscaling policy to create. """ parent = proto.Field(proto.STRING, number=1) - policy_ = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) + policy = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",) class GetAutoscalingPolicyRequest(proto.Message): @@ -278,11 +278,11 @@ class UpdateAutoscalingPolicyRequest(proto.Message): r"""A request to update an autoscaling policy. Attributes: - policy_ (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): + policy (google.cloud.dataproc_v1beta2.types.AutoscalingPolicy): Required. The updated autoscaling policy. """ - policy_ = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) + policy = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",) class DeleteAutoscalingPolicyRequest(proto.Message): diff --git a/noxfile.py b/noxfile.py index c6bb3a90..d3524e26 100644 --- a/noxfile.py +++ b/noxfile.py @@ -179,7 +179,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. 
""" session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=98") + session.run("coverage", "report", "--show-missing", "--fail-under=100") session.run("coverage", "erase") @@ -189,7 +189,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( @@ -211,9 +211,7 @@ def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - session.install( - "sphinx<3.0.0", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml" - ) + session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/scripts/fixup_dataproc_v1_keywords.py b/scripts/fixup_dataproc_v1_keywords.py index 3c2a901b..92228e53 100644 --- a/scripts/fixup_dataproc_v1_keywords.py +++ b/scripts/fixup_dataproc_v1_keywords.py @@ -42,7 +42,7 @@ class dataprocCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'cancel_job': ('project_id', 'region', 'job_id', ), - 'create_autoscaling_policy': ('parent', 'policy_', ), + 'create_autoscaling_policy': ('parent', 'policy', ), 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), 'create_workflow_template': ('parent', 'template', ), 'delete_autoscaling_policy': ('name', ), @@ -62,7 +62,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), 'submit_job': ('project_id', 'region', 'job', 'request_id', ), 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), - 'update_autoscaling_policy': ('policy_', ), + 'update_autoscaling_policy': ('policy', ), 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), 'update_workflow_template': ('template', ), diff --git a/scripts/fixup_dataproc_v1beta2_keywords.py b/scripts/fixup_dataproc_v1beta2_keywords.py index ca3056b5..11f2e445 100644 --- a/scripts/fixup_dataproc_v1beta2_keywords.py +++ b/scripts/fixup_dataproc_v1beta2_keywords.py @@ -42,7 +42,7 @@ class dataprocCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'cancel_job': ('project_id', 'region', 'job_id', ), - 'create_autoscaling_policy': ('parent', 'policy_', ), + 'create_autoscaling_policy': ('parent', 'policy', ), 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), 'create_workflow_template': ('parent', 'template', ), 'delete_autoscaling_policy': ('name', ), @@ -62,7 +62,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), 'submit_job': ('project_id', 'region', 'job', 'request_id', ), 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), - 'update_autoscaling_policy': ('policy_', ), + 'update_autoscaling_policy': ('policy', ), 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), 
'update_workflow_template': ('template', ), diff --git a/synth.metadata b/synth.metadata index de8bf19f..24170576 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "git@github.com:googleapis/python-dataproc.git", - "sha": "600793190cd1a69eeceb96a6444ad9de73f382d8" + "sha": "eeceee6fec75370b996119616928426c5bfc12bc" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "d78dc2e0cb627d3e48e910abf4b991264affcb56", - "internalRef": "366838867" + "sha": "fa7915f8d43926de5effb815129a274579fa84df", + "internalRef": "366869955" } }, { diff --git a/synth.py b/synth.py index 416f69d5..fd91ef7a 100644 --- a/synth.py +++ b/synth.py @@ -42,17 +42,17 @@ templated_files = common.py_library( samples=True, # set to True only if there are samples microgenerator=True, - cov_level=98, ) s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file +# Rename `policy_` to `policy` to avoid breaking change in a GA library +# Only replace if a non-alphanumeric (\W) character follows `policy_` +s.replace(["google/**/*.py", "scripts/fixup*.py", "tests/unit/**/*.py"], "policy_(\W)", "policy\g<1>") # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- python.py_samples(skip_readmes=True) -# TODO(busunkim): Use latest sphinx after microgenerator transition -s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') # Temporarily disable warnings due to # https://github.com/googleapis/gapic-generator-python/issues/525 diff --git a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py index 10d4f3d2..7516619f 100644 --- a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py +++ b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py @@ -660,7 +660,7 @@ def test_create_autoscaling_policy_flattened(): # using the keyword arguments to the method. client.create_autoscaling_policy( parent="parent_value", - policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) # Establish that the underlying call was made with the expected @@ -670,7 +670,7 @@ def test_create_autoscaling_policy_flattened(): assert args[0].parent == "parent_value" - assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value") + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") def test_create_autoscaling_policy_flattened_error(): @@ -684,7 +684,7 @@ def test_create_autoscaling_policy_flattened_error(): client.create_autoscaling_policy( autoscaling_policies.CreateAutoscalingPolicyRequest(), parent="parent_value", - policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"), + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), ) @@ -708,7 +708,7 @@ async def test_create_autoscaling_policy_flattened_async(): # using the keyword arguments to the method. 
         response = await client.create_autoscaling_policy(
             parent="parent_value",
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -718,7 +718,7 @@ async def test_create_autoscaling_policy_flattened_async():
 
         assert args[0].parent == "parent_value"
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 @pytest.mark.asyncio
@@ -733,7 +733,7 @@ async def test_create_autoscaling_policy_flattened_error_async():
         await client.create_autoscaling_policy(
             autoscaling_policies.CreateAutoscalingPolicyRequest(),
             parent="parent_value",
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
 
@@ -920,7 +920,7 @@ def test_update_autoscaling_policy_flattened():
         # Call the method with a truthy value for each flattened field,
         # using the keyword arguments to the method.
         client.update_autoscaling_policy(
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -928,7 +928,7 @@ def test_update_autoscaling_policy_flattened():
         assert len(call.mock_calls) == 1
         _, args, _ = call.mock_calls[0]
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 def test_update_autoscaling_policy_flattened_error():
@@ -941,7 +941,7 @@ def test_update_autoscaling_policy_flattened_error():
     with pytest.raises(ValueError):
         client.update_autoscaling_policy(
             autoscaling_policies.UpdateAutoscalingPolicyRequest(),
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
 
@@ -964,7 +964,7 @@ async def test_update_autoscaling_policy_flattened_async():
         # Call the method with a truthy value for each flattened field,
         # using the keyword arguments to the method.
         response = await client.update_autoscaling_policy(
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -972,7 +972,7 @@ async def test_update_autoscaling_policy_flattened_async():
         assert len(call.mock_calls)
         _, args, _ = call.mock_calls[0]
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 @pytest.mark.asyncio
@@ -986,7 +986,7 @@ async def test_update_autoscaling_policy_flattened_error_async():
     with pytest.raises(ValueError):
         await client.update_autoscaling_policy(
             autoscaling_policies.UpdateAutoscalingPolicyRequest(),
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
 
diff --git a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py
index 87752c94..9af5109f 100644
--- a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py
+++ b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py
@@ -660,7 +660,7 @@ def test_create_autoscaling_policy_flattened():
         # using the keyword arguments to the method.
         client.create_autoscaling_policy(
             parent="parent_value",
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -670,7 +670,7 @@ def test_create_autoscaling_policy_flattened():
 
         assert args[0].parent == "parent_value"
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 def test_create_autoscaling_policy_flattened_error():
@@ -684,7 +684,7 @@ def test_create_autoscaling_policy_flattened_error():
         client.create_autoscaling_policy(
             autoscaling_policies.CreateAutoscalingPolicyRequest(),
             parent="parent_value",
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
 
@@ -708,7 +708,7 @@ async def test_create_autoscaling_policy_flattened_async():
         # using the keyword arguments to the method.
         response = await client.create_autoscaling_policy(
             parent="parent_value",
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -718,7 +718,7 @@ async def test_create_autoscaling_policy_flattened_async():
 
         assert args[0].parent == "parent_value"
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 @pytest.mark.asyncio
@@ -733,7 +733,7 @@ async def test_create_autoscaling_policy_flattened_error_async():
         await client.create_autoscaling_policy(
             autoscaling_policies.CreateAutoscalingPolicyRequest(),
             parent="parent_value",
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
 
@@ -920,7 +920,7 @@ def test_update_autoscaling_policy_flattened():
         # Call the method with a truthy value for each flattened field,
         # using the keyword arguments to the method.
         client.update_autoscaling_policy(
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -928,7 +928,7 @@ def test_update_autoscaling_policy_flattened():
         assert len(call.mock_calls) == 1
         _, args, _ = call.mock_calls[0]
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 def test_update_autoscaling_policy_flattened_error():
@@ -941,7 +941,7 @@ def test_update_autoscaling_policy_flattened_error():
     with pytest.raises(ValueError):
         client.update_autoscaling_policy(
             autoscaling_policies.UpdateAutoscalingPolicyRequest(),
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
 
@@ -964,7 +964,7 @@ async def test_update_autoscaling_policy_flattened_async():
         # Call the method with a truthy value for each flattened field,
         # using the keyword arguments to the method.
         response = await client.update_autoscaling_policy(
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
 
         # Establish that the underlying call was made with the expected
@@ -972,7 +972,7 @@ async def test_update_autoscaling_policy_flattened_async():
         assert len(call.mock_calls)
         _, args, _ = call.mock_calls[0]
 
-        assert args[0].policy_ == autoscaling_policies.AutoscalingPolicy(id="id_value")
+        assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value")
 
 
 @pytest.mark.asyncio
@@ -986,7 +986,7 @@ async def test_update_autoscaling_policy_flattened_error_async():
     with pytest.raises(ValueError):
         await client.update_autoscaling_policy(
             autoscaling_policies.UpdateAutoscalingPolicyRequest(),
-            policy_=autoscaling_policies.AutoscalingPolicy(id="id_value"),
+            policy=autoscaling_policies.AutoscalingPolicy(id="id_value"),
         )
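
Aside on the synth.py change above: the `s.replace(...)` call renames `policy_` to `policy` only where a non-word character follows, which is why identifiers such as `policy_id` are untouched while keyword arguments and attribute accesses are rewritten, as the test diffs show. Below is a minimal sketch of that substitution using plain `re.sub` standing in for synthtool's file-based `s.replace`; the sample strings are hypothetical and only illustrate the pattern.

# Illustrative only: shows what the `policy_(\W)` -> `policy\g<1>` substitution matches.
import re

PATTERN = r"policy_(\W)"       # `policy_` followed by a non-word character
REPLACEMENT = r"policy\g<1>"   # drop the trailing underscore, keep the captured character

samples = [
    'client.update_autoscaling_policy(policy_=policy)',      # keyword argument -> rewritten
    'assert args[0].policy_ == expected',                     # attribute access -> rewritten
    'policy_id = "autoscalingPolicies/example"',              # `policy_id` stays unchanged
]

for line in samples:
    print(re.sub(PATTERN, REPLACEMENT, line))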