- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please + As of January 1, 2020 this library no longer supports Python 2 on the latest released version. + Library versions released prior to that date will continue to be available. For more information please visit Python 2 support on Google Cloud.
{% block body %} {% endblock %} diff --git a/docs/conf.py b/docs/conf.py index 1815da57a..296607b79 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -349,6 +349,7 @@ "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.io/grpc/python/", None), + "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), } diff --git a/google/cloud/logging/__init__.py b/google/cloud/logging/__init__.py index 932b8f739..0953416af 100644 --- a/google/cloud/logging/__init__.py +++ b/google/cloud/logging/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2016 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,38 +15,122 @@ # limitations under the License. # -from google.cloud.logging_v2 import __version__ -from google.cloud.logging_v2 import ASCENDING -from google.cloud.logging_v2 import DESCENDING -from google.cloud.logging_v2.client import Client -from google.cloud.logging_v2.entries import logger_name_from_path -from google.cloud.logging_v2.entries import LogEntry -from google.cloud.logging_v2.entries import TextEntry -from google.cloud.logging_v2.entries import StructEntry -from google.cloud.logging_v2.entries import ProtobufEntry -from google.cloud.logging_v2 import handlers -from google.cloud.logging_v2.logger import Logger -from google.cloud.logging_v2.logger import Batch -from google.cloud.logging_v2.metric import Metric -from google.cloud.logging_v2.resource import Resource -from google.cloud.logging_v2.sink import Sink -from google.cloud.logging_v2 import types +from google.cloud.logging_v2.services.config_service_v2.async_client import ( + ConfigServiceV2AsyncClient, +) +from google.cloud.logging_v2.services.config_service_v2.client import ( + ConfigServiceV2Client, +) +from google.cloud.logging_v2.services.logging_service_v2.async_client import ( + LoggingServiceV2AsyncClient, +) +from google.cloud.logging_v2.services.logging_service_v2.client import ( + LoggingServiceV2Client, +) +from google.cloud.logging_v2.services.metrics_service_v2.async_client import ( + MetricsServiceV2AsyncClient, +) +from google.cloud.logging_v2.services.metrics_service_v2.client import ( + MetricsServiceV2Client, +) +from google.cloud.logging_v2.types.log_entry import LogEntry +from google.cloud.logging_v2.types.log_entry import LogEntryOperation +from google.cloud.logging_v2.types.log_entry import LogEntrySourceLocation +from google.cloud.logging_v2.types.logging import DeleteLogRequest +from google.cloud.logging_v2.types.logging import ListLogEntriesRequest +from google.cloud.logging_v2.types.logging import ListLogEntriesResponse +from google.cloud.logging_v2.types.logging import ListLogsRequest +from google.cloud.logging_v2.types.logging import ListLogsResponse +from google.cloud.logging_v2.types.logging import ( + ListMonitoredResourceDescriptorsRequest, +) +from google.cloud.logging_v2.types.logging import ( + ListMonitoredResourceDescriptorsResponse, +) +from google.cloud.logging_v2.types.logging import WriteLogEntriesPartialErrors +from google.cloud.logging_v2.types.logging import WriteLogEntriesRequest +from google.cloud.logging_v2.types.logging import WriteLogEntriesResponse +from google.cloud.logging_v2.types.logging_config import BigQueryOptions +from google.cloud.logging_v2.types.logging_config import CmekSettings +from 
google.cloud.logging_v2.types.logging_config import CreateExclusionRequest +from google.cloud.logging_v2.types.logging_config import CreateSinkRequest +from google.cloud.logging_v2.types.logging_config import DeleteExclusionRequest +from google.cloud.logging_v2.types.logging_config import DeleteSinkRequest +from google.cloud.logging_v2.types.logging_config import GetBucketRequest +from google.cloud.logging_v2.types.logging_config import GetCmekSettingsRequest +from google.cloud.logging_v2.types.logging_config import GetExclusionRequest +from google.cloud.logging_v2.types.logging_config import GetSinkRequest +from google.cloud.logging_v2.types.logging_config import LifecycleState +from google.cloud.logging_v2.types.logging_config import ListBucketsRequest +from google.cloud.logging_v2.types.logging_config import ListBucketsResponse +from google.cloud.logging_v2.types.logging_config import ListExclusionsRequest +from google.cloud.logging_v2.types.logging_config import ListExclusionsResponse +from google.cloud.logging_v2.types.logging_config import ListSinksRequest +from google.cloud.logging_v2.types.logging_config import ListSinksResponse +from google.cloud.logging_v2.types.logging_config import LogBucket +from google.cloud.logging_v2.types.logging_config import LogExclusion +from google.cloud.logging_v2.types.logging_config import LogSink +from google.cloud.logging_v2.types.logging_config import UpdateBucketRequest +from google.cloud.logging_v2.types.logging_config import UpdateCmekSettingsRequest +from google.cloud.logging_v2.types.logging_config import UpdateExclusionRequest +from google.cloud.logging_v2.types.logging_config import UpdateSinkRequest +from google.cloud.logging_v2.types.logging_metrics import CreateLogMetricRequest +from google.cloud.logging_v2.types.logging_metrics import DeleteLogMetricRequest +from google.cloud.logging_v2.types.logging_metrics import GetLogMetricRequest +from google.cloud.logging_v2.types.logging_metrics import ListLogMetricsRequest +from google.cloud.logging_v2.types.logging_metrics import ListLogMetricsResponse +from google.cloud.logging_v2.types.logging_metrics import LogMetric +from google.cloud.logging_v2.types.logging_metrics import UpdateLogMetricRequest __all__ = ( - "__version__", - "ASCENDING", - "Batch", - "Client", - "DESCENDING", - "handlers", - "logger_name_from_path", - "Logger", + "BigQueryOptions", + "CmekSettings", + "ConfigServiceV2AsyncClient", + "ConfigServiceV2Client", + "CreateExclusionRequest", + "CreateLogMetricRequest", + "CreateSinkRequest", + "DeleteExclusionRequest", + "DeleteLogMetricRequest", + "DeleteLogRequest", + "DeleteSinkRequest", + "GetBucketRequest", + "GetCmekSettingsRequest", + "GetExclusionRequest", + "GetLogMetricRequest", + "GetSinkRequest", + "LifecycleState", + "ListBucketsRequest", + "ListBucketsResponse", + "ListExclusionsRequest", + "ListExclusionsResponse", + "ListLogEntriesRequest", + "ListLogEntriesResponse", + "ListLogMetricsRequest", + "ListLogMetricsResponse", + "ListLogsRequest", + "ListLogsResponse", + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "ListSinksRequest", + "ListSinksResponse", + "LogBucket", "LogEntry", - "Metric", - "ProtobufEntry", - "Resource", - "Sink", - "StructEntry", - "TextEntry", - "types", + "LogEntryOperation", + "LogEntrySourceLocation", + "LogExclusion", + "LogMetric", + "LogSink", + "LoggingServiceV2AsyncClient", + "LoggingServiceV2Client", + "MetricsServiceV2AsyncClient", + "MetricsServiceV2Client", + "UpdateBucketRequest", + 
"UpdateCmekSettingsRequest", + "UpdateExclusionRequest", + "UpdateLogMetricRequest", + "UpdateSinkRequest", + "WriteLogEntriesPartialErrors", + "WriteLogEntriesRequest", + "WriteLogEntriesResponse", ) diff --git a/google/cloud/logging/py.typed b/google/cloud/logging/py.typed new file mode 100644 index 000000000..6c7420d0d --- /dev/null +++ b/google/cloud/logging/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-logging package uses inline types. diff --git a/google/cloud/logging_v2/__init__.py b/google/cloud/logging_v2/__init__.py index 98954d550..964c99572 100644 --- a/google/cloud/logging_v2/__init__.py +++ b/google/cloud/logging_v2/__init__.py @@ -14,50 +14,32 @@ from __future__ import absolute_import -import pkg_resources - -try: - __version__ = pkg_resources.get_distribution("google-cloud-logging").version -except pkg_resources.DistributionNotFound: - __version__ = None - - -from google.cloud.logging_v2.client import Client -from google.cloud.logging_v2.entries import logger_name_from_path -from google.cloud.logging_v2.entries import LogEntry -from google.cloud.logging_v2.entries import TextEntry -from google.cloud.logging_v2.entries import StructEntry -from google.cloud.logging_v2.entries import ProtobufEntry -from google.cloud.logging_v2 import handlers -from google.cloud.logging_v2.logger import Logger -from google.cloud.logging_v2.logger import Batch -from google.cloud.logging_v2.metric import Metric -from google.cloud.logging_v2.resource import Resource -from google.cloud.logging_v2.sink import Sink from google.cloud.logging_v2 import types +from google.cloud.logging_v2.gapic import config_service_v2_client +from google.cloud.logging_v2.gapic import enums +from google.cloud.logging_v2.gapic import logging_service_v2_client +from google.cloud.logging_v2.gapic import metrics_service_v2_client -ASCENDING = "timestamp asc" -"""Query string to order by ascending timestamps.""" -DESCENDING = "timestamp desc" -"""Query string to order by decending timestamps.""" +class LoggingServiceV2Client(logging_service_v2_client.LoggingServiceV2Client): + __doc__ = logging_service_v2_client.LoggingServiceV2Client.__doc__ + enums = enums + + +class ConfigServiceV2Client(config_service_v2_client.ConfigServiceV2Client): + __doc__ = config_service_v2_client.ConfigServiceV2Client.__doc__ + enums = enums + + +class MetricsServiceV2Client(metrics_service_v2_client.MetricsServiceV2Client): + __doc__ = metrics_service_v2_client.MetricsServiceV2Client.__doc__ + enums = enums __all__ = ( - "__version__", - "ASCENDING", - "Batch", - "Client", - "DESCENDING", - "handlers", - "logger_name_from_path", - "Logger", - "LogEntry", - "Metric", - "ProtobufEntry", - "Resource", - "Sink", - "StructEntry", - "TextEntry", + "enums", "types", + "LoggingServiceV2Client", + "ConfigServiceV2Client", + "MetricsServiceV2Client", ) diff --git a/google/cloud/logging_v2/proto/log_entry.proto b/google/cloud/logging_v2/proto/log_entry.proto new file mode 100644 index 000000000..3ad2cfbb5 --- /dev/null +++ b/google/cloud/logging_v2/proto/log_entry.proto @@ -0,0 +1,210 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.logging.v2; + +import "google/api/field_behavior.proto"; +import "google/api/monitored_resource.proto"; +import "google/api/resource.proto"; +import "google/logging/type/http_request.proto"; +import "google/logging/type/log_severity.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/api/annotations.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.Logging.V2"; +option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; +option java_multiple_files = true; +option java_outer_classname = "LogEntryProto"; +option java_package = "com.google.logging.v2"; +option php_namespace = "Google\\Cloud\\Logging\\V2"; +option ruby_package = "Google::Cloud::Logging::V2"; + +// An individual entry in a log. +// +// +message LogEntry { + option (google.api.resource) = { + type: "logging.googleapis.com/Log" + pattern: "projects/{project}/logs/{log}" + pattern: "organizations/{organization}/logs/{log}" + pattern: "folders/{folder}/logs/{log}" + pattern: "billingAccounts/{billing_account}/logs/{log}" + name_field: "log_name" + }; + + // Required. The resource name of the log to which this log entry belongs: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // A project number may be used in place of PROJECT_ID. The project number is + // translated to its corresponding PROJECT_ID internally and the `log_name` + // field will contain PROJECT_ID in queries and exports. + // + // `[LOG_ID]` must be URL-encoded within `log_name`. Example: + // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // `[LOG_ID]` must be less than 512 characters long and can only include the + // following characters: upper and lower case alphanumeric characters, + // forward-slash, underscore, hyphen, and period. + // + // For backward compatibility, if `log_name` begins with a forward-slash, such + // as `/projects/...`, then the log entry is ingested as usual but the + // forward-slash is removed. Listing the log entry will not show the leading + // slash and filtering for a log name with a leading slash will never return + // any results. + string log_name = 12 [(google.api.field_behavior) = REQUIRED]; + + // Required. The monitored resource that produced this log entry. + // + // Example: a log entry that reports a database error would be associated with + // the monitored resource designating the particular database that reported + // the error. + google.api.MonitoredResource resource = 8 [(google.api.field_behavior) = REQUIRED]; + + // The log entry payload, which can be one of multiple types. + oneof payload { + // The log entry payload, represented as a protocol buffer. Some Google + // Cloud Platform services use this field for their log entry payloads. 
+ // + // The following protocol buffer types are supported; user-defined types + // are not supported: + // + // "type.googleapis.com/google.cloud.audit.AuditLog" + // "type.googleapis.com/google.appengine.logging.v1.RequestLog" + google.protobuf.Any proto_payload = 2; + + // The log entry payload, represented as a Unicode string (UTF-8). + string text_payload = 3; + + // The log entry payload, represented as a structure that is + // expressed as a JSON object. + google.protobuf.Struct json_payload = 6; + } + + // Optional. The time the event described by the log entry occurred. This time is used + // to compute the log entry's age and to enforce the logs retention period. + // If this field is omitted in a new log entry, then Logging assigns it the + // current time. Timestamps have nanosecond accuracy, but trailing zeros in + // the fractional seconds might be omitted when the timestamp is displayed. + // + // Incoming log entries must have timestamps that don't exceed the + // [logs retention + // period](https://cloud.google.com/logging/quotas#logs_retention_periods) in + // the past, and that don't exceed 24 hours in the future. Log entries outside + // those time boundaries aren't ingested by Logging. + google.protobuf.Timestamp timestamp = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time the log entry was received by Logging. + google.protobuf.Timestamp receive_timestamp = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`. + google.logging.type.LogSeverity severity = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique identifier for the log entry. If you provide a value, then + // Logging considers other log entries in the same project, with the same + // `timestamp`, and with the same `insert_id` to be duplicates which are + // removed in a single query result. However, there are no guarantees of + // de-duplication in the export of logs. + // + // If the `insert_id` is omitted when writing a log entry, the Logging API + // assigns its own unique identifier in this field. + // + // In queries, the `insert_id` is also used to order log entries that have + // the same `log_name` and `timestamp` values. + string insert_id = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Information about the HTTP request associated with this log entry, if + // applicable. + google.logging.type.HttpRequest http_request = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A set of user-defined (key, value) data that provides additional + // information about the log entry. + map<string, string> labels = 11 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Information about an operation associated with the log entry, if + // applicable. + LogEntryOperation operation = 15 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Resource name of the trace associated with the log entry, if any. If it + // contains a relative resource name, the name is assumed to be relative to + // `//tracing.googleapis.com`. Example: + // `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824` + string trace = 22 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The span ID within the trace associated with the log entry. + // + // For Trace spans, this is the same format that the Trace API v2 uses: a + // 16-character hexadecimal encoding of an 8-byte array, such as + // `000000000000004a`.
+ string span_id = 27 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The sampling decision of the trace associated with the log entry. + // + // True means that the trace resource name in the `trace` field was sampled + // for storage in a trace backend. False means that the trace was not sampled + // for storage when this log entry was written, or the sampling decision was + // unknown at the time. A non-sampled `trace` value is still useful as a + // request correlation identifier. The default is False. + bool trace_sampled = 30 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Source code location information associated with the log entry, if any. + LogEntrySourceLocation source_location = 23 [(google.api.field_behavior) = OPTIONAL]; +} + +// Additional information about a potentially long-running operation with which +// a log entry is associated. +message LogEntryOperation { + // Optional. An arbitrary operation identifier. Log entries with the same + // identifier are assumed to be part of the same operation. + string id = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. An arbitrary producer identifier. The combination of `id` and + // `producer` must be globally unique. Examples for `producer`: + // `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`. + string producer = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Set this to True if this is the first log entry in the operation. + bool first = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Set this to True if this is the last log entry in the operation. + bool last = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// Additional information about the source code location that produced the log +// entry. +message LogEntrySourceLocation { + // Optional. Source file name. Depending on the runtime environment, this + // might be a simple name or a fully-qualified name. + string file = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Line within the source file. 1-based; 0 indicates no line number + // available. + int64 line = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Human-readable name of the function or method being invoked, with + // optional context such as the class or package name. This information may be + // used in contexts such as the logs viewer, where a file and line number are + // less meaningful. The format can vary by language. For example: + // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` + // (Python). + string function = 3 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/google/cloud/logging_v2/proto/logging.proto b/google/cloud/logging_v2/proto/logging.proto new file mode 100644 index 000000000..58647b92f --- /dev/null +++ b/google/cloud/logging_v2/proto/logging.proto @@ -0,0 +1,379 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
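The log_entry.proto messages above back the `LogEntry`, `LogEntryOperation`, and `LogEntrySourceLocation` types re-exported from `google.cloud.logging`. Before the service protos below, a minimal sketch of constructing one of these generated proto-plus messages; the import paths are taken from the imports this diff adds to google/cloud/logging/__init__.py, the project and log names are placeholders, and this assumes a release of google-cloud-logging that ships the generated modules.

```python
# Minimal sketch: building a LogEntry from the generated types.
# "my-project" and "example-log" are placeholder names.
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.types.log_entry import LogEntry

entry = LogEntry(
    log_name="projects/my-project/logs/example-log",
    # `resource` is the plain protobuf MonitoredResource from google.api.
    resource=monitored_resource_pb2.MonitoredResource(type="global"),
    text_payload="hello world",  # one arm of the payload oneof
    labels={"env": "dev"},       # map<string, string> per the proto
)
print(entry.log_name)
```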
+ +syntax = "proto3"; + +package google.logging.v2; + +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/monitored_resource.proto"; +import "google/api/resource.proto"; +import "google/logging/v2/log_entry.proto"; +import "google/logging/v2/logging_config.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/api/annotations.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.Logging.V2"; +option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; +option java_multiple_files = true; +option java_outer_classname = "LoggingProto"; +option java_package = "com.google.logging.v2"; +option php_namespace = "Google\\Cloud\\Logging\\V2"; +option ruby_package = "Google::Cloud::Logging::V2"; + +// Service for ingesting and querying logs. +service LoggingServiceV2 { + option (google.api.default_host) = "logging.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/logging.admin," + "https://www.googleapis.com/auth/logging.read," + "https://www.googleapis.com/auth/logging.write"; + + // Deletes all the log entries in a log. The log reappears if it receives new + // entries. Log entries written shortly before the delete operation might not + // be deleted. Entries received after the delete operation with a timestamp + // before the operation will be deleted. + rpc DeleteLog(DeleteLogRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{log_name=projects/*/logs/*}" + additional_bindings { + delete: "/v2/{log_name=*/*/logs/*}" + } + additional_bindings { + delete: "/v2/{log_name=organizations/*/logs/*}" + } + additional_bindings { + delete: "/v2/{log_name=folders/*/logs/*}" + } + additional_bindings { + delete: "/v2/{log_name=billingAccounts/*/logs/*}" + } + }; + option (google.api.method_signature) = "log_name"; + } + + // Writes log entries to Logging. This API method is the + // only way to send log entries to Logging. This method + // is used, directly or indirectly, by the Logging agent + // (fluentd) and all logging libraries configured to use Logging. + // A single request may contain log entries for a maximum of 1000 + // different resources (projects, organizations, billing accounts or + // folders) + rpc WriteLogEntries(WriteLogEntriesRequest) returns (WriteLogEntriesResponse) { + option (google.api.http) = { + post: "/v2/entries:write" + body: "*" + }; + option (google.api.method_signature) = "log_name,resource,labels,entries"; + } + + // Lists log entries. Use this method to retrieve log entries that originated + // from a project/folder/organization/billing account. For ways to export log + // entries, see [Exporting + // Logs](https://cloud.google.com/logging/docs/export). + rpc ListLogEntries(ListLogEntriesRequest) returns (ListLogEntriesResponse) { + option (google.api.http) = { + post: "/v2/entries:list" + body: "*" + }; + option (google.api.method_signature) = "resource_names,filter,order_by"; + } + + // Lists the descriptors for monitored resource types used by Logging. 
+ rpc ListMonitoredResourceDescriptors(ListMonitoredResourceDescriptorsRequest) returns (ListMonitoredResourceDescriptorsResponse) { + option (google.api.http) = { + get: "/v2/monitoredResourceDescriptors" + }; + } + + // Lists the logs in projects, organizations, folders, or billing accounts. + // Only logs that have entries are listed. + rpc ListLogs(ListLogsRequest) returns (ListLogsResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*}/logs" + additional_bindings { + get: "/v2/{parent=projects/*}/logs" + } + additional_bindings { + get: "/v2/{parent=organizations/*}/logs" + } + additional_bindings { + get: "/v2/{parent=folders/*}/logs" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*}/logs" + } + }; + option (google.api.method_signature) = "parent"; + } +} + +// The parameters to DeleteLog. +message DeleteLogRequest { + // Required. The resource name of the log to delete: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // `[LOG_ID]` must be URL-encoded. For example, + // `"projects/my-project-id/logs/syslog"`, + // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // For more information about log names, see + // [LogEntry][google.logging.v2.LogEntry]. + string log_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/Log" + } + ]; +} + +// The parameters to WriteLogEntries. +message WriteLogEntriesRequest { + // Optional. A default log resource name that is assigned to all log entries + // in `entries` that do not specify a value for `log_name`: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // `[LOG_ID]` must be URL-encoded. For example: + // + // "projects/my-project-id/logs/syslog" + // "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity" + // + // The permission `logging.logEntries.create` is needed on each project, + // organization, billing account, or folder that is receiving new log + // entries, whether the resource is specified in `logName` or in an + // individual log entry. + string log_name = 1 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + type: "logging.googleapis.com/Log" + } + ]; + + // Optional. A default monitored resource object that is assigned to all log + // entries in `entries` that do not specify a value for `resource`. Example: + // + // { "type": "gce_instance", + // "labels": { + // "zone": "us-central1-a", "instance_id": "00000000000000000000" }} + // + // See [LogEntry][google.logging.v2.LogEntry]. + google.api.MonitoredResource resource = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Default labels that are added to the `labels` field of all log + // entries in `entries`. If a log entry already has a label with the same key + // as a label in this parameter, then the log entry's label is not changed. + // See [LogEntry][google.logging.v2.LogEntry]. + map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Required. The log entries to send to Logging. The order of log + // entries in this list does not matter.
Values supplied in this method's + // `log_name`, `resource`, and `labels` fields are copied into those log + // entries in this list that do not include values for their corresponding + // fields. For more information, see the + // [LogEntry][google.logging.v2.LogEntry] type. + // + // If the `timestamp` or `insert_id` fields are missing in log entries, then + // this method supplies the current time or a unique identifier, respectively. + // The supplied values are chosen so that, among the log entries that did not + // supply their own values, the entries earlier in the list will sort before + // the entries later in the list. See the `entries.list` method. + // + // Log entries with timestamps that are more than the + // [logs retention period](https://cloud.google.com/logging/quota-policy) in + // the past or more than 24 hours in the future will not be available when + // calling `entries.list`. However, those log entries can still be [exported + // with + // LogSinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). + // + // To improve throughput and to avoid exceeding the + // [quota limit](https://cloud.google.com/logging/quota-policy) for calls to + // `entries.write`, you should try to include several log entries in this + // list, rather than calling this method for each individual log entry. + repeated LogEntry entries = 4 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Whether valid entries should be written even if some other + // entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any + // entry is not written, then the response status is the error associated + // with one of the failed entries and the response includes error details + // keyed by the entries' zero-based index in the `entries.write` method. + bool partial_success = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If true, the request should expect normal response, but the + // entries won't be persisted nor exported. Useful for checking whether the + // logging API endpoints are working properly before sending valuable data. + bool dry_run = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from WriteLogEntries. +message WriteLogEntriesResponse {} + +// Error details for WriteLogEntries with partial success. +message WriteLogEntriesPartialErrors { + // When `WriteLogEntriesRequest.partial_success` is true, records the error + // status for entries that were not written due to a permanent error, keyed + // by the entry's zero-based index in `WriteLogEntriesRequest.entries`. + // + // Failed requests for which no entries are written will not include + // per-entry errors. + map<int32, google.rpc.Status> log_entry_errors = 1; +} + +// The parameters to `ListLogEntries`. +message ListLogEntriesRequest { + // Required. Names of one or more parent resources from which to + // retrieve log entries: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + // + // + // Projects listed in the `project_ids` field are added to this list. + repeated string resource_names = 8 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/Log" + } + ]; + + // Optional. A filter that chooses which log entries to return. See [Advanced + // Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries). + // Only log entries that match the filter are returned.
An empty filter + // matches all log entries in the resources listed in `resource_names`. + // Referencing a parent resource that is not listed in `resource_names` will + // cause the filter to return no results. The maximum length of the filter is + // 20000 characters. + string filter = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. How the results should be sorted. Presently, the only permitted + // values are `"timestamp asc"` (default) and `"timestamp desc"`. The first + // option returns entries in order of increasing values of + // `LogEntry.timestamp` (oldest first), and the second option returns entries + // in order of decreasing timestamps (newest first). Entries with equal + // timestamps are returned in order of their `insert_id` values. + string order_by = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum number of results to return from this request. + // Default is 50. If the value is negative or exceeds 1000, + // the request is rejected. The presence of `next_page_token` in the + // response indicates that more results might be available. + int32 page_size = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `page_token` must be the value of + // `next_page_token` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from `ListLogEntries`. +message ListLogEntriesResponse { + // A list of log entries. If `entries` is empty, `nextPageToken` may still be + // returned, indicating that more entries may exist. See `nextPageToken` for + // more information. + repeated LogEntry entries = 1; + + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + // + // If a value for `next_page_token` appears and the `entries` field is empty, + // it means that the search found no log entries so far but it did not have + // time to search all the possible log entries. Retry the method with this + // value for `page_token` to continue the search. Alternatively, consider + // speeding up the search by changing your filter to specify a single log name + // or resource type, or to narrow the time range of the search. + string next_page_token = 2; +} + +// The parameters to ListMonitoredResourceDescriptors +message ListMonitoredResourceDescriptorsRequest { + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + int32 page_size = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from ListMonitoredResourceDescriptors. +message ListMonitoredResourceDescriptorsResponse { + // A list of resource descriptors. 
+ repeated google.api.MonitoredResourceDescriptor resource_descriptors = 1; + + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} + +// The parameters to ListLogs. +message ListLogsRequest { + // Required. The resource name that owns the logs: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/Log" + } + ]; + + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from ListLogs. +message ListLogsResponse { + // A list of log names. For example, + // `"projects/my-project/logs/syslog"` or + // `"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + repeated string log_names = 3; + + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} diff --git a/google/cloud/logging_v2/proto/logging_config.proto b/google/cloud/logging_v2/proto/logging_config.proto new file mode 100644 index 000000000..9486f4a9a --- /dev/null +++ b/google/cloud/logging_v2/proto/logging_config.proto @@ -0,0 +1,1178 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
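The LoggingServiceV2 rpcs defined in logging.proto above surface as flattened methods on the generated client. A hedged sketch, assuming Application Default Credentials and a placeholder project; the keyword arguments mirror the `method_signature` options (`log_name,resource,labels,entries` and `resource_names,filter,order_by`), and the import paths are the ones this diff adds.

```python
# Sketch of WriteLogEntries / ListLogEntries through the generated client.
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.services.logging_service_v2.client import (
    LoggingServiceV2Client,
)
from google.cloud.logging_v2.types.log_entry import LogEntry

client = LoggingServiceV2Client()

# Request-level log_name/resource act as defaults for entries that leave
# those fields unset (see the WriteLogEntriesRequest comments above).
client.write_log_entries(
    log_name="projects/my-project/logs/example-log",  # placeholder names
    resource=monitored_resource_pb2.MonitoredResource(type="global"),
    entries=[LogEntry(text_payload="hello world")],
)

# ListLogEntries returns a pager that follows next_page_token transparently.
for entry in client.list_log_entries(
    resource_names=["projects/my-project"],
    filter="severity>=ERROR",
    order_by="timestamp desc",
):
    print(entry.insert_id, entry.text_payload)
```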
+ +syntax = "proto3"; + +package google.logging.v2; + +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.Logging.V2"; +option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; +option java_multiple_files = true; +option java_outer_classname = "LoggingConfigProto"; +option java_package = "com.google.logging.v2"; +option php_namespace = "Google\\Cloud\\Logging\\V2"; +option ruby_package = "Google::Cloud::Logging::V2"; +option (google.api.resource_definition) = { + type: "logging.googleapis.com/OrganizationLocation" + pattern: "organizations/{organization}/locations/{location}" +}; +option (google.api.resource_definition) = { + type: "logging.googleapis.com/FolderLocation" + pattern: "folders/{folder}/locations/{location}" +}; +option (google.api.resource_definition) = { + type: "logging.googleapis.com/BillingAccountLocation" + pattern: "billingAccounts/{billing_account}/locations/{location}" +}; + +// Service for configuring sinks used to route log entries. +service ConfigServiceV2 { + option (google.api.default_host) = "logging.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/logging.admin," + "https://www.googleapis.com/auth/logging.read"; + + // Lists buckets (Beta). + rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*/locations/*}/buckets" + additional_bindings { + get: "/v2/{parent=projects/*/locations/*}/buckets" + } + additional_bindings { + get: "/v2/{parent=organizations/*/locations/*}/buckets" + } + additional_bindings { + get: "/v2/{parent=folders/*/locations/*}/buckets" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*/locations/*}/buckets" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Gets a bucket (Beta). + rpc GetBucket(GetBucketRequest) returns (LogBucket) { + option (google.api.http) = { + get: "/v2/{name=*/*/locations/*/buckets/*}" + additional_bindings { + get: "/v2/{name=projects/*/locations/*/buckets/*}" + } + additional_bindings { + get: "/v2/{name=organizations/*/locations/*/buckets/*}" + } + additional_bindings { + get: "/v2/{name=folders/*/locations/*/buckets/*}" + } + additional_bindings { + get: "/v2/{name=billingAccounts/*/buckets/*}" + } + }; + } + + // Updates a bucket. This method replaces the following fields in the + // existing bucket with values from the new bucket: `retention_period` + // + // If the retention period is decreased and the bucket is locked, + // FAILED_PRECONDITION will be returned. + // + // If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION + // will be returned. + // + // A buckets region may not be modified after it is created. + // This method is in Beta. 
+ rpc UpdateBucket(UpdateBucketRequest) returns (LogBucket) { + option (google.api.http) = { + patch: "/v2/{name=*/*/locations/*/buckets/*}" + body: "bucket" + additional_bindings { + patch: "/v2/{name=projects/*/locations/*/buckets/*}" + body: "bucket" + } + additional_bindings { + patch: "/v2/{name=organizations/*/locations/*/buckets/*}" + body: "bucket" + } + additional_bindings { + patch: "/v2/{name=folders/*/locations/*/buckets/*}" + body: "bucket" + } + additional_bindings { + patch: "/v2/{name=billingAccounts/*/locations/*/buckets/*}" + body: "bucket" + } + }; + } + + // Lists sinks. + rpc ListSinks(ListSinksRequest) returns (ListSinksResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*}/sinks" + additional_bindings { + get: "/v2/{parent=projects/*}/sinks" + } + additional_bindings { + get: "/v2/{parent=organizations/*}/sinks" + } + additional_bindings { + get: "/v2/{parent=folders/*}/sinks" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*}/sinks" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Gets a sink. + rpc GetSink(GetSinkRequest) returns (LogSink) { + option (google.api.http) = { + get: "/v2/{sink_name=*/*/sinks/*}" + additional_bindings { + get: "/v2/{sink_name=projects/*/sinks/*}" + } + additional_bindings { + get: "/v2/{sink_name=organizations/*/sinks/*}" + } + additional_bindings { + get: "/v2/{sink_name=folders/*/sinks/*}" + } + additional_bindings { + get: "/v2/{sink_name=billingAccounts/*/sinks/*}" + } + }; + option (google.api.method_signature) = "sink_name"; + } + + // Creates a sink that exports specified log entries to a destination. The + // export of newly-ingested log entries begins immediately, unless the sink's + // `writer_identity` is not permitted to write to the destination. A sink can + // export log entries only from the resource owning the sink. + rpc CreateSink(CreateSinkRequest) returns (LogSink) { + option (google.api.http) = { + post: "/v2/{parent=*/*}/sinks" + body: "sink" + additional_bindings { + post: "/v2/{parent=projects/*}/sinks" + body: "sink" + } + additional_bindings { + post: "/v2/{parent=organizations/*}/sinks" + body: "sink" + } + additional_bindings { + post: "/v2/{parent=folders/*}/sinks" + body: "sink" + } + additional_bindings { + post: "/v2/{parent=billingAccounts/*}/sinks" + body: "sink" + } + }; + option (google.api.method_signature) = "parent,sink"; + } + + // Updates a sink. This method replaces the following fields in the existing + // sink with values from the new sink: `destination`, and `filter`. + // + // The updated sink might also have a new `writer_identity`; see the + // `unique_writer_identity` field. 
+ rpc UpdateSink(UpdateSinkRequest) returns (LogSink) { + option (google.api.http) = { + put: "/v2/{sink_name=*/*/sinks/*}" + body: "sink" + additional_bindings { + put: "/v2/{sink_name=projects/*/sinks/*}" + body: "sink" + } + additional_bindings { + put: "/v2/{sink_name=organizations/*/sinks/*}" + body: "sink" + } + additional_bindings { + put: "/v2/{sink_name=folders/*/sinks/*}" + body: "sink" + } + additional_bindings { + put: "/v2/{sink_name=billingAccounts/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=projects/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=organizations/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=folders/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=billingAccounts/*/sinks/*}" + body: "sink" + } + }; + option (google.api.method_signature) = "sink_name,sink,update_mask"; + option (google.api.method_signature) = "sink_name,sink"; + } + + // Deletes a sink. If the sink has a unique `writer_identity`, then that + // service account is also deleted. + rpc DeleteSink(DeleteSinkRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{sink_name=*/*/sinks/*}" + additional_bindings { + delete: "/v2/{sink_name=projects/*/sinks/*}" + } + additional_bindings { + delete: "/v2/{sink_name=organizations/*/sinks/*}" + } + additional_bindings { + delete: "/v2/{sink_name=folders/*/sinks/*}" + } + additional_bindings { + delete: "/v2/{sink_name=billingAccounts/*/sinks/*}" + } + }; + option (google.api.method_signature) = "sink_name"; + } + + // Lists all the exclusions in a parent resource. + rpc ListExclusions(ListExclusionsRequest) returns (ListExclusionsResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*}/exclusions" + additional_bindings { + get: "/v2/{parent=projects/*}/exclusions" + } + additional_bindings { + get: "/v2/{parent=organizations/*}/exclusions" + } + additional_bindings { + get: "/v2/{parent=folders/*}/exclusions" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*}/exclusions" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Gets the description of an exclusion. + rpc GetExclusion(GetExclusionRequest) returns (LogExclusion) { + option (google.api.http) = { + get: "/v2/{name=*/*/exclusions/*}" + additional_bindings { + get: "/v2/{name=projects/*/exclusions/*}" + } + additional_bindings { + get: "/v2/{name=organizations/*/exclusions/*}" + } + additional_bindings { + get: "/v2/{name=folders/*/exclusions/*}" + } + additional_bindings { + get: "/v2/{name=billingAccounts/*/exclusions/*}" + } + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new exclusion in a specified parent resource. + // Only log entries belonging to that resource can be excluded. + // You can have up to 10 exclusions in a resource. 
+ rpc CreateExclusion(CreateExclusionRequest) returns (LogExclusion) { + option (google.api.http) = { + post: "/v2/{parent=*/*}/exclusions" + body: "exclusion" + additional_bindings { + post: "/v2/{parent=projects/*}/exclusions" + body: "exclusion" + } + additional_bindings { + post: "/v2/{parent=organizations/*}/exclusions" + body: "exclusion" + } + additional_bindings { + post: "/v2/{parent=folders/*}/exclusions" + body: "exclusion" + } + additional_bindings { + post: "/v2/{parent=billingAccounts/*}/exclusions" + body: "exclusion" + } + }; + option (google.api.method_signature) = "parent,exclusion"; + } + + // Changes one or more properties of an existing exclusion. + rpc UpdateExclusion(UpdateExclusionRequest) returns (LogExclusion) { + option (google.api.http) = { + patch: "/v2/{name=*/*/exclusions/*}" + body: "exclusion" + additional_bindings { + patch: "/v2/{name=projects/*/exclusions/*}" + body: "exclusion" + } + additional_bindings { + patch: "/v2/{name=organizations/*/exclusions/*}" + body: "exclusion" + } + additional_bindings { + patch: "/v2/{name=folders/*/exclusions/*}" + body: "exclusion" + } + additional_bindings { + patch: "/v2/{name=billingAccounts/*/exclusions/*}" + body: "exclusion" + } + }; + option (google.api.method_signature) = "name,exclusion,update_mask"; + } + + // Deletes an exclusion. + rpc DeleteExclusion(DeleteExclusionRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=*/*/exclusions/*}" + additional_bindings { + delete: "/v2/{name=projects/*/exclusions/*}" + } + additional_bindings { + delete: "/v2/{name=organizations/*/exclusions/*}" + } + additional_bindings { + delete: "/v2/{name=folders/*/exclusions/*}" + } + additional_bindings { + delete: "/v2/{name=billingAccounts/*/exclusions/*}" + } + }; + option (google.api.method_signature) = "name"; + } + + // Gets the Logs Router CMEK settings for the given resource. + // + // Note: CMEK for the Logs Router can currently only be configured for GCP + // organizations. Once configured, it applies to all projects and folders in + // the GCP organization. + // + // See [Enabling CMEK for Logs + // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) + // for more information. + rpc GetCmekSettings(GetCmekSettingsRequest) returns (CmekSettings) { + option (google.api.http) = { + get: "/v2/{name=*/*}/cmekSettings" + additional_bindings { + get: "/v2/{name=organizations/*}/cmekSettings" + } + }; + } + + // Updates the Logs Router CMEK settings for the given resource. + // + // Note: CMEK for the Logs Router can currently only be configured for GCP + // organizations. Once configured, it applies to all projects and folders in + // the GCP organization. + // + // [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings] + // will fail if 1) `kms_key_name` is invalid, or 2) the associated service + // account does not have the required + // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or + // 3) access to the key is disabled. + // + // See [Enabling CMEK for Logs + // Router](https://cloud.google.com/logging/docs/routing/managed-encryption) + // for more information. + rpc UpdateCmekSettings(UpdateCmekSettingsRequest) returns (CmekSettings) { + option (google.api.http) = { + patch: "/v2/{name=*/*}/cmekSettings" + body: "cmek_settings" + additional_bindings { + patch: "/v2/{name=organizations/*}/cmekSettings" + body: "cmek_settings" + } + }; + } +} + +// Describes a repository of logs (Beta). 
+message LogBucket { + option (google.api.resource) = { + type: "logging.googleapis.com/LogBucket" + pattern: "projects/{project}/locations/{location}/buckets/{bucket}" + pattern: "organizations/{organization}/locations/{location}/buckets/{bucket}" + pattern: "folders/{folder}/locations/{location}/buckets/{bucket}" + pattern: "billingAccounts/{billing_account}/locations/{location}/buckets/{bucket}" + }; + + // The resource name of the bucket. + // For example: + // "projects/my-project-id/locations/my-location/buckets/my-bucket-id The + // supported locations are: + // "global" + // "us-central1" + // + // For the location of `global` it is unspecified where logs are actually + // stored. + // Once a bucket has been created, the location can not be changed. + string name = 1; + + // Describes this bucket. + string description = 3; + + // Output only. The creation timestamp of the bucket. This is not set for any of the + // default buckets. + google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The last update timestamp of the bucket. + google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Logs will be retained by default for this amount of time, after which they + // will automatically be deleted. The minimum retention period is 1 day. + // If this value is set to zero at bucket creation time, the default time of + // 30 days will be used. + int32 retention_days = 11; + + // Output only. The bucket lifecycle state. + LifecycleState lifecycle_state = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Describes a sink used to export log entries to one of the following +// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, or a +// Cloud Pub/Sub topic. A logs filter controls which log entries are exported. +// The sink must be created within a project, organization, billing account, or +// folder. +message LogSink { + option (google.api.resource) = { + type: "logging.googleapis.com/LogSink" + pattern: "projects/{project}/sinks/{sink}" + pattern: "organizations/{organization}/sinks/{sink}" + pattern: "folders/{folder}/sinks/{sink}" + pattern: "billingAccounts/{billing_account}/sinks/{sink}" + }; + + // Deprecated. This is unused. + enum VersionFormat { + // An unspecified format version that will default to V2. + VERSION_FORMAT_UNSPECIFIED = 0; + + // `LogEntry` version 2 format. + V2 = 1; + + // `LogEntry` version 1 format. + V1 = 2; + } + + // Required. The client-assigned sink identifier, unique within the project. Example: + // `"my-syslog-errors-to-pubsub"`. Sink identifiers are limited to 100 + // characters and can include only the following characters: upper and + // lower-case alphanumeric characters, underscores, hyphens, and periods. + // First character has to be alphanumeric. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The export destination: + // + // "storage.googleapis.com/[GCS_BUCKET]" + // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" + // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" + // + // The sink's `writer_identity`, set when the sink is created, must + // have permission to write to the destination or else the log + // entries are not exported. For more information, see + // [Exporting Logs with + // Sinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). 
+ string destination = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "*" + } + ]; + + // Optional. An [advanced logs + // filter](https://cloud.google.com/logging/docs/view/advanced-queries). The + // only exported log entries are those that are in the resource owning the + // sink and that match the filter. For example: + // + // logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR + string filter = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A description of this sink. + // The maximum length of the description is 8000 characters. + string description = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to True, then this sink is disabled and it does not + // export any log entries. + bool disabled = 19 [(google.api.field_behavior) = OPTIONAL]; + + // Deprecated. This field is unused. + VersionFormat output_version_format = 6 [deprecated = true]; + + // Output only. An IAM identity–a service account or group—under which Logging + // writes the exported log entries to the sink's destination. This field is + // set by [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] and + // [sinks.update][google.logging.v2.ConfigServiceV2.UpdateSink] based on the + // value of `unique_writer_identity` in those methods. + // + // Until you grant this identity write-access to the destination, log entry + // exports from this sink will fail. For more information, + // see [Granting Access for a + // Resource](https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). + // Consult the destination service's documentation to determine the + // appropriate IAM roles to assign to the identity. + string writer_identity = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. This field applies only to sinks owned by organizations and + // folders. If the field is false, the default, only the logs owned by the + // sink's parent resource are available for export. If the field is true, then + // logs from all the projects, folders, and billing accounts contained in the + // sink's parent resource are also available for export. Whether a particular + // log entry from the children is exported depends on the sink's filter + // expression. For example, if this field is true, then the filter + // `resource.type=gce_instance` would export all Compute Engine VM instance + // log entries from all projects in the sink's parent. To only export entries + // from certain child projects, filter on the project part of the log name: + // + // logName:("projects/test-project1/" OR "projects/test-project2/") AND + // resource.type=gce_instance + bool include_children = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Destination dependent options. + oneof options { + // Optional. Options that affect sinks exporting data to BigQuery. + BigQueryOptions bigquery_options = 12 [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. The creation timestamp of the sink. + // + // This field may not be present for older sinks. + google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The last update timestamp of the sink. + // + // This field may not be present for older sinks. + google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Options that change functionality of a sink exporting data to BigQuery. +message BigQueryOptions { + // Optional. 
Whether to use [BigQuery's partition + // tables](https://cloud.google.com/bigquery/docs/partitioned-tables). By + // default, Logging creates dated tables based on the log entries' timestamps, + // e.g. syslog_20170523. With partitioned tables the date suffix is no longer + // present and [special query + // syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) + // has to be used instead. In both cases, tables are sharded based on UTC + // timezone. + bool use_partitioned_tables = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. True if new timestamp column based partitioning is in use, + // false if legacy ingestion-time partitioning is in use. + // All new sinks will have this field set true and will use timestamp column + // based partitioning. If use_partitioned_tables is false, this value has no + // meaning and will be false. Legacy sinks using partitioned tables will have + // this field set to false. + bool uses_timestamp_column_partitioning = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// LogBucket lifecycle states (Beta). +enum LifecycleState { + // Unspecified state. This is only used/useful for distinguishing + // unset values. + LIFECYCLE_STATE_UNSPECIFIED = 0; + + // The normal and active state. + ACTIVE = 1; + + // The bucket has been marked for deletion by the user. + DELETE_REQUESTED = 2; +} + +// The parameters to `ListBuckets` (Beta). +message ListBucketsRequest { + // Required. The parent resource whose buckets are to be listed: + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]" + // + // Note: The locations portion of the resource must be specified, but + // supplying the character `-` in place of [LOCATION_ID] will return all + // buckets. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/LogBucket" + } + ]; + + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response from ListBuckets (Beta). +message ListBucketsResponse { + // A list of buckets. + repeated LogBucket buckets = 1; + + // If there might be more results than appear in this response, then + // `nextPageToken` is included. To get the next set of results, call the same + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} + +// The parameters to `UpdateBucket` (Beta). +message UpdateBucketRequest { + // Required. The full resource name of the bucket to update. 
+  //
+  //     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //     "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //     "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //
+  // Example:
+  // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`. Also
+  // requires permission "resourcemanager.projects.updateLiens" to set the
+  // locked property.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogBucket"
+    }
+  ];
+
+  // Required. The updated bucket.
+  LogBucket bucket = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Field mask that specifies the fields in `bucket` that need an update. A
+  // bucket field will be overwritten if, and only if, it is in the update
+  // mask. `name` and output only fields cannot be updated.
+  //
+  // For a detailed `FieldMask` definition, see
+  // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
+  //
+  // Example: `updateMask=retention_days`.
+  google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The parameters to `GetBucket` (Beta).
+message GetBucketRequest {
+  // Required. The resource name of the bucket:
+  //
+  //     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //     "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //     "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"
+  //
+  // Example:
+  // `"projects/my-project-id/locations/my-location/buckets/my-bucket-id"`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogBucket"
+    }
+  ];
+}
+
+// The parameters to `ListSinks`.
+message ListSinksRequest {
+  // Required. The parent resource whose sinks are to be listed:
+  //
+  //     "projects/[PROJECT_ID]"
+  //     "organizations/[ORGANIZATION_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]"
+  //     "folders/[FOLDER_ID]"
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "logging.googleapis.com/LogSink"
+    }
+  ];
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method. `pageToken` must be the value of
+  // `nextPageToken` from the previous response. The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored. The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Result returned from `ListSinks`.
+message ListSinksResponse {
+  // A list of sinks.
+  repeated LogSink sinks = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included. To get the next set of results, call the same
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
+
+// The parameters to `GetSink`.
+message GetSinkRequest {
+  // Required.
The resource name of the sink: + // + // "projects/[PROJECT_ID]/sinks/[SINK_ID]" + // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + // "folders/[FOLDER_ID]/sinks/[SINK_ID]" + // + // Example: `"projects/my-project-id/sinks/my-sink-id"`. + string sink_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogSink" + } + ]; +} + +// The parameters to `CreateSink`. +message CreateSinkRequest { + // Required. The resource in which to create the sink: + // + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" + // + // Examples: `"projects/my-logging-project"`, `"organizations/123456789"`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/LogSink" + } + ]; + + // Required. The new sink, whose `name` parameter is a sink identifier that + // is not already in use. + LogSink sink = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Determines the kind of IAM identity returned as `writer_identity` + // in the new sink. If this value is omitted or set to false, and if the + // sink's parent is a project, then the value returned as `writer_identity` is + // the same group or service account used by Logging before the addition of + // writer identities to this API. The sink's destination must be in the same + // project as the sink itself. + // + // If this field is set to true, or if the sink is owned by a non-project + // resource such as an organization, then the value of `writer_identity` will + // be a unique service account used only for exports from the new sink. For + // more information, see `writer_identity` in [LogSink][google.logging.v2.LogSink]. + bool unique_writer_identity = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The parameters to `UpdateSink`. +message UpdateSinkRequest { + // Required. The full resource name of the sink to update, including the parent + // resource and the sink identifier: + // + // "projects/[PROJECT_ID]/sinks/[SINK_ID]" + // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + // "folders/[FOLDER_ID]/sinks/[SINK_ID]" + // + // Example: `"projects/my-project-id/sinks/my-sink-id"`. + string sink_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogSink" + } + ]; + + // Required. The updated sink, whose name is the same identifier that appears as part + // of `sink_name`. + LogSink sink = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. See [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] + // for a description of this field. When updating a sink, the effect of this + // field on the value of `writer_identity` in the updated sink depends on both + // the old and new values of this field: + // + // + If the old and new values of this field are both false or both true, + // then there is no change to the sink's `writer_identity`. + // + If the old value is false and the new value is true, then + // `writer_identity` is changed to a unique service account. + // + It is an error if the old value is true and the new value is + // set to false or defaulted to false. + bool unique_writer_identity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
Field mask that specifies the fields in `sink` that need
+  // an update. A sink field will be overwritten if, and only if, it is
+  // in the update mask. `name` and output only fields cannot be updated.
+  //
+  // An empty updateMask is temporarily treated as using the following mask
+  // for backwards compatibility purposes:
+  //   destination,filter,includeChildren
+  // At some point in the future, this behavior will be removed and specifying
+  // an empty updateMask will be an error.
+  //
+  // For a detailed `FieldMask` definition, see
+  // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
+  //
+  // Example: `updateMask=filter`.
+  google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The parameters to `DeleteSink`.
+message DeleteSinkRequest {
+  // Required. The full resource name of the sink to delete, including the parent
+  // resource and the sink identifier:
+  //
+  //     "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+  //     "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+  //     "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+  //
+  // Example: `"projects/my-project-id/sinks/my-sink-id"`.
+  string sink_name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogSink"
+    }
+  ];
+}
+
+// Specifies a set of log entries that are not to be stored in
+// Logging. If your GCP resource receives a large volume of logs, you can
+// use exclusions to reduce your chargeable logs. Exclusions are
+// processed after log sinks, so you can export log entries before they are
+// excluded. Note that organization-level and folder-level exclusions don't
+// apply to child resources, and that you can't exclude audit log entries.
+message LogExclusion {
+  option (google.api.resource) = {
+    type: "logging.googleapis.com/LogExclusion"
+    pattern: "projects/{project}/exclusions/{exclusion}"
+    pattern: "organizations/{organization}/exclusions/{exclusion}"
+    pattern: "folders/{folder}/exclusions/{exclusion}"
+    pattern: "billingAccounts/{billing_account}/exclusions/{exclusion}"
+  };
+
+  // Required. A client-assigned identifier, such as `"load-balancer-exclusion"`.
+  // Identifiers are limited to 100 characters and can include only letters,
+  // digits, underscores, hyphens, and periods. First character has to be
+  // alphanumeric.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. A description of this exclusion.
+  string description = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. An [advanced logs
+  // filter](https://cloud.google.com/logging/docs/view/advanced-queries) that
+  // matches the log entries to be excluded. By using the [sample
+  // function](https://cloud.google.com/logging/docs/view/advanced-queries#sample),
+  // you can exclude less than 100% of the matching log entries.
+  // For example, the following query matches 99% of low-severity log
+  // entries from Google Cloud Storage buckets:
+  //
+  //     "resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)"
+  //
+  // The maximum length of the filter is 20000 characters.
+  string filter = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. The metric descriptor associated with the logs-based metric.
+  // If unspecified, it uses a default metric descriptor with a DELTA metric
+  // kind, INT64 value type, with no labels and a unit of "1". Such a metric
+  // counts the number of log entries matching the `filter` expression.
+  //
+  // The `name`, `type`, and `description` fields in the `metric_descriptor`
+  // are output only, and are constructed using the `name` and `description`
+  // fields in the LogMetric.
+  //
+  // To create a logs-based metric that records a distribution of log values, a
+  // DELTA metric kind with a DISTRIBUTION value type must be used along with
+  // a `value_extractor` expression in the LogMetric.
+  //
+  // Each label in the metric descriptor must have a matching label
+  // name as the key and an extractor expression as the value in the
+  // `label_extractors` map.
+  //
+  // The `metric_kind` and `value_type` fields in the `metric_descriptor` cannot
+  // be updated once initially configured. New labels can be added in the
+  // `metric_descriptor`, but existing labels cannot be modified except for
+  // their description.
+  google.api.MetricDescriptor metric_descriptor = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A `value_extractor` is required when using a distribution
+  // logs-based metric to extract the values to record from a log entry.
+  // Two functions are supported for value extraction: `EXTRACT(field)` or
+  // `REGEXP_EXTRACT(field, regex)`. The arguments are:
+  //   1. field: The name of the log entry field from which the value is to be
+  //      extracted.
+  //   2. regex: A regular expression using the Google RE2 syntax
+  //      (https://github.com/google/re2/wiki/Syntax) with a single capture
+  //      group to extract data from the specified log entry field. The value
+  //      of the field is converted to a string before applying the regex.
+  //      It is an error to specify a regex that does not include exactly one
+  //      capture group.
+  //
+  // The result of the extraction must be convertible to a double type, as the
+  // distribution always records double values. If either the extraction or
+  // the conversion to double fails, then those values are not recorded in the
+  // distribution.
+  //
+  // Example: `REGEXP_EXTRACT(jsonPayload.request, ".*quantity=(\d+).*")`
+  string value_extractor = 6 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A map from a label key string to an extractor expression which is
+  // used to extract data from a log entry field and assign as the label value.
+  // Each label key specified in the LabelDescriptor must have an associated
+  // extractor expression in this map. The syntax of the extractor expression
+  // is the same as for the `value_extractor` field.
+  //
+  // The extracted value is converted to the type defined in the label
+  // descriptor. If either the extraction or the type conversion fails,
+  // the label will have a default value. The default value for a string
+  // label is an empty string, for an integer label it is 0, and for a boolean
+  // label it is `false`.
+  //
+  // Note that there are upper bounds on the maximum number of labels and the
+  // number of active time series that are allowed in a project.
+  map<string, string> label_extractors = 7 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The `bucket_options` are required when the logs-based metric is
+  // using a DISTRIBUTION value type and it describes the bucket boundaries
+  // used to create a histogram of the extracted values.
+  google.api.Distribution.BucketOptions bucket_options = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The creation timestamp of the metric.
+  //
+  // This field may not be present for older metrics.
+  google.protobuf.Timestamp create_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only.
The last update timestamp of the metric.
+  //
+  // This field may not be present for older metrics.
+  google.protobuf.Timestamp update_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Deprecated. The API version that created or updated this metric.
+  // The v2 format is used by default and cannot be changed.
+  ApiVersion version = 4 [deprecated = true];
+}
+
+// The parameters to ListLogMetrics.
+message ListLogMetricsRequest {
+  // Required. The name of the project containing the metrics:
+  //
+  //     "projects/[PROJECT_ID]"
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method. `pageToken` must be the value of
+  // `nextPageToken` from the previous response. The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored. The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Result returned from ListLogMetrics.
+message ListLogMetricsResponse {
+  // A list of logs-based metrics.
+  repeated LogMetric metrics = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included. To get the next set of results, call this
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
+
+// The parameters to GetLogMetric.
+message GetLogMetricRequest {
+  // Required. The resource name of the desired metric:
+  //
+  //     "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+  string metric_name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogMetric"
+    }
+  ];
+}
+
+// The parameters to CreateLogMetric.
+message CreateLogMetricRequest {
+  // Required. The resource name of the project in which to create the metric:
+  //
+  //     "projects/[PROJECT_ID]"
+  //
+  // The new metric must be provided in the request.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "logging.googleapis.com/LogMetric"
+    }
+  ];
+
+  // Required. The new logs-based metric, which must not have an identifier that
+  // already exists.
+  LogMetric metric = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The parameters to UpdateLogMetric.
+message UpdateLogMetricRequest {
+  // Required. The resource name of the metric to update:
+  //
+  //     "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+  //
+  // The updated metric must be provided in the request and its
+  // `name` field must be the same as `[METRIC_ID]`. If the metric
+  // does not exist in `[PROJECT_ID]`, then a new metric is created.
+  string metric_name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogMetric"
+    }
+  ];
+
+  // Required. The updated metric.
+  LogMetric metric = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The parameters to DeleteLogMetric.
+message DeleteLogMetricRequest {
+  // Required.
The resource name of the metric to delete: + // + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + string metric_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogMetric" + } + ]; +} diff --git a/google/cloud/logging_v2/py.typed b/google/cloud/logging_v2/py.typed new file mode 100644 index 000000000..6c7420d0d --- /dev/null +++ b/google/cloud/logging_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-logging package uses inline types. diff --git a/google/cloud/logging_v2/services/__init__.py b/google/cloud/logging_v2/services/__init__.py new file mode 100644 index 000000000..42ffdf2bc --- /dev/null +++ b/google/cloud/logging_v2/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/logging_v2/services/config_service_v2/__init__.py b/google/cloud/logging_v2/services/config_service_v2/__init__.py new file mode 100644 index 000000000..4ab8f4d40 --- /dev/null +++ b/google/cloud/logging_v2/services/config_service_v2/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import ConfigServiceV2Client +from .async_client import ConfigServiceV2AsyncClient + +__all__ = ( + "ConfigServiceV2Client", + "ConfigServiceV2AsyncClient", +) diff --git a/google/cloud/logging_v2/services/config_service_v2/async_client.py b/google/cloud/logging_v2/services/config_service_v2/async_client.py new file mode 100644 index 000000000..d025f5916 --- /dev/null +++ b/google/cloud/logging_v2/services/config_service_v2/async_client.py @@ -0,0 +1,1531 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.logging_v2.services.config_service_v2 import pagers +from google.cloud.logging_v2.types import logging_config +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import ConfigServiceV2Transport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ConfigServiceV2GrpcAsyncIOTransport +from .client import ConfigServiceV2Client + + +class ConfigServiceV2AsyncClient: + """Service for configuring sinks used to route log entries.""" + + _client: ConfigServiceV2Client + + DEFAULT_ENDPOINT = ConfigServiceV2Client.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ConfigServiceV2Client.DEFAULT_MTLS_ENDPOINT + + cmek_settings_path = staticmethod(ConfigServiceV2Client.cmek_settings_path) + parse_cmek_settings_path = staticmethod( + ConfigServiceV2Client.parse_cmek_settings_path + ) + log_bucket_path = staticmethod(ConfigServiceV2Client.log_bucket_path) + parse_log_bucket_path = staticmethod(ConfigServiceV2Client.parse_log_bucket_path) + log_exclusion_path = staticmethod(ConfigServiceV2Client.log_exclusion_path) + parse_log_exclusion_path = staticmethod( + ConfigServiceV2Client.parse_log_exclusion_path + ) + log_sink_path = staticmethod(ConfigServiceV2Client.log_sink_path) + parse_log_sink_path = staticmethod(ConfigServiceV2Client.parse_log_sink_path) + + common_billing_account_path = staticmethod( + ConfigServiceV2Client.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ConfigServiceV2Client.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(ConfigServiceV2Client.common_folder_path) + parse_common_folder_path = staticmethod( + ConfigServiceV2Client.parse_common_folder_path + ) + + common_organization_path = staticmethod( + ConfigServiceV2Client.common_organization_path + ) + parse_common_organization_path = staticmethod( + ConfigServiceV2Client.parse_common_organization_path + ) + + common_project_path = staticmethod(ConfigServiceV2Client.common_project_path) + parse_common_project_path = staticmethod( + ConfigServiceV2Client.parse_common_project_path + ) + + common_location_path = staticmethod(ConfigServiceV2Client.common_location_path) + parse_common_location_path = staticmethod( + ConfigServiceV2Client.parse_common_location_path + ) + + from_service_account_file = ConfigServiceV2Client.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ConfigServiceV2Transport: + """Return the transport used by the client instance. + + Returns: + ConfigServiceV2Transport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial( + type(ConfigServiceV2Client).get_transport_class, type(ConfigServiceV2Client) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ConfigServiceV2Transport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the config service v2 client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ConfigServiceV2Transport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = ConfigServiceV2Client( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_buckets( + self, + request: logging_config.ListBucketsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBucketsAsyncPager: + r"""Lists buckets (Beta). + + Args: + request (:class:`~.logging_config.ListBucketsRequest`): + The request object. The parameters to `ListBuckets` + (Beta). + parent (:class:`str`): + Required. The parent resource whose buckets are to be + listed: + + :: + + "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" + "folders/[FOLDER_ID]/locations/[LOCATION_ID]" + + Note: The locations portion of the resource must be + specified, but supplying the character ``-`` in place of + [LOCATION_ID] will return all buckets. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
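+
+        For example (with a placeholder project ID), passing
+        ``parent="projects/my-project/locations/-"`` lists buckets across
+        all locations, since ``-`` acts as a location wildcard as noted
+        above.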
+ + Returns: + ~.pagers.ListBucketsAsyncPager: + The response from ListBuckets (Beta). + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.ListBucketsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_buckets, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBucketsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_bucket( + self, + request: logging_config.GetBucketRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogBucket: + r"""Gets a bucket (Beta). + + Args: + request (:class:`~.logging_config.GetBucketRequest`): + The request object. The parameters to `GetBucket` + (Beta). + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogBucket: + Describes a repository of logs + (Beta). + + """ + # Create or coerce a protobuf request object. + + request = logging_config.GetBucketRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_bucket, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_bucket( + self, + request: logging_config.UpdateBucketRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogBucket: + r"""Updates a bucket. 
This method replaces the following fields in
+        the existing bucket with values from the new bucket:
+        ``retention_period``
+
+        If the retention period is decreased and the bucket is locked,
+        FAILED_PRECONDITION will be returned.
+
+        If the bucket has a LifecycleState of DELETE_REQUESTED,
+        FAILED_PRECONDITION will be returned.
+
+        A bucket's region may not be modified after it is created. This
+        method is in Beta.
+
+        Args:
+            request (:class:`~.logging_config.UpdateBucketRequest`):
+                The request object. The parameters to `UpdateBucket`
+                (Beta).
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.LogBucket:
+                Describes a repository of logs
+                (Beta).
+
+        """
+        # Create or coerce a protobuf request object.
+
+        request = logging_config.UpdateBucketRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_bucket,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def list_sinks(
+        self,
+        request: logging_config.ListSinksRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListSinksAsyncPager:
+        r"""Lists sinks.
+
+        Args:
+            request (:class:`~.logging_config.ListSinksRequest`):
+                The request object. The parameters to `ListSinks`.
+            parent (:class:`str`):
+                Required. The parent resource whose sinks are to be
+                listed:
+
+                ::
+
+                    "projects/[PROJECT_ID]"
+                    "organizations/[ORGANIZATION_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]"
+                    "folders/[FOLDER_ID]".
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListSinksAsyncPager:
+                Result returned from ``ListSinks``.
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = logging_config.ListSinksRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
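+        # The default retry configured below uses exponential backoff: an
+        # initial 0.1s delay grows 1.3x per attempt (capped at 60s), and only
+        # the listed, typically transient, error types are retried.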
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_sinks, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSinksAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_sink( + self, + request: logging_config.GetSinkRequest = None, + *, + sink_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogSink: + r"""Gets a sink. + + Args: + request (:class:`~.logging_config.GetSinkRequest`): + The request object. The parameters to `GetSink`. + sink_name (:class:`str`): + Required. The resource name of the sink: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + This corresponds to the ``sink_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogSink: + Describes a sink used to export log + entries to one of the following + destinations in any project: a Cloud + Storage bucket, a BigQuery dataset, or a + Cloud Pub/Sub topic. A logs filter + controls which log entries are exported. + The sink must be created within a + project, organization, billing account, + or folder. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([sink_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.GetSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if sink_name is not None: + request.sink_name = sink_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
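+        # get_sink is wrapped with the same transient-error retry policy as
+        # list_sinks above.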
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_sink, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("sink_name", request.sink_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_sink( + self, + request: logging_config.CreateSinkRequest = None, + *, + parent: str = None, + sink: logging_config.LogSink = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogSink: + r"""Creates a sink that exports specified log entries to a + destination. The export of newly-ingested log entries begins + immediately, unless the sink's ``writer_identity`` is not + permitted to write to the destination. A sink can export log + entries only from the resource owning the sink. + + Args: + request (:class:`~.logging_config.CreateSinkRequest`): + The request object. The parameters to `CreateSink`. + parent (:class:`str`): + Required. The resource in which to create the sink: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Examples: ``"projects/my-logging-project"``, + ``"organizations/123456789"``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + sink (:class:`~.logging_config.LogSink`): + Required. The new sink, whose ``name`` parameter is a + sink identifier that is not already in use. + This corresponds to the ``sink`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogSink: + Describes a sink used to export log + entries to one of the following + destinations in any project: a Cloud + Storage bucket, a BigQuery dataset, or a + Cloud Pub/Sub topic. A logs filter + controls which log entries are exported. + The sink must be created within a + project, organization, billing account, + or folder. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, sink]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.CreateSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if sink is not None: + request.sink = sink + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
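+        # Note: create_sink is wrapped with a 120s default timeout and no
+        # default retry, so a failed create is only retried if the caller
+        # passes an explicit `retry` argument.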
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.create_sink,
+            default_timeout=120.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    async def update_sink(
+        self,
+        request: logging_config.UpdateSinkRequest = None,
+        *,
+        sink_name: str = None,
+        sink: logging_config.LogSink = None,
+        update_mask: field_mask.FieldMask = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.LogSink:
+        r"""Updates a sink. This method replaces the following fields in the
+        existing sink with values from the new sink: ``destination``
+        and ``filter``.
+
+        The updated sink might also have a new ``writer_identity``; see
+        the ``unique_writer_identity`` field.
+
+        Args:
+            request (:class:`~.logging_config.UpdateSinkRequest`):
+                The request object. The parameters to `UpdateSink`.
+            sink_name (:class:`str`):
+                Required. The full resource name of the sink to update,
+                including the parent resource and the sink identifier:
+
+                ::
+
+                    "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+                    "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+                    "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+
+                Example: ``"projects/my-project-id/sinks/my-sink-id"``.
+                This corresponds to the ``sink_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            sink (:class:`~.logging_config.LogSink`):
+                Required. The updated sink, whose name is the same
+                identifier that appears as part of ``sink_name``.
+                This corresponds to the ``sink`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (:class:`~.field_mask.FieldMask`):
+                Optional. Field mask that specifies the fields in
+                ``sink`` that need an update. A sink field will be
+                overwritten if, and only if, it is in the update mask.
+                ``name`` and output only fields cannot be updated.
+
+                An empty updateMask is temporarily treated as using the
+                following mask for backwards compatibility purposes:
+                destination,filter,includeChildren At some point in the
+                future, this behavior will be removed and specifying an
+                empty updateMask will be an error.
+
+                For a detailed ``FieldMask`` definition, see
+                https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
+
+                Example: ``updateMask=filter``.
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.LogSink:
+                Describes a sink used to export log
+                entries to one of the following
+                destinations in any project: a Cloud
+                Storage bucket, a BigQuery dataset, or a
+                Cloud Pub/Sub topic. A logs filter
+                controls which log entries are exported.
+                The sink must be created within a
+                project, organization, billing account,
+                or folder.
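+
+        For example (all names are placeholders), to change only the filter
+        of an existing sink::
+
+            await client.update_sink(
+                sink_name="projects/my-project/sinks/my-sink",
+                sink=logging_config.LogSink(filter="severity>=WARNING"),
+                update_mask=field_mask.FieldMask(paths=["filter"]),
+            )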
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([sink_name, sink, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.UpdateSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if sink_name is not None: + request.sink_name = sink_name + if sink is not None: + request.sink = sink + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_sink, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("sink_name", request.sink_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_sink( + self, + request: logging_config.DeleteSinkRequest = None, + *, + sink_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a sink. If the sink has a unique ``writer_identity``, + then that service account is also deleted. + + Args: + request (:class:`~.logging_config.DeleteSinkRequest`): + The request object. The parameters to `DeleteSink`. + sink_name (:class:`str`): + Required. The full resource name of the sink to delete, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + This corresponds to the ``sink_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([sink_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.DeleteSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
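+        # Either a full DeleteSinkRequest or the flattened `sink_name`
+        # argument may be supplied, but not both (enforced by the check
+        # above).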
+ + if sink_name is not None: + request.sink_name = sink_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_sink, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("sink_name", request.sink_name),) + ), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_exclusions( + self, + request: logging_config.ListExclusionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExclusionsAsyncPager: + r"""Lists all the exclusions in a parent resource. + + Args: + request (:class:`~.logging_config.ListExclusionsRequest`): + The request object. The parameters to `ListExclusions`. + parent (:class:`str`): + Required. The parent resource whose exclusions are to be + listed. + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListExclusionsAsyncPager: + Result returned from ``ListExclusions``. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.ListExclusionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_exclusions, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExclusionsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_exclusion( + self, + request: logging_config.GetExclusionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogExclusion: + r"""Gets the description of an exclusion. + + Args: + request (:class:`~.logging_config.GetExclusionRequest`): + The request object. The parameters to `GetExclusion`. + name (:class:`str`): + Required. The resource name of an existing exclusion: + + :: + + "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]" + "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]" + "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]" + + Example: + ``"projects/my-project-id/exclusions/my-exclusion-id"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogExclusion: + Specifies a set of log entries that + are not to be stored in Logging. If your + GCP resource receives a large volume of + logs, you can use exclusions to reduce + your chargeable logs. Exclusions are + processed after log sinks, so you can + export log entries before they are + excluded. Note that organization-level + and folder-level exclusions don't apply + to child resources, and that you can't + exclude audit log entries. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.GetExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_exclusion, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def create_exclusion( + self, + request: logging_config.CreateExclusionRequest = None, + *, + parent: str = None, + exclusion: logging_config.LogExclusion = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogExclusion: + r"""Creates a new exclusion in a specified parent + resource. Only log entries belonging to that resource + can be excluded. You can have up to 10 exclusions in a + resource. + + Args: + request (:class:`~.logging_config.CreateExclusionRequest`): + The request object. The parameters to `CreateExclusion`. + parent (:class:`str`): + Required. The parent resource in which to create the + exclusion: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Examples: ``"projects/my-logging-project"``, + ``"organizations/123456789"``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exclusion (:class:`~.logging_config.LogExclusion`): + Required. The new exclusion, whose ``name`` parameter is + an exclusion name that is not already used in the parent + resource. + This corresponds to the ``exclusion`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogExclusion: + Specifies a set of log entries that + are not to be stored in Logging. If your + GCP resource receives a large volume of + logs, you can use exclusions to reduce + your chargeable logs. Exclusions are + processed after log sinks, so you can + export log entries before they are + excluded. Note that organization-level + and folder-level exclusions don't apply + to child resources, and that you can't + exclude audit log entries. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, exclusion]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.CreateExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if exclusion is not None: + request.exclusion = exclusion + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_exclusion, + default_timeout=120.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def update_exclusion( + self, + request: logging_config.UpdateExclusionRequest = None, + *, + name: str = None, + exclusion: logging_config.LogExclusion = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogExclusion: + r"""Changes one or more properties of an existing + exclusion. + + Args: + request (:class:`~.logging_config.UpdateExclusionRequest`): + The request object. The parameters to `UpdateExclusion`. + name (:class:`str`): + Required. The resource name of the exclusion to update: + + :: + + "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]" + "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]" + "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]" + + Example: + ``"projects/my-project-id/exclusions/my-exclusion-id"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exclusion (:class:`~.logging_config.LogExclusion`): + Required. New values for the existing exclusion. Only + the fields specified in ``update_mask`` are relevant. + This corresponds to the ``exclusion`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. A non-empty list of fields to change in the + existing exclusion. New values for the fields are taken + from the corresponding fields in the + [LogExclusion][google.logging.v2.LogExclusion] included + in this request. Fields not mentioned in ``update_mask`` + are not changed and are ignored in the request. + + For example, to change the filter and description of an + exclusion, specify an ``update_mask`` of + ``"filter,description"``. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogExclusion: + Specifies a set of log entries that + are not to be stored in Logging. If your + GCP resource receives a large volume of + logs, you can use exclusions to reduce + your chargeable logs. Exclusions are + processed after log sinks, so you can + export log entries before they are + excluded. Note that organization-level + and folder-level exclusions don't apply + to child resources, and that you can't + exclude audit log entries. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, exclusion, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.UpdateExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + if exclusion is not None: + request.exclusion = exclusion + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_exclusion, + default_timeout=120.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_exclusion( + self, + request: logging_config.DeleteExclusionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an exclusion. + + Args: + request (:class:`~.logging_config.DeleteExclusionRequest`): + The request object. The parameters to `DeleteExclusion`. + name (:class:`str`): + Required. The resource name of an existing exclusion to + delete: + + :: + + "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]" + "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]" + "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]" + + Example: + ``"projects/my-project-id/exclusions/my-exclusion-id"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_config.DeleteExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_exclusion, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+        await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    async def get_cmek_settings(
+        self,
+        request: logging_config.GetCmekSettingsRequest = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.CmekSettings:
+        r"""Gets the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Args:
+            request (:class:`~.logging_config.GetCmekSettingsRequest`):
+                The request object. The parameters to
+                [GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings].
+                See [Enabling CMEK for Logs
+                Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+                for more information.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.CmekSettings:
+                Describes the customer-managed encryption key (CMEK)
+                settings associated with a project, folder,
+                organization, billing account, or flexible resource.
+
+                Note: CMEK for the Logs Router can currently only be
+                configured for GCP organizations. Once configured, it
+                applies to all projects and folders in the GCP
+                organization.
+
+                See `Enabling CMEK for Logs
+                Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+                for more information.
+
+        """
+        # Create or coerce a protobuf request object.
+
+        request = logging_config.GetCmekSettingsRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_cmek_settings,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
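As a quick orientation for the method above, here is a minimal sketch (not part of this patch) of reading CMEK settings asynchronously; the organization ID is hypothetical:

```python
import asyncio

from google.cloud.logging_v2.services.config_service_v2.async_client import (
    ConfigServiceV2AsyncClient,
)
from google.cloud.logging_v2.types import logging_config


async def show_cmek_settings():
    client = ConfigServiceV2AsyncClient()
    # GetCmekSettings routes on the `name` field of the request.
    request = logging_config.GetCmekSettingsRequest(
        name="organizations/123456789/cmekSettings"  # hypothetical organization
    )
    settings = await client.get_cmek_settings(request=request)
    print(settings.kms_key_name)


asyncio.run(show_cmek_settings())
```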
+    async def update_cmek_settings(
+        self,
+        request: logging_config.UpdateCmekSettingsRequest = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.CmekSettings:
+        r"""Updates the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings]
+        will fail if 1) ``kms_key_name`` is invalid, or 2) the
+        associated service account does not have the required
+        ``roles/cloudkms.cryptoKeyEncrypterDecrypter`` role assigned for
+        the key, or 3) access to the key is disabled.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Args:
+            request (:class:`~.logging_config.UpdateCmekSettingsRequest`):
+                The request object. The parameters to
+                [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings].
+                See [Enabling CMEK for Logs
+                Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+                for more information.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.CmekSettings:
+                Describes the customer-managed encryption key (CMEK)
+                settings associated with a project, folder,
+                organization, billing account, or flexible resource.
+
+                Note: CMEK for the Logs Router can currently only be
+                configured for GCP organizations. Once configured, it
+                applies to all projects and folders in the GCP
+                organization.
+
+                See `Enabling CMEK for Logs
+                Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+                for more information.
+
+        """
+        # Create or coerce a protobuf request object.
+
+        request = logging_config.UpdateCmekSettingsRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_cmek_settings,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("ConfigServiceV2AsyncClient",)
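With the async client module complete, a hedged end-to-end sketch (not part of the generated file) of the pager pattern used throughout it; the project ID is hypothetical:

```python
import asyncio

from google.cloud.logging_v2.services.config_service_v2.async_client import (
    ConfigServiceV2AsyncClient,
)


async def main():
    client = ConfigServiceV2AsyncClient()
    # Flattened-argument form; passing a request object instead is equally
    # valid, but mixing both raises ValueError (see the sanity checks above).
    pager = await client.list_exclusions(parent="projects/my-project")
    async for exclusion in pager:  # the pager resolves additional pages lazily
        print(exclusion.name, exclusion.filter)


asyncio.run(main())
```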
diff --git a/google/cloud/logging_v2/services/config_service_v2/client.py b/google/cloud/logging_v2/services/config_service_v2/client.py
new file mode 100644
index 000000000..ea9ee605a
--- /dev/null
+++ b/google/cloud/logging_v2/services/config_service_v2/client.py
@@ -0,0 +1,1692 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.logging_v2.services.config_service_v2 import pagers
+from google.cloud.logging_v2.types import logging_config
+from google.protobuf import field_mask_pb2 as field_mask  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+from .transports.base import ConfigServiceV2Transport, DEFAULT_CLIENT_INFO
+from .transports.grpc import ConfigServiceV2GrpcTransport
+from .transports.grpc_asyncio import ConfigServiceV2GrpcAsyncIOTransport
+
+
+class ConfigServiceV2ClientMeta(type):
+    """Metaclass for the ConfigServiceV2 client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[ConfigServiceV2Transport]]
+    _transport_registry["grpc"] = ConfigServiceV2GrpcTransport
+    _transport_registry["grpc_asyncio"] = ConfigServiceV2GrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[ConfigServiceV2Transport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ConfigServiceV2Client(metaclass=ConfigServiceV2ClientMeta):
+    """Service for configuring sinks used to route log entries."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
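To make the conversion above concrete, a small illustration of the expected values per the regex (not part of the generated file):

```python
# Plain Google endpoints gain the ".mtls" label:
ConfigServiceV2Client._get_default_mtls_endpoint("logging.googleapis.com")
# -> "logging.mtls.googleapis.com"

# Sandbox endpoints are rewritten inside the sandbox domain:
ConfigServiceV2Client._get_default_mtls_endpoint("logging.sandbox.googleapis.com")
# -> "logging.mtls.sandbox.googleapis.com"

# Already-mTLS or non-Google endpoints pass through unchanged:
ConfigServiceV2Client._get_default_mtls_endpoint("logging.mtls.googleapis.com")
# -> "logging.mtls.googleapis.com"
ConfigServiceV2Client._get_default_mtls_endpoint("example.com")
# -> "example.com"
```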
+    DEFAULT_ENDPOINT = "logging.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ConfigServiceV2Client: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ConfigServiceV2Transport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            ConfigServiceV2Transport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def cmek_settings_path(project: str,) -> str:
+        """Return a fully-qualified cmek_settings string."""
+        return "projects/{project}/cmekSettings".format(project=project,)
+
+    @staticmethod
+    def parse_cmek_settings_path(path: str) -> Dict[str, str]:
+        """Parse a cmek_settings path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/cmekSettings$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def log_bucket_path(project: str, location: str, bucket: str,) -> str:
+        """Return a fully-qualified log_bucket string."""
+        return "projects/{project}/locations/{location}/buckets/{bucket}".format(
+            project=project, location=location, bucket=bucket,
+        )
+
+    @staticmethod
+    def parse_log_bucket_path(path: str) -> Dict[str, str]:
+        """Parse a log_bucket path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/buckets/(?P<bucket>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def log_exclusion_path(project: str, exclusion: str,) -> str:
+        """Return a fully-qualified log_exclusion string."""
+        return "projects/{project}/exclusions/{exclusion}".format(
+            project=project, exclusion=exclusion,
+        )
+
+    @staticmethod
+    def parse_log_exclusion_path(path: str) -> Dict[str, str]:
+        """Parse a log_exclusion path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/exclusions/(?P<exclusion>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def log_sink_path(project: str, sink: str,) -> str:
+        """Return a fully-qualified log_sink string."""
+        return "projects/{project}/sinks/{sink}".format(project=project, sink=sink,)
+
+    @staticmethod
+    def parse_log_sink_path(path: str) -> Dict[str, str]:
+        """Parse a log_sink path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/sinks/(?P<sink>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
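A quick sketch of how the resource-path helpers round-trip (resource IDs are hypothetical; not part of the generated file):

```python
path = ConfigServiceV2Client.log_sink_path("my-project", "my-sink")
# -> "projects/my-project/sinks/my-sink"

ConfigServiceV2Client.parse_log_sink_path(path)
# -> {"project": "my-project", "sink": "my-sink"}

# Paths that do not match the expected shape parse to an empty dict:
ConfigServiceV2Client.parse_log_sink_path("folders/123/sinks/my-sink")
# -> {}
```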
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, ConfigServiceV2Transport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the config service v2 client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.ConfigServiceV2Transport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (client_options_lib.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ConfigServiceV2Transport): + # transport is a ConfigServiceV2Transport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def list_buckets( + self, + request: logging_config.ListBucketsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBucketsPager: + r"""Lists buckets (Beta). + + Args: + request (:class:`~.logging_config.ListBucketsRequest`): + The request object. The parameters to `ListBuckets` + (Beta). + parent (:class:`str`): + Required. 
The parent resource whose buckets are to be + listed: + + :: + + "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" + "folders/[FOLDER_ID]/locations/[LOCATION_ID]" + + Note: The locations portion of the resource must be + specified, but supplying the character ``-`` in place of + [LOCATION_ID] will return all buckets. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListBucketsPager: + The response from ListBuckets (Beta). + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.ListBucketsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.ListBucketsRequest): + request = logging_config.ListBucketsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_buckets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListBucketsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_bucket( + self, + request: logging_config.GetBucketRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogBucket: + r"""Gets a bucket (Beta). + + Args: + request (:class:`~.logging_config.GetBucketRequest`): + The request object. The parameters to `GetBucket` + (Beta). + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogBucket: + Describes a repository of logs + (Beta). + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.GetBucketRequest. 
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_config.GetBucketRequest):
+            request = logging_config.GetBucketRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_bucket]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def update_bucket(
+        self,
+        request: logging_config.UpdateBucketRequest = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.LogBucket:
+        r"""Updates a bucket. This method replaces the following fields in
+        the existing bucket with values from the new bucket:
+        ``retention_period``
+
+        If the retention period is decreased and the bucket is locked,
+        FAILED_PRECONDITION will be returned.
+
+        If the bucket has a LifecycleState of DELETE_REQUESTED,
+        FAILED_PRECONDITION will be returned.
+
+        A bucket's region may not be modified after it is created. This
+        method is in Beta.
+
+        Args:
+            request (:class:`~.logging_config.UpdateBucketRequest`):
+                The request object. The parameters to `UpdateBucket`
+                (Beta).
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.LogBucket:
+                Describes a repository of logs
+                (Beta).
+
+        """
+        # Create or coerce a protobuf request object.
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_config.UpdateBucketRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_config.UpdateBucketRequest):
+            request = logging_config.UpdateBucketRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_bucket]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def list_sinks(
+        self,
+        request: logging_config.ListSinksRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListSinksPager:
+        r"""Lists sinks.
+
+        Args:
+            request (:class:`~.logging_config.ListSinksRequest`):
+                The request object. The parameters to `ListSinks`.
+            parent (:class:`str`):
+                Required. The parent resource whose sinks are to be
+                listed:
+
+                ::
+
+                    "projects/[PROJECT_ID]"
+                    "organizations/[ORGANIZATION_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]"
+                    "folders/[FOLDER_ID]".
+ This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListSinksPager: + Result returned from ``ListSinks``. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.ListSinksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.ListSinksRequest): + request = logging_config.ListSinksRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_sinks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSinksPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_sink( + self, + request: logging_config.GetSinkRequest = None, + *, + sink_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogSink: + r"""Gets a sink. + + Args: + request (:class:`~.logging_config.GetSinkRequest`): + The request object. The parameters to `GetSink`. + sink_name (:class:`str`): + Required. The resource name of the sink: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + This corresponds to the ``sink_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogSink: + Describes a sink used to export log + entries to one of the following + destinations in any project: a Cloud + Storage bucket, a BigQuery dataset, or a + Cloud Pub/Sub topic. A logs filter + controls which log entries are exported. 
+ The sink must be created within a + project, organization, billing account, + or folder. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([sink_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.GetSinkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.GetSinkRequest): + request = logging_config.GetSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if sink_name is not None: + request.sink_name = sink_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_sink] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("sink_name", request.sink_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_sink( + self, + request: logging_config.CreateSinkRequest = None, + *, + parent: str = None, + sink: logging_config.LogSink = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogSink: + r"""Creates a sink that exports specified log entries to a + destination. The export of newly-ingested log entries begins + immediately, unless the sink's ``writer_identity`` is not + permitted to write to the destination. A sink can export log + entries only from the resource owning the sink. + + Args: + request (:class:`~.logging_config.CreateSinkRequest`): + The request object. The parameters to `CreateSink`. + parent (:class:`str`): + Required. The resource in which to create the sink: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Examples: ``"projects/my-logging-project"``, + ``"organizations/123456789"``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + sink (:class:`~.logging_config.LogSink`): + Required. The new sink, whose ``name`` parameter is a + sink identifier that is not already in use. + This corresponds to the ``sink`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogSink: + Describes a sink used to export log + entries to one of the following + destinations in any project: a Cloud + Storage bucket, a BigQuery dataset, or a + Cloud Pub/Sub topic. A logs filter + controls which log entries are exported. + The sink must be created within a + project, organization, billing account, + or folder. 
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, sink])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_config.CreateSinkRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_config.CreateSinkRequest):
+            request = logging_config.CreateSinkRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+        if sink is not None:
+            request.sink = sink
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_sink]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def update_sink(
+        self,
+        request: logging_config.UpdateSinkRequest = None,
+        *,
+        sink_name: str = None,
+        sink: logging_config.LogSink = None,
+        update_mask: field_mask.FieldMask = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.LogSink:
+        r"""Updates a sink. This method replaces the following fields in the
+        existing sink with values from the new sink: ``destination``,
+        and ``filter``.
+
+        The updated sink might also have a new ``writer_identity``; see
+        the ``unique_writer_identity`` field.
+
+        Args:
+            request (:class:`~.logging_config.UpdateSinkRequest`):
+                The request object. The parameters to `UpdateSink`.
+            sink_name (:class:`str`):
+                Required. The full resource name of the sink to update,
+                including the parent resource and the sink identifier:
+
+                ::
+
+                    "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+                    "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+                    "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+
+                Example: ``"projects/my-project-id/sinks/my-sink-id"``.
+                This corresponds to the ``sink_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            sink (:class:`~.logging_config.LogSink`):
+                Required. The updated sink, whose name is the same
+                identifier that appears as part of ``sink_name``.
+                This corresponds to the ``sink`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (:class:`~.field_mask.FieldMask`):
+                Optional. Field mask that specifies the fields in
+                ``sink`` that need an update. A sink field will be
+                overwritten if, and only if, it is in the update mask.
+                ``name`` and output only fields cannot be updated.
+
+                An empty updateMask is temporarily treated as using the
+                following mask for backwards compatibility purposes:
+                ``destination,filter,includeChildren``. At some point in the
+                future, this behavior will be removed and specifying an empty
+                updateMask will be an error.
+ + For a detailed ``FieldMask`` definition, see + https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask + + Example: ``updateMask=filter``. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogSink: + Describes a sink used to export log + entries to one of the following + destinations in any project: a Cloud + Storage bucket, a BigQuery dataset, or a + Cloud Pub/Sub topic. A logs filter + controls which log entries are exported. + The sink must be created within a + project, organization, billing account, + or folder. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([sink_name, sink, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.UpdateSinkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.UpdateSinkRequest): + request = logging_config.UpdateSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if sink_name is not None: + request.sink_name = sink_name + if sink is not None: + request.sink = sink + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_sink] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("sink_name", request.sink_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_sink( + self, + request: logging_config.DeleteSinkRequest = None, + *, + sink_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a sink. If the sink has a unique ``writer_identity``, + then that service account is also deleted. + + Args: + request (:class:`~.logging_config.DeleteSinkRequest`): + The request object. The parameters to `DeleteSink`. + sink_name (:class:`str`): + Required. The full resource name of the sink to delete, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + This corresponds to the ``sink_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([sink_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.DeleteSinkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.DeleteSinkRequest): + request = logging_config.DeleteSinkRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if sink_name is not None: + request.sink_name = sink_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_sink] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("sink_name", request.sink_name),) + ), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def list_exclusions( + self, + request: logging_config.ListExclusionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExclusionsPager: + r"""Lists all the exclusions in a parent resource. + + Args: + request (:class:`~.logging_config.ListExclusionsRequest`): + The request object. The parameters to `ListExclusions`. + parent (:class:`str`): + Required. The parent resource whose exclusions are to be + listed. + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListExclusionsPager: + Result returned from ``ListExclusions``. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.ListExclusionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, logging_config.ListExclusionsRequest): + request = logging_config.ListExclusionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_exclusions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExclusionsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_exclusion( + self, + request: logging_config.GetExclusionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogExclusion: + r"""Gets the description of an exclusion. + + Args: + request (:class:`~.logging_config.GetExclusionRequest`): + The request object. The parameters to `GetExclusion`. + name (:class:`str`): + Required. The resource name of an existing exclusion: + + :: + + "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]" + "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]" + "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]" + + Example: + ``"projects/my-project-id/exclusions/my-exclusion-id"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogExclusion: + Specifies a set of log entries that + are not to be stored in Logging. If your + GCP resource receives a large volume of + logs, you can use exclusions to reduce + your chargeable logs. Exclusions are + processed after log sinks, so you can + export log entries before they are + excluded. Note that organization-level + and folder-level exclusions don't apply + to child resources, and that you can't + exclude audit log entries. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.GetExclusionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.GetExclusionRequest): + request = logging_config.GetExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_exclusion] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_exclusion( + self, + request: logging_config.CreateExclusionRequest = None, + *, + parent: str = None, + exclusion: logging_config.LogExclusion = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogExclusion: + r"""Creates a new exclusion in a specified parent + resource. Only log entries belonging to that resource + can be excluded. You can have up to 10 exclusions in a + resource. + + Args: + request (:class:`~.logging_config.CreateExclusionRequest`): + The request object. The parameters to `CreateExclusion`. + parent (:class:`str`): + Required. The parent resource in which to create the + exclusion: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Examples: ``"projects/my-logging-project"``, + ``"organizations/123456789"``. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exclusion (:class:`~.logging_config.LogExclusion`): + Required. The new exclusion, whose ``name`` parameter is + an exclusion name that is not already used in the parent + resource. + This corresponds to the ``exclusion`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogExclusion: + Specifies a set of log entries that + are not to be stored in Logging. If your + GCP resource receives a large volume of + logs, you can use exclusions to reduce + your chargeable logs. Exclusions are + processed after log sinks, so you can + export log entries before they are + excluded. Note that organization-level + and folder-level exclusions don't apply + to child resources, and that you can't + exclude audit log entries. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, exclusion]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.CreateExclusionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.CreateExclusionRequest): + request = logging_config.CreateExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
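+        # As a hypothetical example, an exclusion passed through the
+        # flattened ``exclusion`` argument might be built like:
+        #
+        #   exclusion = logging_config.LogExclusion(
+        #       name="my-exclusion",
+        #       description="Exclude low-severity entries",
+        #       filter="severity < ERROR",
+        #   )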
+ + if parent is not None: + request.parent = parent + if exclusion is not None: + request.exclusion = exclusion + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_exclusion] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_exclusion( + self, + request: logging_config.UpdateExclusionRequest = None, + *, + name: str = None, + exclusion: logging_config.LogExclusion = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_config.LogExclusion: + r"""Changes one or more properties of an existing + exclusion. + + Args: + request (:class:`~.logging_config.UpdateExclusionRequest`): + The request object. The parameters to `UpdateExclusion`. + name (:class:`str`): + Required. The resource name of the exclusion to update: + + :: + + "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]" + "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]" + "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]" + + Example: + ``"projects/my-project-id/exclusions/my-exclusion-id"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + exclusion (:class:`~.logging_config.LogExclusion`): + Required. New values for the existing exclusion. Only + the fields specified in ``update_mask`` are relevant. + This corresponds to the ``exclusion`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. A non-empty list of fields to change in the + existing exclusion. New values for the fields are taken + from the corresponding fields in the + [LogExclusion][google.logging.v2.LogExclusion] included + in this request. Fields not mentioned in ``update_mask`` + are not changed and are ignored in the request. + + For example, to change the filter and description of an + exclusion, specify an ``update_mask`` of + ``"filter,description"``. + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_config.LogExclusion: + Specifies a set of log entries that + are not to be stored in Logging. If your + GCP resource receives a large volume of + logs, you can use exclusions to reduce + your chargeable logs. Exclusions are + processed after log sinks, so you can + export log entries before they are + excluded. Note that organization-level + and folder-level exclusions don't apply + to child resources, and that you can't + exclude audit log entries. + + """ + # Create or coerce a protobuf request object. 
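+        # As the docstring above notes, a mask limiting the update to the
+        # filter and description fields could be built as (illustrative):
+        #
+        #   mask = field_mask.FieldMask(paths=["filter", "description"])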
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, exclusion, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.UpdateExclusionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.UpdateExclusionRequest): + request = logging_config.UpdateExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if exclusion is not None: + request.exclusion = exclusion + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_exclusion] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_exclusion( + self, + request: logging_config.DeleteExclusionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an exclusion. + + Args: + request (:class:`~.logging_config.DeleteExclusionRequest`): + The request object. The parameters to `DeleteExclusion`. + name (:class:`str`): + Required. The resource name of an existing exclusion to + delete: + + :: + + "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]" + "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]" + "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]" + + Example: + ``"projects/my-project-id/exclusions/my-exclusion-id"``. + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging_config.DeleteExclusionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging_config.DeleteExclusionRequest): + request = logging_config.DeleteExclusionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
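+        # Note: the routing header assembled further below is sent as gRPC
+        # metadata of the form (value URL-encoded; resource name hypothetical):
+        #
+        #   ("x-goog-request-params",
+        #    "name=projects%2Fmy-project%2Fexclusions%2Fmy-exclusion")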
+
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_exclusion]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    def get_cmek_settings(
+        self,
+        request: logging_config.GetCmekSettingsRequest = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.CmekSettings:
+        r"""Gets the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Args:
+            request (:class:`~.logging_config.GetCmekSettingsRequest`):
+                The request object. The parameters to
+                [GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings].
+                See [Enabling CMEK for Logs
+                Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+                for more information.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.CmekSettings:
+                Describes the customer-managed encryption key (CMEK)
+                settings associated with a project, folder,
+                organization, billing account, or flexible resource.
+
+                Note: CMEK for the Logs Router can currently only be
+                configured for GCP organizations. Once configured, it
+                applies to all projects and folders in the GCP
+                organization.
+
+                See `Enabling CMEK for Logs
+                Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+                for more information.
+
+        """
+        # Create or coerce a protobuf request object.
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_config.GetCmekSettingsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_config.GetCmekSettingsRequest):
+            request = logging_config.GetCmekSettingsRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_cmek_settings]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def update_cmek_settings(
+        self,
+        request: logging_config.UpdateCmekSettingsRequest = None,
+        *,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_config.CmekSettings:
+        r"""Updates the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings]
+        will fail if 1) ``kms_key_name`` is invalid, or 2) the
+        associated service account does not have the required
+        ``roles/cloudkms.cryptoKeyEncrypterDecrypter`` role assigned for
+        the key, or 3) access to the key is disabled.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Args:
+            request (:class:`~.logging_config.UpdateCmekSettingsRequest`):
+                The request object. The parameters to
+                [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings].
+                See [Enabling CMEK for Logs
+                Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+                for more information.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_config.CmekSettings:
+                Describes the customer-managed encryption key (CMEK)
+                settings associated with a project, folder,
+                organization, billing account, or flexible resource.
+
+                Note: CMEK for the Logs Router can currently only be
+                configured for GCP organizations. Once configured, it
+                applies to all projects and folders in the GCP
+                organization.
+
+                See `Enabling CMEK for Logs
+                Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+                for more information.
+
+        """
+        # Create or coerce a protobuf request object.
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_config.UpdateCmekSettingsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_config.UpdateCmekSettingsRequest):
+            request = logging_config.UpdateCmekSettingsRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_cmek_settings]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("ConfigServiceV2Client",)
diff --git a/google/cloud/logging_v2/services/config_service_v2/pagers.py b/google/cloud/logging_v2/services/config_service_v2/pagers.py
new file mode 100644
index 000000000..173780b5e
--- /dev/null
+++ b/google/cloud/logging_v2/services/config_service_v2/pagers.py
@@ -0,0 +1,404 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.logging_v2.types import logging_config + + +class ListBucketsPager: + """A pager for iterating through ``list_buckets`` requests. + + This class thinly wraps an initial + :class:`~.logging_config.ListBucketsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``buckets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBuckets`` requests and continue to iterate + through the ``buckets`` field on the + corresponding responses. + + All the usual :class:`~.logging_config.ListBucketsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., logging_config.ListBucketsResponse], + request: logging_config.ListBucketsRequest, + response: logging_config.ListBucketsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging_config.ListBucketsRequest`): + The initial request object. + response (:class:`~.logging_config.ListBucketsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging_config.ListBucketsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[logging_config.ListBucketsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[logging_config.LogBucket]: + for page in self.pages: + yield from page.buckets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBucketsAsyncPager: + """A pager for iterating through ``list_buckets`` requests. + + This class thinly wraps an initial + :class:`~.logging_config.ListBucketsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``buckets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBuckets`` requests and continue to iterate + through the ``buckets`` field on the + corresponding responses. + + All the usual :class:`~.logging_config.ListBucketsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[logging_config.ListBucketsResponse]], + request: logging_config.ListBucketsRequest, + response: logging_config.ListBucketsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging_config.ListBucketsRequest`): + The initial request object. + response (:class:`~.logging_config.ListBucketsResponse`): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging_config.ListBucketsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[logging_config.ListBucketsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[logging_config.LogBucket]: + async def async_generator(): + async for page in self.pages: + for response in page.buckets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSinksPager: + """A pager for iterating through ``list_sinks`` requests. + + This class thinly wraps an initial + :class:`~.logging_config.ListSinksResponse` object, and + provides an ``__iter__`` method to iterate through its + ``sinks`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSinks`` requests and continue to iterate + through the ``sinks`` field on the + corresponding responses. + + All the usual :class:`~.logging_config.ListSinksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., logging_config.ListSinksResponse], + request: logging_config.ListSinksRequest, + response: logging_config.ListSinksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging_config.ListSinksRequest`): + The initial request object. + response (:class:`~.logging_config.ListSinksResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging_config.ListSinksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[logging_config.ListSinksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[logging_config.LogSink]: + for page in self.pages: + yield from page.sinks + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSinksAsyncPager: + """A pager for iterating through ``list_sinks`` requests. + + This class thinly wraps an initial + :class:`~.logging_config.ListSinksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``sinks`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSinks`` requests and continue to iterate + through the ``sinks`` field on the + corresponding responses. 
+ + All the usual :class:`~.logging_config.ListSinksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[logging_config.ListSinksResponse]], + request: logging_config.ListSinksRequest, + response: logging_config.ListSinksResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging_config.ListSinksRequest`): + The initial request object. + response (:class:`~.logging_config.ListSinksResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging_config.ListSinksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[logging_config.ListSinksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[logging_config.LogSink]: + async def async_generator(): + async for page in self.pages: + for response in page.sinks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExclusionsPager: + """A pager for iterating through ``list_exclusions`` requests. + + This class thinly wraps an initial + :class:`~.logging_config.ListExclusionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``exclusions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExclusions`` requests and continue to iterate + through the ``exclusions`` field on the + corresponding responses. + + All the usual :class:`~.logging_config.ListExclusionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., logging_config.ListExclusionsResponse], + request: logging_config.ListExclusionsRequest, + response: logging_config.ListExclusionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging_config.ListExclusionsRequest`): + The initial request object. + response (:class:`~.logging_config.ListExclusionsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = logging_config.ListExclusionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[logging_config.ListExclusionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[logging_config.LogExclusion]: + for page in self.pages: + yield from page.exclusions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExclusionsAsyncPager: + """A pager for iterating through ``list_exclusions`` requests. + + This class thinly wraps an initial + :class:`~.logging_config.ListExclusionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``exclusions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExclusions`` requests and continue to iterate + through the ``exclusions`` field on the + corresponding responses. + + All the usual :class:`~.logging_config.ListExclusionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[logging_config.ListExclusionsResponse]], + request: logging_config.ListExclusionsRequest, + response: logging_config.ListExclusionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging_config.ListExclusionsRequest`): + The initial request object. + response (:class:`~.logging_config.ListExclusionsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging_config.ListExclusionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[logging_config.ListExclusionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[logging_config.LogExclusion]: + async def async_generator(): + async for page in self.pages: + for response in page.exclusions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/logging_v2/services/config_service_v2/transports/__init__.py b/google/cloud/logging_v2/services/config_service_v2/transports/__init__.py new file mode 100644 index 000000000..c4ae13076 --- /dev/null +++ b/google/cloud/logging_v2/services/config_service_v2/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import ConfigServiceV2Transport +from .grpc import ConfigServiceV2GrpcTransport +from .grpc_asyncio import ConfigServiceV2GrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ConfigServiceV2Transport]] +_transport_registry["grpc"] = ConfigServiceV2GrpcTransport +_transport_registry["grpc_asyncio"] = ConfigServiceV2GrpcAsyncIOTransport + + +__all__ = ( + "ConfigServiceV2Transport", + "ConfigServiceV2GrpcTransport", + "ConfigServiceV2GrpcAsyncIOTransport", +) diff --git a/google/cloud/logging_v2/services/config_service_v2/transports/base.py b/google/cloud/logging_v2/services/config_service_v2/transports/base.py new file mode 100644 index 000000000..a0393aa98 --- /dev/null +++ b/google/cloud/logging_v2/services/config_service_v2/transports/base.py @@ -0,0 +1,405 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.logging_v2.types import logging_config +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ConfigServiceV2Transport(abc.ABC): + """Abstract transport class for ConfigServiceV2.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + ) + + def __init__( + self, + *, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
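+        # The Retry objects configured below implement exponential backoff:
+        # with initial=0.1, maximum=60.0 and multiplier=1.3, successive
+        # delays grow roughly as 0.1s, 0.13s, 0.169s, ... capped at 60s,
+        # and only the listed exception types trigger another attempt.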
+ self._wrapped_methods = { + self.list_buckets: gapic_v1.method.wrap_method( + self.list_buckets, default_timeout=None, client_info=client_info, + ), + self.get_bucket: gapic_v1.method.wrap_method( + self.get_bucket, default_timeout=None, client_info=client_info, + ), + self.update_bucket: gapic_v1.method.wrap_method( + self.update_bucket, default_timeout=None, client_info=client_info, + ), + self.list_sinks: gapic_v1.method.wrap_method( + self.list_sinks, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_sink: gapic_v1.method.wrap_method( + self.get_sink, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_sink: gapic_v1.method.wrap_method( + self.create_sink, default_timeout=120.0, client_info=client_info, + ), + self.update_sink: gapic_v1.method.wrap_method( + self.update_sink, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_sink: gapic_v1.method.wrap_method( + self.delete_sink, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_exclusions: gapic_v1.method.wrap_method( + self.list_exclusions, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_exclusion: gapic_v1.method.wrap_method( + self.get_exclusion, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.create_exclusion: gapic_v1.method.wrap_method( + self.create_exclusion, default_timeout=120.0, client_info=client_info, + ), + self.update_exclusion: gapic_v1.method.wrap_method( + self.update_exclusion, default_timeout=120.0, client_info=client_info, + ), + self.delete_exclusion: gapic_v1.method.wrap_method( + self.delete_exclusion, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_cmek_settings: gapic_v1.method.wrap_method( + self.get_cmek_settings, default_timeout=None, client_info=client_info, + ), + self.update_cmek_settings: gapic_v1.method.wrap_method( + self.update_cmek_settings, + default_timeout=None, + client_info=client_info, + ), + } + + @property + 
def list_buckets( + self, + ) -> typing.Callable[ + [logging_config.ListBucketsRequest], + typing.Union[ + logging_config.ListBucketsResponse, + typing.Awaitable[logging_config.ListBucketsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_bucket( + self, + ) -> typing.Callable[ + [logging_config.GetBucketRequest], + typing.Union[ + logging_config.LogBucket, typing.Awaitable[logging_config.LogBucket] + ], + ]: + raise NotImplementedError() + + @property + def update_bucket( + self, + ) -> typing.Callable[ + [logging_config.UpdateBucketRequest], + typing.Union[ + logging_config.LogBucket, typing.Awaitable[logging_config.LogBucket] + ], + ]: + raise NotImplementedError() + + @property + def list_sinks( + self, + ) -> typing.Callable[ + [logging_config.ListSinksRequest], + typing.Union[ + logging_config.ListSinksResponse, + typing.Awaitable[logging_config.ListSinksResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_sink( + self, + ) -> typing.Callable[ + [logging_config.GetSinkRequest], + typing.Union[logging_config.LogSink, typing.Awaitable[logging_config.LogSink]], + ]: + raise NotImplementedError() + + @property + def create_sink( + self, + ) -> typing.Callable[ + [logging_config.CreateSinkRequest], + typing.Union[logging_config.LogSink, typing.Awaitable[logging_config.LogSink]], + ]: + raise NotImplementedError() + + @property + def update_sink( + self, + ) -> typing.Callable[ + [logging_config.UpdateSinkRequest], + typing.Union[logging_config.LogSink, typing.Awaitable[logging_config.LogSink]], + ]: + raise NotImplementedError() + + @property + def delete_sink( + self, + ) -> typing.Callable[ + [logging_config.DeleteSinkRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def list_exclusions( + self, + ) -> typing.Callable[ + [logging_config.ListExclusionsRequest], + typing.Union[ + logging_config.ListExclusionsResponse, + typing.Awaitable[logging_config.ListExclusionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_exclusion( + self, + ) -> typing.Callable[ + [logging_config.GetExclusionRequest], + typing.Union[ + logging_config.LogExclusion, typing.Awaitable[logging_config.LogExclusion] + ], + ]: + raise NotImplementedError() + + @property + def create_exclusion( + self, + ) -> typing.Callable[ + [logging_config.CreateExclusionRequest], + typing.Union[ + logging_config.LogExclusion, typing.Awaitable[logging_config.LogExclusion] + ], + ]: + raise NotImplementedError() + + @property + def update_exclusion( + self, + ) -> typing.Callable[ + [logging_config.UpdateExclusionRequest], + typing.Union[ + logging_config.LogExclusion, typing.Awaitable[logging_config.LogExclusion] + ], + ]: + raise NotImplementedError() + + @property + def delete_exclusion( + self, + ) -> typing.Callable[ + [logging_config.DeleteExclusionRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_cmek_settings( + self, + ) -> typing.Callable[ + [logging_config.GetCmekSettingsRequest], + typing.Union[ + logging_config.CmekSettings, typing.Awaitable[logging_config.CmekSettings] + ], + ]: + raise NotImplementedError() + + @property + def update_cmek_settings( + self, + ) -> typing.Callable[ + [logging_config.UpdateCmekSettingsRequest], + typing.Union[ + logging_config.CmekSettings, typing.Awaitable[logging_config.CmekSettings] + ], + ]: + raise NotImplementedError() + + +__all__ = ("ConfigServiceV2Transport",) 
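+
+
+# The Union[..., Awaitable[...]] signatures above let this one abstract
+# interface describe both concrete transports: the synchronous gRPC
+# transport returns response objects directly, while the asyncio variant
+# returns awaitables. A sketch of the difference (names hypothetical):
+#
+#   response = sync_transport.get_sink(request)          # LogSink
+#   response = await async_transport.get_sink(request)   # LogSink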
diff --git a/google/cloud/logging_v2/services/config_service_v2/transports/grpc.py b/google/cloud/logging_v2/services/config_service_v2/transports/grpc.py
new file mode 100644
index 000000000..5603beeb5
--- /dev/null
+++ b/google/cloud/logging_v2/services/config_service_v2/transports/grpc.py
@@ -0,0 +1,675 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.logging_v2.types import logging_config
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import ConfigServiceV2Transport, DEFAULT_CLIENT_INFO
+
+
+class ConfigServiceV2GrpcTransport(ConfigServiceV2Transport):
+    """gRPC backend transport for ConfigServiceV2.
+
+    Service for configuring sinks used to route log entries.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
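+        # Usage sketch (hypothetical endpoint): a caller that manages its
+        # own channel bypasses the credential discovery above entirely:
+        #
+        #   channel = grpc.insecure_channel("localhost:8080")
+        #   transport = ConfigServiceV2GrpcTransport(channel=channel)
+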
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def list_buckets(
+        self,
+    ) -> Callable[
+        [logging_config.ListBucketsRequest], logging_config.ListBucketsResponse
+    ]:
+        r"""Return a callable for the list buckets method over gRPC.
+
+        Lists buckets (Beta).
+
+        Returns:
+            Callable[[~.ListBucketsRequest],
+                    ~.ListBucketsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_buckets" not in self._stubs:
+            self._stubs["list_buckets"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/ListBuckets",
+                request_serializer=logging_config.ListBucketsRequest.serialize,
+                response_deserializer=logging_config.ListBucketsResponse.deserialize,
+            )
+        return self._stubs["list_buckets"]
+
+    @property
+    def get_bucket(
+        self,
+    ) -> Callable[[logging_config.GetBucketRequest], logging_config.LogBucket]:
+        r"""Return a callable for the get bucket method over gRPC.
+
+        Gets a bucket (Beta).
+
+        Returns:
+            Callable[[~.GetBucketRequest],
+                    ~.LogBucket]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
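+        # The check below creates the stub on first access and caches it in
+        # self._stubs, so later reads of this property reuse one callable:
+        #
+        #   stub = transport.get_bucket   # first access: builds the stub
+        #   stub = transport.get_bucket   # subsequent: returns cached stub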
+        if "get_bucket" not in self._stubs:
+            self._stubs["get_bucket"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/GetBucket",
+                request_serializer=logging_config.GetBucketRequest.serialize,
+                response_deserializer=logging_config.LogBucket.deserialize,
+            )
+        return self._stubs["get_bucket"]
+
+    @property
+    def update_bucket(
+        self,
+    ) -> Callable[[logging_config.UpdateBucketRequest], logging_config.LogBucket]:
+        r"""Return a callable for the update bucket method over gRPC.
+
+        Updates a bucket. This method replaces the following fields in
+        the existing bucket with values from the new bucket:
+        ``retention_period``
+
+        If the retention period is decreased and the bucket is locked,
+        FAILED_PRECONDITION will be returned.
+
+        If the bucket has a LifecycleState of DELETE_REQUESTED,
+        FAILED_PRECONDITION will be returned.
+
+        A bucket's region may not be modified after it is created. This
+        method is in Beta.
+
+        Returns:
+            Callable[[~.UpdateBucketRequest],
+                    ~.LogBucket]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "update_bucket" not in self._stubs:
+            self._stubs["update_bucket"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/UpdateBucket",
+                request_serializer=logging_config.UpdateBucketRequest.serialize,
+                response_deserializer=logging_config.LogBucket.deserialize,
+            )
+        return self._stubs["update_bucket"]
+
+    @property
+    def list_sinks(
+        self,
+    ) -> Callable[[logging_config.ListSinksRequest], logging_config.ListSinksResponse]:
+        r"""Return a callable for the list sinks method over gRPC.
+
+        Lists sinks.
+
+        Returns:
+            Callable[[~.ListSinksRequest],
+                    ~.ListSinksResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_sinks" not in self._stubs:
+            self._stubs["list_sinks"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/ListSinks",
+                request_serializer=logging_config.ListSinksRequest.serialize,
+                response_deserializer=logging_config.ListSinksResponse.deserialize,
+            )
+        return self._stubs["list_sinks"]
+
+    @property
+    def get_sink(
+        self,
+    ) -> Callable[[logging_config.GetSinkRequest], logging_config.LogSink]:
+        r"""Return a callable for the get sink method over gRPC.
+
+        Gets a sink.
+
+        Returns:
+            Callable[[~.GetSinkRequest],
+                    ~.LogSink]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_sink" not in self._stubs:
+            self._stubs["get_sink"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/GetSink",
+                request_serializer=logging_config.GetSinkRequest.serialize,
+                response_deserializer=logging_config.LogSink.deserialize,
+            )
+        return self._stubs["get_sink"]
+
+    @property
+    def create_sink(
+        self,
+    ) -> Callable[[logging_config.CreateSinkRequest], logging_config.LogSink]:
+        r"""Return a callable for the create sink method over gRPC.
+ + Creates a sink that exports specified log entries to a + destination. The export of newly-ingested log entries begins + immediately, unless the sink's ``writer_identity`` is not + permitted to write to the destination. A sink can export log + entries only from the resource owning the sink. + + Returns: + Callable[[~.CreateSinkRequest], + ~.LogSink]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_sink" not in self._stubs: + self._stubs["create_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/CreateSink", + request_serializer=logging_config.CreateSinkRequest.serialize, + response_deserializer=logging_config.LogSink.deserialize, + ) + return self._stubs["create_sink"] + + @property + def update_sink( + self, + ) -> Callable[[logging_config.UpdateSinkRequest], logging_config.LogSink]: + r"""Return a callable for the update sink method over gRPC. + + Updates a sink. This method replaces the following fields in the + existing sink with values from the new sink: ``destination``, + and ``filter``. + + The updated sink might also have a new ``writer_identity``; see + the ``unique_writer_identity`` field. + + Returns: + Callable[[~.UpdateSinkRequest], + ~.LogSink]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_sink" not in self._stubs: + self._stubs["update_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/UpdateSink", + request_serializer=logging_config.UpdateSinkRequest.serialize, + response_deserializer=logging_config.LogSink.deserialize, + ) + return self._stubs["update_sink"] + + @property + def delete_sink(self) -> Callable[[logging_config.DeleteSinkRequest], empty.Empty]: + r"""Return a callable for the delete sink method over gRPC. + + Deletes a sink. If the sink has a unique ``writer_identity``, + then that service account is also deleted. + + Returns: + Callable[[~.DeleteSinkRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_sink" not in self._stubs: + self._stubs["delete_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/DeleteSink", + request_serializer=logging_config.DeleteSinkRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_sink"] + + @property + def list_exclusions( + self, + ) -> Callable[ + [logging_config.ListExclusionsRequest], logging_config.ListExclusionsResponse + ]: + r"""Return a callable for the list exclusions method over gRPC. + + Lists all the exclusions in a parent resource. + + Returns: + Callable[[~.ListExclusionsRequest], + ~.ListExclusionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
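+        # Calling the resulting stub performs the RPC directly, e.g.
+        # (hypothetical parent):
+        #
+        #   resp = transport.list_exclusions(
+        #       logging_config.ListExclusionsRequest(parent="projects/p")
+        #   )
+        #   names = [e.name for e in resp.exclusions]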
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_exclusions" not in self._stubs: + self._stubs["list_exclusions"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/ListExclusions", + request_serializer=logging_config.ListExclusionsRequest.serialize, + response_deserializer=logging_config.ListExclusionsResponse.deserialize, + ) + return self._stubs["list_exclusions"] + + @property + def get_exclusion( + self, + ) -> Callable[[logging_config.GetExclusionRequest], logging_config.LogExclusion]: + r"""Return a callable for the get exclusion method over gRPC. + + Gets the description of an exclusion. + + Returns: + Callable[[~.GetExclusionRequest], + ~.LogExclusion]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_exclusion" not in self._stubs: + self._stubs["get_exclusion"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/GetExclusion", + request_serializer=logging_config.GetExclusionRequest.serialize, + response_deserializer=logging_config.LogExclusion.deserialize, + ) + return self._stubs["get_exclusion"] + + @property + def create_exclusion( + self, + ) -> Callable[[logging_config.CreateExclusionRequest], logging_config.LogExclusion]: + r"""Return a callable for the create exclusion method over gRPC. + + Creates a new exclusion in a specified parent + resource. Only log entries belonging to that resource + can be excluded. You can have up to 10 exclusions in a + resource. + + Returns: + Callable[[~.CreateExclusionRequest], + ~.LogExclusion]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_exclusion" not in self._stubs: + self._stubs["create_exclusion"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/CreateExclusion", + request_serializer=logging_config.CreateExclusionRequest.serialize, + response_deserializer=logging_config.LogExclusion.deserialize, + ) + return self._stubs["create_exclusion"] + + @property + def update_exclusion( + self, + ) -> Callable[[logging_config.UpdateExclusionRequest], logging_config.LogExclusion]: + r"""Return a callable for the update exclusion method over gRPC. + + Changes one or more properties of an existing + exclusion. + + Returns: + Callable[[~.UpdateExclusionRequest], + ~.LogExclusion]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
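+        # The stub created below is cached in self._stubs, so repeated
+        # accesses of this property reuse the same bound gRPC method
+        # instead of re-registering it on every call.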
+        if "update_exclusion" not in self._stubs:
+            self._stubs["update_exclusion"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/UpdateExclusion",
+                request_serializer=logging_config.UpdateExclusionRequest.serialize,
+                response_deserializer=logging_config.LogExclusion.deserialize,
+            )
+        return self._stubs["update_exclusion"]
+
+    @property
+    def delete_exclusion(
+        self,
+    ) -> Callable[[logging_config.DeleteExclusionRequest], empty.Empty]:
+        r"""Return a callable for the delete exclusion method over gRPC.
+
+        Deletes an exclusion.
+
+        Returns:
+            Callable[[~.DeleteExclusionRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_exclusion" not in self._stubs:
+            self._stubs["delete_exclusion"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/DeleteExclusion",
+                request_serializer=logging_config.DeleteExclusionRequest.serialize,
+                response_deserializer=empty.Empty.FromString,
+            )
+        return self._stubs["delete_exclusion"]
+
+    @property
+    def get_cmek_settings(
+        self,
+    ) -> Callable[[logging_config.GetCmekSettingsRequest], logging_config.CmekSettings]:
+        r"""Return a callable for the get cmek settings method over gRPC.
+
+        Gets the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Returns:
+            Callable[[~.GetCmekSettingsRequest],
+                    ~.CmekSettings]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_cmek_settings" not in self._stubs:
+            self._stubs["get_cmek_settings"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/GetCmekSettings",
+                request_serializer=logging_config.GetCmekSettingsRequest.serialize,
+                response_deserializer=logging_config.CmekSettings.deserialize,
+            )
+        return self._stubs["get_cmek_settings"]
+
+    @property
+    def update_cmek_settings(
+        self,
+    ) -> Callable[
+        [logging_config.UpdateCmekSettingsRequest], logging_config.CmekSettings
+    ]:
+        r"""Return a callable for the update cmek settings method over gRPC.
+
+        Updates the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings]
+        will fail if 1) ``kms_key_name`` is invalid, or 2) the
+        associated service account does not have the required
+        ``roles/cloudkms.cryptoKeyEncrypterDecrypter`` role assigned for
+        the key, or 3) access to the key is disabled.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Returns:
+            Callable[[~.UpdateCmekSettingsRequest],
+                    ~.CmekSettings]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "update_cmek_settings" not in self._stubs:
+            self._stubs["update_cmek_settings"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/UpdateCmekSettings",
+                request_serializer=logging_config.UpdateCmekSettingsRequest.serialize,
+                response_deserializer=logging_config.CmekSettings.deserialize,
+            )
+        return self._stubs["update_cmek_settings"]
+
+
+__all__ = ("ConfigServiceV2GrpcTransport",)
diff --git a/google/cloud/logging_v2/services/config_service_v2/transports/grpc_asyncio.py b/google/cloud/logging_v2/services/config_service_v2/transports/grpc_asyncio.py
new file mode 100644
index 000000000..a4c94db22
--- /dev/null
+++ b/google/cloud/logging_v2/services/config_service_v2/transports/grpc_asyncio.py
@@ -0,0 +1,702 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.logging_v2.types import logging_config
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import ConfigServiceV2Transport, DEFAULT_CLIENT_INFO
+from .grpc import ConfigServiceV2GrpcTransport
+
+
+class ConfigServiceV2GrpcAsyncIOTransport(ConfigServiceV2Transport):
+    """gRPC AsyncIO backend transport for ConfigServiceV2.
+
+    Service for configuring sinks used to route log entries.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
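+            # Note: ``credentials`` is deliberately set to the sentinel
+            # value ``False`` below so the base transport skips resolving
+            # application default credentials; the caller-supplied channel
+            # is assumed to already carry whatever credentials it needs.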
+ credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_buckets( + self, + ) -> Callable[ + [logging_config.ListBucketsRequest], + Awaitable[logging_config.ListBucketsResponse], + ]: + r"""Return a callable for the list buckets method over gRPC. + + Lists buckets (Beta). + + Returns: + Callable[[~.ListBucketsRequest], + Awaitable[~.ListBucketsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_buckets" not in self._stubs: + self._stubs["list_buckets"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/ListBuckets", + request_serializer=logging_config.ListBucketsRequest.serialize, + response_deserializer=logging_config.ListBucketsResponse.deserialize, + ) + return self._stubs["list_buckets"] + + @property + def get_bucket( + self, + ) -> Callable[ + [logging_config.GetBucketRequest], Awaitable[logging_config.LogBucket] + ]: + r"""Return a callable for the get bucket method over gRPC. + + Gets a bucket (Beta). + + Returns: + Callable[[~.GetBucketRequest], + Awaitable[~.LogBucket]]: + A function that, when called, will call the underlying RPC + on the server. 
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_bucket" not in self._stubs:
+            self._stubs["get_bucket"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/GetBucket",
+                request_serializer=logging_config.GetBucketRequest.serialize,
+                response_deserializer=logging_config.LogBucket.deserialize,
+            )
+        return self._stubs["get_bucket"]
+
+    @property
+    def update_bucket(
+        self,
+    ) -> Callable[
+        [logging_config.UpdateBucketRequest], Awaitable[logging_config.LogBucket]
+    ]:
+        r"""Return a callable for the update bucket method over gRPC.
+
+        Updates a bucket. This method replaces the following fields in
+        the existing bucket with values from the new bucket:
+        ``retention_period``.
+
+        If the retention period is decreased and the bucket is locked,
+        FAILED_PRECONDITION will be returned.
+
+        If the bucket has a LifecycleState of DELETE_REQUESTED,
+        FAILED_PRECONDITION will be returned.
+
+        A bucket's region may not be modified after it is created. This
+        method is in Beta.
+
+        Returns:
+            Callable[[~.UpdateBucketRequest],
+                    Awaitable[~.LogBucket]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "update_bucket" not in self._stubs:
+            self._stubs["update_bucket"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/UpdateBucket",
+                request_serializer=logging_config.UpdateBucketRequest.serialize,
+                response_deserializer=logging_config.LogBucket.deserialize,
+            )
+        return self._stubs["update_bucket"]
+
+    @property
+    def list_sinks(
+        self,
+    ) -> Callable[
+        [logging_config.ListSinksRequest], Awaitable[logging_config.ListSinksResponse]
+    ]:
+        r"""Return a callable for the list sinks method over gRPC.
+
+        Lists sinks.
+
+        Returns:
+            Callable[[~.ListSinksRequest],
+                    Awaitable[~.ListSinksResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_sinks" not in self._stubs:
+            self._stubs["list_sinks"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/ListSinks",
+                request_serializer=logging_config.ListSinksRequest.serialize,
+                response_deserializer=logging_config.ListSinksResponse.deserialize,
+            )
+        return self._stubs["list_sinks"]
+
+    @property
+    def get_sink(
+        self,
+    ) -> Callable[[logging_config.GetSinkRequest], Awaitable[logging_config.LogSink]]:
+        r"""Return a callable for the get sink method over gRPC.
+
+        Gets a sink.
+
+        Returns:
+            Callable[[~.GetSinkRequest],
+                    Awaitable[~.LogSink]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_sink" not in self._stubs: + self._stubs["get_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/GetSink", + request_serializer=logging_config.GetSinkRequest.serialize, + response_deserializer=logging_config.LogSink.deserialize, + ) + return self._stubs["get_sink"] + + @property + def create_sink( + self, + ) -> Callable[ + [logging_config.CreateSinkRequest], Awaitable[logging_config.LogSink] + ]: + r"""Return a callable for the create sink method over gRPC. + + Creates a sink that exports specified log entries to a + destination. The export of newly-ingested log entries begins + immediately, unless the sink's ``writer_identity`` is not + permitted to write to the destination. A sink can export log + entries only from the resource owning the sink. + + Returns: + Callable[[~.CreateSinkRequest], + Awaitable[~.LogSink]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_sink" not in self._stubs: + self._stubs["create_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/CreateSink", + request_serializer=logging_config.CreateSinkRequest.serialize, + response_deserializer=logging_config.LogSink.deserialize, + ) + return self._stubs["create_sink"] + + @property + def update_sink( + self, + ) -> Callable[ + [logging_config.UpdateSinkRequest], Awaitable[logging_config.LogSink] + ]: + r"""Return a callable for the update sink method over gRPC. + + Updates a sink. This method replaces the following fields in the + existing sink with values from the new sink: ``destination``, + and ``filter``. + + The updated sink might also have a new ``writer_identity``; see + the ``unique_writer_identity`` field. + + Returns: + Callable[[~.UpdateSinkRequest], + Awaitable[~.LogSink]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_sink" not in self._stubs: + self._stubs["update_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/UpdateSink", + request_serializer=logging_config.UpdateSinkRequest.serialize, + response_deserializer=logging_config.LogSink.deserialize, + ) + return self._stubs["update_sink"] + + @property + def delete_sink( + self, + ) -> Callable[[logging_config.DeleteSinkRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete sink method over gRPC. + + Deletes a sink. If the sink has a unique ``writer_identity``, + then that service account is also deleted. + + Returns: + Callable[[~.DeleteSinkRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_sink" not in self._stubs: + self._stubs["delete_sink"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/DeleteSink", + request_serializer=logging_config.DeleteSinkRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_sink"] + + @property + def list_exclusions( + self, + ) -> Callable[ + [logging_config.ListExclusionsRequest], + Awaitable[logging_config.ListExclusionsResponse], + ]: + r"""Return a callable for the list exclusions method over gRPC. + + Lists all the exclusions in a parent resource. + + Returns: + Callable[[~.ListExclusionsRequest], + Awaitable[~.ListExclusionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_exclusions" not in self._stubs: + self._stubs["list_exclusions"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/ListExclusions", + request_serializer=logging_config.ListExclusionsRequest.serialize, + response_deserializer=logging_config.ListExclusionsResponse.deserialize, + ) + return self._stubs["list_exclusions"] + + @property + def get_exclusion( + self, + ) -> Callable[ + [logging_config.GetExclusionRequest], Awaitable[logging_config.LogExclusion] + ]: + r"""Return a callable for the get exclusion method over gRPC. + + Gets the description of an exclusion. + + Returns: + Callable[[~.GetExclusionRequest], + Awaitable[~.LogExclusion]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_exclusion" not in self._stubs: + self._stubs["get_exclusion"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/GetExclusion", + request_serializer=logging_config.GetExclusionRequest.serialize, + response_deserializer=logging_config.LogExclusion.deserialize, + ) + return self._stubs["get_exclusion"] + + @property + def create_exclusion( + self, + ) -> Callable[ + [logging_config.CreateExclusionRequest], Awaitable[logging_config.LogExclusion] + ]: + r"""Return a callable for the create exclusion method over gRPC. + + Creates a new exclusion in a specified parent + resource. Only log entries belonging to that resource + can be excluded. You can have up to 10 exclusions in a + resource. + + Returns: + Callable[[~.CreateExclusionRequest], + Awaitable[~.LogExclusion]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_exclusion" not in self._stubs: + self._stubs["create_exclusion"] = self.grpc_channel.unary_unary( + "/google.logging.v2.ConfigServiceV2/CreateExclusion", + request_serializer=logging_config.CreateExclusionRequest.serialize, + response_deserializer=logging_config.LogExclusion.deserialize, + ) + return self._stubs["create_exclusion"] + + @property + def update_exclusion( + self, + ) -> Callable[ + [logging_config.UpdateExclusionRequest], Awaitable[logging_config.LogExclusion] + ]: + r"""Return a callable for the update exclusion method over gRPC. 
+
+        Changes one or more properties of an existing
+        exclusion.
+
+        Returns:
+            Callable[[~.UpdateExclusionRequest],
+                    Awaitable[~.LogExclusion]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "update_exclusion" not in self._stubs:
+            self._stubs["update_exclusion"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/UpdateExclusion",
+                request_serializer=logging_config.UpdateExclusionRequest.serialize,
+                response_deserializer=logging_config.LogExclusion.deserialize,
+            )
+        return self._stubs["update_exclusion"]
+
+    @property
+    def delete_exclusion(
+        self,
+    ) -> Callable[[logging_config.DeleteExclusionRequest], Awaitable[empty.Empty]]:
+        r"""Return a callable for the delete exclusion method over gRPC.
+
+        Deletes an exclusion.
+
+        Returns:
+            Callable[[~.DeleteExclusionRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "delete_exclusion" not in self._stubs:
+            self._stubs["delete_exclusion"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/DeleteExclusion",
+                request_serializer=logging_config.DeleteExclusionRequest.serialize,
+                response_deserializer=empty.Empty.FromString,
+            )
+        return self._stubs["delete_exclusion"]
+
+    @property
+    def get_cmek_settings(
+        self,
+    ) -> Callable[
+        [logging_config.GetCmekSettingsRequest], Awaitable[logging_config.CmekSettings]
+    ]:
+        r"""Return a callable for the get cmek settings method over gRPC.
+
+        Gets the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Returns:
+            Callable[[~.GetCmekSettingsRequest],
+                    Awaitable[~.CmekSettings]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_cmek_settings" not in self._stubs:
+            self._stubs["get_cmek_settings"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/GetCmekSettings",
+                request_serializer=logging_config.GetCmekSettingsRequest.serialize,
+                response_deserializer=logging_config.CmekSettings.deserialize,
+            )
+        return self._stubs["get_cmek_settings"]
+
+    @property
+    def update_cmek_settings(
+        self,
+    ) -> Callable[
+        [logging_config.UpdateCmekSettingsRequest],
+        Awaitable[logging_config.CmekSettings],
+    ]:
+        r"""Return a callable for the update cmek settings method over gRPC.
+
+        Updates the Logs Router CMEK settings for the given resource.
+
+        Note: CMEK for the Logs Router can currently only be configured
+        for GCP organizations. Once configured, it applies to all
+        projects and folders in the GCP organization.
+
+        [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings]
+        will fail if 1) ``kms_key_name`` is invalid, or 2) the
+        associated service account does not have the required
+        ``roles/cloudkms.cryptoKeyEncrypterDecrypter`` role assigned for
+        the key, or 3) access to the key is disabled.
+
+        See `Enabling CMEK for Logs
+        Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
+        for more information.
+
+        Returns:
+            Callable[[~.UpdateCmekSettingsRequest],
+                    Awaitable[~.CmekSettings]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "update_cmek_settings" not in self._stubs:
+            self._stubs["update_cmek_settings"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.ConfigServiceV2/UpdateCmekSettings",
+                request_serializer=logging_config.UpdateCmekSettingsRequest.serialize,
+                response_deserializer=logging_config.CmekSettings.deserialize,
+            )
+        return self._stubs["update_cmek_settings"]
+
+
+__all__ = ("ConfigServiceV2GrpcAsyncIOTransport",)
diff --git a/google/cloud/logging_v2/services/logging_service_v2/__init__.py b/google/cloud/logging_v2/services/logging_service_v2/__init__.py
new file mode 100644
index 000000000..c46b48a29
--- /dev/null
+++ b/google/cloud/logging_v2/services/logging_service_v2/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from .client import LoggingServiceV2Client
+from .async_client import LoggingServiceV2AsyncClient
+
+__all__ = (
+    "LoggingServiceV2Client",
+    "LoggingServiceV2AsyncClient",
+)
diff --git a/google/cloud/logging_v2/services/logging_service_v2/async_client.py b/google/cloud/logging_v2/services/logging_service_v2/async_client.py
new file mode 100644
index 000000000..e6dd57247
--- /dev/null
+++ b/google/cloud/logging_v2/services/logging_service_v2/async_client.py
@@ -0,0 +1,702 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.logging_v2.services.logging_service_v2 import pagers +from google.cloud.logging_v2.types import log_entry +from google.cloud.logging_v2.types import logging + +from .transports.base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import LoggingServiceV2GrpcAsyncIOTransport +from .client import LoggingServiceV2Client + + +class LoggingServiceV2AsyncClient: + """Service for ingesting and querying logs.""" + + _client: LoggingServiceV2Client + + DEFAULT_ENDPOINT = LoggingServiceV2Client.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = LoggingServiceV2Client.DEFAULT_MTLS_ENDPOINT + + log_path = staticmethod(LoggingServiceV2Client.log_path) + parse_log_path = staticmethod(LoggingServiceV2Client.parse_log_path) + + common_billing_account_path = staticmethod( + LoggingServiceV2Client.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + LoggingServiceV2Client.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(LoggingServiceV2Client.common_folder_path) + parse_common_folder_path = staticmethod( + LoggingServiceV2Client.parse_common_folder_path + ) + + common_organization_path = staticmethod( + LoggingServiceV2Client.common_organization_path + ) + parse_common_organization_path = staticmethod( + LoggingServiceV2Client.parse_common_organization_path + ) + + common_project_path = staticmethod(LoggingServiceV2Client.common_project_path) + parse_common_project_path = staticmethod( + LoggingServiceV2Client.parse_common_project_path + ) + + common_location_path = staticmethod(LoggingServiceV2Client.common_location_path) + parse_common_location_path = staticmethod( + LoggingServiceV2Client.parse_common_location_path + ) + + from_service_account_file = LoggingServiceV2Client.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> LoggingServiceV2Transport: + """Return the transport used by the client instance. + + Returns: + LoggingServiceV2Transport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(LoggingServiceV2Client).get_transport_class, type(LoggingServiceV2Client) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, LoggingServiceV2Transport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the logging service v2 client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.LoggingServiceV2Transport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = LoggingServiceV2Client( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def delete_log( + self, + request: logging.DeleteLogRequest = None, + *, + log_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes all the log entries in a log. The log + reappears if it receives new entries. Log entries + written shortly before the delete operation might not be + deleted. Entries received after the delete operation + with a timestamp before the operation will be deleted. + + Args: + request (:class:`~.logging.DeleteLogRequest`): + The request object. The parameters to DeleteLog. + log_name (:class:`str`): + Required. The resource name of the log to delete: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example, + ``"projects/my-project-id/logs/syslog"``, + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + For more information about log names, see + [LogEntry][google.logging.v2.LogEntry]. + This corresponds to the ``log_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([log_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging.DeleteLogRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
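+        # ``log_name`` is the only flattened field that DeleteLogRequest
+        # supports, so it is the only value copied onto the request here.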
+
+        if log_name is not None:
+            request.log_name = log_name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_log,
+            default_retry=retries.Retry(
+                initial=0.1,
+                maximum=60.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded,
+                    exceptions.InternalServerError,
+                    exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("log_name", request.log_name),)),
+        )
+
+        # Send the request.
+        await rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+    async def write_log_entries(
+        self,
+        request: logging.WriteLogEntriesRequest = None,
+        *,
+        log_name: str = None,
+        resource: monitored_resource.MonitoredResource = None,
+        labels: Sequence[logging.WriteLogEntriesRequest.LabelsEntry] = None,
+        entries: Sequence[log_entry.LogEntry] = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging.WriteLogEntriesResponse:
+        r"""Writes log entries to Logging. This API method is the
+        only way to send log entries to Logging. This method is
+        used, directly or indirectly, by the Logging agent
+        (fluentd) and all logging libraries configured to use
+        Logging. A single request may contain log entries for a
+        maximum of 1000 different resources (projects,
+        organizations, billing accounts, or folders).
+
+        Args:
+            request (:class:`~.logging.WriteLogEntriesRequest`):
+                The request object. The parameters to WriteLogEntries.
+            log_name (:class:`str`):
+                Optional. A default log resource name that is assigned
+                to all log entries in ``entries`` that do not specify a
+                value for ``log_name``:
+
+                ::
+
+                    "projects/[PROJECT_ID]/logs/[LOG_ID]"
+                    "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
+                    "folders/[FOLDER_ID]/logs/[LOG_ID]"
+
+                ``[LOG_ID]`` must be URL-encoded. For example:
+
+                ::
+
+                    "projects/my-project-id/logs/syslog"
+                    "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"
+
+                The permission ``logging.logEntries.create`` is needed
+                on each project, organization, billing account, or
+                folder that is receiving new log entries, whether the
+                resource is specified in ``logName`` or in an individual
+                log entry.
+                This corresponds to the ``log_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            resource (:class:`~.monitored_resource.MonitoredResource`):
+                Optional. A default monitored resource object that is
+                assigned to all log entries in ``entries`` that do not
+                specify a value for ``resource``. Example:
+
+                ::
+
+                    { "type": "gce_instance",
+                      "labels": {
+                        "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
+
+                See [LogEntry][google.logging.v2.LogEntry].
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            labels (:class:`Sequence[~.logging.WriteLogEntriesRequest.LabelsEntry]`):
+                Optional. Default labels that are added to the
+                ``labels`` field of all log entries in ``entries``. If a
+                log entry already has a label with the same key as a
+                label in this parameter, then the log entry's label is
+                not changed.
+                See [LogEntry][google.logging.v2.LogEntry].
+                This corresponds to the ``labels`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            entries (:class:`Sequence[~.log_entry.LogEntry]`):
+                Required. The log entries to send to Logging. The order
+                of log entries in this list does not matter. Values
+                supplied in this method's ``log_name``, ``resource``,
+                and ``labels`` fields are copied into those log entries
+                in this list that do not include values for their
+                corresponding fields. For more information, see the
+                [LogEntry][google.logging.v2.LogEntry] type.
+
+                If the ``timestamp`` or ``insert_id`` fields are missing
+                in log entries, then this method supplies the current
+                time or a unique identifier, respectively. The supplied
+                values are chosen so that, among the log entries that
+                did not supply their own values, the entries earlier in
+                the list will sort before the entries later in the list.
+                See the ``entries.list`` method.
+
+                Log entries with timestamps that are more than the `logs
+                retention
+                period <https://cloud.google.com/logging/quotas>`__
+                in the past or more than 24 hours in the future will not
+                be available when calling ``entries.list``. However,
+                those log entries can still be `exported with
+                LogSinks <https://cloud.google.com/logging/docs/api/tasks/exporting-logs>`__.
+
+                To improve throughput and to avoid exceeding the `quota
+                limit <https://cloud.google.com/logging/quotas>`__
+                for calls to ``entries.write``, you should try to
+                include several log entries in this list, rather than
+                calling this method for each individual log entry.
+                This corresponds to the ``entries`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging.WriteLogEntriesResponse:
+                Result returned from WriteLogEntries.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([log_name, resource, labels, entries])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = logging.WriteLogEntriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if log_name is not None:
+            request.log_name = log_name
+        if resource is not None:
+            request.resource = resource
+
+        if labels:
+            request.labels.update(labels)
+
+        if entries:
+            request.entries.extend(entries)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.write_log_entries,
+            default_retry=retries.Retry(
+                initial=0.1,
+                maximum=60.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    exceptions.DeadlineExceeded,
+                    exceptions.InternalServerError,
+                    exceptions.ServiceUnavailable,
+                ),
+            ),
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Send the request.
+        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
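+        # (WriteLogEntriesResponse currently carries no fields; returning
+        # without raising is itself the success signal.)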
+        return response
+
+    async def list_log_entries(
+        self,
+        request: logging.ListLogEntriesRequest = None,
+        *,
+        resource_names: Sequence[str] = None,
+        filter: str = None,
+        order_by: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListLogEntriesAsyncPager:
+        r"""Lists log entries. Use this method to retrieve log entries that
+        originated from a project/folder/organization/billing account.
+        For ways to export log entries, see `Exporting
+        Logs <https://cloud.google.com/logging/docs/export>`__.
+
+        Args:
+            request (:class:`~.logging.ListLogEntriesRequest`):
+                The request object. The parameters to `ListLogEntries`.
+            resource_names (:class:`Sequence[str]`):
+                Required. Names of one or more parent resources from
+                which to retrieve log entries:
+
+                ::
+
+                    "projects/[PROJECT_ID]"
+                    "organizations/[ORGANIZATION_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]"
+                    "folders/[FOLDER_ID]"
+
+                Projects listed in the ``project_ids`` field are added
+                to this list.
+                This corresponds to the ``resource_names`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            filter (:class:`str`):
+                Optional. A filter that chooses which log entries to
+                return. See `Advanced Logs
+                Queries <https://cloud.google.com/logging/docs/view/advanced-queries>`__.
+                Only log entries that match the filter are returned. An
+                empty filter matches all log entries in the resources
+                listed in ``resource_names``. Referencing a parent
+                resource that is not listed in ``resource_names`` will
+                cause the filter to return no results. The maximum
+                length of the filter is 20000 characters.
+                This corresponds to the ``filter`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            order_by (:class:`str`):
+                Optional. How the results should be sorted. Presently,
+                the only permitted values are ``"timestamp asc"``
+                (default) and ``"timestamp desc"``. The first option
+                returns entries in order of increasing values of
+                ``LogEntry.timestamp`` (oldest first), and the second
+                option returns entries in order of decreasing timestamps
+                (newest first). Entries with equal timestamps are
+                returned in order of their ``insert_id`` values.
+                This corresponds to the ``order_by`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListLogEntriesAsyncPager:
+                Result returned from ``ListLogEntries``.
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource_names, filter, order_by])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        request = logging.ListLogEntriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
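+        # Scalar fields (``filter``, ``order_by``) are assigned directly;
+        # the repeated ``resource_names`` field is extended further below.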
+ + if filter is not None: + request.filter = filter + if order_by is not None: + request.order_by = order_by + + if resource_names: + request.resource_names.extend(resource_names) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_log_entries, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListLogEntriesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_monitored_resource_descriptors( + self, + request: logging.ListMonitoredResourceDescriptorsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMonitoredResourceDescriptorsAsyncPager: + r"""Lists the descriptors for monitored resource types + used by Logging. + + Args: + request (:class:`~.logging.ListMonitoredResourceDescriptorsRequest`): + The request object. The parameters to + ListMonitoredResourceDescriptors + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListMonitoredResourceDescriptorsAsyncPager: + Result returned from + ListMonitoredResourceDescriptors. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + + request = logging.ListMonitoredResourceDescriptorsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_monitored_resource_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMonitoredResourceDescriptorsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_logs( + self, + request: logging.ListLogsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListLogsAsyncPager: + r"""Lists the logs in projects, organizations, folders, + or billing accounts. Only logs that have entries are + listed. + + Args: + request (:class:`~.logging.ListLogsRequest`): + The request object. 
The parameters to ListLogs. + parent (:class:`str`): + Required. The resource name that owns the logs: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListLogsAsyncPager: + Result returned from ListLogs. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging.ListLogsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_logs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListLogsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("LoggingServiceV2AsyncClient",) diff --git a/google/cloud/logging_v2/services/logging_service_v2/client.py b/google/cloud/logging_v2/services/logging_service_v2/client.py new file mode 100644 index 000000000..79a9ed1af --- /dev/null +++ b/google/cloud/logging_v2/services/logging_service_v2/client.py @@ -0,0 +1,845 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api import monitored_resource_pb2 as monitored_resource  # type: ignore
+from google.cloud.logging_v2.services.logging_service_v2 import pagers
+from google.cloud.logging_v2.types import log_entry
+from google.cloud.logging_v2.types import logging
+
+from .transports.base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO
+from .transports.grpc import LoggingServiceV2GrpcTransport
+from .transports.grpc_asyncio import LoggingServiceV2GrpcAsyncIOTransport
+
+
+class LoggingServiceV2ClientMeta(type):
+    """Metaclass for the LoggingServiceV2 client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[LoggingServiceV2Transport]]
+    _transport_registry["grpc"] = LoggingServiceV2GrpcTransport
+    _transport_registry["grpc_asyncio"] = LoggingServiceV2GrpcAsyncIOTransport
+
+    def get_transport_class(cls, label: str = None,) -> Type[LoggingServiceV2Transport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class LoggingServiceV2Client(metaclass=LoggingServiceV2ClientMeta):
+    """Service for ingesting and querying logs."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
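+            # E.g. "logging.mtls.sandbox.googleapis.com" populates all four
+            # named groups, while a plain "logging.googleapis.com" leaves
+            # ``mtls`` and ``sandbox`` as None.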
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "logging.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            LoggingServiceV2Client: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> LoggingServiceV2Transport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            LoggingServiceV2Transport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def log_path(project: str, log: str,) -> str:
+        """Return a fully-qualified log string."""
+        return "projects/{project}/logs/{log}".format(project=project, log=log,)
+
+    @staticmethod
+    def parse_log_path(path: str) -> Dict[str, str]:
+        """Parse a log path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/logs/(?P<log>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified
location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, LoggingServiceV2Transport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the logging service v2 client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.LoggingServiceV2Transport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (client_options_lib.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(
+            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+        )
+
+        ssl_credentials = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                import grpc  # type: ignore
+
+                cert, key = client_options.client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+                is_mtls = True
+            else:
+                creds = SslCredentials()
+                is_mtls = creds.is_mtls
+                ssl_credentials = creds.ssl_credentials if is_mtls else None
+
+        # Figure out which api endpoint to use.
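+        # For example, with no explicit ``api_endpoint`` override:
+        #   GOOGLE_API_USE_MTLS_ENDPOINT=never  -> "logging.googleapis.com"
+        #   GOOGLE_API_USE_MTLS_ENDPOINT=always -> "logging.mtls.googleapis.com"
+        #   GOOGLE_API_USE_MTLS_ENDPOINT=auto   -> the mTLS endpoint only when
+        #       a client certificate is in use (the default behavior).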
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, LoggingServiceV2Transport): + # transport is a LoggingServiceV2Transport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def delete_log( + self, + request: logging.DeleteLogRequest = None, + *, + log_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes all the log entries in a log. The log + reappears if it receives new entries. Log entries + written shortly before the delete operation might not be + deleted. Entries received after the delete operation + with a timestamp before the operation will be deleted. + + Args: + request (:class:`~.logging.DeleteLogRequest`): + The request object. The parameters to DeleteLog. + log_name (:class:`str`): + Required. The resource name of the log to delete: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example, + ``"projects/my-project-id/logs/syslog"``, + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + For more information about log names, see + [LogEntry][google.logging.v2.LogEntry]. + This corresponds to the ``log_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([log_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
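+                # E.g. ``delete_log(request=req, log_name="...")`` would raise
+                # here; callers pass either a request object or flattened
+                # fields, never both.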
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging.DeleteLogRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging.DeleteLogRequest): + request = logging.DeleteLogRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if log_name is not None: + request.log_name = log_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_log] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("log_name", request.log_name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def write_log_entries( + self, + request: logging.WriteLogEntriesRequest = None, + *, + log_name: str = None, + resource: monitored_resource.MonitoredResource = None, + labels: Sequence[logging.WriteLogEntriesRequest.LabelsEntry] = None, + entries: Sequence[log_entry.LogEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging.WriteLogEntriesResponse: + r"""Writes log entries to Logging. This API method is the + only way to send log entries to Logging. This method is + used, directly or indirectly, by the Logging agent + (fluentd) and all logging libraries configured to use + Logging. A single request may contain log entries for a + maximum of 1000 different resources (projects, + organizations, billing accounts or folders) + + Args: + request (:class:`~.logging.WriteLogEntriesRequest`): + The request object. The parameters to WriteLogEntries. + log_name (:class:`str`): + Optional. A default log resource name that is assigned + to all log entries in ``entries`` that do not specify a + value for ``log_name``: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example: + + :: + + "projects/my-project-id/logs/syslog" + "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity" + + The permission ``logging.logEntries.create`` is needed + on each project, organization, billing account, or + folder that is receiving new log entries, whether the + resource is specified in ``logName`` or in an individual + log entry. + This corresponds to the ``log_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (:class:`~.monitored_resource.MonitoredResource`): + Optional. A default monitored resource object that is + assigned to all log entries in ``entries`` that do not + specify a value for ``resource``. Example: + + :: + + { "type": "gce_instance", + "labels": { + "zone": "us-central1-a", "instance_id": "00000000000000000000" }} + + See [LogEntry][google.logging.v2.LogEntry]. + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + labels (:class:`Sequence[~.logging.WriteLogEntriesRequest.LabelsEntry]`): + Optional. Default labels that are added to the + ``labels`` field of all log entries in ``entries``. 
If a + log entry already has a label with the same key as a + label in this parameter, then the log entry's label is + not changed. See [LogEntry][google.logging.v2.LogEntry]. + This corresponds to the ``labels`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (:class:`Sequence[~.log_entry.LogEntry]`): + Required. The log entries to send to Logging. The order + of log entries in this list does not matter. Values + supplied in this method's ``log_name``, ``resource``, + and ``labels`` fields are copied into those log entries + in this list that do not include values for their + corresponding fields. For more information, see the + [LogEntry][google.logging.v2.LogEntry] type. + + If the ``timestamp`` or ``insert_id`` fields are missing + in log entries, then this method supplies the current + time or a unique identifier, respectively. The supplied + values are chosen so that, among the log entries that + did not supply their own values, the entries earlier in + the list will sort before the entries later in the list. + See the ``entries.list`` method. + + Log entries with timestamps that are more than the `logs + retention + period `__ + in the past or more than 24 hours in the future will not + be available when calling ``entries.list``. However, + those log entries can still be `exported with + LogSinks `__. + + To improve throughput and to avoid exceeding the `quota + limit `__ + for calls to ``entries.write``, you should try to + include several log entries in this list, rather than + calling this method for each individual log entry. + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging.WriteLogEntriesResponse: + Result returned from WriteLogEntries. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([log_name, resource, labels, entries]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging.WriteLogEntriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging.WriteLogEntriesRequest): + request = logging.WriteLogEntriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if log_name is not None: + request.log_name = log_name + if resource is not None: + request.resource = resource + + if labels: + request.labels.update(labels) + + if entries: + request.entries.extend(entries) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_log_entries] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
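+        # A minimal usage sketch; the project and log names below are
+        # illustrative only:
+        #
+        #   client = LoggingServiceV2Client()
+        #   entry = log_entry.LogEntry(
+        #       log_name="projects/my-project/logs/example",
+        #       resource={"type": "global"},
+        #       text_payload="hello world",
+        #   )
+        #   client.write_log_entries(entries=[entry])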
+ return response + + def list_log_entries( + self, + request: logging.ListLogEntriesRequest = None, + *, + resource_names: Sequence[str] = None, + filter: str = None, + order_by: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListLogEntriesPager: + r"""Lists log entries. Use this method to retrieve log entries that + originated from a project/folder/organization/billing account. + For ways to export log entries, see `Exporting + Logs `__. + + Args: + request (:class:`~.logging.ListLogEntriesRequest`): + The request object. The parameters to `ListLogEntries`. + resource_names (:class:`Sequence[str]`): + Required. Names of one or more parent resources from + which to retrieve log entries: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Projects listed in the ``project_ids`` field are added + to this list. + This corresponds to the ``resource_names`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter that chooses which log entries to + return. See `Advanced Logs + Queries `__. + Only log entries that match the filter are returned. An + empty filter matches all log entries in the resources + listed in ``resource_names``. Referencing a parent + resource that is not listed in ``resource_names`` will + cause the filter to return no results. The maximum + length of the filter is 20000 characters. + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + order_by (:class:`str`): + Optional. How the results should be sorted. Presently, + the only permitted values are ``"timestamp asc"`` + (default) and ``"timestamp desc"``. The first option + returns entries in order of increasing values of + ``LogEntry.timestamp`` (oldest first), and the second + option returns entries in order of decreasing timestamps + (newest first). Entries with equal timestamps are + returned in order of their ``insert_id`` values. + This corresponds to the ``order_by`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListLogEntriesPager: + Result returned from ``ListLogEntries``. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource_names, filter, order_by]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging.ListLogEntriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
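+        # E.g. these two calls are equivalent (the resource name is
+        # illustrative):
+        #
+        #   client.list_log_entries(resource_names=["projects/my-project"])
+        #   client.list_log_entries(
+        #       request=logging.ListLogEntriesRequest(
+        #           resource_names=["projects/my-project"],
+        #       )
+        #   )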
+ if not isinstance(request, logging.ListLogEntriesRequest): + request = logging.ListLogEntriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if filter is not None: + request.filter = filter + if order_by is not None: + request.order_by = order_by + + if resource_names: + request.resource_names.extend(resource_names) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_log_entries] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListLogEntriesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def list_monitored_resource_descriptors( + self, + request: logging.ListMonitoredResourceDescriptorsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMonitoredResourceDescriptorsPager: + r"""Lists the descriptors for monitored resource types + used by Logging. + + Args: + request (:class:`~.logging.ListMonitoredResourceDescriptorsRequest`): + The request object. The parameters to + ListMonitoredResourceDescriptors + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListMonitoredResourceDescriptorsPager: + Result returned from + ListMonitoredResourceDescriptors. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a logging.ListMonitoredResourceDescriptorsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging.ListMonitoredResourceDescriptorsRequest): + request = logging.ListMonitoredResourceDescriptorsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_monitored_resource_descriptors + ] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMonitoredResourceDescriptorsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def list_logs( + self, + request: logging.ListLogsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListLogsPager: + r"""Lists the logs in projects, organizations, folders, + or billing accounts. Only logs that have entries are + listed. + + Args: + request (:class:`~.logging.ListLogsRequest`): + The request object. The parameters to ListLogs. + parent (:class:`str`): + Required. 
The resource name that owns the logs: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListLogsPager: + Result returned from ListLogs. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a logging.ListLogsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, logging.ListLogsRequest): + request = logging.ListLogsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_logs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListLogsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("LoggingServiceV2Client",) diff --git a/google/cloud/logging_v2/services/logging_service_v2/pagers.py b/google/cloud/logging_v2/services/logging_service_v2/pagers.py new file mode 100644 index 000000000..72bbe8e23 --- /dev/null +++ b/google/cloud/logging_v2/services/logging_service_v2/pagers.py @@ -0,0 +1,412 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.logging_v2.types import log_entry +from google.cloud.logging_v2.types import logging + + +class ListLogEntriesPager: + """A pager for iterating through ``list_log_entries`` requests. + + This class thinly wraps an initial + :class:`~.logging.ListLogEntriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``entries`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListLogEntries`` requests and continue to iterate + through the ``entries`` field on the + corresponding responses. + + All the usual :class:`~.logging.ListLogEntriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., logging.ListLogEntriesResponse], + request: logging.ListLogEntriesRequest, + response: logging.ListLogEntriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging.ListLogEntriesRequest`): + The initial request object. + response (:class:`~.logging.ListLogEntriesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging.ListLogEntriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[logging.ListLogEntriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[log_entry.LogEntry]: + for page in self.pages: + yield from page.entries + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogEntriesAsyncPager: + """A pager for iterating through ``list_log_entries`` requests. + + This class thinly wraps an initial + :class:`~.logging.ListLogEntriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``entries`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListLogEntries`` requests and continue to iterate + through the ``entries`` field on the + corresponding responses. + + All the usual :class:`~.logging.ListLogEntriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[logging.ListLogEntriesResponse]], + request: logging.ListLogEntriesRequest, + response: logging.ListLogEntriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging.ListLogEntriesRequest`): + The initial request object. 
+ response (:class:`~.logging.ListLogEntriesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging.ListLogEntriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[logging.ListLogEntriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[log_entry.LogEntry]: + async def async_generator(): + async for page in self.pages: + for response in page.entries: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMonitoredResourceDescriptorsPager: + """A pager for iterating through ``list_monitored_resource_descriptors`` requests. + + This class thinly wraps an initial + :class:`~.logging.ListMonitoredResourceDescriptorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``resource_descriptors`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMonitoredResourceDescriptors`` requests and continue to iterate + through the ``resource_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.logging.ListMonitoredResourceDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., logging.ListMonitoredResourceDescriptorsResponse], + request: logging.ListMonitoredResourceDescriptorsRequest, + response: logging.ListMonitoredResourceDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging.ListMonitoredResourceDescriptorsRequest`): + The initial request object. + response (:class:`~.logging.ListMonitoredResourceDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging.ListMonitoredResourceDescriptorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[logging.ListMonitoredResourceDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[monitored_resource.MonitoredResourceDescriptor]: + for page in self.pages: + yield from page.resource_descriptors + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMonitoredResourceDescriptorsAsyncPager: + """A pager for iterating through ``list_monitored_resource_descriptors`` requests. 
+ + This class thinly wraps an initial + :class:`~.logging.ListMonitoredResourceDescriptorsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``resource_descriptors`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMonitoredResourceDescriptors`` requests and continue to iterate + through the ``resource_descriptors`` field on the + corresponding responses. + + All the usual :class:`~.logging.ListMonitoredResourceDescriptorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[logging.ListMonitoredResourceDescriptorsResponse] + ], + request: logging.ListMonitoredResourceDescriptorsRequest, + response: logging.ListMonitoredResourceDescriptorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging.ListMonitoredResourceDescriptorsRequest`): + The initial request object. + response (:class:`~.logging.ListMonitoredResourceDescriptorsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging.ListMonitoredResourceDescriptorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[logging.ListMonitoredResourceDescriptorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__( + self, + ) -> AsyncIterable[monitored_resource.MonitoredResourceDescriptor]: + async def async_generator(): + async for page in self.pages: + for response in page.resource_descriptors: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogsPager: + """A pager for iterating through ``list_logs`` requests. + + This class thinly wraps an initial + :class:`~.logging.ListLogsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``log_names`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListLogs`` requests and continue to iterate + through the ``log_names`` field on the + corresponding responses. + + All the usual :class:`~.logging.ListLogsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., logging.ListLogsResponse], + request: logging.ListLogsRequest, + response: logging.ListLogsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging.ListLogsRequest`): + The initial request object. + response (:class:`~.logging.ListLogsResponse`): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging.ListLogsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[logging.ListLogsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[str]: + for page in self.pages: + yield from page.log_names + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogsAsyncPager: + """A pager for iterating through ``list_logs`` requests. + + This class thinly wraps an initial + :class:`~.logging.ListLogsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``log_names`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListLogs`` requests and continue to iterate + through the ``log_names`` field on the + corresponding responses. + + All the usual :class:`~.logging.ListLogsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[logging.ListLogsResponse]], + request: logging.ListLogsRequest, + response: logging.ListLogsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.logging.ListLogsRequest`): + The initial request object. + response (:class:`~.logging.ListLogsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = logging.ListLogsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[logging.ListLogsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[str]: + async def async_generator(): + async for page in self.pages: + for response in page.log_names: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/logging_v2/services/logging_service_v2/transports/__init__.py b/google/cloud/logging_v2/services/logging_service_v2/transports/__init__.py new file mode 100644 index 000000000..910a38ecd --- /dev/null +++ b/google/cloud/logging_v2/services/logging_service_v2/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import LoggingServiceV2Transport +from .grpc import LoggingServiceV2GrpcTransport +from .grpc_asyncio import LoggingServiceV2GrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[LoggingServiceV2Transport]] +_transport_registry["grpc"] = LoggingServiceV2GrpcTransport +_transport_registry["grpc_asyncio"] = LoggingServiceV2GrpcAsyncIOTransport + + +__all__ = ( + "LoggingServiceV2Transport", + "LoggingServiceV2GrpcTransport", + "LoggingServiceV2GrpcAsyncIOTransport", +) diff --git a/google/cloud/logging_v2/services/logging_service_v2/transports/base.py b/google/cloud/logging_v2/services/logging_service_v2/transports/base.py new file mode 100644 index 000000000..c8bcbcbf9 --- /dev/null +++ b/google/cloud/logging_v2/services/logging_service_v2/transports/base.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.logging_v2.types import logging +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class LoggingServiceV2Transport(abc.ABC): + """Abstract transport class for LoggingServiceV2.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ) + + def __init__( + self, + *, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
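+                Defaults to ``"logging.googleapis.com"``; when no port is
+                given, ``":443"`` is appended (see below).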
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
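+        # Every RPC below shares the same default policy: retry on
+        # DeadlineExceeded, InternalServerError, and ServiceUnavailable with
+        # exponential backoff (0.1s initial delay, 1.3x multiplier, 60s cap),
+        # plus a 60-second default timeout.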
+ self._wrapped_methods = { + self.delete_log: gapic_v1.method.wrap_method( + self.delete_log, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.write_log_entries: gapic_v1.method.wrap_method( + self.write_log_entries, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_log_entries: gapic_v1.method.wrap_method( + self.list_log_entries, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_monitored_resource_descriptors: gapic_v1.method.wrap_method( + self.list_monitored_resource_descriptors, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_logs: gapic_v1.method.wrap_method( + self.list_logs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + @property + def delete_log( + self, + ) -> typing.Callable[ + [logging.DeleteLogRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def write_log_entries( + self, + ) -> typing.Callable[ + [logging.WriteLogEntriesRequest], + typing.Union[ + logging.WriteLogEntriesResponse, + typing.Awaitable[logging.WriteLogEntriesResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_log_entries( + self, + ) -> typing.Callable[ + [logging.ListLogEntriesRequest], + typing.Union[ + logging.ListLogEntriesResponse, + typing.Awaitable[logging.ListLogEntriesResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_monitored_resource_descriptors( + self, + ) -> typing.Callable[ + [logging.ListMonitoredResourceDescriptorsRequest], + typing.Union[ + logging.ListMonitoredResourceDescriptorsResponse, + typing.Awaitable[logging.ListMonitoredResourceDescriptorsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_logs( + self, + ) -> typing.Callable[ + [logging.ListLogsRequest], + typing.Union[ + logging.ListLogsResponse, typing.Awaitable[logging.ListLogsResponse] + ], + ]: + raise NotImplementedError() + + +__all__ = ("LoggingServiceV2Transport",) diff --git a/google/cloud/logging_v2/services/logging_service_v2/transports/grpc.py b/google/cloud/logging_v2/services/logging_service_v2/transports/grpc.py new file mode 100644 index 000000000..4c0636e47 --- /dev/null +++ b/google/cloud/logging_v2/services/logging_service_v2/transports/grpc.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.logging_v2.types import logging +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO + + +class LoggingServiceV2GrpcTransport(LoggingServiceV2Transport): + """gRPC backend transport for LoggingServiceV2. + + Service for ingesting and querying logs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def delete_log(self) -> Callable[[logging.DeleteLogRequest], empty.Empty]: + r"""Return a callable for the delete log method over gRPC. + + Deletes all the log entries in a log. The log + reappears if it receives new entries. Log entries + written shortly before the delete operation might not be + deleted. Entries received after the delete operation + with a timestamp before the operation will be deleted. + + Returns: + Callable[[~.DeleteLogRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_log" not in self._stubs: + self._stubs["delete_log"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/DeleteLog", + request_serializer=logging.DeleteLogRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_log"] + + @property + def write_log_entries( + self, + ) -> Callable[[logging.WriteLogEntriesRequest], logging.WriteLogEntriesResponse]: + r"""Return a callable for the write log entries method over gRPC. + + Writes log entries to Logging. This API method is the + only way to send log entries to Logging. This method is + used, directly or indirectly, by the Logging agent + (fluentd) and all logging libraries configured to use + Logging. A single request may contain log entries for a + maximum of 1000 different resources (projects, + organizations, billing accounts, or folders). + + Returns: + Callable[[~.WriteLogEntriesRequest], + ~.WriteLogEntriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "write_log_entries" not in self._stubs: + self._stubs["write_log_entries"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/WriteLogEntries", + request_serializer=logging.WriteLogEntriesRequest.serialize, + response_deserializer=logging.WriteLogEntriesResponse.deserialize, + ) + return self._stubs["write_log_entries"] + + @property + def list_log_entries( + self, + ) -> Callable[[logging.ListLogEntriesRequest], logging.ListLogEntriesResponse]: + r"""Return a callable for the list log entries method over gRPC. + + Lists log entries.
Use this method to retrieve log entries that + originated from a project/folder/organization/billing account. + For ways to export log entries, see `Exporting + Logs <https://cloud.google.com/logging/docs/export>`__. + + Returns: + Callable[[~.ListLogEntriesRequest], + ~.ListLogEntriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_log_entries" not in self._stubs: + self._stubs["list_log_entries"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/ListLogEntries", + request_serializer=logging.ListLogEntriesRequest.serialize, + response_deserializer=logging.ListLogEntriesResponse.deserialize, + ) + return self._stubs["list_log_entries"] + + @property + def list_monitored_resource_descriptors( + self, + ) -> Callable[ + [logging.ListMonitoredResourceDescriptorsRequest], + logging.ListMonitoredResourceDescriptorsResponse, + ]: + r"""Return a callable for the list monitored resource + descriptors method over gRPC. + + Lists the descriptors for monitored resource types + used by Logging. + + Returns: + Callable[[~.ListMonitoredResourceDescriptorsRequest], + ~.ListMonitoredResourceDescriptorsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_monitored_resource_descriptors" not in self._stubs: + self._stubs[ + "list_monitored_resource_descriptors" + ] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors", + request_serializer=logging.ListMonitoredResourceDescriptorsRequest.serialize, + response_deserializer=logging.ListMonitoredResourceDescriptorsResponse.deserialize, + ) + return self._stubs["list_monitored_resource_descriptors"] + + @property + def list_logs( + self, + ) -> Callable[[logging.ListLogsRequest], logging.ListLogsResponse]: + r"""Return a callable for the list logs method over gRPC. + + Lists the logs in projects, organizations, folders, + or billing accounts. Only logs that have entries are + listed. + + Returns: + Callable[[~.ListLogsRequest], + ~.ListLogsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each.
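+ # The stub is cached in self._stubs so that repeated property access + # reuses the same callable instead of registering a new one on the channel.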
+ if "list_logs" not in self._stubs: + self._stubs["list_logs"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/ListLogs", + request_serializer=logging.ListLogsRequest.serialize, + response_deserializer=logging.ListLogsResponse.deserialize, + ) + return self._stubs["list_logs"] + + +__all__ = ("LoggingServiceV2GrpcTransport",) diff --git a/google/cloud/logging_v2/services/logging_service_v2/transports/grpc_asyncio.py b/google/cloud/logging_v2/services/logging_service_v2/transports/grpc_asyncio.py new file mode 100644 index 000000000..8a26a078e --- /dev/null +++ b/google/cloud/logging_v2/services/logging_service_v2/transports/grpc_asyncio.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.logging_v2.types import logging +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import LoggingServiceV2Transport, DEFAULT_CLIENT_INFO +from .grpc import LoggingServiceV2GrpcTransport + + +class LoggingServiceV2GrpcAsyncIOTransport(LoggingServiceV2Transport): + """gRPC AsyncIO backend transport for LoggingServiceV2. + + Service for ingesting and querying logs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. 
These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it.
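+ # Note that ``credentials`` was set to False above, which keeps the base + # constructor from fetching default credentials; the caller-supplied + # channel already carries its own.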
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def delete_log( + self, + ) -> Callable[[logging.DeleteLogRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete log method over gRPC. + + Deletes all the log entries in a log. The log + reappears if it receives new entries. Log entries + written shortly before the delete operation might not be + deleted. Entries received after the delete operation + with a timestamp before the operation will be deleted. + + Returns: + Callable[[~.DeleteLogRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_log" not in self._stubs: + self._stubs["delete_log"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/DeleteLog", + request_serializer=logging.DeleteLogRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_log"] + + @property + def write_log_entries( + self, + ) -> Callable[ + [logging.WriteLogEntriesRequest], Awaitable[logging.WriteLogEntriesResponse] + ]: + r"""Return a callable for the write log entries method over gRPC. + + Writes log entries to Logging. This API method is the + only way to send log entries to Logging. 
This method is + used, directly or indirectly, by the Logging agent + (fluentd) and all logging libraries configured to use + Logging. A single request may contain log entries for a + maximum of 1000 different resources (projects, + organizations, billing accounts, or folders). + + Returns: + Callable[[~.WriteLogEntriesRequest], + Awaitable[~.WriteLogEntriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "write_log_entries" not in self._stubs: + self._stubs["write_log_entries"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/WriteLogEntries", + request_serializer=logging.WriteLogEntriesRequest.serialize, + response_deserializer=logging.WriteLogEntriesResponse.deserialize, + ) + return self._stubs["write_log_entries"] + + @property + def list_log_entries( + self, + ) -> Callable[ + [logging.ListLogEntriesRequest], Awaitable[logging.ListLogEntriesResponse] + ]: + r"""Return a callable for the list log entries method over gRPC. + + Lists log entries. Use this method to retrieve log entries that + originated from a project/folder/organization/billing account. + For ways to export log entries, see `Exporting + Logs <https://cloud.google.com/logging/docs/export>`__. + + Returns: + Callable[[~.ListLogEntriesRequest], + Awaitable[~.ListLogEntriesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_log_entries" not in self._stubs: + self._stubs["list_log_entries"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/ListLogEntries", + request_serializer=logging.ListLogEntriesRequest.serialize, + response_deserializer=logging.ListLogEntriesResponse.deserialize, + ) + return self._stubs["list_log_entries"] + + @property + def list_monitored_resource_descriptors( + self, + ) -> Callable[ + [logging.ListMonitoredResourceDescriptorsRequest], + Awaitable[logging.ListMonitoredResourceDescriptorsResponse], + ]: + r"""Return a callable for the list monitored resource + descriptors method over gRPC. + + Lists the descriptors for monitored resource types + used by Logging. + + Returns: + Callable[[~.ListMonitoredResourceDescriptorsRequest], + Awaitable[~.ListMonitoredResourceDescriptorsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_monitored_resource_descriptors" not in self._stubs: + self._stubs[ + "list_monitored_resource_descriptors" + ] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors", + request_serializer=logging.ListMonitoredResourceDescriptorsRequest.serialize, + response_deserializer=logging.ListMonitoredResourceDescriptorsResponse.deserialize, + ) + return self._stubs["list_monitored_resource_descriptors"] + + @property + def list_logs( + self, + ) -> Callable[[logging.ListLogsRequest], Awaitable[logging.ListLogsResponse]]: + r"""Return a callable for the list logs method over gRPC.
+ + Lists the logs in projects, organizations, folders, + or billing accounts. Only logs that have entries are + listed. + + Returns: + Callable[[~.ListLogsRequest], + Awaitable[~.ListLogsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_logs" not in self._stubs: + self._stubs["list_logs"] = self.grpc_channel.unary_unary( + "/google.logging.v2.LoggingServiceV2/ListLogs", + request_serializer=logging.ListLogsRequest.serialize, + response_deserializer=logging.ListLogsResponse.deserialize, + ) + return self._stubs["list_logs"] + + +__all__ = ("LoggingServiceV2GrpcAsyncIOTransport",) diff --git a/google/cloud/logging_v2/services/metrics_service_v2/__init__.py b/google/cloud/logging_v2/services/metrics_service_v2/__init__.py new file mode 100644 index 000000000..c857ea037 --- /dev/null +++ b/google/cloud/logging_v2/services/metrics_service_v2/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import MetricsServiceV2Client +from .async_client import MetricsServiceV2AsyncClient + +__all__ = ( + "MetricsServiceV2Client", + "MetricsServiceV2AsyncClient", +) diff --git a/google/cloud/logging_v2/services/metrics_service_v2/async_client.py b/google/cloud/logging_v2/services/metrics_service_v2/async_client.py new file mode 100644 index 000000000..93dfbd71b --- /dev/null +++ b/google/cloud/logging_v2/services/metrics_service_v2/async_client.py @@ -0,0 +1,627 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import distribution_pb2 as distribution # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import metric_pb2 as metric # type: ignore +from google.cloud.logging_v2.services.metrics_service_v2 import pagers +from google.cloud.logging_v2.types import logging_metrics +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetricsServiceV2GrpcAsyncIOTransport +from .client import MetricsServiceV2Client + + +class MetricsServiceV2AsyncClient: + """Service for configuring logs-based metrics.""" + + _client: MetricsServiceV2Client + + DEFAULT_ENDPOINT = MetricsServiceV2Client.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetricsServiceV2Client.DEFAULT_MTLS_ENDPOINT + + log_metric_path = staticmethod(MetricsServiceV2Client.log_metric_path) + parse_log_metric_path = staticmethod(MetricsServiceV2Client.parse_log_metric_path) + + common_billing_account_path = staticmethod( + MetricsServiceV2Client.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + MetricsServiceV2Client.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(MetricsServiceV2Client.common_folder_path) + parse_common_folder_path = staticmethod( + MetricsServiceV2Client.parse_common_folder_path + ) + + common_organization_path = staticmethod( + MetricsServiceV2Client.common_organization_path + ) + parse_common_organization_path = staticmethod( + MetricsServiceV2Client.parse_common_organization_path + ) + + common_project_path = staticmethod(MetricsServiceV2Client.common_project_path) + parse_common_project_path = staticmethod( + MetricsServiceV2Client.parse_common_project_path + ) + + common_location_path = staticmethod(MetricsServiceV2Client.common_location_path) + parse_common_location_path = staticmethod( + MetricsServiceV2Client.parse_common_location_path + ) + + from_service_account_file = MetricsServiceV2Client.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetricsServiceV2Transport: + """Return the transport used by the client instance. + + Returns: + MetricsServiceV2Transport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(MetricsServiceV2Client).get_transport_class, type(MetricsServiceV2Client) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, MetricsServiceV2Transport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the metrics service v2 client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetricsServiceV2Transport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = MetricsServiceV2Client( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def list_log_metrics( + self, + request: logging_metrics.ListLogMetricsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListLogMetricsAsyncPager: + r"""Lists logs-based metrics. + + Args: + request (:class:`~.logging_metrics.ListLogMetricsRequest`): + The request object. The parameters to ListLogMetrics. + parent (:class:`str`): + Required. The name of the project containing the + metrics: + + :: + + "projects/[PROJECT_ID]". + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListLogMetricsAsyncPager: + Result returned from ListLogMetrics. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_metrics.ListLogMetricsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling.
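+ # The default retry below implements exponential backoff: the first delay + # is 0.1s, each subsequent delay grows by a factor of 1.3 up to a 60s cap, + # and only the transient errors named in the predicate are retried.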
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_log_metrics, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListLogMetricsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_log_metric( + self, + request: logging_metrics.GetLogMetricRequest = None, + *, + metric_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_metrics.LogMetric: + r"""Gets a logs-based metric. + + Args: + request (:class:`~.logging_metrics.GetLogMetricRequest`): + The request object. The parameters to GetLogMetric. + metric_name (:class:`str`): + Required. The resource name of the desired metric: + + :: + + "projects/[PROJECT_ID]/metrics/[METRIC_ID]". + This corresponds to the ``metric_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_metrics.LogMetric: + Describes a logs-based metric. The + value of the metric is the number of log + entries that match a logs filter in a + given time interval. + Logs-based metrics can also be used to + extract values from logs and create a + distribution of the values. The + distribution records the statistics of + the extracted values along with an + optional histogram of the values as + specified by the bucket options. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([metric_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_metrics.GetLogMetricRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if metric_name is not None: + request.metric_name = metric_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling.
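+ # Unlike the synchronous client, which looks up precomputed wrappers in + # the transport's _wrapped_methods table, the async client wraps the + # transport method anew on each call.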
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_log_metric, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("metric_name", request.metric_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_log_metric( + self, + request: logging_metrics.CreateLogMetricRequest = None, + *, + parent: str = None, + metric: logging_metrics.LogMetric = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_metrics.LogMetric: + r"""Creates a logs-based metric. + + Args: + request (:class:`~.logging_metrics.CreateLogMetricRequest`): + The request object. The parameters to CreateLogMetric. + parent (:class:`str`): + Required. The resource name of the project in which to + create the metric: + + :: + + "projects/[PROJECT_ID]" + + The new metric must be provided in the request. + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metric (:class:`~.logging_metrics.LogMetric`): + Required. The new logs-based metric, + which must not have an identifier that + already exists. + This corresponds to the ``metric`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_metrics.LogMetric: + Describes a logs-based metric. The + value of the metric is the number of log + entries that match a logs filter in a + given time interval. + Logs-based metrics can also be used to + extract values from logs and create a + distribution of the values. The + distribution records the statistics of + the extracted values along with an + optional histogram of the values as + specified by the bucket options. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metric]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_metrics.CreateLogMetricRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if metric is not None: + request.metric = metric + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_log_metric, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here.
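+ # to_grpc_metadata encodes the routing parameters into the + # x-goog-request-params header so the backend can route the request by + # resource name.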
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_log_metric( + self, + request: logging_metrics.UpdateLogMetricRequest = None, + *, + metric_name: str = None, + metric: logging_metrics.LogMetric = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> logging_metrics.LogMetric: + r"""Creates or updates a logs-based metric. + + Args: + request (:class:`~.logging_metrics.UpdateLogMetricRequest`): + The request object. The parameters to UpdateLogMetric. + metric_name (:class:`str`): + Required. The resource name of the metric to update: + + :: + + "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + + The updated metric must be provided in the request and + its ``name`` field must be the same as ``[METRIC_ID]``. + If the metric does not exist in ``[PROJECT_ID]``, then a + new metric is created. + This corresponds to the ``metric_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metric (:class:`~.logging_metrics.LogMetric`): + Required. The updated metric. + This corresponds to the ``metric`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.logging_metrics.LogMetric: + Describes a logs-based metric. The + value of the metric is the number of log + entries that match a logs filter in a + given time interval. + Logs-based metrics can also be used to + extract values from logs and create a + distribution of the values. The + distribution records the statistics of + the extracted values along with an + optional histogram of the values as + specified by the bucket options. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([metric_name, metric]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_metrics.UpdateLogMetricRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if metric_name is not None: + request.metric_name = metric_name + if metric is not None: + request.metric = metric + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_log_metric, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here.
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("metric_name", request.metric_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_log_metric( + self, + request: logging_metrics.DeleteLogMetricRequest = None, + *, + metric_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a logs-based metric. + + Args: + request (:class:`~.logging_metrics.DeleteLogMetricRequest`): + The request object. The parameters to DeleteLogMetric. + metric_name (:class:`str`): + Required. The resource name of the metric to delete: + + :: + + "projects/[PROJECT_ID]/metrics/[METRIC_ID]". + This corresponds to the ``metric_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([metric_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = logging_metrics.DeleteLogMetricRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if metric_name is not None: + request.metric_name = metric_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_log_metric, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("metric_name", request.metric_name),) + ), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("MetricsServiceV2AsyncClient",) diff --git a/google/cloud/logging_v2/services/metrics_service_v2/client.py b/google/cloud/logging_v2/services/metrics_service_v2/client.py new file mode 100644 index 000000000..f4bca3926 --- /dev/null +++ b/google/cloud/logging_v2/services/metrics_service_v2/client.py @@ -0,0 +1,780 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api import distribution_pb2 as distribution # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import metric_pb2 as metric # type: ignore +from google.cloud.logging_v2.services.metrics_service_v2 import pagers +from google.cloud.logging_v2.types import logging_metrics +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetricsServiceV2GrpcTransport +from .transports.grpc_asyncio import MetricsServiceV2GrpcAsyncIOTransport + + +class MetricsServiceV2ClientMeta(type): + """Metaclass for the MetricsServiceV2 client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[MetricsServiceV2Transport]] + _transport_registry["grpc"] = MetricsServiceV2GrpcTransport + _transport_registry["grpc_asyncio"] = MetricsServiceV2GrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[MetricsServiceV2Transport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class MetricsServiceV2Client(metaclass=MetricsServiceV2ClientMeta): + """Service for configuring logs-based metrics.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "logging.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetricsServiceV2Client: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetricsServiceV2Transport: + """Return the transport used by the client instance. + + Returns: + MetricsServiceV2Transport: The transport used by the client instance. + """ + return self._transport + + @staticmethod + def log_metric_path(project: str, metric: str,) -> str: + """Return a fully-qualified log_metric string.""" + return "projects/{project}/metrics/{metric}".format( + project=project, metric=metric, + ) + + @staticmethod + def parse_log_metric_path(path: str) -> Dict[str, str]: + """Parse a log_metric path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/metrics/(?P<metric>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse an organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location:
str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, MetricsServiceV2Transport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the metrics service v2 client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetricsServiceV2Transport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (client_options_lib.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. 
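+ # Precedence: an explicit client_options.api_endpoint always wins; + # otherwise the GOOGLE_API_USE_MTLS_ENDPOINT environment variable + # ("never", "always", or the default "auto") selects between the regular + # and mTLS endpoints.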
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(
+            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+        )
+
+        ssl_credentials = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                import grpc  # type: ignore
+
+                cert, key = client_options.client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+                is_mtls = True
+            else:
+                creds = SslCredentials()
+                is_mtls = creds.is_mtls
+                ssl_credentials = creds.ssl_credentials if is_mtls else None
+
+        # Figure out which api endpoint to use.
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        else:
+            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+            if use_mtls_env == "never":
+                api_endpoint = self.DEFAULT_ENDPOINT
+            elif use_mtls_env == "always":
+                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+            elif use_mtls_env == "auto":
+                api_endpoint = (
+                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+                )
+            else:
+                raise MutualTLSChannelError(
+                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+                )
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        if isinstance(transport, MetricsServiceV2Transport):
+            # transport is a MetricsServiceV2Transport instance.
+            if credentials or client_options.credentials_file:
+                raise ValueError(
+                    "When providing a transport instance, "
+                    "provide its credentials directly."
+                )
+            if client_options.scopes:
+                raise ValueError(
+                    "When providing a transport instance, "
+                    "provide its scopes directly."
+                )
+            self._transport = transport
+        else:
+            Transport = type(self).get_transport_class(transport)
+            self._transport = Transport(
+                credentials=credentials,
+                credentials_file=client_options.credentials_file,
+                host=api_endpoint,
+                scopes=client_options.scopes,
+                ssl_channel_credentials=ssl_credentials,
+                quota_project_id=client_options.quota_project_id,
+                client_info=client_info,
+            )
+
+    def list_log_metrics(
+        self,
+        request: logging_metrics.ListLogMetricsRequest = None,
+        *,
+        parent: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> pagers.ListLogMetricsPager:
+        r"""Lists logs-based metrics.
+
+        Args:
+            request (:class:`~.logging_metrics.ListLogMetricsRequest`):
+                The request object. The parameters to ListLogMetrics.
+            parent (:class:`str`):
+                Required. The name of the project containing the
+                metrics:
+
+                ::
+
+                    "projects/[PROJECT_ID]".
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.pagers.ListLogMetricsPager:
+                Result returned from ListLogMetrics.
+                Iterating over this object will yield
+                results and resolve additional pages
+                automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_metrics.ListLogMetricsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_metrics.ListLogMetricsRequest):
+            request = logging_metrics.ListLogMetricsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_log_metrics]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListLogMetricsPager(
+            method=rpc, request=request, response=response, metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_log_metric(
+        self,
+        request: logging_metrics.GetLogMetricRequest = None,
+        *,
+        metric_name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_metrics.LogMetric:
+        r"""Gets a logs-based metric.
+
+        Args:
+            request (:class:`~.logging_metrics.GetLogMetricRequest`):
+                The request object. The parameters to GetLogMetric.
+            metric_name (:class:`str`):
+                Required. The resource name of the desired metric:
+
+                ::
+
+                    "projects/[PROJECT_ID]/metrics/[METRIC_ID]".
+                This corresponds to the ``metric_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_metrics.LogMetric:
+                Describes a logs-based metric. The
+                value of the metric is the number of log
+                entries that match a logs filter in a
+                given time interval.
+                Logs-based metrics can also be used to
+                extract values from logs and create a
+                distribution of the values. The
+                distribution records the statistics of
+                the extracted values along with an
+                optional histogram of the values as
+                specified by the bucket options.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([metric_name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_metrics.GetLogMetricRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_metrics.GetLogMetricRequest):
+            request = logging_metrics.GetLogMetricRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if metric_name is not None:
+            request.metric_name = metric_name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_log_metric]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("metric_name", request.metric_name),)
+            ),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def create_log_metric(
+        self,
+        request: logging_metrics.CreateLogMetricRequest = None,
+        *,
+        parent: str = None,
+        metric: logging_metrics.LogMetric = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_metrics.LogMetric:
+        r"""Creates a logs-based metric.
+
+        Args:
+            request (:class:`~.logging_metrics.CreateLogMetricRequest`):
+                The request object. The parameters to CreateLogMetric.
+            parent (:class:`str`):
+                Required. The resource name of the project in which to
+                create the metric:
+
+                ::
+
+                    "projects/[PROJECT_ID]"
+
+                The new metric must be provided in the request.
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            metric (:class:`~.logging_metrics.LogMetric`):
+                Required. The new logs-based metric,
+                which must not have an identifier that
+                already exists.
+                This corresponds to the ``metric`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_metrics.LogMetric:
+                Describes a logs-based metric. The
+                value of the metric is the number of log
+                entries that match a logs filter in a
+                given time interval.
+                Logs-based metrics can also be used to
+                extract values from logs and create a
+                distribution of the values. The
+                distribution records the statistics of
+                the extracted values along with an
+                optional histogram of the values as
+                specified by the bucket options.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, metric])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_metrics.CreateLogMetricRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_metrics.CreateLogMetricRequest):
+            request = logging_metrics.CreateLogMetricRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if parent is not None:
+            request.parent = parent
+        if metric is not None:
+            request.metric = metric
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_log_metric]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def update_log_metric(
+        self,
+        request: logging_metrics.UpdateLogMetricRequest = None,
+        *,
+        metric_name: str = None,
+        metric: logging_metrics.LogMetric = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> logging_metrics.LogMetric:
+        r"""Creates or updates a logs-based metric.
+
+        Args:
+            request (:class:`~.logging_metrics.UpdateLogMetricRequest`):
+                The request object. The parameters to UpdateLogMetric.
+            metric_name (:class:`str`):
+                Required. The resource name of the metric to update:
+
+                ::
+
+                    "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+
+                The updated metric must be provided in the request and
+                its ``name`` field must be the same as ``[METRIC_ID]``.
+                If the metric does not exist in ``[PROJECT_ID]``, then a
+                new metric is created.
+                This corresponds to the ``metric_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            metric (:class:`~.logging_metrics.LogMetric`):
+                Required. The updated metric.
+                This corresponds to the ``metric`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.logging_metrics.LogMetric:
+                Describes a logs-based metric. The
+                value of the metric is the number of log
+                entries that match a logs filter in a
+                given time interval.
+                Logs-based metrics can also be used to
+                extract values from logs and create a
+                distribution of the values. The
+                distribution records the statistics of
+                the extracted values along with an
+                optional histogram of the values as
+                specified by the bucket options.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([metric_name, metric])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_metrics.UpdateLogMetricRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_metrics.UpdateLogMetricRequest):
+            request = logging_metrics.UpdateLogMetricRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if metric_name is not None:
+            request.metric_name = metric_name
+        if metric is not None:
+            request.metric = metric
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_log_metric]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("metric_name", request.metric_name),)
+            ),
+        )
+
+        # Send the request.
+        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+        # Done; return the response.
+        return response
+
+    def delete_log_metric(
+        self,
+        request: logging_metrics.DeleteLogMetricRequest = None,
+        *,
+        metric_name: str = None,
+        retry: retries.Retry = gapic_v1.method.DEFAULT,
+        timeout: float = None,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> None:
+        r"""Deletes a logs-based metric.
+
+        Args:
+            request (:class:`~.logging_metrics.DeleteLogMetricRequest`):
+                The request object. The parameters to DeleteLogMetric.
+            metric_name (:class:`str`):
+                Required. The resource name of the metric to delete:
+
+                ::
+
+                    "projects/[PROJECT_ID]/metrics/[METRIC_ID]".
+                This corresponds to the ``metric_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([metric_name])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a logging_metrics.DeleteLogMetricRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, logging_metrics.DeleteLogMetricRequest):
+            request = logging_metrics.DeleteLogMetricRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+
+        if metric_name is not None:
+            request.metric_name = metric_name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_log_metric]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata(
+                (("metric_name", request.metric_name),)
+            ),
+        )
+
+        # Send the request.
+        rpc(
+            request, retry=retry, timeout=timeout, metadata=metadata,
+        )
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("MetricsServiceV2Client",)
diff --git a/google/cloud/logging_v2/services/metrics_service_v2/pagers.py b/google/cloud/logging_v2/services/metrics_service_v2/pagers.py
new file mode 100644
index 000000000..09010a685
--- /dev/null
+++ b/google/cloud/logging_v2/services/metrics_service_v2/pagers.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
+
+from google.cloud.logging_v2.types import logging_metrics
+
+
+class ListLogMetricsPager:
+    """A pager for iterating through ``list_log_metrics`` requests.
+
+    This class thinly wraps an initial
+    :class:`~.logging_metrics.ListLogMetricsResponse` object, and
+    provides an ``__iter__`` method to iterate through its
+    ``metrics`` field.
+
+    If there are more pages, the ``__iter__`` method will make additional
+    ``ListLogMetrics`` requests and continue to iterate
+    through the ``metrics`` field on the
+    corresponding responses.
+
+    All the usual :class:`~.logging_metrics.ListLogMetricsResponse`
+    attributes are available on the pager. If multiple requests are made, only
+    the most recent response is retained, and thus used for attribute lookup.
+    """
+
+    def __init__(
+        self,
+        method: Callable[..., logging_metrics.ListLogMetricsResponse],
+        request: logging_metrics.ListLogMetricsRequest,
+        response: logging_metrics.ListLogMetricsResponse,
+        *,
+        metadata: Sequence[Tuple[str, str]] = ()
+    ):
+        """Instantiate the pager.
+
+        Args:
+            method (Callable): The method that was originally called, and
+                which instantiated this pager.
+            request (:class:`~.logging_metrics.ListLogMetricsRequest`):
+                The initial request object.
+            response (:class:`~.logging_metrics.ListLogMetricsResponse`):
+                The initial response object.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        self._method = method
+        self._request = logging_metrics.ListLogMetricsRequest(request)
+        self._response = response
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self._response, name)
+
+    @property
+    def pages(self) -> Iterable[logging_metrics.ListLogMetricsResponse]:
+        yield self._response
+        while self._response.next_page_token:
+            self._request.page_token = self._response.next_page_token
+            self._response = self._method(self._request, metadata=self._metadata)
+            yield self._response
+
+    def __iter__(self) -> Iterable[logging_metrics.LogMetric]:
+        for page in self.pages:
+            yield from page.metrics
+
+    def __repr__(self) -> str:
+        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListLogMetricsAsyncPager:
+    """A pager for iterating through ``list_log_metrics`` requests.
+
+    This class thinly wraps an initial
+    :class:`~.logging_metrics.ListLogMetricsResponse` object, and
+    provides an ``__aiter__`` method to iterate through its
+    ``metrics`` field.
+
+    If there are more pages, the ``__aiter__`` method will make additional
+    ``ListLogMetrics`` requests and continue to iterate
+    through the ``metrics`` field on the
+    corresponding responses.
+
+    All the usual :class:`~.logging_metrics.ListLogMetricsResponse`
+    attributes are available on the pager. If multiple requests are made, only
+    the most recent response is retained, and thus used for attribute lookup.
+    """
+
+    def __init__(
+        self,
+        method: Callable[..., Awaitable[logging_metrics.ListLogMetricsResponse]],
+        request: logging_metrics.ListLogMetricsRequest,
+        response: logging_metrics.ListLogMetricsResponse,
+        *,
+        metadata: Sequence[Tuple[str, str]] = ()
+    ):
+        """Instantiate the pager.
+
+        Args:
+            method (Callable): The method that was originally called, and
+                which instantiated this pager.
+            request (:class:`~.logging_metrics.ListLogMetricsRequest`):
+                The initial request object.
+            response (:class:`~.logging_metrics.ListLogMetricsResponse`):
+                The initial response object.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        self._method = method
+        self._request = logging_metrics.ListLogMetricsRequest(request)
+        self._response = response
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:
+        return getattr(self._response, name)
+
+    @property
+    async def pages(self) -> AsyncIterable[logging_metrics.ListLogMetricsResponse]:
+        yield self._response
+        while self._response.next_page_token:
+            self._request.page_token = self._response.next_page_token
+            self._response = await self._method(self._request, metadata=self._metadata)
+            yield self._response
+
+    def __aiter__(self) -> AsyncIterable[logging_metrics.LogMetric]:
+        async def async_generator():
+            async for page in self.pages:
+                for response in page.metrics:
+                    yield response
+
+        return async_generator()
+
+    def __repr__(self) -> str:
+        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/logging_v2/services/metrics_service_v2/transports/__init__.py b/google/cloud/logging_v2/services/metrics_service_v2/transports/__init__.py
new file mode 100644
index 000000000..eef07abd7
--- /dev/null
+++ b/google/cloud/logging_v2/services/metrics_service_v2/transports/__init__.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import MetricsServiceV2Transport
+from .grpc import MetricsServiceV2GrpcTransport
+from .grpc_asyncio import MetricsServiceV2GrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict()  # type: Dict[str, Type[MetricsServiceV2Transport]]
+_transport_registry["grpc"] = MetricsServiceV2GrpcTransport
+_transport_registry["grpc_asyncio"] = MetricsServiceV2GrpcAsyncIOTransport
+
+
+__all__ = (
+    "MetricsServiceV2Transport",
+    "MetricsServiceV2GrpcTransport",
+    "MetricsServiceV2GrpcAsyncIOTransport",
+)
diff --git a/google/cloud/logging_v2/services/metrics_service_v2/transports/base.py b/google/cloud/logging_v2/services/metrics_service_v2/transports/base.py
new file mode 100644
index 000000000..78d226dfa
--- /dev/null
+++ b/google/cloud/logging_v2/services/metrics_service_v2/transports/base.py
@@ -0,0 +1,234 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import abc
+import typing
+import pkg_resources
+
+from google import auth  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+
+from google.cloud.logging_v2.types import logging_metrics
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class MetricsServiceV2Transport(abc.ABC):
+    """Abstract transport class for MetricsServiceV2."""
+
+    AUTH_SCOPES = (
+        "https://www.googleapis.com/auth/cloud-platform",
+        "https://www.googleapis.com/auth/cloud-platform.read-only",
+        "https://www.googleapis.com/auth/logging.admin",
+        "https://www.googleapis.com/auth/logging.read",
+        "https://www.googleapis.com/auth/logging.write",
+    )
+
+    def __init__(
+        self,
+        *,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: typing.Optional[str] = None,
+        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+        **kwargs,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ":" not in host:
+            host += ":443"
+        self._host = host
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise exceptions.DuplicateCredentialArgs(
+                "'credentials_file' and 'credentials' are mutually exclusive"
+            )
+
+        if credentials_file is not None:
+            credentials, _ = auth.load_credentials_from_file(
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
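The table of wrapped methods defined next is what gives each RPC its default retry policy and 60-second timeout. A small sketch of overriding those defaults for a single call, via the ``retry``/``timeout`` parameters the client methods above already expose (the retry values here are hypothetical):

```python
from google.api_core import exceptions, retry as retries

from google.cloud.logging_v2.services.metrics_service_v2.client import (
    MetricsServiceV2Client,
)

client = MetricsServiceV2Client()  # assumes Application Default Credentials

# Retry only on UNAVAILABLE, with a tighter deadline than the 60s default.
custom_retry = retries.Retry(
    initial=0.2,
    maximum=10.0,
    multiplier=2.0,
    predicate=retries.if_exception_type(exceptions.ServiceUnavailable),
)
pager = client.list_log_metrics(
    parent="projects/my-project", retry=custom_retry, timeout=30.0
)
for metric in pager:  # the pager resolves subsequent pages transparently
    print(metric.name)
```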
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.list_log_metrics: gapic_v1.method.wrap_method(
+                self.list_log_metrics,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.DeadlineExceeded,
+                        exceptions.InternalServerError,
+                        exceptions.ServiceUnavailable,
+                    ),
+                ),
+                default_timeout=60.0,
+                client_info=client_info,
+            ),
+            self.get_log_metric: gapic_v1.method.wrap_method(
+                self.get_log_metric,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.DeadlineExceeded,
+                        exceptions.InternalServerError,
+                        exceptions.ServiceUnavailable,
+                    ),
+                ),
+                default_timeout=60.0,
+                client_info=client_info,
+            ),
+            self.create_log_metric: gapic_v1.method.wrap_method(
+                self.create_log_metric, default_timeout=60.0, client_info=client_info,
+            ),
+            self.update_log_metric: gapic_v1.method.wrap_method(
+                self.update_log_metric,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.DeadlineExceeded,
+                        exceptions.InternalServerError,
+                        exceptions.ServiceUnavailable,
+                    ),
+                ),
+                default_timeout=60.0,
+                client_info=client_info,
+            ),
+            self.delete_log_metric: gapic_v1.method.wrap_method(
+                self.delete_log_metric,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        exceptions.DeadlineExceeded,
+                        exceptions.InternalServerError,
+                        exceptions.ServiceUnavailable,
+                    ),
+                ),
+                default_timeout=60.0,
+                client_info=client_info,
+            ),
+        }
+
+    @property
+    def list_log_metrics(
+        self,
+    ) -> typing.Callable[
+        [logging_metrics.ListLogMetricsRequest],
+        typing.Union[
+            logging_metrics.ListLogMetricsResponse,
+            typing.Awaitable[logging_metrics.ListLogMetricsResponse],
+        ],
+    ]:
+        raise NotImplementedError()
+
+    @property
+    def get_log_metric(
+        self,
+    ) -> typing.Callable[
+        [logging_metrics.GetLogMetricRequest],
+        typing.Union[
+            logging_metrics.LogMetric, typing.Awaitable[logging_metrics.LogMetric]
+        ],
+    ]:
+        raise NotImplementedError()
+
+    @property
+    def create_log_metric(
+        self,
+    ) -> typing.Callable[
+        [logging_metrics.CreateLogMetricRequest],
+        typing.Union[
+            logging_metrics.LogMetric, typing.Awaitable[logging_metrics.LogMetric]
+        ],
+    ]:
+        raise NotImplementedError()
+
+    @property
+    def update_log_metric(
+        self,
+    ) -> typing.Callable[
+        [logging_metrics.UpdateLogMetricRequest],
+        typing.Union[
+            logging_metrics.LogMetric, typing.Awaitable[logging_metrics.LogMetric]
+        ],
+    ]:
+        raise NotImplementedError()
+
+    @property
+    def delete_log_metric(
+        self,
+    ) -> typing.Callable[
+        [logging_metrics.DeleteLogMetricRequest],
+        typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
+    ]:
+        raise NotImplementedError()
+
+
+__all__ = ("MetricsServiceV2Transport",)
diff --git a/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc.py b/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc.py
new file mode 100644
index 000000000..0a6f25bd6
--- /dev/null
+++ b/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc.py
@@ -0,0 +1,366 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.logging_v2.types import logging_metrics
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO
+
+
+class MetricsServiceV2GrpcTransport(MetricsServiceV2Transport):
+    """gRPC backend transport for MetricsServiceV2.
+
+    Service for configuring logs-based metrics.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _stubs: Dict[str, Callable]
+
+    def __init__(
+        self,
+        *,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Sequence[str] = None,
+        channel: grpc.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "logging.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            address (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def list_log_metrics(
+        self,
+    ) -> Callable[
+        [logging_metrics.ListLogMetricsRequest], logging_metrics.ListLogMetricsResponse
+    ]:
+        r"""Return a callable for the list log metrics method over gRPC.
+
+        Lists logs-based metrics.
+
+        Returns:
+            Callable[[~.ListLogMetricsRequest],
+                    ~.ListLogMetricsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "list_log_metrics" not in self._stubs:
+            self._stubs["list_log_metrics"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.MetricsServiceV2/ListLogMetrics",
+                request_serializer=logging_metrics.ListLogMetricsRequest.serialize,
+                response_deserializer=logging_metrics.ListLogMetricsResponse.deserialize,
+            )
+        return self._stubs["list_log_metrics"]
+
+    @property
+    def get_log_metric(
+        self,
+    ) -> Callable[[logging_metrics.GetLogMetricRequest], logging_metrics.LogMetric]:
+        r"""Return a callable for the get log metric method over gRPC.
+
+        Gets a logs-based metric.
+
+        Returns:
+            Callable[[~.GetLogMetricRequest],
+                    ~.LogMetric]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_log_metric" not in self._stubs:
+            self._stubs["get_log_metric"] = self.grpc_channel.unary_unary(
+                "/google.logging.v2.MetricsServiceV2/GetLogMetric",
+                request_serializer=logging_metrics.GetLogMetricRequest.serialize,
+                response_deserializer=logging_metrics.LogMetric.deserialize,
+            )
+        return self._stubs["get_log_metric"]
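Each stub property here materializes its gRPC callable on first access and caches it in ``self._stubs``; a quick sketch of that behavior (assumes default credentials are available so the channel can be built):

```python
from google.cloud.logging_v2.services.metrics_service_v2.transports.grpc import (
    MetricsServiceV2GrpcTransport,
)

transport = MetricsServiceV2GrpcTransport()  # builds a channel via auth.default()

rpc = transport.list_log_metrics            # first access creates the stub...
assert rpc is transport.list_log_metrics    # ...repeat access returns the cached one
```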
+ if "create_log_metric" not in self._stubs: + self._stubs["create_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/CreateLogMetric", + request_serializer=logging_metrics.CreateLogMetricRequest.serialize, + response_deserializer=logging_metrics.LogMetric.deserialize, + ) + return self._stubs["create_log_metric"] + + @property + def update_log_metric( + self, + ) -> Callable[[logging_metrics.UpdateLogMetricRequest], logging_metrics.LogMetric]: + r"""Return a callable for the update log metric method over gRPC. + + Creates or updates a logs-based metric. + + Returns: + Callable[[~.UpdateLogMetricRequest], + ~.LogMetric]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_log_metric" not in self._stubs: + self._stubs["update_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/UpdateLogMetric", + request_serializer=logging_metrics.UpdateLogMetricRequest.serialize, + response_deserializer=logging_metrics.LogMetric.deserialize, + ) + return self._stubs["update_log_metric"] + + @property + def delete_log_metric( + self, + ) -> Callable[[logging_metrics.DeleteLogMetricRequest], empty.Empty]: + r"""Return a callable for the delete log metric method over gRPC. + + Deletes a logs-based metric. + + Returns: + Callable[[~.DeleteLogMetricRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_log_metric" not in self._stubs: + self._stubs["delete_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/DeleteLogMetric", + request_serializer=logging_metrics.DeleteLogMetricRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_log_metric"] + + +__all__ = ("MetricsServiceV2GrpcTransport",) diff --git a/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py b/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py new file mode 100644 index 000000000..9ec30eed0 --- /dev/null +++ b/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py @@ -0,0 +1,377 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.logging_v2.types import logging_metrics +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO +from .grpc import MetricsServiceV2GrpcTransport + + +class MetricsServiceV2GrpcAsyncIOTransport(MetricsServiceV2Transport): + """gRPC AsyncIO backend transport for MetricsServiceV2. + + Service for configuring logs-based metrics. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "logging.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_log_metrics( + self, + ) -> Callable[ + [logging_metrics.ListLogMetricsRequest], + Awaitable[logging_metrics.ListLogMetricsResponse], + ]: + r"""Return a callable for the list log metrics method over gRPC. + + Lists logs-based metrics. + + Returns: + Callable[[~.ListLogMetricsRequest], + Awaitable[~.ListLogMetricsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_log_metrics" not in self._stubs: + self._stubs["list_log_metrics"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/ListLogMetrics", + request_serializer=logging_metrics.ListLogMetricsRequest.serialize, + response_deserializer=logging_metrics.ListLogMetricsResponse.deserialize, + ) + return self._stubs["list_log_metrics"] + + @property + def get_log_metric( + self, + ) -> Callable[ + [logging_metrics.GetLogMetricRequest], Awaitable[logging_metrics.LogMetric] + ]: + r"""Return a callable for the get log metric method over gRPC. + + Gets a logs-based metric. + + Returns: + Callable[[~.GetLogMetricRequest], + Awaitable[~.LogMetric]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_log_metric" not in self._stubs: + self._stubs["get_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/GetLogMetric", + request_serializer=logging_metrics.GetLogMetricRequest.serialize, + response_deserializer=logging_metrics.LogMetric.deserialize, + ) + return self._stubs["get_log_metric"] + + @property + def create_log_metric( + self, + ) -> Callable[ + [logging_metrics.CreateLogMetricRequest], Awaitable[logging_metrics.LogMetric] + ]: + r"""Return a callable for the create log metric method over gRPC. + + Creates a logs-based metric. + + Returns: + Callable[[~.CreateLogMetricRequest], + Awaitable[~.LogMetric]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
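As the ``__init__`` above documents, a pre-built ``aio.Channel`` bypasses credential resolution entirely; a sketch of wiring the AsyncIO transport to a local endpoint (the address is hypothetical, e.g. an emulator):

```python
from grpc.experimental import aio  # type: ignore

from google.cloud.logging_v2.services.metrics_service_v2.transports.grpc_asyncio import (
    MetricsServiceV2GrpcAsyncIOTransport,
)

# When `channel` is given, the transport sets credentials=False and never
# calls google.auth; credentials_file and scopes are likewise ignored.
channel = aio.insecure_channel("localhost:8080")
transport = MetricsServiceV2GrpcAsyncIOTransport(channel=channel)
```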
+ if "create_log_metric" not in self._stubs: + self._stubs["create_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/CreateLogMetric", + request_serializer=logging_metrics.CreateLogMetricRequest.serialize, + response_deserializer=logging_metrics.LogMetric.deserialize, + ) + return self._stubs["create_log_metric"] + + @property + def update_log_metric( + self, + ) -> Callable[ + [logging_metrics.UpdateLogMetricRequest], Awaitable[logging_metrics.LogMetric] + ]: + r"""Return a callable for the update log metric method over gRPC. + + Creates or updates a logs-based metric. + + Returns: + Callable[[~.UpdateLogMetricRequest], + Awaitable[~.LogMetric]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_log_metric" not in self._stubs: + self._stubs["update_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/UpdateLogMetric", + request_serializer=logging_metrics.UpdateLogMetricRequest.serialize, + response_deserializer=logging_metrics.LogMetric.deserialize, + ) + return self._stubs["update_log_metric"] + + @property + def delete_log_metric( + self, + ) -> Callable[[logging_metrics.DeleteLogMetricRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete log metric method over gRPC. + + Deletes a logs-based metric. + + Returns: + Callable[[~.DeleteLogMetricRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_log_metric" not in self._stubs: + self._stubs["delete_log_metric"] = self.grpc_channel.unary_unary( + "/google.logging.v2.MetricsServiceV2/DeleteLogMetric", + request_serializer=logging_metrics.DeleteLogMetricRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_log_metric"] + + +__all__ = ("MetricsServiceV2GrpcAsyncIOTransport",) diff --git a/google/cloud/logging_v2/types.py b/google/cloud/logging_v2/types.py deleted file mode 100644 index 464edbe70..000000000 --- a/google/cloud/logging_v2/types.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -import sys - -from google.api import distribution_pb2 -from google.api import http_pb2 -from google.api import label_pb2 -from google.api import metric_pb2 -from google.api import monitored_resource_pb2 -from google.logging.type import http_request_pb2 -from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import struct_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - -from google.api_core.protobuf_helpers import get_messages -from google.cloud.logging_v2.proto import log_entry_pb2 -from google.cloud.logging_v2.proto import logging_config_pb2 -from google.cloud.logging_v2.proto import logging_metrics_pb2 -from google.cloud.logging_v2.proto import logging_pb2 - - -_shared_modules = [ - distribution_pb2, - http_pb2, - label_pb2, - metric_pb2, - monitored_resource_pb2, - http_request_pb2, - any_pb2, - descriptor_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - struct_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [log_entry_pb2, logging_config_pb2, logging_metrics_pb2, logging_pb2] - -names = [] - -for module in _shared_modules: - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) - -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.logging_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/logging_v2/types/__init__.py b/google/cloud/logging_v2/types/__init__.py new file mode 100644 index 000000000..ab5f9c8c1 --- /dev/null +++ b/google/cloud/logging_v2/types/__init__.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .log_entry import ( + LogEntry, + LogEntryOperation, + LogEntrySourceLocation, +) +from .logging_config import ( + LogBucket, + LogSink, + BigQueryOptions, + ListBucketsRequest, + ListBucketsResponse, + UpdateBucketRequest, + GetBucketRequest, + ListSinksRequest, + ListSinksResponse, + GetSinkRequest, + CreateSinkRequest, + UpdateSinkRequest, + DeleteSinkRequest, + LogExclusion, + ListExclusionsRequest, + ListExclusionsResponse, + GetExclusionRequest, + CreateExclusionRequest, + UpdateExclusionRequest, + DeleteExclusionRequest, + GetCmekSettingsRequest, + UpdateCmekSettingsRequest, + CmekSettings, +) +from .logging import ( + DeleteLogRequest, + WriteLogEntriesRequest, + WriteLogEntriesResponse, + WriteLogEntriesPartialErrors, + ListLogEntriesRequest, + ListLogEntriesResponse, + ListMonitoredResourceDescriptorsRequest, + ListMonitoredResourceDescriptorsResponse, + ListLogsRequest, + ListLogsResponse, +) +from .logging_metrics import ( + LogMetric, + ListLogMetricsRequest, + ListLogMetricsResponse, + GetLogMetricRequest, + CreateLogMetricRequest, + UpdateLogMetricRequest, + DeleteLogMetricRequest, +) + + +__all__ = ( + "LogEntry", + "LogEntryOperation", + "LogEntrySourceLocation", + "LogBucket", + "LogSink", + "BigQueryOptions", + "ListBucketsRequest", + "ListBucketsResponse", + "UpdateBucketRequest", + "GetBucketRequest", + "ListSinksRequest", + "ListSinksResponse", + "GetSinkRequest", + "CreateSinkRequest", + "UpdateSinkRequest", + "DeleteSinkRequest", + "LogExclusion", + "ListExclusionsRequest", + "ListExclusionsResponse", + "GetExclusionRequest", + "CreateExclusionRequest", + "UpdateExclusionRequest", + "DeleteExclusionRequest", + "GetCmekSettingsRequest", + "UpdateCmekSettingsRequest", + "CmekSettings", + "DeleteLogRequest", + "WriteLogEntriesRequest", + "WriteLogEntriesResponse", + "WriteLogEntriesPartialErrors", + "ListLogEntriesRequest", + "ListLogEntriesResponse", + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "ListLogsRequest", + "ListLogsResponse", + "LogMetric", + "ListLogMetricsRequest", + "ListLogMetricsResponse", + "GetLogMetricRequest", + "CreateLogMetricRequest", + "UpdateLogMetricRequest", + "DeleteLogMetricRequest", +) diff --git a/google/cloud/logging_v2/types/log_entry.py b/google/cloud/logging_v2/types/log_entry.py new file mode 100644 index 000000000..a481557fd --- /dev/null +++ b/google/cloud/logging_v2/types/log_entry.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import proto # type: ignore + + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.logging.type import http_request_pb2 as glt_http_request # type: ignore +from google.logging.type import log_severity_pb2 as log_severity # type: ignore +from google.protobuf import any_pb2 as gp_any # type: ignore +from google.protobuf import struct_pb2 as struct # type: ignore +from google.protobuf import timestamp_pb2 as gp_timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.logging.v2", + manifest={"LogEntry", "LogEntryOperation", "LogEntrySourceLocation",}, +) + + +class LogEntry(proto.Message): + r"""An individual entry in a log. + + Attributes: + log_name (str): + Required. The resource name of the log to which this log + entry belongs: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + A project number may be used in place of PROJECT_ID. The + project number is translated to its corresponding PROJECT_ID + internally and the ``log_name`` field will contain + PROJECT_ID in queries and exports. + + ``[LOG_ID]`` must be URL-encoded within ``log_name``. + Example: + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + ``[LOG_ID]`` must be less than 512 characters long and can + only include the following characters: upper and lower case + alphanumeric characters, forward-slash, underscore, hyphen, + and period. + + For backward compatibility, if ``log_name`` begins with a + forward-slash, such as ``/projects/...``, then the log entry + is ingested as usual but the forward-slash is removed. + Listing the log entry will not show the leading slash and + filtering for a log name with a leading slash will never + return any results. + resource (~.monitored_resource.MonitoredResource): + Required. The monitored resource that + produced this log entry. + Example: a log entry that reports a database + error would be associated with the monitored + resource designating the particular database + that reported the error. + proto_payload (~.gp_any.Any): + The log entry payload, represented as a + protocol buffer. Some Google Cloud Platform + services use this field for their log entry + payloads. + The following protocol buffer types are + supported; user-defined types are not supported: + + "type.googleapis.com/google.cloud.audit.AuditLog" + "type.googleapis.com/google.appengine.logging.v1.RequestLog". + text_payload (str): + The log entry payload, represented as a + Unicode string (UTF-8). + json_payload (~.struct.Struct): + The log entry payload, represented as a + structure that is expressed as a JSON object. + timestamp (~.gp_timestamp.Timestamp): + Optional. The time the event described by the log entry + occurred. This time is used to compute the log entry's age + and to enforce the logs retention period. If this field is + omitted in a new log entry, then Logging assigns it the + current time. Timestamps have nanosecond accuracy, but + trailing zeros in the fractional seconds might be omitted + when the timestamp is displayed. + + Incoming log entries must have timestamps that don't exceed + the `logs retention + period `__ + in the past, and that don't exceed 24 hours in the future. + Log entries outside those time boundaries aren't ingested by + Logging. + receive_timestamp (~.gp_timestamp.Timestamp): + Output only. 
The time the log entry was + received by Logging. + severity (~.log_severity.LogSeverity): + Optional. The severity of the log entry. The default value + is ``LogSeverity.DEFAULT``. + insert_id (str): + Optional. A unique identifier for the log entry. If you + provide a value, then Logging considers other log entries in + the same project, with the same ``timestamp``, and with the + same ``insert_id`` to be duplicates which are removed in a + single query result. However, there are no guarantees of + de-duplication in the export of logs. + + If the ``insert_id`` is omitted when writing a log entry, + the Logging API assigns its own unique identifier in this + field. + + In queries, the ``insert_id`` is also used to order log + entries that have the same ``log_name`` and ``timestamp`` + values. + http_request (~.glt_http_request.HttpRequest): + Optional. Information about the HTTP request + associated with this log entry, if applicable. + labels (Sequence[~.log_entry.LogEntry.LabelsEntry]): + Optional. A set of user-defined (key, value) + data that provides additional information about + the log entry. + operation (~.log_entry.LogEntryOperation): + Optional. Information about an operation + associated with the log entry, if applicable. + trace (str): + Optional. Resource name of the trace associated with the log + entry, if any. If it contains a relative resource name, the + name is assumed to be relative to + ``//tracing.googleapis.com``. Example: + ``projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824`` + span_id (str): + Optional. The span ID within the trace associated with the + log entry. + + For Trace spans, this is the same format that the Trace API + v2 uses: a 16-character hexadecimal encoding of an 8-byte + array, such as ``000000000000004a``. + trace_sampled (bool): + Optional. The sampling decision of the trace associated with + the log entry. + + True means that the trace resource name in the ``trace`` + field was sampled for storage in a trace backend. False + means that the trace was not sampled for storage when this + log entry was written, or the sampling decision was unknown + at the time. A non-sampled ``trace`` value is still useful + as a request correlation identifier. The default is False. + source_location (~.log_entry.LogEntrySourceLocation): + Optional. Source code location information + associated with the log entry, if any. 
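+
+    Example (an illustrative sketch; ``my-project`` and the log name are
+    placeholders, and a ``resource`` is normally supplied as well)::
+
+        entry = LogEntry(
+            log_name="projects/my-project/logs/stdout",
+            text_payload="hello world",
+        )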
+ """ + + log_name = proto.Field(proto.STRING, number=12) + + resource = proto.Field( + proto.MESSAGE, number=8, message=monitored_resource.MonitoredResource, + ) + + proto_payload = proto.Field( + proto.MESSAGE, number=2, oneof="payload", message=gp_any.Any, + ) + + text_payload = proto.Field(proto.STRING, number=3, oneof="payload") + + json_payload = proto.Field( + proto.MESSAGE, number=6, oneof="payload", message=struct.Struct, + ) + + timestamp = proto.Field(proto.MESSAGE, number=9, message=gp_timestamp.Timestamp,) + + receive_timestamp = proto.Field( + proto.MESSAGE, number=24, message=gp_timestamp.Timestamp, + ) + + severity = proto.Field(proto.ENUM, number=10, enum=log_severity.LogSeverity,) + + insert_id = proto.Field(proto.STRING, number=4) + + http_request = proto.Field( + proto.MESSAGE, number=7, message=glt_http_request.HttpRequest, + ) + + labels = proto.MapField(proto.STRING, proto.STRING, number=11) + + operation = proto.Field(proto.MESSAGE, number=15, message="LogEntryOperation",) + + trace = proto.Field(proto.STRING, number=22) + + span_id = proto.Field(proto.STRING, number=27) + + trace_sampled = proto.Field(proto.BOOL, number=30) + + source_location = proto.Field( + proto.MESSAGE, number=23, message="LogEntrySourceLocation", + ) + + +class LogEntryOperation(proto.Message): + r"""Additional information about a potentially long-running + operation with which a log entry is associated. + + Attributes: + id (str): + Optional. An arbitrary operation identifier. + Log entries with the same identifier are assumed + to be part of the same operation. + producer (str): + Optional. An arbitrary producer identifier. The combination + of ``id`` and ``producer`` must be globally unique. Examples + for ``producer``: ``"MyDivision.MyBigCompany.com"``, + ``"github.com/MyProject/MyApplication"``. + first (bool): + Optional. Set this to True if this is the + first log entry in the operation. + last (bool): + Optional. Set this to True if this is the + last log entry in the operation. + """ + + id = proto.Field(proto.STRING, number=1) + + producer = proto.Field(proto.STRING, number=2) + + first = proto.Field(proto.BOOL, number=3) + + last = proto.Field(proto.BOOL, number=4) + + +class LogEntrySourceLocation(proto.Message): + r"""Additional information about the source code location that + produced the log entry. + + Attributes: + file (str): + Optional. Source file name. Depending on the + runtime environment, this might be a simple name + or a fully-qualified name. + line (int): + Optional. Line within the source file. + 1-based; 0 indicates no line number available. + function (str): + Optional. Human-readable name of the function or method + being invoked, with optional context such as the class or + package name. This information may be used in contexts such + as the logs viewer, where a file and line number are less + meaningful. The format can vary by language. For example: + ``qual.if.ied.Class.method`` (Java), ``dir/package.func`` + (Go), ``function`` (Python). 
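+
+    Example (illustrative only; the values are placeholders)::
+
+        location = LogEntrySourceLocation(
+            file="app/server.py", line=42, function="handle_request",
+        )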
+ """ + + file = proto.Field(proto.STRING, number=1) + + line = proto.Field(proto.INT64, number=2) + + function = proto.Field(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/logging_v2/types/logging.py b/google/cloud/logging_v2/types/logging.py new file mode 100644 index 000000000..0d44439ab --- /dev/null +++ b/google/cloud/logging_v2/types/logging.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.api import monitored_resource_pb2 as monitored_resource # type: ignore +from google.cloud.logging_v2.types import log_entry +from google.rpc import status_pb2 as status # type: ignore + + +__protobuf__ = proto.module( + package="google.logging.v2", + manifest={ + "DeleteLogRequest", + "WriteLogEntriesRequest", + "WriteLogEntriesResponse", + "WriteLogEntriesPartialErrors", + "ListLogEntriesRequest", + "ListLogEntriesResponse", + "ListMonitoredResourceDescriptorsRequest", + "ListMonitoredResourceDescriptorsResponse", + "ListLogsRequest", + "ListLogsResponse", + }, +) + + +class DeleteLogRequest(proto.Message): + r"""The parameters to DeleteLog. + + Attributes: + log_name (str): + Required. The resource name of the log to delete: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example, + ``"projects/my-project-id/logs/syslog"``, + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + For more information about log names, see + [LogEntry][google.logging.v2.LogEntry]. + """ + + log_name = proto.Field(proto.STRING, number=1) + + +class WriteLogEntriesRequest(proto.Message): + r"""The parameters to WriteLogEntries. + + Attributes: + log_name (str): + Optional. A default log resource name that is assigned to + all log entries in ``entries`` that do not specify a value + for ``log_name``: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example: + + :: + + "projects/my-project-id/logs/syslog" + "organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity" + + The permission ``logging.logEntries.create`` is needed on + each project, organization, billing account, or folder that + is receiving new log entries, whether the resource is + specified in ``logName`` or in an individual log entry. + resource (~.monitored_resource.MonitoredResource): + Optional. A default monitored resource object that is + assigned to all log entries in ``entries`` that do not + specify a value for ``resource``. 
Example: + + :: + + { "type": "gce_instance", + "labels": { + "zone": "us-central1-a", "instance_id": "00000000000000000000" }} + + See [LogEntry][google.logging.v2.LogEntry]. + labels (Sequence[~.logging.WriteLogEntriesRequest.LabelsEntry]): + Optional. Default labels that are added to the ``labels`` + field of all log entries in ``entries``. If a log entry + already has a label with the same key as a label in this + parameter, then the log entry's label is not changed. See + [LogEntry][google.logging.v2.LogEntry]. + entries (Sequence[~.log_entry.LogEntry]): + Required. The log entries to send to Logging. The order of + log entries in this list does not matter. Values supplied in + this method's ``log_name``, ``resource``, and ``labels`` + fields are copied into those log entries in this list that + do not include values for their corresponding fields. For + more information, see the + [LogEntry][google.logging.v2.LogEntry] type. + + If the ``timestamp`` or ``insert_id`` fields are missing in + log entries, then this method supplies the current time or a + unique identifier, respectively. The supplied values are + chosen so that, among the log entries that did not supply + their own values, the entries earlier in the list will sort + before the entries later in the list. See the + ``entries.list`` method. + + Log entries with timestamps that are more than the `logs + retention + period `__ in + the past or more than 24 hours in the future will not be + available when calling ``entries.list``. However, those log + entries can still be `exported with + LogSinks `__. + + To improve throughput and to avoid exceeding the `quota + limit `__ for + calls to ``entries.write``, you should try to include + several log entries in this list, rather than calling this + method for each individual log entry. + partial_success (bool): + Optional. Whether valid entries should be written even if + some other entries fail due to INVALID_ARGUMENT or + PERMISSION_DENIED errors. If any entry is not written, then + the response status is the error associated with one of the + failed entries and the response includes error details keyed + by the entries' zero-based index in the ``entries.write`` + method. + dry_run (bool): + Optional. If true, the request should expect + normal response, but the entries won't be + persisted nor exported. Useful for checking + whether the logging API endpoints are working + properly before sending valuable data. + """ + + log_name = proto.Field(proto.STRING, number=1) + + resource = proto.Field( + proto.MESSAGE, number=2, message=monitored_resource.MonitoredResource, + ) + + labels = proto.MapField(proto.STRING, proto.STRING, number=3) + + entries = proto.RepeatedField(proto.MESSAGE, number=4, message=log_entry.LogEntry,) + + partial_success = proto.Field(proto.BOOL, number=5) + + dry_run = proto.Field(proto.BOOL, number=6) + + +class WriteLogEntriesResponse(proto.Message): + r"""Result returned from WriteLogEntries.""" + + +class WriteLogEntriesPartialErrors(proto.Message): + r"""Error details for WriteLogEntries with partial success. + + Attributes: + log_entry_errors (Sequence[~.logging.WriteLogEntriesPartialErrors.LogEntryErrorsEntry]): + When ``WriteLogEntriesRequest.partial_success`` is true, + records the error status for entries that were not written + due to a permanent error, keyed by the entry's zero-based + index in ``WriteLogEntriesRequest.entries``. + + Failed requests for which no entries are written will not + include per-entry errors. 
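+
+    Example of inspecting the per-entry errors (an illustrative sketch;
+    ``partial_errors`` stands for an instance of this message)::
+
+        for index, status in partial_errors.log_entry_errors.items():
+            print(index, status.message)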
+ """ + + log_entry_errors = proto.MapField( + proto.INT32, proto.MESSAGE, number=1, message=status.Status, + ) + + +class ListLogEntriesRequest(proto.Message): + r"""The parameters to ``ListLogEntries``. + + Attributes: + resource_names (Sequence[str]): + Required. Names of one or more parent resources from which + to retrieve log entries: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Projects listed in the ``project_ids`` field are added to + this list. + filter (str): + Optional. A filter that chooses which log entries to return. + See `Advanced Logs + Queries `__. + Only log entries that match the filter are returned. An + empty filter matches all log entries in the resources listed + in ``resource_names``. Referencing a parent resource that is + not listed in ``resource_names`` will cause the filter to + return no results. The maximum length of the filter is 20000 + characters. + order_by (str): + Optional. How the results should be sorted. Presently, the + only permitted values are ``"timestamp asc"`` (default) and + ``"timestamp desc"``. The first option returns entries in + order of increasing values of ``LogEntry.timestamp`` (oldest + first), and the second option returns entries in order of + decreasing timestamps (newest first). Entries with equal + timestamps are returned in order of their ``insert_id`` + values. + page_size (int): + Optional. The maximum number of results to return from this + request. Default is 50. If the value is negative or exceeds + 1000, the request is rejected. The presence of + ``next_page_token`` in the response indicates that more + results might be available. + page_token (str): + Optional. If present, then retrieve the next batch of + results from the preceding call to this method. + ``page_token`` must be the value of ``next_page_token`` from + the previous response. The values of other method parameters + should be identical to those in the previous call. + """ + + resource_names = proto.RepeatedField(proto.STRING, number=8) + + filter = proto.Field(proto.STRING, number=2) + + order_by = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=5) + + +class ListLogEntriesResponse(proto.Message): + r"""Result returned from ``ListLogEntries``. + + Attributes: + entries (Sequence[~.log_entry.LogEntry]): + A list of log entries. If ``entries`` is empty, + ``nextPageToken`` may still be returned, indicating that + more entries may exist. See ``nextPageToken`` for more + information. + next_page_token (str): + If there might be more results than those appearing in this + response, then ``nextPageToken`` is included. To get the + next set of results, call this method again using the value + of ``nextPageToken`` as ``pageToken``. + + If a value for ``next_page_token`` appears and the + ``entries`` field is empty, it means that the search found + no log entries so far but it did not have time to search all + the possible log entries. Retry the method with this value + for ``page_token`` to continue the search. Alternatively, + consider speeding up the search by changing your filter to + specify a single log name or resource type, or to narrow the + time range of the search. 
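+
+    Example of paging through entries manually (an illustrative sketch;
+    ``client`` stands for a ``LoggingServiceV2Client`` and ``handle`` for a
+    placeholder callback; the generated pagers normally run this loop for
+    you)::
+
+        while True:
+            for entry in response.entries:
+                handle(entry)
+            if not response.next_page_token:
+                break
+            request.page_token = response.next_page_token
+            response = client.list_log_entries(request)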
+ """ + + @property + def raw_page(self): + return self + + entries = proto.RepeatedField(proto.MESSAGE, number=1, message=log_entry.LogEntry,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class ListMonitoredResourceDescriptorsRequest(proto.Message): + r"""The parameters to ListMonitoredResourceDescriptors + + Attributes: + page_size (int): + Optional. The maximum number of results to return from this + request. Non-positive values are ignored. The presence of + ``nextPageToken`` in the response indicates that more + results might be available. + page_token (str): + Optional. If present, then retrieve the next batch of + results from the preceding call to this method. + ``pageToken`` must be the value of ``nextPageToken`` from + the previous response. The values of other method parameters + should be identical to those in the previous call. + """ + + page_size = proto.Field(proto.INT32, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListMonitoredResourceDescriptorsResponse(proto.Message): + r"""Result returned from ListMonitoredResourceDescriptors. + + Attributes: + resource_descriptors (Sequence[~.monitored_resource.MonitoredResourceDescriptor]): + A list of resource descriptors. + next_page_token (str): + If there might be more results than those appearing in this + response, then ``nextPageToken`` is included. To get the + next set of results, call this method again using the value + of ``nextPageToken`` as ``pageToken``. + """ + + @property + def raw_page(self): + return self + + resource_descriptors = proto.RepeatedField( + proto.MESSAGE, number=1, message=monitored_resource.MonitoredResourceDescriptor, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class ListLogsRequest(proto.Message): + r"""The parameters to ListLogs. + + Attributes: + parent (str): + Required. The resource name that owns the logs: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + page_size (int): + Optional. The maximum number of results to return from this + request. Non-positive values are ignored. The presence of + ``nextPageToken`` in the response indicates that more + results might be available. + page_token (str): + Optional. If present, then retrieve the next batch of + results from the preceding call to this method. + ``pageToken`` must be the value of ``nextPageToken`` from + the previous response. The values of other method parameters + should be identical to those in the previous call. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListLogsResponse(proto.Message): + r"""Result returned from ListLogs. + + Attributes: + log_names (Sequence[str]): + A list of log names. For example, + ``"projects/my-project/logs/syslog"`` or + ``"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + next_page_token (str): + If there might be more results than those appearing in this + response, then ``nextPageToken`` is included. To get the + next set of results, call this method again using the value + of ``nextPageToken`` as ``pageToken``. 
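+
+    Example (illustrative only)::
+
+        for log_name in response.log_names:
+            print(log_name)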
+ """ + + @property + def raw_page(self): + return self + + log_names = proto.RepeatedField(proto.STRING, number=3) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/logging_v2/types/logging_config.py b/google/cloud/logging_v2/types/logging_config.py new file mode 100644 index 000000000..2161d6872 --- /dev/null +++ b/google/cloud/logging_v2/types/logging_config.py @@ -0,0 +1,960 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.logging.v2", + manifest={ + "LifecycleState", + "LogBucket", + "LogSink", + "BigQueryOptions", + "ListBucketsRequest", + "ListBucketsResponse", + "UpdateBucketRequest", + "GetBucketRequest", + "ListSinksRequest", + "ListSinksResponse", + "GetSinkRequest", + "CreateSinkRequest", + "UpdateSinkRequest", + "DeleteSinkRequest", + "LogExclusion", + "ListExclusionsRequest", + "ListExclusionsResponse", + "GetExclusionRequest", + "CreateExclusionRequest", + "UpdateExclusionRequest", + "DeleteExclusionRequest", + "GetCmekSettingsRequest", + "UpdateCmekSettingsRequest", + "CmekSettings", + }, +) + + +class LifecycleState(proto.Enum): + r"""LogBucket lifecycle states (Beta).""" + LIFECYCLE_STATE_UNSPECIFIED = 0 + ACTIVE = 1 + DELETE_REQUESTED = 2 + + +class LogBucket(proto.Message): + r"""Describes a repository of logs (Beta). + + Attributes: + name (str): + The resource name of the bucket. For example: + "projects/my-project-id/locations/my-location/buckets/my-bucket-id + The supported locations are: "global" "us-central1" + + For the location of ``global`` it is unspecified where logs + are actually stored. Once a bucket has been created, the + location can not be changed. + description (str): + Describes this bucket. + create_time (~.timestamp.Timestamp): + Output only. The creation timestamp of the + bucket. This is not set for any of the default + buckets. + update_time (~.timestamp.Timestamp): + Output only. The last update timestamp of the + bucket. + retention_days (int): + Logs will be retained by default for this + amount of time, after which they will + automatically be deleted. The minimum retention + period is 1 day. If this value is set to zero at + bucket creation time, the default time of 30 + days will be used. + lifecycle_state (~.logging_config.LifecycleState): + Output only. The bucket lifecycle state. 
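+
+    Example (an illustrative sketch; output-only fields such as
+    ``lifecycle_state`` are set by the service, not by the caller)::
+
+        bucket = LogBucket(
+            description="Short-lived debug logs",
+            retention_days=14,
+        )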
+ """ + + name = proto.Field(proto.STRING, number=1) + + description = proto.Field(proto.STRING, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + retention_days = proto.Field(proto.INT32, number=11) + + lifecycle_state = proto.Field(proto.ENUM, number=12, enum="LifecycleState",) + + +class LogSink(proto.Message): + r"""Describes a sink used to export log entries to one of the + following destinations in any project: a Cloud Storage bucket, a + BigQuery dataset, or a Cloud Pub/Sub topic. A logs filter + controls which log entries are exported. The sink must be + created within a project, organization, billing account, or + folder. + + Attributes: + name (str): + Required. The client-assigned sink identifier, unique within + the project. Example: ``"my-syslog-errors-to-pubsub"``. Sink + identifiers are limited to 100 characters and can include + only the following characters: upper and lower-case + alphanumeric characters, underscores, hyphens, and periods. + First character has to be alphanumeric. + destination (str): + Required. The export destination: + + :: + + "storage.googleapis.com/[GCS_BUCKET]" + "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" + "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" + + The sink's ``writer_identity``, set when the sink is + created, must have permission to write to the destination or + else the log entries are not exported. For more information, + see `Exporting Logs with + Sinks `__. + filter (str): + Optional. An `advanced logs + filter `__. + The only exported log entries are those that are in the + resource owning the sink and that match the filter. For + example: + + :: + + logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR + description (str): + Optional. A description of this sink. + The maximum length of the description is 8000 + characters. + disabled (bool): + Optional. If set to True, then this sink is + disabled and it does not export any log entries. + output_version_format (~.logging_config.LogSink.VersionFormat): + Deprecated. This field is unused. + writer_identity (str): + Output only. An IAM identity–a service account or + group—under which Logging writes the exported log entries to + the sink's destination. This field is set by + [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] + and + [sinks.update][google.logging.v2.ConfigServiceV2.UpdateSink] + based on the value of ``unique_writer_identity`` in those + methods. + + Until you grant this identity write-access to the + destination, log entry exports from this sink will fail. For + more information, see `Granting Access for a + Resource `__. + Consult the destination service's documentation to determine + the appropriate IAM roles to assign to the identity. + include_children (bool): + Optional. This field applies only to sinks owned by + organizations and folders. If the field is false, the + default, only the logs owned by the sink's parent resource + are available for export. If the field is true, then logs + from all the projects, folders, and billing accounts + contained in the sink's parent resource are also available + for export. Whether a particular log entry from the children + is exported depends on the sink's filter expression. 
For + example, if this field is true, then the filter + ``resource.type=gce_instance`` would export all Compute + Engine VM instance log entries from all projects in the + sink's parent. To only export entries from certain child + projects, filter on the project part of the log name: + + :: + + logName:("projects/test-project1/" OR "projects/test-project2/") AND + resource.type=gce_instance + bigquery_options (~.logging_config.BigQueryOptions): + Optional. Options that affect sinks exporting + data to BigQuery. + create_time (~.timestamp.Timestamp): + Output only. The creation timestamp of the + sink. + This field may not be present for older sinks. + update_time (~.timestamp.Timestamp): + Output only. The last update timestamp of the + sink. + This field may not be present for older sinks. + """ + + class VersionFormat(proto.Enum): + r"""Deprecated. This is unused.""" + VERSION_FORMAT_UNSPECIFIED = 0 + V2 = 1 + V1 = 2 + + name = proto.Field(proto.STRING, number=1) + + destination = proto.Field(proto.STRING, number=3) + + filter = proto.Field(proto.STRING, number=5) + + description = proto.Field(proto.STRING, number=18) + + disabled = proto.Field(proto.BOOL, number=19) + + output_version_format = proto.Field(proto.ENUM, number=6, enum=VersionFormat,) + + writer_identity = proto.Field(proto.STRING, number=8) + + include_children = proto.Field(proto.BOOL, number=9) + + bigquery_options = proto.Field( + proto.MESSAGE, number=12, oneof="options", message="BigQueryOptions", + ) + + create_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,) + + +class BigQueryOptions(proto.Message): + r"""Options that change functionality of a sink exporting data to + BigQuery. + + Attributes: + use_partitioned_tables (bool): + Optional. Whether to use `BigQuery's partition + tables `__. + By default, Logging creates dated tables based on the log + entries' timestamps, e.g. syslog_20170523. With partitioned + tables the date suffix is no longer present and `special + query + syntax `__ + has to be used instead. In both cases, tables are sharded + based on UTC timezone. + uses_timestamp_column_partitioning (bool): + Output only. True if new timestamp column based partitioning + is in use, false if legacy ingestion-time partitioning is in + use. All new sinks will have this field set true and will + use timestamp column based partitioning. If + use_partitioned_tables is false, this value has no meaning + and will be false. Legacy sinks using partitioned tables + will have this field set to false. + """ + + use_partitioned_tables = proto.Field(proto.BOOL, number=1) + + uses_timestamp_column_partitioning = proto.Field(proto.BOOL, number=3) + + +class ListBucketsRequest(proto.Message): + r"""The parameters to ``ListBuckets`` (Beta). + + Attributes: + parent (str): + Required. The parent resource whose buckets are to be + listed: + + :: + + "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" + "folders/[FOLDER_ID]/locations/[LOCATION_ID]" + + Note: The locations portion of the resource must be + specified, but supplying the character ``-`` in place of + [LOCATION_ID] will return all buckets. + page_token (str): + Optional. If present, then retrieve the next batch of + results from the preceding call to this method. 
+ ``pageToken`` must be the value of ``nextPageToken`` from + the previous response. The values of other method parameters + should be identical to those in the previous call. + page_size (int): + Optional. The maximum number of results to return from this + request. Non-positive values are ignored. The presence of + ``nextPageToken`` in the response indicates that more + results might be available. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + +class ListBucketsResponse(proto.Message): + r"""The response from ListBuckets (Beta). + + Attributes: + buckets (Sequence[~.logging_config.LogBucket]): + A list of buckets. + next_page_token (str): + If there might be more results than appear in this response, + then ``nextPageToken`` is included. To get the next set of + results, call the same method again using the value of + ``nextPageToken`` as ``pageToken``. + """ + + @property + def raw_page(self): + return self + + buckets = proto.RepeatedField(proto.MESSAGE, number=1, message="LogBucket",) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class UpdateBucketRequest(proto.Message): + r"""The parameters to ``UpdateBucket`` (Beta). + + Attributes: + name (str): + Required. The full resource name of the bucket to update. + + :: + + "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + + Example: + ``"projects/my-project-id/locations/my-location/buckets/my-bucket-id"``. + Also requires permission + "resourcemanager.projects.updateLiens" to set the locked + property + bucket (~.logging_config.LogBucket): + Required. The updated bucket. + update_mask (~.field_mask.FieldMask): + Required. Field mask that specifies the fields in ``bucket`` + that need an update. A bucket field will be overwritten if, + and only if, it is in the update mask. ``name`` and output + only fields cannot be updated. + + For a detailed ``FieldMask`` definition, see + https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask + + Example: ``updateMask=retention_days``. + """ + + name = proto.Field(proto.STRING, number=1) + + bucket = proto.Field(proto.MESSAGE, number=2, message="LogBucket",) + + update_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask.FieldMask,) + + +class GetBucketRequest(proto.Message): + r"""The parameters to ``GetBucket`` (Beta). + + Attributes: + name (str): + Required. The resource name of the bucket: + + :: + + "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + + Example: + ``"projects/my-project-id/locations/my-location/buckets/my-bucket-id"``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListSinksRequest(proto.Message): + r"""The parameters to ``ListSinks``. + + Attributes: + parent (str): + Required. The parent resource whose sinks are to be listed: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". 
+ page_token (str): + Optional. If present, then retrieve the next batch of + results from the preceding call to this method. + ``pageToken`` must be the value of ``nextPageToken`` from + the previous response. The values of other method parameters + should be identical to those in the previous call. + page_size (int): + Optional. The maximum number of results to return from this + request. Non-positive values are ignored. The presence of + ``nextPageToken`` in the response indicates that more + results might be available. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + page_size = proto.Field(proto.INT32, number=3) + + +class ListSinksResponse(proto.Message): + r"""Result returned from ``ListSinks``. + + Attributes: + sinks (Sequence[~.logging_config.LogSink]): + A list of sinks. + next_page_token (str): + If there might be more results than appear in this response, + then ``nextPageToken`` is included. To get the next set of + results, call the same method again using the value of + ``nextPageToken`` as ``pageToken``. + """ + + @property + def raw_page(self): + return self + + sinks = proto.RepeatedField(proto.MESSAGE, number=1, message="LogSink",) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetSinkRequest(proto.Message): + r"""The parameters to ``GetSink``. + + Attributes: + sink_name (str): + Required. The resource name of the sink: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + """ + + sink_name = proto.Field(proto.STRING, number=1) + + +class CreateSinkRequest(proto.Message): + r"""The parameters to ``CreateSink``. + + Attributes: + parent (str): + Required. The resource in which to create the sink: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + Examples: ``"projects/my-logging-project"``, + ``"organizations/123456789"``. + sink (~.logging_config.LogSink): + Required. The new sink, whose ``name`` parameter is a sink + identifier that is not already in use. + unique_writer_identity (bool): + Optional. Determines the kind of IAM identity returned as + ``writer_identity`` in the new sink. If this value is + omitted or set to false, and if the sink's parent is a + project, then the value returned as ``writer_identity`` is + the same group or service account used by Logging before the + addition of writer identities to this API. The sink's + destination must be in the same project as the sink itself. + + If this field is set to true, or if the sink is owned by a + non-project resource such as an organization, then the value + of ``writer_identity`` will be a unique service account used + only for exports from the new sink. For more information, + see ``writer_identity`` in + [LogSink][google.logging.v2.LogSink]. + """ + + parent = proto.Field(proto.STRING, number=1) + + sink = proto.Field(proto.MESSAGE, number=2, message="LogSink",) + + unique_writer_identity = proto.Field(proto.BOOL, number=3) + + +class UpdateSinkRequest(proto.Message): + r"""The parameters to ``UpdateSink``. + + Attributes: + sink_name (str): + Required. 
The full resource name of the sink to update, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + sink (~.logging_config.LogSink): + Required. The updated sink, whose name is the same + identifier that appears as part of ``sink_name``. + unique_writer_identity (bool): + Optional. See + [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] + for a description of this field. When updating a sink, the + effect of this field on the value of ``writer_identity`` in + the updated sink depends on both the old and new values of + this field: + + - If the old and new values of this field are both false or + both true, then there is no change to the sink's + ``writer_identity``. + - If the old value is false and the new value is true, then + ``writer_identity`` is changed to a unique service + account. + - It is an error if the old value is true and the new value + is set to false or defaulted to false. + update_mask (~.field_mask.FieldMask): + Optional. Field mask that specifies the fields in ``sink`` + that need an update. A sink field will be overwritten if, + and only if, it is in the update mask. ``name`` and output + only fields cannot be updated. + + An empty updateMask is temporarily treated as using the + following mask for backwards compatibility purposes: + destination,filter,includeChildren At some point in the + future, behavior will be removed and specifying an empty + updateMask will be an error. + + For a detailed ``FieldMask`` definition, see + https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask + + Example: ``updateMask=filter``. + """ + + sink_name = proto.Field(proto.STRING, number=1) + + sink = proto.Field(proto.MESSAGE, number=2, message="LogSink",) + + unique_writer_identity = proto.Field(proto.BOOL, number=3) + + update_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask.FieldMask,) + + +class DeleteSinkRequest(proto.Message): + r"""The parameters to ``DeleteSink``. + + Attributes: + sink_name (str): + Required. The full resource name of the sink to delete, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + """ + + sink_name = proto.Field(proto.STRING, number=1) + + +class LogExclusion(proto.Message): + r"""Specifies a set of log entries that are not to be stored in + Logging. If your GCP resource receives a large volume of logs, + you can use exclusions to reduce your chargeable logs. + Exclusions are processed after log sinks, so you can export log + entries before they are excluded. Note that organization-level + and folder-level exclusions don't apply to child resources, and + that you can't exclude audit log entries. + + Attributes: + name (str): + Required. A client-assigned identifier, such as + ``"load-balancer-exclusion"``. Identifiers are limited to + 100 characters and can include only letters, digits, + underscores, hyphens, and periods. First character has to be + alphanumeric. + description (str): + Optional. A description of this exclusion. + filter (str): + Required. 
An `advanced logs
+            filter `__
+            that matches the log entries to be excluded. By using the
+            `sample
+            function `__,
+            you can exclude less than 100% of the matching log entries.
+            For example, the following query matches 99% of low-severity
+            log entries from Google Cloud Storage buckets:
+
+            ``"resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)"``
+        disabled (bool):
+            Optional. If set to True, then this exclusion is
+            disabled and it does not exclude any log entries.
+            You can [update an
+            exclusion][google.logging.v2.ConfigServiceV2.UpdateExclusion]
+            to change the value of this field.
+        create_time (~.timestamp.Timestamp):
+            Output only. The creation timestamp of the
+            exclusion.
+            This field may not be present for older
+            exclusions.
+        update_time (~.timestamp.Timestamp):
+            Output only. The last update timestamp of the
+            exclusion.
+            This field may not be present for older
+            exclusions.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    description = proto.Field(proto.STRING, number=2)
+
+    filter = proto.Field(proto.STRING, number=3)
+
+    disabled = proto.Field(proto.BOOL, number=4)
+
+    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,)
+
+    update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp.Timestamp,)
+
+
+class ListExclusionsRequest(proto.Message):
+    r"""The parameters to ``ListExclusions``.
+
+    Attributes:
+        parent (str):
+            Required. The parent resource whose exclusions are to be
+            listed:
+
+            ::
+
+                "projects/[PROJECT_ID]"
+                "organizations/[ORGANIZATION_ID]"
+                "billingAccounts/[BILLING_ACCOUNT_ID]"
+                "folders/[FOLDER_ID]".
+        page_token (str):
+            Optional. If present, then retrieve the next batch of
+            results from the preceding call to this method.
+            ``pageToken`` must be the value of ``nextPageToken`` from
+            the previous response. The values of other method parameters
+            should be identical to those in the previous call.
+        page_size (int):
+            Optional. The maximum number of results to return from this
+            request. Non-positive values are ignored. The presence of
+            ``nextPageToken`` in the response indicates that more
+            results might be available.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    page_token = proto.Field(proto.STRING, number=2)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+
+class ListExclusionsResponse(proto.Message):
+    r"""Result returned from ``ListExclusions``.
+
+    Attributes:
+        exclusions (Sequence[~.logging_config.LogExclusion]):
+            A list of exclusions.
+        next_page_token (str):
+            If there might be more results than appear in this response,
+            then ``nextPageToken`` is included. To get the next set of
+            results, call the same method again using the value of
+            ``nextPageToken`` as ``pageToken``.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    exclusions = proto.RepeatedField(proto.MESSAGE, number=1, message="LogExclusion",)
+
+    next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class GetExclusionRequest(proto.Message):
+    r"""The parameters to ``GetExclusion``.
+
+    Attributes:
+        name (str):
+            Required. The resource name of an existing exclusion:
+
+            ::
+
+                "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]"
+                "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]"
+                "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]"
+                "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]"
+
+            Example: ``"projects/my-project-id/exclusions/my-exclusion-id"``.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class CreateExclusionRequest(proto.Message):
+    r"""The parameters to ``CreateExclusion``.
+
+    Attributes:
+        parent (str):
+            Required. The parent resource in which to create the
+            exclusion:
+
+            ::
+
+                "projects/[PROJECT_ID]"
+                "organizations/[ORGANIZATION_ID]"
+                "billingAccounts/[BILLING_ACCOUNT_ID]"
+                "folders/[FOLDER_ID]"
+
+            Examples: ``"projects/my-logging-project"``,
+            ``"organizations/123456789"``.
+        exclusion (~.logging_config.LogExclusion):
+            Required. The new exclusion, whose ``name`` parameter is an
+            exclusion name that is not already used in the parent
+            resource.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    exclusion = proto.Field(proto.MESSAGE, number=2, message="LogExclusion",)
+
+
+class UpdateExclusionRequest(proto.Message):
+    r"""The parameters to ``UpdateExclusion``.
+
+    Attributes:
+        name (str):
+            Required. The resource name of the exclusion to update:
+
+            ::
+
+                "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]"
+                "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]"
+                "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]"
+                "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]"
+
+            Example: ``"projects/my-project-id/exclusions/my-exclusion-id"``.
+        exclusion (~.logging_config.LogExclusion):
+            Required. New values for the existing exclusion. Only the
+            fields specified in ``update_mask`` are relevant.
+        update_mask (~.field_mask.FieldMask):
+            Required. A non-empty list of fields to change in the
+            existing exclusion. New values for the fields are taken from
+            the corresponding fields in the
+            [LogExclusion][google.logging.v2.LogExclusion] included in
+            this request. Fields not mentioned in ``update_mask`` are
+            not changed and are ignored in the request.
+
+            For example, to change the filter and description of an
+            exclusion, specify an ``update_mask`` of
+            ``"filter,description"``.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    exclusion = proto.Field(proto.MESSAGE, number=2, message="LogExclusion",)
+
+    update_mask = proto.Field(proto.MESSAGE, number=3, message=field_mask.FieldMask,)
+
+
+class DeleteExclusionRequest(proto.Message):
+    r"""The parameters to ``DeleteExclusion``.
+
+    Attributes:
+        name (str):
+            Required. The resource name of an existing exclusion to
+            delete:
+
+            ::
+
+                "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]"
+                "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]"
+                "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]"
+                "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]"
+
+            Example: ``"projects/my-project-id/exclusions/my-exclusion-id"``.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class GetCmekSettingsRequest(proto.Message):
+    r"""The parameters to
+    [GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings].
+
+    See `Enabling CMEK for Logs
+    Router `__
+    for more information.
+
+    Attributes:
+        name (str):
+            Required. The resource for which to retrieve CMEK settings.
+
+            ::
+
+                "projects/[PROJECT_ID]/cmekSettings"
+                "organizations/[ORGANIZATION_ID]/cmekSettings"
+                "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings"
+                "folders/[FOLDER_ID]/cmekSettings"
+
+            Example: ``"organizations/12345/cmekSettings"``.
+
+            Note: CMEK for the Logs Router can currently only be
+            configured for GCP organizations. Once configured, it
+            applies to all projects and folders in the GCP organization.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+
+class UpdateCmekSettingsRequest(proto.Message):
+    r"""The parameters to
+    [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings].
+
+    See `Enabling CMEK for Logs
+    Router `__
+    for more information.
+
+    Attributes:
+        name (str):
+            Required. The resource name for the CMEK settings to update.
+
+            ::
+
+                "projects/[PROJECT_ID]/cmekSettings"
+                "organizations/[ORGANIZATION_ID]/cmekSettings"
+                "billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings"
+                "folders/[FOLDER_ID]/cmekSettings"
+
+            Example: ``"organizations/12345/cmekSettings"``.
+
+            Note: CMEK for the Logs Router can currently only be
+            configured for GCP organizations. Once configured, it
+            applies to all projects and folders in the GCP organization.
+        cmek_settings (~.logging_config.CmekSettings):
+            Required. The CMEK settings to update.
+
+            See `Enabling CMEK for Logs
+            Router `__
+            for more information.
+        update_mask (~.field_mask.FieldMask):
+            Optional. Field mask identifying which fields from
+            ``cmek_settings`` should be updated. A field will be
+            overwritten if and only if it is in the update mask. Output
+            only fields cannot be updated.
+
+            See [FieldMask][google.protobuf.FieldMask] for more
+            information.
+
+            Example: ``"updateMask=kmsKeyName"``
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    cmek_settings = proto.Field(proto.MESSAGE, number=2, message="CmekSettings",)
+
+    update_mask = proto.Field(proto.MESSAGE, number=3, message=field_mask.FieldMask,)
+
+
+class CmekSettings(proto.Message):
+    r"""Describes the customer-managed encryption key (CMEK) settings
+    associated with a project, folder, organization, billing account, or
+    flexible resource.
+
+    Note: CMEK for the Logs Router can currently only be configured for
+    GCP organizations. Once configured, it applies to all projects and
+    folders in the GCP organization.
+
+    See `Enabling CMEK for Logs
+    Router `__
+    for more information.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the CMEK
+            settings.
+        kms_key_name (str):
+            The resource name for the configured Cloud KMS key.
+
+            KMS key name format:
+            "projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]"
+
+            For example:
+            ``"projects/my-project-id/locations/my-region/keyRings/key-ring-name/cryptoKeys/key-name"``
+
+            To enable CMEK for the Logs Router, set this field to a
+            valid ``kms_key_name`` for which the associated service
+            account has the required
+            ``roles/cloudkms.cryptoKeyEncrypterDecrypter`` role assigned
+            for the key.
+
+            The Cloud KMS key used by the Log Router can be updated by
+            changing the ``kms_key_name`` to a new valid key name.
+            Encryption operations that are in progress will be completed
+            with the key that was in use when they started. Decryption
+            operations will be completed using the key that was used at
+            the time of encryption unless access to that key has been
+            revoked.
+
+            To disable CMEK for the Logs Router, set this field to an
+            empty string.
+
+            See `Enabling CMEK for Logs
+            Router `__
+            for more information.
+        service_account_id (str):
+            Output only. The service account that will be used by the
+            Logs Router to access your Cloud KMS key.
+
+            Before enabling CMEK for Logs Router, you must first assign
+            the role ``roles/cloudkms.cryptoKeyEncrypterDecrypter`` to
+            the service account that the Logs Router will use to access
+            your Cloud KMS key. Use
+            [GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings]
+            to obtain the service account ID.
+
+            See `Enabling CMEK for Logs
+            Router `__
+            for more information.
+    """
+
+    name = proto.Field(proto.STRING, number=1)
+
+    kms_key_name = proto.Field(proto.STRING, number=2)
+
+    service_account_id = proto.Field(proto.STRING, number=3)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/logging_v2/types/logging_metrics.py b/google/cloud/logging_v2/types/logging_metrics.py
new file mode 100644
index 000000000..2f7c5b472
--- /dev/null
+++ b/google/cloud/logging_v2/types/logging_metrics.py
@@ -0,0 +1,327 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto  # type: ignore
+
+
+from google.api import distribution_pb2 as distribution  # type: ignore
+from google.api import metric_pb2 as ga_metric  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.logging.v2",
+    manifest={
+        "LogMetric",
+        "ListLogMetricsRequest",
+        "ListLogMetricsResponse",
+        "GetLogMetricRequest",
+        "CreateLogMetricRequest",
+        "UpdateLogMetricRequest",
+        "DeleteLogMetricRequest",
+    },
+)
+
+
+class LogMetric(proto.Message):
+    r"""Describes a logs-based metric. The value of the metric is the
+    number of log entries that match a logs filter in a given time
+    interval.
+    A logs-based metric can also be used to extract values from logs
+    and create a distribution of the values. The distribution
+    records the statistics of the extracted values along with an
+    optional histogram of the values as specified by the bucket
+    options.
+
+    Attributes:
+        name (str):
+            Required. The client-assigned metric identifier. Examples:
+            ``"error_count"``, ``"nginx/requests"``.
+
+            Metric identifiers are limited to 100 characters and can
+            include only the following characters: ``A-Z``, ``a-z``,
+            ``0-9``, and the special characters ``_-.,+!*',()%/``. The
+            forward-slash character (``/``) denotes a hierarchy of name
+            pieces, and it cannot be the first character of the name.
+
+            The metric identifier in this field must not be
+            `URL-encoded `__.
+            However, when the metric identifier appears as the
+            ``[METRIC_ID]`` part of a ``metric_name`` API parameter,
+            then the metric identifier must be URL-encoded. Example:
+            ``"projects/my-project/metrics/nginx%2Frequests"``.
+        description (str):
+            Optional. A description of this metric, which
+            is used in documentation. The maximum length of
+            the description is 8000 characters.
+        filter (str):
+            Required. An `advanced logs
+            filter `__
+            which is used to match log entries. Example:
+
+            ::
+
+                "resource.type=gae_app AND severity>=ERROR"
+
+            The maximum length of the filter is 20000 characters.
+        metric_descriptor (~.ga_metric.MetricDescriptor):
+            Optional. The metric descriptor associated with the
+            logs-based metric. If unspecified, it uses a default metric
+            descriptor with a DELTA metric kind, INT64 value type, with
+            no labels and a unit of "1". Such a metric counts the number
+            of log entries matching the ``filter`` expression.
+
+            The ``name``, ``type``, and ``description`` fields in the
+            ``metric_descriptor`` are output only, and are constructed
+            using the ``name`` and ``description`` fields in the
+            LogMetric.
+
+            To create a logs-based metric that records a distribution of
+            log values, a DELTA metric kind with a DISTRIBUTION value
+            type must be used along with a ``value_extractor``
+            expression in the LogMetric.
+
+            Each label in the metric descriptor must have a matching
+            label name as the key and an extractor expression as the
+            value in the ``label_extractors`` map.
+
+            The ``metric_kind`` and ``value_type`` fields in the
+            ``metric_descriptor`` cannot be updated once initially
+            configured. New labels can be added in the
+            ``metric_descriptor``, but existing labels cannot be
+            modified except for their description.
+        value_extractor (str):
+            Optional. A ``value_extractor`` is required when using a
+            distribution logs-based metric to extract the values to
+            record from a log entry. Two functions are supported for
+            value extraction: ``EXTRACT(field)`` or
+            ``REGEXP_EXTRACT(field, regex)``. The arguments are:
+
+            1. field: The name of the log entry field from which the
+               value is to be extracted.
+            2. regex: A regular expression using the Google RE2 syntax
+               (https://github.com/google/re2/wiki/Syntax) with a single
+               capture group to extract data from the specified log
+               entry field. The value of the field is converted to a
+               string before applying the regex. It is an error to
+               specify a regex that does not include exactly one capture
+               group.
+
+            The result of the extraction must be convertible to a double
+            type, as the distribution always records double values. If
+            either the extraction or the conversion to double fails,
+            then those values are not recorded in the distribution.
+
+            Example:
+            ``REGEXP_EXTRACT(jsonPayload.request, ".*quantity=(\d+).*")``
+        label_extractors (Sequence[~.logging_metrics.LogMetric.LabelExtractorsEntry]):
+            Optional. A map from a label key string to an extractor
+            expression which is used to extract data from a log entry
+            field and assign as the label value. Each label key
+            specified in the LabelDescriptor must have an associated
+            extractor expression in this map. The syntax of the
+            extractor expression is the same as for the
+            ``value_extractor`` field.
+
+            The extracted value is converted to the type defined in the
+            label descriptor. If either the extraction or the type
+            conversion fails, the label will have a default value. The
+
+            Note that there are upper bounds on the maximum number of
+            labels and the number of active time series that are allowed
+            in a project.
+        bucket_options (~.distribution.Distribution.BucketOptions):
+            Optional. The ``bucket_options`` are required when the
+            logs-based metric is using a DISTRIBUTION value type and it
+            describes the bucket boundaries used to create a histogram
+            of the extracted values.
+        create_time (~.timestamp.Timestamp):
+            Output only. The creation timestamp of the
+            metric.
+            This field may not be present for older metrics.
+        update_time (~.timestamp.Timestamp):
+            Output only. The last update timestamp of the
+            metric.
+            This field may not be present for older metrics.
+        version (~.logging_metrics.LogMetric.ApiVersion):
+            Deprecated. The API version that created or
+            updated this metric. The v2 format is used by
+            default and cannot be changed.
+    """
+
+    class ApiVersion(proto.Enum):
+        r"""Logging API version."""
+        V2 = 0
+        V1 = 1
+
+    name = proto.Field(proto.STRING, number=1)
+
+    description = proto.Field(proto.STRING, number=2)
+
+    filter = proto.Field(proto.STRING, number=3)
+
+    metric_descriptor = proto.Field(
+        proto.MESSAGE, number=5, message=ga_metric.MetricDescriptor,
+    )
+
+    value_extractor = proto.Field(proto.STRING, number=6)
+
+    label_extractors = proto.MapField(proto.STRING, proto.STRING, number=7)
+
+    bucket_options = proto.Field(
+        proto.MESSAGE, number=8, message=distribution.Distribution.BucketOptions,
+    )
+
+    create_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
+
+    update_time = proto.Field(proto.MESSAGE, number=10, message=timestamp.Timestamp,)
+
+    version = proto.Field(proto.ENUM, number=4, enum=ApiVersion,)
+
+
+class ListLogMetricsRequest(proto.Message):
+    r"""The parameters to ListLogMetrics.
+
+    Attributes:
+        parent (str):
+            Required. The name of the project containing the metrics:
+
+            ::
+
+                "projects/[PROJECT_ID]".
+        page_token (str):
+            Optional. If present, then retrieve the next batch of
+            results from the preceding call to this method.
+            ``pageToken`` must be the value of ``nextPageToken`` from
+            the previous response. The values of other method parameters
+            should be identical to those in the previous call.
+        page_size (int):
+            Optional. The maximum number of results to return from this
+            request. Non-positive values are ignored. The presence of
+            ``nextPageToken`` in the response indicates that more
+            results might be available.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    page_token = proto.Field(proto.STRING, number=2)
+
+    page_size = proto.Field(proto.INT32, number=3)
+
+
+class ListLogMetricsResponse(proto.Message):
+    r"""Result returned from ListLogMetrics.
+
+    Attributes:
+        metrics (Sequence[~.logging_metrics.LogMetric]):
+            A list of logs-based metrics.
+        next_page_token (str):
+            If there might be more results than appear in this response,
+            then ``nextPageToken`` is included. To get the next set of
+            results, call this method again using the value of
+            ``nextPageToken`` as ``pageToken``.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    metrics = proto.RepeatedField(proto.MESSAGE, number=1, message="LogMetric",)
+
+    next_page_token = proto.Field(proto.STRING, number=2)
+
+
+class GetLogMetricRequest(proto.Message):
+    r"""The parameters to GetLogMetric.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource name of the desired metric:
+
+            ::
+
+                "projects/[PROJECT_ID]/metrics/[METRIC_ID]".
+    """
+
+    metric_name = proto.Field(proto.STRING, number=1)
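Read alongside ``LogMetric`` above, these request messages map directly onto client methods. The following minimal sketch, which is not part of the diff (the project name, metric name, and filter are illustrative), creates a simple counter metric using the ``request=`` calling convention this change standardizes on::

    from google.cloud.logging_v2.services.metrics_service_v2 import MetricsServiceV2Client

    client = MetricsServiceV2Client()

    # With no metric_descriptor supplied, the default DELTA/INT64
    # descriptor is used, so this simply counts matching log entries.
    metric = client.create_log_metric(
        request={
            "parent": "projects/my-project",
            "metric": {
                "name": "error_count",
                "filter": "resource.type=gae_app AND severity>=ERROR",
            },
        }
    )

``update_log_metric`` takes the same ``metric`` payload keyed by ``metric_name`` and, per ``UpdateLogMetricRequest`` below, upserts: if the metric does not yet exist in the project, it is created.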
+
+
+class CreateLogMetricRequest(proto.Message):
+    r"""The parameters to CreateLogMetric.
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the project in which to
+            create the metric:
+
+            ::
+
+                "projects/[PROJECT_ID]"
+
+            The new metric must be provided in the request.
+        metric (~.logging_metrics.LogMetric):
+            Required. The new logs-based metric, which
+            must not have an identifier that already exists.
+    """
+
+    parent = proto.Field(proto.STRING, number=1)
+
+    metric = proto.Field(proto.MESSAGE, number=2, message="LogMetric",)
+
+
+class UpdateLogMetricRequest(proto.Message):
+    r"""The parameters to UpdateLogMetric.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource name of the metric to update:
+
+            ::
+
+                "projects/[PROJECT_ID]/metrics/[METRIC_ID]"
+
+            The updated metric must be provided in the request and its
+            ``name`` field must be the same as ``[METRIC_ID]``. If the
+            metric does not exist in ``[PROJECT_ID]``, then a new metric
+            is created.
+        metric (~.logging_metrics.LogMetric):
+            Required. The updated metric.
+    """
+
+    metric_name = proto.Field(proto.STRING, number=1)
+
+    metric = proto.Field(proto.MESSAGE, number=2, message="LogMetric",)
+
+
+class DeleteLogMetricRequest(proto.Message):
+    r"""The parameters to DeleteLogMetric.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource name of the metric to delete:
+
+            ::
+
+                "projects/[PROJECT_ID]/metrics/[METRIC_ID]".
+    """
+
+    metric_name = proto.Field(proto.STRING, number=1)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 000000000..4505b4854
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
diff --git a/noxfile.py b/noxfile.py
index 9cc3ab77f..ca45b2c40 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -28,7 +28,7 @@
 DEFAULT_PYTHON_VERSION = "3.8"
 SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.5", "3.6", "3.7", "3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]

 @nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -70,6 +70,8 @@ def lint_setup_py(session):

 def default(session):
     # Install all test dependencies, then install this package in-place.
+    session.install("asyncmock", "pytest-asyncio")
+
     session.install("mock", "pytest", "pytest-cov", "flask", "webob", "django")
     session.install("-e", ".")
diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh
index ff599eb2a..21f6d2a26 100755
--- a/scripts/decrypt-secrets.sh
+++ b/scripts/decrypt-secrets.sh
@@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" )
 # Work from the project root.
 cd $ROOT

+# Prevent the script from overwriting existing files.
+# We recommend that sample authors use their own service account files and cloud project.
+# In that case, they are expected to prepare these files themselves.
+if [[ -f "testing/test-env.sh" ]] || \
+   [[ -f "testing/service-account.json" ]] || \
+   [[ -f "testing/client-secrets.json" ]]; then
+    echo "One or more target files exist, aborting."
+    exit 1
+fi
+
+# Use SECRET_MANAGER_PROJECT if set; fall back to cloud-devrel-kokoro-resources.
PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/scripts/fixup_logging_v2_keywords.py b/scripts/fixup_logging_v2_keywords.py new file mode 100644 index 000000000..c570c0883 --- /dev/null +++ b/scripts/fixup_logging_v2_keywords.py @@ -0,0 +1,203 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class loggingCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'create_exclusion': ('parent', 'exclusion', ), + 'create_log_metric': ('parent', 'metric', ), + 'create_sink': ('parent', 'sink', 'unique_writer_identity', ), + 'delete_exclusion': ('name', ), + 'delete_log': ('log_name', ), + 'delete_log_metric': ('metric_name', ), + 'delete_sink': ('sink_name', ), + 'get_bucket': ('name', ), + 'get_cmek_settings': ('name', ), + 'get_exclusion': ('name', ), + 'get_log_metric': ('metric_name', ), + 'get_sink': ('sink_name', ), + 'list_buckets': ('parent', 'page_token', 'page_size', ), + 'list_exclusions': ('parent', 'page_token', 'page_size', ), + 'list_log_entries': ('resource_names', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_log_metrics': ('parent', 'page_token', 'page_size', ), + 'list_logs': ('parent', 'page_size', 'page_token', ), + 'list_monitored_resource_descriptors': ('page_size', 'page_token', ), + 'list_sinks': ('parent', 'page_token', 'page_size', ), + 'update_bucket': ('name', 'bucket', 'update_mask', ), + 'update_cmek_settings': ('name', 'cmek_settings', 'update_mask', ), + 'update_exclusion': ('name', 'exclusion', 'update_mask', ), + 'update_log_metric': ('metric_name', 'metric', ), + 'update_sink': ('sink_name', 'sink', 'unique_writer_identity', 'update_mask', ), + 'write_log_entries': ('entries', 'log_name', 'resource', 'labels', 'partial_success', 'dry_run', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + 
+        except (AttributeError, KeyError):
+            # Either not a method from the API or too convoluted to be sure.
+            return updated
+
+        # If the existing code is valid, keyword args come after positional args.
+        # Therefore, all positional args must map to the first parameters.
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=loggingCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the logging client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates at a best-effort level at converting positional
+      parameters in client method calls to keyword based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also produce
+      false positives when an API method shares a name with another method.
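+
+      As an illustration (the path below is hypothetical), a call such as
+
+          client.get_log_metric('projects/my-project/metrics/my-metric')
+
+      is rewritten to
+
+          client.get_log_metric(request={'metric_name': 'projects/my-project/metrics/my-metric'})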
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/synth.metadata b/synth.metadata index 70f91ca29..8c0cf4d5f 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,30 +3,30 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-logging.git", - "sha": "98029b5a0d997963a7a30758933e0cc8ee8f5127" + "remote": "git@github.com:googleapis/python-logging", + "sha": "231474afcd5a84549d68e8b7ae9c0c912b43431b" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "fd31b1600fc496d6127665d29f095371d985c637", - "internalRef": "336344634" + "sha": "7f31f40209008ad24058579e7112e45fc9d5715e", + "internalRef": "339939234" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "befc24dcdeb8e57ec1259826fd33120b05137e8f" + "sha": "b19b401571e77192f8dd38eab5fb2300a0de9324" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "befc24dcdeb8e57ec1259826fd33120b05137e8f" + "sha": "b19b401571e77192f8dd38eab5fb2300a0de9324" } } ], diff --git a/synth.py b/synth.py index 9965d9b69..1c87eac1f 100644 --- a/synth.py +++ b/synth.py @@ -30,13 +30,17 @@ include_protos=True, ) -# the structure of the logging directory is a bit different, so manually copy the protos -s.move(library / "google/cloud/logging_v2/proto", "google/cloud/logging_v2/proto") - -s.move(library / "google/cloud/logging_v2/gapic") -s.move(library / "tests/unit/gapic/v2") -# Don't include gapic library docs. Users should use the hand-written layer instead -# s.move(library / "docs/gapic/v2") +s.move( + library, + excludes=[ + "setup.py", + "README.rst", + "docs/index.rst", + "docs/multiprocessing.rst", + "docs/logging_v2", # Don't include gapic library docs. 
Users should use the hand-written layer instead + "scripts/fixup_logging_v2_keywords.py", # don't include script since it only works for generated layer + ], +) # ---------------------------------------------------------------------------- # Add templated files @@ -44,19 +48,14 @@ templated_files = common.py_library( unit_cov_level=95, cov_level=99, - system_test_python_versions = ['3.8'], - unit_test_python_versions = ['3.5', '3.6', '3.7', '3.8'], - system_test_external_dependencies = [ - 'google-cloud-bigquery', - 'google-cloud-pubsub', - 'google-cloud-storage', - 'google-cloud-testutils' - ], - unit_test_external_dependencies = [ - 'flask', - 'webob', - 'django' + microgenerator=True, + system_test_external_dependencies=[ + "google-cloud-bigquery", + "google-cloud-pubsub", + "google-cloud-storage", + "google-cloud-testutils", ], + unit_test_external_dependencies=["flask", "webob", "django"], samples=True, ) s.move(templated_files, excludes=[".coveragerc"]) @@ -67,4 +66,4 @@ python.py_samples() -s.shell.run(["nox", "-s", "blacken"], hide_output=False) \ No newline at end of file +s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/tests/unit/gapic/logging_v2/__init__.py b/tests/unit/gapic/logging_v2/__init__.py new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/tests/unit/gapic/logging_v2/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/logging_v2/test_config_service_v2.py b/tests/unit/gapic/logging_v2/test_config_service_v2.py new file mode 100644 index 000000000..469684436 --- /dev/null +++ b/tests/unit/gapic/logging_v2/test_config_service_v2.py @@ -0,0 +1,4382 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.logging_v2.services.config_service_v2 import ( + ConfigServiceV2AsyncClient, +) +from google.cloud.logging_v2.services.config_service_v2 import ConfigServiceV2Client +from google.cloud.logging_v2.services.config_service_v2 import pagers +from google.cloud.logging_v2.services.config_service_v2 import transports +from google.cloud.logging_v2.types import logging_config +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ConfigServiceV2Client._get_default_mtls_endpoint(None) is None + assert ( + ConfigServiceV2Client._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ConfigServiceV2Client._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ConfigServiceV2Client._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ConfigServiceV2Client._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ConfigServiceV2Client._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [ConfigServiceV2Client, ConfigServiceV2AsyncClient] +) +def test_config_service_v2_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "logging.googleapis.com:443" + + +def test_config_service_v2_client_get_transport_class(): + transport = ConfigServiceV2Client.get_transport_class() + assert transport == transports.ConfigServiceV2GrpcTransport + + transport = ConfigServiceV2Client.get_transport_class("grpc") + assert transport == transports.ConfigServiceV2GrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ConfigServiceV2Client, transports.ConfigServiceV2GrpcTransport, "grpc"), + ( + ConfigServiceV2AsyncClient, + transports.ConfigServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + ConfigServiceV2Client, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConfigServiceV2Client), +) +@mock.patch.object( + ConfigServiceV2AsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConfigServiceV2AsyncClient), +) +def test_config_service_v2_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ConfigServiceV2Client, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ConfigServiceV2Client, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + ConfigServiceV2Client, + transports.ConfigServiceV2GrpcTransport, + "grpc", + "true", + ), + ( + ConfigServiceV2AsyncClient, + transports.ConfigServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + ConfigServiceV2Client, + transports.ConfigServiceV2GrpcTransport, + "grpc", + "false", + ), + ( + ConfigServiceV2AsyncClient, + transports.ConfigServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + ConfigServiceV2Client, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConfigServiceV2Client), +) +@mock.patch.object( + ConfigServiceV2AsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ConfigServiceV2AsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_config_service_v2_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ConfigServiceV2Client, transports.ConfigServiceV2GrpcTransport, "grpc"), + ( + ConfigServiceV2AsyncClient, + transports.ConfigServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_config_service_v2_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ConfigServiceV2Client, transports.ConfigServiceV2GrpcTransport, "grpc"), + ( + ConfigServiceV2AsyncClient, + transports.ConfigServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_config_service_v2_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_config_service_v2_client_client_options_from_dict(): + with mock.patch( + "google.cloud.logging_v2.services.config_service_v2.transports.ConfigServiceV2GrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ConfigServiceV2Client( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_buckets( + transport: str = "grpc", request_type=logging_config.ListBucketsRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListBucketsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.ListBucketsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListBucketsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_buckets_from_dict(): + test_list_buckets(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_buckets_async( + transport: str = "grpc_asyncio", request_type=logging_config.ListBucketsRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListBucketsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.ListBucketsRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListBucketsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_buckets_async_from_dict(): + await test_list_buckets_async(request_type=dict) + + +def test_list_buckets_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.ListBucketsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + call.return_value = logging_config.ListBucketsResponse() + + client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_buckets_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.ListBucketsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListBucketsResponse() + ) + + await client.list_buckets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_buckets_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_buckets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListBucketsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_buckets(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_buckets_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_buckets( + logging_config.ListBucketsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_buckets_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.list_buckets), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_config.ListBucketsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_buckets(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_buckets_flattened_error_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_buckets(
+            logging_config.ListBucketsRequest(), parent="parent_value",
+        )
+
+
+def test_list_buckets_pager():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_buckets), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListBucketsResponse(
+                buckets=[
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListBucketsResponse(buckets=[], next_page_token="def",),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(),], next_page_token="ghi",
+            ),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(), logging_config.LogBucket(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_buckets(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, logging_config.LogBucket) for i in results)
+
+
+def test_list_buckets_pages():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_buckets), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListBucketsResponse(
+                buckets=[
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListBucketsResponse(buckets=[], next_page_token="def",),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(),], next_page_token="ghi",
+            ),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(), logging_config.LogBucket(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_buckets(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_buckets_async_pager():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_buckets), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListBucketsResponse(
+                buckets=[
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListBucketsResponse(buckets=[], next_page_token="def",),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(),], next_page_token="ghi",
+            ),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(), logging_config.LogBucket(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_buckets(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, logging_config.LogBucket) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_buckets_async_pages():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_buckets), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListBucketsResponse(
+                buckets=[
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                    logging_config.LogBucket(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListBucketsResponse(buckets=[], next_page_token="def",),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(),], next_page_token="ghi",
+            ),
+            logging_config.ListBucketsResponse(
+                buckets=[logging_config.LogBucket(), logging_config.LogBucket(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_buckets(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_get_bucket(
+    transport: str = "grpc", request_type=logging_config.GetBucketRequest
+):
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_bucket), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging_config.LogBucket(
+            name="name_value",
+            description="description_value",
+            retention_days=1512,
+            lifecycle_state=logging_config.LifecycleState.ACTIVE,
+        )
+
+        response = client.get_bucket(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == logging_config.GetBucketRequest()
+
+        # Establish that the response is the type that we expect.
+ + assert isinstance(response, logging_config.LogBucket) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.retention_days == 1512 + + assert response.lifecycle_state == logging_config.LifecycleState.ACTIVE + + +def test_get_bucket_from_dict(): + test_get_bucket(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_bucket_async( + transport: str = "grpc_asyncio", request_type=logging_config.GetBucketRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogBucket( + name="name_value", + description="description_value", + retention_days=1512, + lifecycle_state=logging_config.LifecycleState.ACTIVE, + ) + ) + + response = await client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.GetBucketRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogBucket) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.retention_days == 1512 + + assert response.lifecycle_state == logging_config.LifecycleState.ACTIVE + + +@pytest.mark.asyncio +async def test_get_bucket_async_from_dict(): + await test_get_bucket_async(request_type=dict) + + +def test_get_bucket_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetBucketRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + call.return_value = logging_config.LogBucket() + + client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_bucket_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetBucketRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_bucket), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogBucket() + ) + + await client.get_bucket(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_bucket( + transport: str = "grpc", request_type=logging_config.UpdateBucketRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogBucket( + name="name_value", + description="description_value", + retention_days=1512, + lifecycle_state=logging_config.LifecycleState.ACTIVE, + ) + + response = client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateBucketRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_config.LogBucket) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.retention_days == 1512 + + assert response.lifecycle_state == logging_config.LifecycleState.ACTIVE + + +def test_update_bucket_from_dict(): + test_update_bucket(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_bucket_async( + transport: str = "grpc_asyncio", request_type=logging_config.UpdateBucketRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogBucket( + name="name_value", + description="description_value", + retention_days=1512, + lifecycle_state=logging_config.LifecycleState.ACTIVE, + ) + ) + + response = await client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateBucketRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogBucket) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.retention_days == 1512 + + assert response.lifecycle_state == logging_config.LifecycleState.ACTIVE + + +@pytest.mark.asyncio +async def test_update_bucket_async_from_dict(): + await test_update_bucket_async(request_type=dict) + + +def test_update_bucket_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = logging_config.UpdateBucketRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + call.return_value = logging_config.LogBucket() + + client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_bucket_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateBucketRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_bucket), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogBucket() + ) + + await client.update_bucket(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_list_sinks( + transport: str = "grpc", request_type=logging_config.ListSinksRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sinks), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListSinksResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_sinks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.ListSinksRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListSinksPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_sinks_from_dict(): + test_list_sinks(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_sinks_async( + transport: str = "grpc_asyncio", request_type=logging_config.ListSinksRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sinks), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListSinksResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_sinks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.ListSinksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSinksAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_sinks_async_from_dict(): + await test_list_sinks_async(request_type=dict) + + +def test_list_sinks_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.ListSinksRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sinks), "__call__") as call: + call.return_value = logging_config.ListSinksResponse() + + client.list_sinks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_sinks_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.ListSinksRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sinks), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListSinksResponse() + ) + + await client.list_sinks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_sinks_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_sinks), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListSinksResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_sinks(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_sinks_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_sinks(
+            logging_config.ListSinksRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_sinks_flattened_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_sinks), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging_config.ListSinksResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_config.ListSinksResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_sinks(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_sinks_flattened_error_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_sinks(
+            logging_config.ListSinksRequest(), parent="parent_value",
+        )
+
+
+def test_list_sinks_pager():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_sinks), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListSinksResponse(
+                sinks=[
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListSinksResponse(sinks=[], next_page_token="def",),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(),], next_page_token="ghi",
+            ),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(), logging_config.LogSink(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_sinks(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, logging_config.LogSink) for i in results)
+
+
+def test_list_sinks_pages():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_sinks), "__call__") as call:
+        # Set the response to a series of pages.
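+        # Each element of side_effect is consumed by one call to the mocked
+        # method; the trailing RuntimeError is raised if the pager ever
+        # requests a page beyond the final response.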
+        call.side_effect = (
+            logging_config.ListSinksResponse(
+                sinks=[
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListSinksResponse(sinks=[], next_page_token="def",),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(),], next_page_token="ghi",
+            ),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(), logging_config.LogSink(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_sinks(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_sinks_async_pager():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_sinks), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListSinksResponse(
+                sinks=[
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListSinksResponse(sinks=[], next_page_token="def",),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(),], next_page_token="ghi",
+            ),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(), logging_config.LogSink(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_sinks(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, logging_config.LogSink) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_sinks_async_pages():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_sinks), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListSinksResponse(
+                sinks=[
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                    logging_config.LogSink(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListSinksResponse(sinks=[], next_page_token="def",),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(),], next_page_token="ghi",
+            ),
+            logging_config.ListSinksResponse(
+                sinks=[logging_config.LogSink(), logging_config.LogSink(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_sinks(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_get_sink(transport: str = "grpc", request_type=logging_config.GetSinkRequest):
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_sink), "__call__") as call:
+        # Designate an appropriate return value for the call.
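+        # Each asserted field is given a non-default value so the checks
+        # below can distinguish a populated response from an empty proto.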
+ call.return_value = logging_config.LogSink( + name="name_value", + destination="destination_value", + filter="filter_value", + description="description_value", + disabled=True, + output_version_format=logging_config.LogSink.VersionFormat.V2, + writer_identity="writer_identity_value", + include_children=True, + bigquery_options=logging_config.BigQueryOptions( + use_partitioned_tables=True + ), + ) + + response = client.get_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.GetSinkRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_config.LogSink) + + assert response.name == "name_value" + + assert response.destination == "destination_value" + + assert response.filter == "filter_value" + + assert response.description == "description_value" + + assert response.disabled is True + + assert response.output_version_format == logging_config.LogSink.VersionFormat.V2 + + assert response.writer_identity == "writer_identity_value" + + assert response.include_children is True + + +def test_get_sink_from_dict(): + test_get_sink(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_sink_async( + transport: str = "grpc_asyncio", request_type=logging_config.GetSinkRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink( + name="name_value", + destination="destination_value", + filter="filter_value", + description="description_value", + disabled=True, + output_version_format=logging_config.LogSink.VersionFormat.V2, + writer_identity="writer_identity_value", + include_children=True, + ) + ) + + response = await client.get_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.GetSinkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogSink) + + assert response.name == "name_value" + + assert response.destination == "destination_value" + + assert response.filter == "filter_value" + + assert response.description == "description_value" + + assert response.disabled is True + + assert response.output_version_format == logging_config.LogSink.VersionFormat.V2 + + assert response.writer_identity == "writer_identity_value" + + assert response.include_children is True + + +@pytest.mark.asyncio +async def test_get_sink_async_from_dict(): + await test_get_sink_async(request_type=dict) + + +def test_get_sink_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetSinkRequest() + request.sink_name = "sink_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_sink), "__call__") as call: + call.return_value = logging_config.LogSink() + + client.get_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "sink_name=sink_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_sink_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetSinkRequest() + request.sink_name = "sink_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_sink), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink() + ) + + await client.get_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "sink_name=sink_name/value",) in kw["metadata"] + + +def test_get_sink_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_sink(sink_name="sink_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].sink_name == "sink_name_value" + + +def test_get_sink_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_sink( + logging_config.GetSinkRequest(), sink_name="sink_name_value", + ) + + +@pytest.mark.asyncio +async def test_get_sink_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_sink(sink_name="sink_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].sink_name == "sink_name_value" + + +@pytest.mark.asyncio +async def test_get_sink_flattened_error_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_sink( + logging_config.GetSinkRequest(), sink_name="sink_name_value", + ) + + +def test_create_sink( + transport: str = "grpc", request_type=logging_config.CreateSinkRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink( + name="name_value", + destination="destination_value", + filter="filter_value", + description="description_value", + disabled=True, + output_version_format=logging_config.LogSink.VersionFormat.V2, + writer_identity="writer_identity_value", + include_children=True, + bigquery_options=logging_config.BigQueryOptions( + use_partitioned_tables=True + ), + ) + + response = client.create_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.CreateSinkRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_config.LogSink) + + assert response.name == "name_value" + + assert response.destination == "destination_value" + + assert response.filter == "filter_value" + + assert response.description == "description_value" + + assert response.disabled is True + + assert response.output_version_format == logging_config.LogSink.VersionFormat.V2 + + assert response.writer_identity == "writer_identity_value" + + assert response.include_children is True + + +def test_create_sink_from_dict(): + test_create_sink(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_sink_async( + transport: str = "grpc_asyncio", request_type=logging_config.CreateSinkRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink( + name="name_value", + destination="destination_value", + filter="filter_value", + description="description_value", + disabled=True, + output_version_format=logging_config.LogSink.VersionFormat.V2, + writer_identity="writer_identity_value", + include_children=True, + ) + ) + + response = await client.create_sink(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.CreateSinkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogSink) + + assert response.name == "name_value" + + assert response.destination == "destination_value" + + assert response.filter == "filter_value" + + assert response.description == "description_value" + + assert response.disabled is True + + assert response.output_version_format == logging_config.LogSink.VersionFormat.V2 + + assert response.writer_identity == "writer_identity_value" + + assert response.include_children is True + + +@pytest.mark.asyncio +async def test_create_sink_async_from_dict(): + await test_create_sink_async(request_type=dict) + + +def test_create_sink_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.CreateSinkRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_sink), "__call__") as call: + call.return_value = logging_config.LogSink() + + client.create_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_sink_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.CreateSinkRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_sink), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink() + ) + + await client.create_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_sink_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_sink( + parent="parent_value", sink=logging_config.LogSink(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
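+        # (The client packs the flattened kwargs into a CreateSinkRequest,
+        # so the fields are inspected on args[0].)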
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].sink == logging_config.LogSink(name="name_value") + + +def test_create_sink_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_sink( + logging_config.CreateSinkRequest(), + parent="parent_value", + sink=logging_config.LogSink(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_sink_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_sink( + parent="parent_value", sink=logging_config.LogSink(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].sink == logging_config.LogSink(name="name_value") + + +@pytest.mark.asyncio +async def test_create_sink_flattened_error_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_sink( + logging_config.CreateSinkRequest(), + parent="parent_value", + sink=logging_config.LogSink(name="name_value"), + ) + + +def test_update_sink( + transport: str = "grpc", request_type=logging_config.UpdateSinkRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink( + name="name_value", + destination="destination_value", + filter="filter_value", + description="description_value", + disabled=True, + output_version_format=logging_config.LogSink.VersionFormat.V2, + writer_identity="writer_identity_value", + include_children=True, + bigquery_options=logging_config.BigQueryOptions( + use_partitioned_tables=True + ), + ) + + response = client.update_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateSinkRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, logging_config.LogSink) + + assert response.name == "name_value" + + assert response.destination == "destination_value" + + assert response.filter == "filter_value" + + assert response.description == "description_value" + + assert response.disabled is True + + assert response.output_version_format == logging_config.LogSink.VersionFormat.V2 + + assert response.writer_identity == "writer_identity_value" + + assert response.include_children is True + + +def test_update_sink_from_dict(): + test_update_sink(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_sink_async( + transport: str = "grpc_asyncio", request_type=logging_config.UpdateSinkRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink( + name="name_value", + destination="destination_value", + filter="filter_value", + description="description_value", + disabled=True, + output_version_format=logging_config.LogSink.VersionFormat.V2, + writer_identity="writer_identity_value", + include_children=True, + ) + ) + + response = await client.update_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateSinkRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogSink) + + assert response.name == "name_value" + + assert response.destination == "destination_value" + + assert response.filter == "filter_value" + + assert response.description == "description_value" + + assert response.disabled is True + + assert response.output_version_format == logging_config.LogSink.VersionFormat.V2 + + assert response.writer_identity == "writer_identity_value" + + assert response.include_children is True + + +@pytest.mark.asyncio +async def test_update_sink_async_from_dict(): + await test_update_sink_async(request_type=dict) + + +def test_update_sink_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateSinkRequest() + request.sink_name = "sink_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_sink), "__call__") as call: + call.return_value = logging_config.LogSink() + + client.update_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
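+    # ("x-goog-request-params" is the routing header derived from the
+    # URI-bound field; kw["metadata"] is the metadata keyword argument
+    # captured from the mocked call.)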
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "sink_name=sink_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_sink_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateSinkRequest() + request.sink_name = "sink_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_sink), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink() + ) + + await client.update_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "sink_name=sink_name/value",) in kw["metadata"] + + +def test_update_sink_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_sink( + sink_name="sink_name_value", + sink=logging_config.LogSink(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].sink_name == "sink_name_value" + + assert args[0].sink == logging_config.LogSink(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_sink_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_sink( + logging_config.UpdateSinkRequest(), + sink_name="sink_name_value", + sink=logging_config.LogSink(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_sink_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogSink() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogSink() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_sink( + sink_name="sink_name_value", + sink=logging_config.LogSink(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].sink_name == "sink_name_value" + + assert args[0].sink == logging_config.LogSink(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_sink_flattened_error_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_sink( + logging_config.UpdateSinkRequest(), + sink_name="sink_name_value", + sink=logging_config.LogSink(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_sink( + transport: str = "grpc", request_type=logging_config.DeleteSinkRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.DeleteSinkRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_sink_from_dict(): + test_delete_sink(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_sink_async( + transport: str = "grpc_asyncio", request_type=logging_config.DeleteSinkRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.DeleteSinkRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_sink_async_from_dict(): + await test_delete_sink_async(request_type=dict) + + +def test_delete_sink_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.DeleteSinkRequest() + request.sink_name = "sink_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_sink), "__call__") as call: + call.return_value = None + + client.delete_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "sink_name=sink_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_sink_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.DeleteSinkRequest() + request.sink_name = "sink_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_sink), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_sink(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "sink_name=sink_name/value",) in kw["metadata"] + + +def test_delete_sink_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_sink(sink_name="sink_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].sink_name == "sink_name_value" + + +def test_delete_sink_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_sink( + logging_config.DeleteSinkRequest(), sink_name="sink_name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_sink_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_sink), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_sink(sink_name="sink_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].sink_name == "sink_name_value" + + +@pytest.mark.asyncio +async def test_delete_sink_flattened_error_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_sink( + logging_config.DeleteSinkRequest(), sink_name="sink_name_value", + ) + + +def test_list_exclusions( + transport: str = "grpc", request_type=logging_config.ListExclusionsRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListExclusionsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_exclusions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.ListExclusionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListExclusionsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_exclusions_from_dict(): + test_list_exclusions(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_exclusions_async( + transport: str = "grpc_asyncio", request_type=logging_config.ListExclusionsRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListExclusionsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_exclusions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.ListExclusionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExclusionsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_exclusions_async_from_dict(): + await test_list_exclusions_async(request_type=dict) + + +def test_list_exclusions_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.ListExclusionsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call: + call.return_value = logging_config.ListExclusionsResponse() + + client.list_exclusions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_exclusions_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.ListExclusionsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListExclusionsResponse() + ) + + await client.list_exclusions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_exclusions_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListExclusionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_exclusions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_exclusions_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_exclusions( + logging_config.ListExclusionsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_exclusions_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.ListExclusionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.ListExclusionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_exclusions(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_exclusions_flattened_error_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_exclusions(
+            logging_config.ListExclusionsRequest(), parent="parent_value",
+        )
+
+
+def test_list_exclusions_pager():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[], next_page_token="def",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[logging_config.LogExclusion(),], next_page_token="ghi",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_exclusions(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, logging_config.LogExclusion) for i in results)
+
+
+def test_list_exclusions_pages():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_exclusions), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[], next_page_token="def",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[logging_config.LogExclusion(),], next_page_token="ghi",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_exclusions(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_exclusions_async_pager():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_exclusions), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[], next_page_token="def",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[logging_config.LogExclusion(),], next_page_token="ghi",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_exclusions(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, logging_config.LogExclusion) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_exclusions_async_pages():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_exclusions), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[], next_page_token="def",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[logging_config.LogExclusion(),], next_page_token="ghi",
+            ),
+            logging_config.ListExclusionsResponse(
+                exclusions=[
+                    logging_config.LogExclusion(),
+                    logging_config.LogExclusion(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_exclusions(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_get_exclusion(
+    transport: str = "grpc", request_type=logging_config.GetExclusionRequest
+):
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_exclusion), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging_config.LogExclusion(
+            name="name_value",
+            description="description_value",
+            filter="filter_value",
+            disabled=True,
+        )
+
+        response = client.get_exclusion(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == logging_config.GetExclusionRequest()
+
+    # Establish that the response is the type that we expect.
+ + assert isinstance(response, logging_config.LogExclusion) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.disabled is True + + +def test_get_exclusion_from_dict(): + test_get_exclusion(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_exclusion_async( + transport: str = "grpc_asyncio", request_type=logging_config.GetExclusionRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogExclusion( + name="name_value", + description="description_value", + filter="filter_value", + disabled=True, + ) + ) + + response = await client.get_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.GetExclusionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogExclusion) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.disabled is True + + +@pytest.mark.asyncio +async def test_get_exclusion_async_from_dict(): + await test_get_exclusion_async(request_type=dict) + + +def test_get_exclusion_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetExclusionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_exclusion), "__call__") as call: + call.return_value = logging_config.LogExclusion() + + client.get_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_exclusion_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetExclusionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_exclusion), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogExclusion() + ) + + await client.get_exclusion(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_exclusion_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogExclusion() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_exclusion(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_exclusion_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_exclusion( + logging_config.GetExclusionRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_exclusion_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogExclusion() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogExclusion() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_exclusion(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_exclusion_flattened_error_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_exclusion( + logging_config.GetExclusionRequest(), name="name_value", + ) + + +def test_create_exclusion( + transport: str = "grpc", request_type=logging_config.CreateExclusionRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogExclusion( + name="name_value", + description="description_value", + filter="filter_value", + disabled=True, + ) + + response = client.create_exclusion(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.CreateExclusionRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_config.LogExclusion) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.disabled is True + + +def test_create_exclusion_from_dict(): + test_create_exclusion(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_exclusion_async( + transport: str = "grpc_asyncio", request_type=logging_config.CreateExclusionRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogExclusion( + name="name_value", + description="description_value", + filter="filter_value", + disabled=True, + ) + ) + + response = await client.create_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.CreateExclusionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogExclusion) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.disabled is True + + +@pytest.mark.asyncio +async def test_create_exclusion_async_from_dict(): + await test_create_exclusion_async(request_type=dict) + + +def test_create_exclusion_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.CreateExclusionRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_exclusion), "__call__") as call: + call.return_value = logging_config.LogExclusion() + + client.create_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_exclusion_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.CreateExclusionRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(type(client.transport.create_exclusion), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_config.LogExclusion()
+        )
+
+        await client.create_exclusion(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_create_exclusion_flattened():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_exclusion), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging_config.LogExclusion()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_exclusion(
+            parent="parent_value",
+            exclusion=logging_config.LogExclusion(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].exclusion == logging_config.LogExclusion(name="name_value")
+
+
+def test_create_exclusion_flattened_error():
+    client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_exclusion(
+            logging_config.CreateExclusionRequest(),
+            parent="parent_value",
+            exclusion=logging_config.LogExclusion(name="name_value"),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_exclusion_flattened_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.create_exclusion), "__call__") as call:
+        # Designate an appropriate awaitable return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_config.LogExclusion()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_exclusion(
+            parent="parent_value",
+            exclusion=logging_config.LogExclusion(name="name_value"),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+        assert args[0].exclusion == logging_config.LogExclusion(name="name_value")
+
+
+@pytest.mark.asyncio
+async def test_create_exclusion_flattened_error_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
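+    # The ValueError is raised client-side, before any RPC is attempted, so no
+    # transport mock is needed here.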
+ with pytest.raises(ValueError): + await client.create_exclusion( + logging_config.CreateExclusionRequest(), + parent="parent_value", + exclusion=logging_config.LogExclusion(name="name_value"), + ) + + +def test_update_exclusion( + transport: str = "grpc", request_type=logging_config.UpdateExclusionRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogExclusion( + name="name_value", + description="description_value", + filter="filter_value", + disabled=True, + ) + + response = client.update_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateExclusionRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_config.LogExclusion) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.disabled is True + + +def test_update_exclusion_from_dict(): + test_update_exclusion(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_exclusion_async( + transport: str = "grpc_asyncio", request_type=logging_config.UpdateExclusionRequest +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogExclusion( + name="name_value", + description="description_value", + filter="filter_value", + disabled=True, + ) + ) + + response = await client.update_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateExclusionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.LogExclusion) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.disabled is True + + +@pytest.mark.asyncio +async def test_update_exclusion_async_from_dict(): + await test_update_exclusion_async(request_type=dict) + + +def test_update_exclusion_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateExclusionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_exclusion), "__call__") as call: + call.return_value = logging_config.LogExclusion() + + client.update_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_exclusion_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateExclusionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_exclusion), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.LogExclusion() + ) + + await client.update_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_exclusion_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.LogExclusion() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_exclusion( + name="name_value", + exclusion=logging_config.LogExclusion(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].exclusion == logging_config.LogExclusion(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_exclusion_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_exclusion( + logging_config.UpdateExclusionRequest(), + name="name_value", + exclusion=logging_config.LogExclusion(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_exclusion_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. 
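+        # Wrapping the message makes the awaited call resolve to a LogExclusion.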
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_config.LogExclusion()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_exclusion(
+            name="name_value",
+            exclusion=logging_config.LogExclusion(name="name_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+        assert args[0].exclusion == logging_config.LogExclusion(name="name_value")
+
+        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
+
+
+@pytest.mark.asyncio
+async def test_update_exclusion_flattened_error_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_exclusion(
+            logging_config.UpdateExclusionRequest(),
+            name="name_value",
+            exclusion=logging_config.LogExclusion(name="name_value"),
+            update_mask=field_mask.FieldMask(paths=["paths_value"]),
+        )
+
+
+def test_delete_exclusion(
+    transport: str = "grpc", request_type=logging_config.DeleteExclusionRequest
+):
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_exclusion), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        response = client.delete_exclusion(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == logging_config.DeleteExclusionRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_exclusion_from_dict():
+    test_delete_exclusion(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_delete_exclusion_async(
+    transport: str = "grpc_asyncio", request_type=logging_config.DeleteExclusionRequest
+):
+    client = ConfigServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_exclusion), "__call__") as call:
+        # Designate an appropriate awaitable return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        response = await client.delete_exclusion(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == logging_config.DeleteExclusionRequest()
+
+    # Establish that the response is the type that we expect.
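+    # Methods that return google.protobuf.Empty surface as None on the client.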
+ assert response is None + + +@pytest.mark.asyncio +async def test_delete_exclusion_async_from_dict(): + await test_delete_exclusion_async(request_type=dict) + + +def test_delete_exclusion_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.DeleteExclusionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_exclusion), "__call__") as call: + call.return_value = None + + client.delete_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_exclusion_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.DeleteExclusionRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_exclusion), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_exclusion(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_exclusion_flattened(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_exclusion(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_exclusion_flattened_error(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_exclusion( + logging_config.DeleteExclusionRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_exclusion_flattened_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_exclusion), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_exclusion(name="name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_exclusion_flattened_error_async():
+    client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_exclusion(
+            logging_config.DeleteExclusionRequest(), name="name_value",
+        )
+
+
+def test_get_cmek_settings(
+    transport: str = "grpc", request_type=logging_config.GetCmekSettingsRequest
+):
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.get_cmek_settings), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging_config.CmekSettings(
+            name="name_value",
+            kms_key_name="kms_key_name_value",
+            service_account_id="service_account_id_value",
+        )
+
+        response = client.get_cmek_settings(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == logging_config.GetCmekSettingsRequest()
+
+    # Establish that the response is the type that we expect.
+
+    assert isinstance(response, logging_config.CmekSettings)
+
+    assert response.name == "name_value"
+
+    assert response.kms_key_name == "kms_key_name_value"
+
+    assert response.service_account_id == "service_account_id_value"
+
+
+def test_get_cmek_settings_from_dict():
+    test_get_cmek_settings(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_get_cmek_settings_async(
+    transport: str = "grpc_asyncio", request_type=logging_config.GetCmekSettingsRequest
+):
+    client = ConfigServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.get_cmek_settings), "__call__"
+    ) as call:
+        # Designate an appropriate awaitable return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_config.CmekSettings(
+                name="name_value",
+                kms_key_name="kms_key_name_value",
+                service_account_id="service_account_id_value",
+            )
+        )
+
+        response = await client.get_cmek_settings(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == logging_config.GetCmekSettingsRequest()
+
+    # Establish that the response is the type that we expect.
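+    # CmekSettings here is the proto-plus wrapper type, not the raw protobuf message.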
+ assert isinstance(response, logging_config.CmekSettings) + + assert response.name == "name_value" + + assert response.kms_key_name == "kms_key_name_value" + + assert response.service_account_id == "service_account_id_value" + + +@pytest.mark.asyncio +async def test_get_cmek_settings_async_from_dict(): + await test_get_cmek_settings_async(request_type=dict) + + +def test_get_cmek_settings_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetCmekSettingsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cmek_settings), "__call__" + ) as call: + call.return_value = logging_config.CmekSettings() + + client.get_cmek_settings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_cmek_settings_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.GetCmekSettingsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cmek_settings), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.CmekSettings() + ) + + await client.get_cmek_settings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_update_cmek_settings( + transport: str = "grpc", request_type=logging_config.UpdateCmekSettingsRequest +): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cmek_settings), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_config.CmekSettings( + name="name_value", + kms_key_name="kms_key_name_value", + service_account_id="service_account_id_value", + ) + + response = client.update_cmek_settings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateCmekSettingsRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, logging_config.CmekSettings) + + assert response.name == "name_value" + + assert response.kms_key_name == "kms_key_name_value" + + assert response.service_account_id == "service_account_id_value" + + +def test_update_cmek_settings_from_dict(): + test_update_cmek_settings(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_cmek_settings_async( + transport: str = "grpc_asyncio", + request_type=logging_config.UpdateCmekSettingsRequest, +): + client = ConfigServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cmek_settings), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.CmekSettings( + name="name_value", + kms_key_name="kms_key_name_value", + service_account_id="service_account_id_value", + ) + ) + + response = await client.update_cmek_settings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_config.UpdateCmekSettingsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_config.CmekSettings) + + assert response.name == "name_value" + + assert response.kms_key_name == "kms_key_name_value" + + assert response.service_account_id == "service_account_id_value" + + +@pytest.mark.asyncio +async def test_update_cmek_settings_async_from_dict(): + await test_update_cmek_settings_async(request_type=dict) + + +def test_update_cmek_settings_field_headers(): + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateCmekSettingsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cmek_settings), "__call__" + ) as call: + call.return_value = logging_config.CmekSettings() + + client.update_cmek_settings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_cmek_settings_field_headers_async(): + client = ConfigServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_config.UpdateCmekSettingsRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_cmek_settings), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_config.CmekSettings() + ) + + await client.update_cmek_settings(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ConfigServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ConfigServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ConfigServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ConfigServiceV2Client( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ConfigServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ConfigServiceV2Client( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ConfigServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = ConfigServiceV2Client(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ConfigServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ConfigServiceV2GrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ConfigServiceV2GrpcTransport, + transports.ConfigServiceV2GrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ConfigServiceV2Client(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.ConfigServiceV2GrpcTransport,) + + +def test_config_service_v2_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.ConfigServiceV2Transport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_config_service_v2_base_transport(): + # Instantiate the base transport. 
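+    # __init__ is patched out so the base transport can be built without real
+    # credentials or channel plumbing.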
+ with mock.patch( + "google.cloud.logging_v2.services.config_service_v2.transports.ConfigServiceV2Transport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ConfigServiceV2Transport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "list_buckets", + "get_bucket", + "update_bucket", + "list_sinks", + "get_sink", + "create_sink", + "update_sink", + "delete_sink", + "list_exclusions", + "get_exclusion", + "create_exclusion", + "update_exclusion", + "delete_exclusion", + "get_cmek_settings", + "update_cmek_settings", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_config_service_v2_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.logging_v2.services.config_service_v2.transports.ConfigServiceV2Transport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ConfigServiceV2Transport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + ), + quota_project_id="octopus", + ) + + +def test_config_service_v2_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.logging_v2.services.config_service_v2.transports.ConfigServiceV2Transport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ConfigServiceV2Transport() + adc.assert_called_once() + + +def test_config_service_v2_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + ConfigServiceV2Client() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + ), + quota_project_id=None, + ) + + +def test_config_service_v2_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
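+    # Patch google.auth.default() so the test never performs a real ADC lookup.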
+    with mock.patch.object(auth, "default") as adc:
+        adc.return_value = (credentials.AnonymousCredentials(), None)
+        transports.ConfigServiceV2GrpcTransport(
+            host="squid.clam.whelk", quota_project_id="octopus"
+        )
+        adc.assert_called_once_with(
+            scopes=(
+                "https://www.googleapis.com/auth/cloud-platform",
+                "https://www.googleapis.com/auth/cloud-platform.read-only",
+                "https://www.googleapis.com/auth/logging.admin",
+                "https://www.googleapis.com/auth/logging.read",
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_config_service_v2_host_no_port():
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="logging.googleapis.com"
+        ),
+    )
+    assert client.transport._host == "logging.googleapis.com:443"
+
+
+def test_config_service_v2_host_with_port():
+    client = ConfigServiceV2Client(
+        credentials=credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="logging.googleapis.com:8000"
+        ),
+    )
+    assert client.transport._host == "logging.googleapis.com:8000"
+
+
+def test_config_service_v2_grpc_transport_channel():
+    channel = grpc.insecure_channel("http://localhost/")
+
+    # Check that channel is used if provided.
+    transport = transports.ConfigServiceV2GrpcTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_config_service_v2_grpc_asyncio_transport_channel():
+    channel = aio.insecure_channel("http://localhost/")
+
+    # Check that channel is used if provided.
+    transport = transports.ConfigServiceV2GrpcAsyncIOTransport(
+        host="squid.clam.whelk", channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.ConfigServiceV2GrpcTransport,
+        transports.ConfigServiceV2GrpcAsyncIOTransport,
+    ],
+)
+def test_config_service_v2_transport_channel_mtls_with_client_cert_source(
+    transport_class,
+):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/cloud-platform.read-only",
+                    "https://www.googleapis.com/auth/logging.admin",
+                    "https://www.googleapis.com/auth/logging.read",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.ConfigServiceV2GrpcTransport,
+        transports.ConfigServiceV2GrpcAsyncIOTransport,
+    ],
+)
+def test_config_service_v2_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=(
+                    "https://www.googleapis.com/auth/cloud-platform",
+                    "https://www.googleapis.com/auth/cloud-platform.read-only",
+                    "https://www.googleapis.com/auth/logging.admin",
+                    "https://www.googleapis.com/auth/logging.read",
+                ),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_cmek_settings_path():
+    project = "squid"
+
+    expected = "projects/{project}/cmekSettings".format(project=project,)
+    actual = ConfigServiceV2Client.cmek_settings_path(project)
+    assert expected == actual
+
+
+def test_parse_cmek_settings_path():
+    expected = {
+        "project": "clam",
+    }
+    path = ConfigServiceV2Client.cmek_settings_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = ConfigServiceV2Client.parse_cmek_settings_path(path)
+    assert expected == actual
+
+
+def test_log_bucket_path():
+    project = "whelk"
+    location = "octopus"
+    bucket = "oyster"
+
+    expected = "projects/{project}/locations/{location}/buckets/{bucket}".format(
+        project=project, location=location, bucket=bucket,
+    )
+    actual = ConfigServiceV2Client.log_bucket_path(project, location, bucket)
+    assert expected == actual
+
+
+def test_parse_log_bucket_path():
+    expected = {
+        "project": "nudibranch",
+        "location": "cuttlefish",
+        "bucket": "mussel",
+    }
+    path = ConfigServiceV2Client.log_bucket_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = ConfigServiceV2Client.parse_log_bucket_path(path)
+    assert expected == actual
+
+
+def test_log_exclusion_path():
+    project = "winkle"
+    exclusion = "nautilus"
+
+    expected = "projects/{project}/exclusions/{exclusion}".format(
+        project=project, exclusion=exclusion,
+    )
+    actual = ConfigServiceV2Client.log_exclusion_path(project, exclusion)
+    assert expected == actual
+
+
+def test_parse_log_exclusion_path():
+    expected = {
+        "project": "scallop",
+        "exclusion": "abalone",
+    }
+    path = ConfigServiceV2Client.log_exclusion_path(**expected)
+
+    # Check that the path construction is reversible.
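+    # e.g. "projects/scallop/exclusions/abalone" should parse back into the dict above.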
+ actual = ConfigServiceV2Client.parse_log_exclusion_path(path) + assert expected == actual + + +def test_log_sink_path(): + project = "squid" + sink = "clam" + + expected = "projects/{project}/sinks/{sink}".format(project=project, sink=sink,) + actual = ConfigServiceV2Client.log_sink_path(project, sink) + assert expected == actual + + +def test_parse_log_sink_path(): + expected = { + "project": "whelk", + "sink": "octopus", + } + path = ConfigServiceV2Client.log_sink_path(**expected) + + # Check that the path construction is reversible. + actual = ConfigServiceV2Client.parse_log_sink_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ConfigServiceV2Client.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = ConfigServiceV2Client.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ConfigServiceV2Client.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + + expected = "folders/{folder}".format(folder=folder,) + actual = ConfigServiceV2Client.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = ConfigServiceV2Client.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ConfigServiceV2Client.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + + expected = "organizations/{organization}".format(organization=organization,) + actual = ConfigServiceV2Client.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = ConfigServiceV2Client.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ConfigServiceV2Client.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = ConfigServiceV2Client.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = ConfigServiceV2Client.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ConfigServiceV2Client.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = ConfigServiceV2Client.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = ConfigServiceV2Client.common_location_path(**expected) + + # Check that the path construction is reversible. 
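+    # e.g. "projects/whelk/locations/octopus" should yield the expected dict again.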
+    actual = ConfigServiceV2Client.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(
+        transports.ConfigServiceV2Transport, "_prep_wrapped_messages"
+    ) as prep:
+        client = ConfigServiceV2Client(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(
+        transports.ConfigServiceV2Transport, "_prep_wrapped_messages"
+    ) as prep:
+        transport_class = ConfigServiceV2Client.get_transport_class()
+        transport = transport_class(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
diff --git a/tests/unit/gapic/logging_v2/test_logging_service_v2.py b/tests/unit/gapic/logging_v2/test_logging_service_v2.py
new file mode 100644
index 000000000..9aed4e4ce
--- /dev/null
+++ b/tests/unit/gapic/logging_v2/test_logging_service_v2.py
@@ -0,0 +1,2166 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from google import auth
+from google.api import monitored_resource_pb2 as monitored_resource  # type: ignore
+from google.api_core import client_options
+from google.api_core import exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.auth import credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.logging_v2.services.logging_service_v2 import (
+    LoggingServiceV2AsyncClient,
+)
+from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client
+from google.cloud.logging_v2.services.logging_service_v2 import pagers
+from google.cloud.logging_v2.services.logging_service_v2 import transports
+from google.cloud.logging_v2.types import log_entry
+from google.cloud.logging_v2.types import logging
+from google.logging.type import http_request_pb2 as http_request  # type: ignore
+from google.logging.type import log_severity_pb2 as log_severity  # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2 as gp_any  # type: ignore
+from google.protobuf import duration_pb2 as duration  # type: ignore
+from google.protobuf import struct_pb2 as struct  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
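+# Swapping in "foo.googleapis.com" gives _get_default_mtls_endpoint a name it can transform.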
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert LoggingServiceV2Client._get_default_mtls_endpoint(None) is None + assert ( + LoggingServiceV2Client._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + LoggingServiceV2Client._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + LoggingServiceV2Client._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + LoggingServiceV2Client._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + LoggingServiceV2Client._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [LoggingServiceV2Client, LoggingServiceV2AsyncClient] +) +def test_logging_service_v2_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "logging.googleapis.com:443" + + +def test_logging_service_v2_client_get_transport_class(): + transport = LoggingServiceV2Client.get_transport_class() + assert transport == transports.LoggingServiceV2GrpcTransport + + transport = LoggingServiceV2Client.get_transport_class("grpc") + assert transport == transports.LoggingServiceV2GrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LoggingServiceV2Client, transports.LoggingServiceV2GrpcTransport, "grpc"), + ( + LoggingServiceV2AsyncClient, + transports.LoggingServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + LoggingServiceV2Client, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LoggingServiceV2Client), +) +@mock.patch.object( + LoggingServiceV2AsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LoggingServiceV2AsyncClient), +) +def test_logging_service_v2_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(LoggingServiceV2Client, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(LoggingServiceV2Client, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
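+    # An explicit api_endpoint takes precedence over the mTLS endpoint switching below.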
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
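+    # Only "true" and "false" are accepted for this variable; anything else is rejected.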
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + LoggingServiceV2Client, + transports.LoggingServiceV2GrpcTransport, + "grpc", + "true", + ), + ( + LoggingServiceV2AsyncClient, + transports.LoggingServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + LoggingServiceV2Client, + transports.LoggingServiceV2GrpcTransport, + "grpc", + "false", + ), + ( + LoggingServiceV2AsyncClient, + transports.LoggingServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + LoggingServiceV2Client, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LoggingServiceV2Client), +) +@mock.patch.object( + LoggingServiceV2AsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(LoggingServiceV2AsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_logging_service_v2_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
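+    # Here the certificate comes from the application-default SslCredentials rather
+    # than from client_options.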
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LoggingServiceV2Client, transports.LoggingServiceV2GrpcTransport, "grpc"), + ( + LoggingServiceV2AsyncClient, + transports.LoggingServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_logging_service_v2_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (LoggingServiceV2Client, transports.LoggingServiceV2GrpcTransport, "grpc"), + ( + LoggingServiceV2AsyncClient, + transports.LoggingServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_logging_service_v2_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
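+    # The path is forwarded to the transport, which loads the credentials itself.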
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_logging_service_v2_client_client_options_from_dict(): + with mock.patch( + "google.cloud.logging_v2.services.logging_service_v2.transports.LoggingServiceV2GrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = LoggingServiceV2Client( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_delete_log(transport: str = "grpc", request_type=logging.DeleteLogRequest): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_log), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_log(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.DeleteLogRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_log_from_dict(): + test_delete_log(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_log_async( + transport: str = "grpc_asyncio", request_type=logging.DeleteLogRequest +): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_log), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_log(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.DeleteLogRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_log_async_from_dict(): + await test_delete_log_async(request_type=dict) + + +def test_delete_log_field_headers(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
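+    # gRPC has no request URI, so routing values are carried instead in the
+    # x-goog-request-params metadata header asserted at the end of this test.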
+    request = logging.DeleteLogRequest()
+    request.log_name = "log_name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_log), "__call__") as call:
+        call.return_value = None
+
+        client.delete_log(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "log_name=log_name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_log_field_headers_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = logging.DeleteLogRequest()
+    request.log_name = "log_name/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_log), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+
+        await client.delete_log(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "log_name=log_name/value",) in kw["metadata"]
+
+
+def test_delete_log_flattened():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_log), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_log(log_name="log_name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].log_name == "log_name_value"
+
+
+def test_delete_log_flattened_error():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_log(
+            logging.DeleteLogRequest(), log_name="log_name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_log_flattened_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.delete_log), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_log(log_name="log_name_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
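+        # The flattened log_name keyword should have been folded into the
+        # request object that reached the transport stub.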
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].log_name == "log_name_value" + + +@pytest.mark.asyncio +async def test_delete_log_flattened_error_async(): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_log( + logging.DeleteLogRequest(), log_name="log_name_value", + ) + + +def test_write_log_entries( + transport: str = "grpc", request_type=logging.WriteLogEntriesRequest +): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_log_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging.WriteLogEntriesResponse() + + response = client.write_log_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.WriteLogEntriesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging.WriteLogEntriesResponse) + + +def test_write_log_entries_from_dict(): + test_write_log_entries(request_type=dict) + + +@pytest.mark.asyncio +async def test_write_log_entries_async( + transport: str = "grpc_asyncio", request_type=logging.WriteLogEntriesRequest +): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_log_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging.WriteLogEntriesResponse() + ) + + response = await client.write_log_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.WriteLogEntriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging.WriteLogEntriesResponse) + + +@pytest.mark.asyncio +async def test_write_log_entries_async_from_dict(): + await test_write_log_entries_async(request_type=dict) + + +def test_write_log_entries_flattened(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.write_log_entries), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging.WriteLogEntriesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
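+        # Each flattened keyword below maps to the field of the same name on
+        # the WriteLogEntriesRequest that the client builds internally.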
+        client.write_log_entries(
+            log_name="log_name_value",
+            resource=monitored_resource.MonitoredResource(type_="type__value"),
+            labels={"key_value": "value_value"},
+            entries=[log_entry.LogEntry(log_name="log_name_value")],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].log_name == "log_name_value"
+
+        assert args[0].resource == monitored_resource.MonitoredResource(
+            type_="type__value"
+        )
+
+        assert args[0].labels == {"key_value": "value_value"}
+
+        assert args[0].entries == [log_entry.LogEntry(log_name="log_name_value")]
+
+
+def test_write_log_entries_flattened_error():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.write_log_entries(
+            logging.WriteLogEntriesRequest(),
+            log_name="log_name_value",
+            resource=monitored_resource.MonitoredResource(type_="type__value"),
+            labels={"key_value": "value_value"},
+            entries=[log_entry.LogEntry(log_name="log_name_value")],
+        )
+
+
+@pytest.mark.asyncio
+async def test_write_log_entries_flattened_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.write_log_entries), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging.WriteLogEntriesResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.write_log_entries(
+            log_name="log_name_value",
+            resource=monitored_resource.MonitoredResource(type_="type__value"),
+            labels={"key_value": "value_value"},
+            entries=[log_entry.LogEntry(log_name="log_name_value")],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].log_name == "log_name_value"
+
+        assert args[0].resource == monitored_resource.MonitoredResource(
+            type_="type__value"
+        )
+
+        assert args[0].labels == {"key_value": "value_value"}
+
+        assert args[0].entries == [log_entry.LogEntry(log_name="log_name_value")]
+
+
+@pytest.mark.asyncio
+async def test_write_log_entries_flattened_error_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.write_log_entries(
+            logging.WriteLogEntriesRequest(),
+            log_name="log_name_value",
+            resource=monitored_resource.MonitoredResource(type_="type__value"),
+            labels={"key_value": "value_value"},
+            entries=[log_entry.LogEntry(log_name="log_name_value")],
+        )
+
+
+def test_list_log_entries(
+    transport: str = "grpc", request_type=logging.ListLogEntriesRequest
+):
+    client = LoggingServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
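+    # list_log_entries wraps its response in a pager, so the assertions below
+    # check the pager type rather than the raw proto response.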
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_entries), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging.ListLogEntriesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_log_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.ListLogEntriesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListLogEntriesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_log_entries_from_dict(): + test_list_log_entries(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_log_entries_async( + transport: str = "grpc_asyncio", request_type=logging.ListLogEntriesRequest +): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_entries), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging.ListLogEntriesResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_log_entries(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.ListLogEntriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListLogEntriesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_log_entries_async_from_dict(): + await test_list_log_entries_async(request_type=dict) + + +def test_list_log_entries_flattened(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_entries), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging.ListLogEntriesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_log_entries( + resource_names=["resource_names_value"], + filter="filter_value", + order_by="order_by_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource_names == ["resource_names_value"] + + assert args[0].filter == "filter_value" + + assert args[0].order_by == "order_by_value" + + +def test_list_log_entries_flattened_error(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
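+    # Mixing a full request object with flattened keyword arguments is
+    # ambiguous, so the client raises ValueError before any RPC is attempted.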
+    with pytest.raises(ValueError):
+        client.list_log_entries(
+            logging.ListLogEntriesRequest(),
+            resource_names=["resource_names_value"],
+            filter="filter_value",
+            order_by="order_by_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_log_entries_flattened_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_log_entries), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging.ListLogEntriesResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_log_entries(
+            resource_names=["resource_names_value"],
+            filter="filter_value",
+            order_by="order_by_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].resource_names == ["resource_names_value"]
+
+        assert args[0].filter == "filter_value"
+
+        assert args[0].order_by == "order_by_value"
+
+
+@pytest.mark.asyncio
+async def test_list_log_entries_flattened_error_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_log_entries(
+            logging.ListLogEntriesRequest(),
+            resource_names=["resource_names_value"],
+            filter="filter_value",
+            order_by="order_by_value",
+        )
+
+
+def test_list_log_entries_pager():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_log_entries), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging.ListLogEntriesResponse(
+                entries=[
+                    log_entry.LogEntry(),
+                    log_entry.LogEntry(),
+                    log_entry.LogEntry(),
+                ],
+                next_page_token="abc",
+            ),
+            logging.ListLogEntriesResponse(entries=[], next_page_token="def",),
+            logging.ListLogEntriesResponse(
+                entries=[log_entry.LogEntry(),], next_page_token="ghi",
+            ),
+            logging.ListLogEntriesResponse(
+                entries=[log_entry.LogEntry(), log_entry.LogEntry(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        pager = client.list_log_entries(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, log_entry.LogEntry) for i in results)
+
+
+def test_list_log_entries_pages():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_log_entries), "__call__") as call:
+        # Set the response to a series of pages.
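+        # side_effect feeds one page per stub call; the trailing RuntimeError
+        # would surface if the pager ever over-fetched.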
+ call.side_effect = ( + logging.ListLogEntriesResponse( + entries=[ + log_entry.LogEntry(), + log_entry.LogEntry(), + log_entry.LogEntry(), + ], + next_page_token="abc", + ), + logging.ListLogEntriesResponse(entries=[], next_page_token="def",), + logging.ListLogEntriesResponse( + entries=[log_entry.LogEntry(),], next_page_token="ghi", + ), + logging.ListLogEntriesResponse( + entries=[log_entry.LogEntry(), log_entry.LogEntry(),], + ), + RuntimeError, + ) + pages = list(client.list_log_entries(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_log_entries_async_pager(): + client = LoggingServiceV2AsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_log_entries), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListLogEntriesResponse( + entries=[ + log_entry.LogEntry(), + log_entry.LogEntry(), + log_entry.LogEntry(), + ], + next_page_token="abc", + ), + logging.ListLogEntriesResponse(entries=[], next_page_token="def",), + logging.ListLogEntriesResponse( + entries=[log_entry.LogEntry(),], next_page_token="ghi", + ), + logging.ListLogEntriesResponse( + entries=[log_entry.LogEntry(), log_entry.LogEntry(),], + ), + RuntimeError, + ) + async_pager = await client.list_log_entries(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, log_entry.LogEntry) for i in responses) + + +@pytest.mark.asyncio +async def test_list_log_entries_async_pages(): + client = LoggingServiceV2AsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_log_entries), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListLogEntriesResponse( + entries=[ + log_entry.LogEntry(), + log_entry.LogEntry(), + log_entry.LogEntry(), + ], + next_page_token="abc", + ), + logging.ListLogEntriesResponse(entries=[], next_page_token="def",), + logging.ListLogEntriesResponse( + entries=[log_entry.LogEntry(),], next_page_token="ghi", + ), + logging.ListLogEntriesResponse( + entries=[log_entry.LogEntry(), log_entry.LogEntry(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_log_entries(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_monitored_resource_descriptors( + transport: str = "grpc", + request_type=logging.ListMonitoredResourceDescriptorsRequest, +): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
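+        # A non-empty next_page_token lets the test confirm the pager surfaces
+        # paging metadata without issuing a second call.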
+ call.return_value = logging.ListMonitoredResourceDescriptorsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_monitored_resource_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.ListMonitoredResourceDescriptorsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListMonitoredResourceDescriptorsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_monitored_resource_descriptors_from_dict(): + test_list_monitored_resource_descriptors(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async( + transport: str = "grpc_asyncio", + request_type=logging.ListMonitoredResourceDescriptorsRequest, +): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging.ListMonitoredResourceDescriptorsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_monitored_resource_descriptors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.ListMonitoredResourceDescriptorsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMonitoredResourceDescriptorsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async_from_dict(): + await test_list_monitored_resource_descriptors_async(request_type=dict) + + +def test_list_monitored_resource_descriptors_pager(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. 
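+        # Four pages (3 + 0 + 1 + 2 descriptors) yield six results in total.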
+ call.side_effect = ( + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_monitored_resource_descriptors(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance(i, monitored_resource.MonitoredResourceDescriptor) + for i in results + ) + + +def test_list_monitored_resource_descriptors_pages(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_monitored_resource_descriptors), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + pages = list(client.list_monitored_resource_descriptors(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async_pager(): + client = LoggingServiceV2AsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_monitored_resource_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
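+        # The async pager consumes these same four pages via "async for" below.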
+ call.side_effect = ( + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_monitored_resource_descriptors(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, monitored_resource.MonitoredResourceDescriptor) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_monitored_resource_descriptors_async_pages(): + client = LoggingServiceV2AsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_monitored_resource_descriptors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="abc", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[], next_page_token="def", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + ], + next_page_token="ghi", + ), + logging.ListMonitoredResourceDescriptorsResponse( + resource_descriptors=[ + monitored_resource.MonitoredResourceDescriptor(), + monitored_resource.MonitoredResourceDescriptor(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in ( + await client.list_monitored_resource_descriptors(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_list_logs(transport: str = "grpc", request_type=logging.ListLogsRequest): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_logs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging.ListLogsResponse( + log_names=["log_names_value"], next_page_token="next_page_token_value", + ) + + response = client.list_logs(request) + + # Establish that the underlying gRPC stub method was called. 
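+        # Exactly one stub invocation is expected for this unary call.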
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.ListLogsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListLogsPager) + + assert response.log_names == ["log_names_value"] + + assert response.next_page_token == "next_page_token_value" + + +def test_list_logs_from_dict(): + test_list_logs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_logs_async( + transport: str = "grpc_asyncio", request_type=logging.ListLogsRequest +): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_logs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging.ListLogsResponse( + log_names=["log_names_value"], next_page_token="next_page_token_value", + ) + ) + + response = await client.list_logs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging.ListLogsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListLogsAsyncPager) + + assert response.log_names == ["log_names_value"] + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_logs_async_from_dict(): + await test_list_logs_async(request_type=dict) + + +def test_list_logs_field_headers(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging.ListLogsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_logs), "__call__") as call: + call.return_value = logging.ListLogsResponse() + + client.list_logs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_logs_field_headers_async(): + client = LoggingServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging.ListLogsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_logs), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging.ListLogsResponse() + ) + + await client.list_logs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
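+    # Routing headers travel in the call's metadata keyword argument.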
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_logs_flattened():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_logs), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging.ListLogsResponse()
+
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_logs(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+def test_list_logs_flattened_error():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_logs(
+            logging.ListLogsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_logs_flattened_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_logs), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging.ListLogsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_logs(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_logs_flattened_error_async():
+    client = LoggingServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_logs(
+            logging.ListLogsRequest(), parent="parent_value",
+        )
+
+
+def test_list_logs_pager():
+    client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_logs), "__call__") as call:
+        # Set the response to a series of pages.
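+        # ListLogsResponse pages carry plain log-name strings, so the pager
+        # yields str items rather than LogEntry messages.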
+ call.side_effect = ( + logging.ListLogsResponse( + log_names=[str(), str(), str(),], next_page_token="abc", + ), + logging.ListLogsResponse(log_names=[], next_page_token="def",), + logging.ListLogsResponse(log_names=[str(),], next_page_token="ghi",), + logging.ListLogsResponse(log_names=[str(), str(),],), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_logs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, str) for i in results) + + +def test_list_logs_pages(): + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_logs), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListLogsResponse( + log_names=[str(), str(), str(),], next_page_token="abc", + ), + logging.ListLogsResponse(log_names=[], next_page_token="def",), + logging.ListLogsResponse(log_names=[str(),], next_page_token="ghi",), + logging.ListLogsResponse(log_names=[str(), str(),],), + RuntimeError, + ) + pages = list(client.list_logs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_logs_async_pager(): + client = LoggingServiceV2AsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logs), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListLogsResponse( + log_names=[str(), str(), str(),], next_page_token="abc", + ), + logging.ListLogsResponse(log_names=[], next_page_token="def",), + logging.ListLogsResponse(log_names=[str(),], next_page_token="ghi",), + logging.ListLogsResponse(log_names=[str(), str(),],), + RuntimeError, + ) + async_pager = await client.list_logs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, str) for i in responses) + + +@pytest.mark.asyncio +async def test_list_logs_async_pages(): + client = LoggingServiceV2AsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logs), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + logging.ListLogsResponse( + log_names=[str(), str(), str(),], next_page_token="abc", + ), + logging.ListLogsResponse(log_names=[], next_page_token="def",), + logging.ListLogsResponse(log_names=[str(),], next_page_token="ghi",), + logging.ListLogsResponse(log_names=[str(), str(),],), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_logs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
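+    # A transport instance already carries its own credentials, so combining it
+    # with explicit credentials, a credentials file, or scopes must fail.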
+ transport = transports.LoggingServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.LoggingServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LoggingServiceV2Client( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.LoggingServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LoggingServiceV2Client( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.LoggingServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = LoggingServiceV2Client(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.LoggingServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.LoggingServiceV2GrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LoggingServiceV2GrpcTransport, + transports.LoggingServiceV2GrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = LoggingServiceV2Client(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.LoggingServiceV2GrpcTransport,) + + +def test_logging_service_v2_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.LoggingServiceV2Transport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_logging_service_v2_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.logging_v2.services.logging_service_v2.transports.LoggingServiceV2Transport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.LoggingServiceV2Transport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
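+    # The abstract base transport declares the RPC surface without an
+    # implementation, which the loop below verifies method by method.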
+ methods = ( + "delete_log", + "write_log_entries", + "list_log_entries", + "list_monitored_resource_descriptors", + "list_logs", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_logging_service_v2_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.logging_v2.services.logging_service_v2.transports.LoggingServiceV2Transport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.LoggingServiceV2Transport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + quota_project_id="octopus", + ) + + +def test_logging_service_v2_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.logging_v2.services.logging_service_v2.transports.LoggingServiceV2Transport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.LoggingServiceV2Transport() + adc.assert_called_once() + + +def test_logging_service_v2_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + LoggingServiceV2Client() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + quota_project_id=None, + ) + + +def test_logging_service_v2_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
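+    # google.auth.default is patched so the test can assert it is invoked with
+    # the expected scopes and quota project.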
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.LoggingServiceV2GrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + quota_project_id="octopus", + ) + + +def test_logging_service_v2_host_no_port(): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="logging.googleapis.com" + ), + ) + assert client.transport._host == "logging.googleapis.com:443" + + +def test_logging_service_v2_host_with_port(): + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="logging.googleapis.com:8000" + ), + ) + assert client.transport._host == "logging.googleapis.com:8000" + + +def test_logging_service_v2_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.LoggingServiceV2GrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_logging_service_v2_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.LoggingServiceV2GrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LoggingServiceV2GrpcTransport, + transports.LoggingServiceV2GrpcAsyncIOTransport, + ], +) +def test_logging_service_v2_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + ssl_credentials=mock_ssl_cred, + 
quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.LoggingServiceV2GrpcTransport, + transports.LoggingServiceV2GrpcAsyncIOTransport, + ], +) +def test_logging_service_v2_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_log_path(): + project = "squid" + log = "clam" + + expected = "projects/{project}/logs/{log}".format(project=project, log=log,) + actual = LoggingServiceV2Client.log_path(project, log) + assert expected == actual + + +def test_parse_log_path(): + expected = { + "project": "whelk", + "log": "octopus", + } + path = LoggingServiceV2Client.log_path(**expected) + + # Check that the path construction is reversible. + actual = LoggingServiceV2Client.parse_log_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = LoggingServiceV2Client.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = LoggingServiceV2Client.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = LoggingServiceV2Client.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + + expected = "folders/{folder}".format(folder=folder,) + actual = LoggingServiceV2Client.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = LoggingServiceV2Client.common_folder_path(**expected) + + # Check that the path construction is reversible. 
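+    # Each parse_* helper should exactly invert its path-builder counterpart.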
+ actual = LoggingServiceV2Client.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + + expected = "organizations/{organization}".format(organization=organization,) + actual = LoggingServiceV2Client.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = LoggingServiceV2Client.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = LoggingServiceV2Client.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = LoggingServiceV2Client.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = LoggingServiceV2Client.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = LoggingServiceV2Client.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = LoggingServiceV2Client.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = LoggingServiceV2Client.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = LoggingServiceV2Client.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.LoggingServiceV2Transport, "_prep_wrapped_messages" + ) as prep: + client = LoggingServiceV2Client( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.LoggingServiceV2Transport, "_prep_wrapped_messages" + ) as prep: + transport_class = LoggingServiceV2Client.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/logging_v2/test_metrics_service_v2.py b/tests/unit/gapic/logging_v2/test_metrics_service_v2.py new file mode 100644 index 000000000..0cf2e8944 --- /dev/null +++ b/tests/unit/gapic/logging_v2/test_metrics_service_v2.py @@ -0,0 +1,2189 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api import distribution_pb2 as distribution # type: ignore +from google.api import label_pb2 as label # type: ignore +from google.api import launch_stage_pb2 as launch_stage # type: ignore +from google.api import metric_pb2 as ga_metric # type: ignore +from google.api import metric_pb2 as metric # type: ignore +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.logging_v2.services.metrics_service_v2 import ( + MetricsServiceV2AsyncClient, +) +from google.cloud.logging_v2.services.metrics_service_v2 import MetricsServiceV2Client +from google.cloud.logging_v2.services.metrics_service_v2 import pagers +from google.cloud.logging_v2.services.metrics_service_v2 import transports +from google.cloud.logging_v2.types import logging_metrics +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MetricsServiceV2Client._get_default_mtls_endpoint(None) is None + assert ( + MetricsServiceV2Client._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + MetricsServiceV2Client._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + MetricsServiceV2Client._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetricsServiceV2Client._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + MetricsServiceV2Client._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [MetricsServiceV2Client, MetricsServiceV2AsyncClient] +) +def test_metrics_service_v2_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "logging.googleapis.com:443" + + +def test_metrics_service_v2_client_get_transport_class(): + transport = 
MetricsServiceV2Client.get_transport_class()
+    assert transport == transports.MetricsServiceV2GrpcTransport
+
+    transport = MetricsServiceV2Client.get_transport_class("grpc")
+    assert transport == transports.MetricsServiceV2GrpcTransport
+
+
+@pytest.mark.parametrize(
+    "client_class,transport_class,transport_name",
+    [
+        (MetricsServiceV2Client, transports.MetricsServiceV2GrpcTransport, "grpc"),
+        (
+            MetricsServiceV2AsyncClient,
+            transports.MetricsServiceV2GrpcAsyncIOTransport,
+            "grpc_asyncio",
+        ),
+    ],
+)
+@mock.patch.object(
+    MetricsServiceV2Client,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(MetricsServiceV2Client),
+)
+@mock.patch.object(
+    MetricsServiceV2AsyncClient,
+    "DEFAULT_ENDPOINT",
+    modify_default_endpoint(MetricsServiceV2AsyncClient),
+)
+def test_metrics_service_v2_client_client_options(
+    client_class, transport_class, transport_name
+):
+    # Check that if a transport instance is provided, no new transport is created.
+    with mock.patch.object(MetricsServiceV2Client, "get_transport_class") as gtc:
+        transport = transport_class(credentials=credentials.AnonymousCredentials())
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if a transport name (str) is provided, a new transport is created.
+    with mock.patch.object(MetricsServiceV2Client, "get_transport_class") as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, "__init__") as patched:
+        patched.return_value = None
+        client = client_class(client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            ssl_channel_credentials=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                ssl_channel_credentials=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                ssl_channel_credentials=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+    # unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError):
+            client = client_class()
+
+    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
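+    # (only "true" and "false" are accepted; anything else raises ValueError).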
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + MetricsServiceV2Client, + transports.MetricsServiceV2GrpcTransport, + "grpc", + "true", + ), + ( + MetricsServiceV2AsyncClient, + transports.MetricsServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + MetricsServiceV2Client, + transports.MetricsServiceV2GrpcTransport, + "grpc", + "false", + ), + ( + MetricsServiceV2AsyncClient, + transports.MetricsServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + MetricsServiceV2Client, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetricsServiceV2Client), +) +@mock.patch.object( + MetricsServiceV2AsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(MetricsServiceV2AsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metrics_service_v2_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
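+    # Here the certificate comes from google.auth's SslCredentials rather than from
+    # client_options, so the SslCredentials.is_mtls property drives the decision.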
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetricsServiceV2Client, transports.MetricsServiceV2GrpcTransport, "grpc"), + ( + MetricsServiceV2AsyncClient, + transports.MetricsServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metrics_service_v2_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (MetricsServiceV2Client, transports.MetricsServiceV2GrpcTransport, "grpc"), + ( + MetricsServiceV2AsyncClient, + transports.MetricsServiceV2GrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_metrics_service_v2_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
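+    # The path should be forwarded to the transport unchanged as credentials_file.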
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_metrics_service_v2_client_client_options_from_dict(): + with mock.patch( + "google.cloud.logging_v2.services.metrics_service_v2.transports.MetricsServiceV2GrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = MetricsServiceV2Client( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_list_log_metrics( + transport: str = "grpc", request_type=logging_metrics.ListLogMetricsRequest +): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.ListLogMetricsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_log_metrics(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.ListLogMetricsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListLogMetricsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_log_metrics_from_dict(): + test_list_log_metrics(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_log_metrics_async( + transport: str = "grpc_asyncio", request_type=logging_metrics.ListLogMetricsRequest +): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.ListLogMetricsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_log_metrics(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.ListLogMetricsRequest() + + # Establish that the response is the type that we expect. 
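+    # The async client wraps the response in an async pager.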
+ assert isinstance(response, pagers.ListLogMetricsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_log_metrics_async_from_dict(): + await test_list_log_metrics_async(request_type=dict) + + +def test_list_log_metrics_field_headers(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.ListLogMetricsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call: + call.return_value = logging_metrics.ListLogMetricsResponse() + + client.list_log_metrics(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_log_metrics_field_headers_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.ListLogMetricsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.ListLogMetricsResponse() + ) + + await client.list_log_metrics(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_log_metrics_flattened(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.ListLogMetricsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_log_metrics(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_log_metrics_flattened_error(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
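+    # The client refuses to merge flattened arguments into an explicit request.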
+    with pytest.raises(ValueError):
+        client.list_log_metrics(
+            logging_metrics.ListLogMetricsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_log_metrics_flattened_async():
+    client = MetricsServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = logging_metrics.ListLogMetricsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            logging_metrics.ListLogMetricsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_log_metrics(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_log_metrics_flattened_error_async():
+    client = MetricsServiceV2AsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_log_metrics(
+            logging_metrics.ListLogMetricsRequest(), parent="parent_value",
+        )
+
+
+def test_list_log_metrics_pager():
+    client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_metrics.ListLogMetricsResponse(metrics=[], next_page_token="def",),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(),], next_page_token="ghi",
+            ),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(), logging_metrics.LogMetric(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_log_metrics(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, logging_metrics.LogMetric) for i in results)
+
+
+def test_list_log_metrics_pages():
+    client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.list_log_metrics), "__call__") as call:
+        # Set the response to a series of pages.
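+        # Four pages (3, 0, 1, and 2 metrics); the trailing RuntimeError would
+        # surface if the pager ever requested a page past the last one.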
+        call.side_effect = (
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_metrics.ListLogMetricsResponse(metrics=[], next_page_token="def",),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(),], next_page_token="ghi",
+            ),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(), logging_metrics.LogMetric(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_log_metrics(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_log_metrics_async_pager():
+    client = MetricsServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_log_metrics), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_metrics.ListLogMetricsResponse(metrics=[], next_page_token="def",),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(),], next_page_token="ghi",
+            ),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(), logging_metrics.LogMetric(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_log_metrics(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, logging_metrics.LogMetric) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_log_metrics_async_pages():
+    client = MetricsServiceV2AsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_log_metrics), "__call__", new_callable=mock.AsyncMock
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                    logging_metrics.LogMetric(),
+                ],
+                next_page_token="abc",
+            ),
+            logging_metrics.ListLogMetricsResponse(metrics=[], next_page_token="def",),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(),], next_page_token="ghi",
+            ),
+            logging_metrics.ListLogMetricsResponse(
+                metrics=[logging_metrics.LogMetric(), logging_metrics.LogMetric(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_log_metrics(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_get_log_metric(
+    transport: str = "grpc", request_type=logging_metrics.GetLogMetricRequest
+):
+    client = MetricsServiceV2Client(
+        credentials=credentials.AnonymousCredentials(), transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
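+    # Patching the stub's __call__ keeps the test fully offline.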
+ with mock.patch.object(type(client.transport.get_log_metric), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric( + name="name_value", + description="description_value", + filter="filter_value", + value_extractor="value_extractor_value", + version=logging_metrics.LogMetric.ApiVersion.V1, + ) + + response = client.get_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.GetLogMetricRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_metrics.LogMetric) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.value_extractor == "value_extractor_value" + + assert response.version == logging_metrics.LogMetric.ApiVersion.V1 + + +def test_get_log_metric_from_dict(): + test_get_log_metric(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_log_metric_async( + transport: str = "grpc_asyncio", request_type=logging_metrics.GetLogMetricRequest +): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_log_metric), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric( + name="name_value", + description="description_value", + filter="filter_value", + value_extractor="value_extractor_value", + version=logging_metrics.LogMetric.ApiVersion.V1, + ) + ) + + response = await client.get_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.GetLogMetricRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_metrics.LogMetric) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.value_extractor == "value_extractor_value" + + assert response.version == logging_metrics.LogMetric.ApiVersion.V1 + + +@pytest.mark.asyncio +async def test_get_log_metric_async_from_dict(): + await test_get_log_metric_async(request_type=dict) + + +def test_get_log_metric_field_headers(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.GetLogMetricRequest() + request.metric_name = "metric_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_log_metric), "__call__") as call: + call.return_value = logging_metrics.LogMetric() + + client.get_log_metric(request) + + # Establish that the underlying gRPC stub method was called. 
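+        # Exactly one RPC should have been made, carrying the request unchanged.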
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "metric_name=metric_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_log_metric_field_headers_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.GetLogMetricRequest() + request.metric_name = "metric_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_log_metric), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric() + ) + + await client.get_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "metric_name=metric_name/value",) in kw["metadata"] + + +def test_get_log_metric_flattened(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_log_metric), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_log_metric(metric_name="metric_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].metric_name == "metric_name_value" + + +def test_get_log_metric_flattened_error(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_log_metric( + logging_metrics.GetLogMetricRequest(), metric_name="metric_name_value", + ) + + +@pytest.mark.asyncio +async def test_get_log_metric_flattened_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_log_metric), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_log_metric(metric_name="metric_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
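+        # The flattened kwarg should have been copied onto the request message.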
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].metric_name == "metric_name_value" + + +@pytest.mark.asyncio +async def test_get_log_metric_flattened_error_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_log_metric( + logging_metrics.GetLogMetricRequest(), metric_name="metric_name_value", + ) + + +def test_create_log_metric( + transport: str = "grpc", request_type=logging_metrics.CreateLogMetricRequest +): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric( + name="name_value", + description="description_value", + filter="filter_value", + value_extractor="value_extractor_value", + version=logging_metrics.LogMetric.ApiVersion.V1, + ) + + response = client.create_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.CreateLogMetricRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, logging_metrics.LogMetric) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.value_extractor == "value_extractor_value" + + assert response.version == logging_metrics.LogMetric.ApiVersion.V1 + + +def test_create_log_metric_from_dict(): + test_create_log_metric(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_log_metric_async( + transport: str = "grpc_asyncio", request_type=logging_metrics.CreateLogMetricRequest +): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric( + name="name_value", + description="description_value", + filter="filter_value", + value_extractor="value_extractor_value", + version=logging_metrics.LogMetric.ApiVersion.V1, + ) + ) + + response = await client.create_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.CreateLogMetricRequest() + + # Establish that the response is the type that we expect. 
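+    # Every LogMetric field set on the fake response should round-trip intact.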
+ assert isinstance(response, logging_metrics.LogMetric) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.value_extractor == "value_extractor_value" + + assert response.version == logging_metrics.LogMetric.ApiVersion.V1 + + +@pytest.mark.asyncio +async def test_create_log_metric_async_from_dict(): + await test_create_log_metric_async(request_type=dict) + + +def test_create_log_metric_field_headers(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.CreateLogMetricRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_log_metric), "__call__" + ) as call: + call.return_value = logging_metrics.LogMetric() + + client.create_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_log_metric_field_headers_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.CreateLogMetricRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_log_metric), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric() + ) + + await client.create_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_log_metric_flattened(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_log_metric( + parent="parent_value", metric=logging_metrics.LogMetric(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].metric == logging_metrics.LogMetric(name="name_value") + + +def test_create_log_metric_flattened_error(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
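+    # The same rule applies to create_log_metric's parent/metric arguments.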
+ with pytest.raises(ValueError): + client.create_log_metric( + logging_metrics.CreateLogMetricRequest(), + parent="parent_value", + metric=logging_metrics.LogMetric(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_log_metric_flattened_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_log_metric( + parent="parent_value", metric=logging_metrics.LogMetric(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].metric == logging_metrics.LogMetric(name="name_value") + + +@pytest.mark.asyncio +async def test_create_log_metric_flattened_error_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_log_metric( + logging_metrics.CreateLogMetricRequest(), + parent="parent_value", + metric=logging_metrics.LogMetric(name="name_value"), + ) + + +def test_update_log_metric( + transport: str = "grpc", request_type=logging_metrics.UpdateLogMetricRequest +): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric( + name="name_value", + description="description_value", + filter="filter_value", + value_extractor="value_extractor_value", + version=logging_metrics.LogMetric.ApiVersion.V1, + ) + + response = client.update_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.UpdateLogMetricRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, logging_metrics.LogMetric) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.value_extractor == "value_extractor_value" + + assert response.version == logging_metrics.LogMetric.ApiVersion.V1 + + +def test_update_log_metric_from_dict(): + test_update_log_metric(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_log_metric_async( + transport: str = "grpc_asyncio", request_type=logging_metrics.UpdateLogMetricRequest +): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric( + name="name_value", + description="description_value", + filter="filter_value", + value_extractor="value_extractor_value", + version=logging_metrics.LogMetric.ApiVersion.V1, + ) + ) + + response = await client.update_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.UpdateLogMetricRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, logging_metrics.LogMetric) + + assert response.name == "name_value" + + assert response.description == "description_value" + + assert response.filter == "filter_value" + + assert response.value_extractor == "value_extractor_value" + + assert response.version == logging_metrics.LogMetric.ApiVersion.V1 + + +@pytest.mark.asyncio +async def test_update_log_metric_async_from_dict(): + await test_update_log_metric_async(request_type=dict) + + +def test_update_log_metric_field_headers(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.UpdateLogMetricRequest() + request.metric_name = "metric_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_log_metric), "__call__" + ) as call: + call.return_value = logging_metrics.LogMetric() + + client.update_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "metric_name=metric_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_log_metric_field_headers_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.UpdateLogMetricRequest() + request.metric_name = "metric_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_log_metric), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric() + ) + + await client.update_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "metric_name=metric_name/value",) in kw["metadata"] + + +def test_update_log_metric_flattened(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_log_metric( + metric_name="metric_name_value", + metric=logging_metrics.LogMetric(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].metric_name == "metric_name_value" + + assert args[0].metric == logging_metrics.LogMetric(name="name_value") + + +def test_update_log_metric_flattened_error(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_log_metric( + logging_metrics.UpdateLogMetricRequest(), + metric_name="metric_name_value", + metric=logging_metrics.LogMetric(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_update_log_metric_flattened_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = logging_metrics.LogMetric() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + logging_metrics.LogMetric() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_log_metric( + metric_name="metric_name_value", + metric=logging_metrics.LogMetric(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].metric_name == "metric_name_value" + + assert args[0].metric == logging_metrics.LogMetric(name="name_value") + + +@pytest.mark.asyncio +async def test_update_log_metric_flattened_error_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_log_metric( + logging_metrics.UpdateLogMetricRequest(), + metric_name="metric_name_value", + metric=logging_metrics.LogMetric(name="name_value"), + ) + + +def test_delete_log_metric( + transport: str = "grpc", request_type=logging_metrics.DeleteLogMetricRequest +): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.DeleteLogMetricRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_log_metric_from_dict(): + test_delete_log_metric(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_log_metric_async( + transport: str = "grpc_asyncio", request_type=logging_metrics.DeleteLogMetricRequest +): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == logging_metrics.DeleteLogMetricRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_log_metric_async_from_dict(): + await test_delete_log_metric_async(request_type=dict) + + +def test_delete_log_metric_field_headers(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.DeleteLogMetricRequest() + request.metric_name = "metric_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_log_metric), "__call__" + ) as call: + call.return_value = None + + client.delete_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
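+    # The metric name travels in the x-goog-request-params routing header.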
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "metric_name=metric_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_log_metric_field_headers_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = logging_metrics.DeleteLogMetricRequest() + request.metric_name = "metric_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_log_metric), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_log_metric(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "metric_name=metric_name/value",) in kw["metadata"] + + +def test_delete_log_metric_flattened(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_log_metric(metric_name="metric_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].metric_name == "metric_name_value" + + +def test_delete_log_metric_flattened_error(): + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_log_metric( + logging_metrics.DeleteLogMetricRequest(), metric_name="metric_name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_log_metric_flattened_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_log_metric), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_log_metric(metric_name="metric_name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].metric_name == "metric_name_value" + + +@pytest.mark.asyncio +async def test_delete_log_metric_flattened_error_async(): + client = MetricsServiceV2AsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_log_metric( + logging_metrics.DeleteLogMetricRequest(), metric_name="metric_name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MetricsServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MetricsServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetricsServiceV2Client( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MetricsServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetricsServiceV2Client( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetricsServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = MetricsServiceV2Client(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetricsServiceV2GrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MetricsServiceV2GrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetricsServiceV2GrpcTransport, + transports.MetricsServiceV2GrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MetricsServiceV2Client(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.MetricsServiceV2GrpcTransport,) + + +def test_metrics_service_v2_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.MetricsServiceV2Transport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_metrics_service_v2_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.logging_v2.services.metrics_service_v2.transports.MetricsServiceV2Transport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.MetricsServiceV2Transport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
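+    # Concrete transports (gRPC, gRPC AsyncIO) are expected to override them.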
+ methods = ( + "list_log_metrics", + "get_log_metric", + "create_log_metric", + "update_log_metric", + "delete_log_metric", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_metrics_service_v2_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.logging_v2.services.metrics_service_v2.transports.MetricsServiceV2Transport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.MetricsServiceV2Transport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + quota_project_id="octopus", + ) + + +def test_metrics_service_v2_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.logging_v2.services.metrics_service_v2.transports.MetricsServiceV2Transport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.MetricsServiceV2Transport() + adc.assert_called_once() + + +def test_metrics_service_v2_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + MetricsServiceV2Client() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + quota_project_id=None, + ) + + +def test_metrics_service_v2_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
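+ # google.auth.default() is mocked below, so no real credentials are needed.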
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.MetricsServiceV2GrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + quota_project_id="octopus", + ) + + +def test_metrics_service_v2_host_no_port(): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="logging.googleapis.com" + ), + ) + assert client.transport._host == "logging.googleapis.com:443" + + +def test_metrics_service_v2_host_with_port(): + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="logging.googleapis.com:8000" + ), + ) + assert client.transport._host == "logging.googleapis.com:8000" + + +def test_metrics_service_v2_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.MetricsServiceV2GrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_metrics_service_v2_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that channel is used if provided. + transport = transports.MetricsServiceV2GrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetricsServiceV2GrpcTransport, + transports.MetricsServiceV2GrpcAsyncIOTransport, + ], +) +def test_metrics_service_v2_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + ssl_credentials=mock_ssl_cred, + 
quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MetricsServiceV2GrpcTransport, + transports.MetricsServiceV2GrpcAsyncIOTransport, + ], +) +def test_metrics_service_v2_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_log_metric_path(): + project = "squid" + metric = "clam" + + expected = "projects/{project}/metrics/{metric}".format( + project=project, metric=metric, + ) + actual = MetricsServiceV2Client.log_metric_path(project, metric) + assert expected == actual + + +def test_parse_log_metric_path(): + expected = { + "project": "whelk", + "metric": "octopus", + } + path = MetricsServiceV2Client.log_metric_path(**expected) + + # Check that the path construction is reversible. + actual = MetricsServiceV2Client.parse_log_metric_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = MetricsServiceV2Client.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = MetricsServiceV2Client.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MetricsServiceV2Client.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + + expected = "folders/{folder}".format(folder=folder,) + actual = MetricsServiceV2Client.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = MetricsServiceV2Client.common_folder_path(**expected) + + # Check that the path construction is reversible. 
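+ # (parse_common_folder_path() should invert common_folder_path().)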
+ actual = MetricsServiceV2Client.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + + expected = "organizations/{organization}".format(organization=organization,) + actual = MetricsServiceV2Client.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = MetricsServiceV2Client.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = MetricsServiceV2Client.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = MetricsServiceV2Client.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = MetricsServiceV2Client.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MetricsServiceV2Client.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = MetricsServiceV2Client.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = MetricsServiceV2Client.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = MetricsServiceV2Client.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.MetricsServiceV2Transport, "_prep_wrapped_messages" + ) as prep: + client = MetricsServiceV2Client( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.MetricsServiceV2Transport, "_prep_wrapped_messages" + ) as prep: + transport_class = MetricsServiceV2Client.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) From bff65b0d2f11fd6572eab80730ad261604624a7d Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Tue, 10 Nov 2020 16:46:47 +0000 Subject: [PATCH 51/58] fix: fix _gapic, unit tests --- google/cloud/logging/__init__.py | 147 +---- google/cloud/logging_v2/__init__.py | 60 +- google/cloud/logging_v2/_gapic.py | 567 ++++++++---------- google/cloud/logging_v2/client.py | 1 + setup.py | 3 +- synth.py | 9 + .../logging_v2/test_logging_service_v2.py | 14 +- tests/unit/test__gapic.py | 453 +++++++------- 8 files changed, 583 insertions(+), 671 deletions(-) diff --git a/google/cloud/logging/__init__.py b/google/cloud/logging/__init__.py index 0953416af..4481cea11 100644 --- a/google/cloud/logging/__init__.py +++ b/google/cloud/logging/__init__.py @@ -15,122 +15,39 @@ # limitations under the License. 
# -from google.cloud.logging_v2.services.config_service_v2.async_client import ( - ConfigServiceV2AsyncClient, -) -from google.cloud.logging_v2.services.config_service_v2.client import ( - ConfigServiceV2Client, -) -from google.cloud.logging_v2.services.logging_service_v2.async_client import ( - LoggingServiceV2AsyncClient, -) -from google.cloud.logging_v2.services.logging_service_v2.client import ( - LoggingServiceV2Client, -) -from google.cloud.logging_v2.services.metrics_service_v2.async_client import ( - MetricsServiceV2AsyncClient, -) -from google.cloud.logging_v2.services.metrics_service_v2.client import ( - MetricsServiceV2Client, -) -from google.cloud.logging_v2.types.log_entry import LogEntry -from google.cloud.logging_v2.types.log_entry import LogEntryOperation -from google.cloud.logging_v2.types.log_entry import LogEntrySourceLocation -from google.cloud.logging_v2.types.logging import DeleteLogRequest -from google.cloud.logging_v2.types.logging import ListLogEntriesRequest -from google.cloud.logging_v2.types.logging import ListLogEntriesResponse -from google.cloud.logging_v2.types.logging import ListLogsRequest -from google.cloud.logging_v2.types.logging import ListLogsResponse -from google.cloud.logging_v2.types.logging import ( - ListMonitoredResourceDescriptorsRequest, -) -from google.cloud.logging_v2.types.logging import ( - ListMonitoredResourceDescriptorsResponse, -) -from google.cloud.logging_v2.types.logging import WriteLogEntriesPartialErrors -from google.cloud.logging_v2.types.logging import WriteLogEntriesRequest -from google.cloud.logging_v2.types.logging import WriteLogEntriesResponse -from google.cloud.logging_v2.types.logging_config import BigQueryOptions -from google.cloud.logging_v2.types.logging_config import CmekSettings -from google.cloud.logging_v2.types.logging_config import CreateExclusionRequest -from google.cloud.logging_v2.types.logging_config import CreateSinkRequest -from google.cloud.logging_v2.types.logging_config import DeleteExclusionRequest -from google.cloud.logging_v2.types.logging_config import DeleteSinkRequest -from google.cloud.logging_v2.types.logging_config import GetBucketRequest -from google.cloud.logging_v2.types.logging_config import GetCmekSettingsRequest -from google.cloud.logging_v2.types.logging_config import GetExclusionRequest -from google.cloud.logging_v2.types.logging_config import GetSinkRequest -from google.cloud.logging_v2.types.logging_config import LifecycleState -from google.cloud.logging_v2.types.logging_config import ListBucketsRequest -from google.cloud.logging_v2.types.logging_config import ListBucketsResponse -from google.cloud.logging_v2.types.logging_config import ListExclusionsRequest -from google.cloud.logging_v2.types.logging_config import ListExclusionsResponse -from google.cloud.logging_v2.types.logging_config import ListSinksRequest -from google.cloud.logging_v2.types.logging_config import ListSinksResponse -from google.cloud.logging_v2.types.logging_config import LogBucket -from google.cloud.logging_v2.types.logging_config import LogExclusion -from google.cloud.logging_v2.types.logging_config import LogSink -from google.cloud.logging_v2.types.logging_config import UpdateBucketRequest -from google.cloud.logging_v2.types.logging_config import UpdateCmekSettingsRequest -from google.cloud.logging_v2.types.logging_config import UpdateExclusionRequest -from google.cloud.logging_v2.types.logging_config import UpdateSinkRequest -from google.cloud.logging_v2.types.logging_metrics import CreateLogMetricRequest -from 
google.cloud.logging_v2.types.logging_metrics import DeleteLogMetricRequest -from google.cloud.logging_v2.types.logging_metrics import GetLogMetricRequest -from google.cloud.logging_v2.types.logging_metrics import ListLogMetricsRequest -from google.cloud.logging_v2.types.logging_metrics import ListLogMetricsResponse -from google.cloud.logging_v2.types.logging_metrics import LogMetric -from google.cloud.logging_v2.types.logging_metrics import UpdateLogMetricRequest +from google.cloud.logging_v2 import __version__ +from google.cloud.logging_v2 import ASCENDING +from google.cloud.logging_v2 import DESCENDING + +from google.cloud.logging_v2.client import Client +from google.cloud.logging_v2.entries import logger_name_from_path +from google.cloud.logging_v2.entries import LogEntry +from google.cloud.logging_v2.entries import TextEntry +from google.cloud.logging_v2.entries import StructEntry +from google.cloud.logging_v2.entries import ProtobufEntry +from google.cloud.logging_v2 import handlers +from google.cloud.logging_v2.logger import Logger +from google.cloud.logging_v2.logger import Batch +from google.cloud.logging_v2.metric import Metric +from google.cloud.logging_v2.resource import Resource +from google.cloud.logging_v2.sink import Sink +from google.cloud.logging_v2 import types __all__ = ( - "BigQueryOptions", - "CmekSettings", - "ConfigServiceV2AsyncClient", - "ConfigServiceV2Client", - "CreateExclusionRequest", - "CreateLogMetricRequest", - "CreateSinkRequest", - "DeleteExclusionRequest", - "DeleteLogMetricRequest", - "DeleteLogRequest", - "DeleteSinkRequest", - "GetBucketRequest", - "GetCmekSettingsRequest", - "GetExclusionRequest", - "GetLogMetricRequest", - "GetSinkRequest", - "LifecycleState", - "ListBucketsRequest", - "ListBucketsResponse", - "ListExclusionsRequest", - "ListExclusionsResponse", - "ListLogEntriesRequest", - "ListLogEntriesResponse", - "ListLogMetricsRequest", - "ListLogMetricsResponse", - "ListLogsRequest", - "ListLogsResponse", - "ListMonitoredResourceDescriptorsRequest", - "ListMonitoredResourceDescriptorsResponse", - "ListSinksRequest", - "ListSinksResponse", - "LogBucket", + "__version__", + "ASCENDING", + "Batch", + "Client", + "DESCENDING", + "handlers", + "logger_name_from_path", + "Logger", "LogEntry", - "LogEntryOperation", - "LogEntrySourceLocation", - "LogExclusion", - "LogMetric", - "LogSink", - "LoggingServiceV2AsyncClient", - "LoggingServiceV2Client", - "MetricsServiceV2AsyncClient", - "MetricsServiceV2Client", - "UpdateBucketRequest", - "UpdateCmekSettingsRequest", - "UpdateExclusionRequest", - "UpdateLogMetricRequest", - "UpdateSinkRequest", - "WriteLogEntriesPartialErrors", - "WriteLogEntriesRequest", - "WriteLogEntriesResponse", + "Metric", + "ProtobufEntry", + "Resource", + "Sink", + "StructEntry", + "TextEntry", + "types", ) diff --git a/google/cloud/logging_v2/__init__.py b/google/cloud/logging_v2/__init__.py index 964c99572..98954d550 100644 --- a/google/cloud/logging_v2/__init__.py +++ b/google/cloud/logging_v2/__init__.py @@ -14,32 +14,50 @@ from __future__ import absolute_import +import pkg_resources + +try: + __version__ = pkg_resources.get_distribution("google-cloud-logging").version +except pkg_resources.DistributionNotFound: + __version__ = None + + +from google.cloud.logging_v2.client import Client +from google.cloud.logging_v2.entries import logger_name_from_path +from google.cloud.logging_v2.entries import LogEntry +from google.cloud.logging_v2.entries import TextEntry +from google.cloud.logging_v2.entries import StructEntry +from 
google.cloud.logging_v2.entries import ProtobufEntry
+from google.cloud.logging_v2 import handlers
+from google.cloud.logging_v2.logger import Logger
+from google.cloud.logging_v2.logger import Batch
+from google.cloud.logging_v2.metric import Metric
+from google.cloud.logging_v2.resource import Resource
+from google.cloud.logging_v2.sink import Sink
 from google.cloud.logging_v2 import types
-from google.cloud.logging_v2.gapic import config_service_v2_client
-from google.cloud.logging_v2.gapic import enums
-from google.cloud.logging_v2.gapic import logging_service_v2_client
-from google.cloud.logging_v2.gapic import metrics_service_v2_client
-class LoggingServiceV2Client(logging_service_v2_client.LoggingServiceV2Client):
- __doc__ = logging_service_v2_client.LoggingServiceV2Client.__doc__
- enums = enums
-
-
-class ConfigServiceV2Client(config_service_v2_client.ConfigServiceV2Client):
- __doc__ = config_service_v2_client.ConfigServiceV2Client.__doc__
- enums = enums
-
-
-class MetricsServiceV2Client(metrics_service_v2_client.MetricsServiceV2Client):
- __doc__ = metrics_service_v2_client.MetricsServiceV2Client.__doc__
- enums = enums
+ASCENDING = "timestamp asc"
+"""Query string to order by ascending timestamps."""
+DESCENDING = "timestamp desc"
+"""Query string to order by descending timestamps."""
 __all__ = (
- "enums",
+ "__version__",
+ "ASCENDING",
+ "Batch",
+ "Client",
+ "DESCENDING",
+ "handlers",
+ "logger_name_from_path",
+ "Logger",
+ "LogEntry",
+ "Metric",
+ "ProtobufEntry",
+ "Resource",
+ "Sink",
+ "StructEntry",
+ "TextEntry",
 "types",
- "LoggingServiceV2Client",
- "ConfigServiceV2Client",
- "MetricsServiceV2Client",
 )
diff --git a/google/cloud/logging_v2/_gapic.py b/google/cloud/logging_v2/_gapic.py
index 026db660d..01facccf9 100644
--- a/google/cloud/logging_v2/_gapic.py
+++ b/google/cloud/logging_v2/_gapic.py
@@ -16,22 +16,31 @@
 client."""
 import functools
+import json
+import types
+
+from typing import Iterable
+
+from google.api import monitored_resource_pb2
+from google.cloud.logging_v2.services.config_service_v2 import ConfigServiceV2Client
+from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client
+from google.cloud.logging_v2.services.metrics_service_v2 import MetricsServiceV2Client
+from google.cloud.logging_v2.types import CreateSinkRequest
+from google.cloud.logging_v2.types import UpdateSinkRequest
+from google.cloud.logging_v2.types import ListSinksRequest
+from google.cloud.logging_v2.types import ListLogMetricsRequest
+from google.cloud.logging_v2.types import ListLogEntriesRequest
+from google.cloud.logging_v2.types import WriteLogEntriesRequest
+from google.cloud.logging_v2.types import LogSink
+from google.cloud.logging_v2.types import LogMetric
+from google.cloud.logging_v2.types import LogEntry as LogEntryPB
-from google.cloud.logging_v2.gapic.config_service_v2_client import ConfigServiceV2Client
-from google.cloud.logging_v2.gapic.logging_service_v2_client import (
- LoggingServiceV2Client,
-)
-from google.cloud.logging_v2.gapic.metrics_service_v2_client import (
- MetricsServiceV2Client,
-)
-from google.cloud.logging_v2.proto.logging_config_pb2 import LogSink
-from google.cloud.logging_v2.proto.logging_metrics_pb2 import LogMetric
-from google.cloud.logging_v2.proto.log_entry_pb2 import LogEntry
 from google.protobuf.json_format import MessageToDict
 from google.protobuf.json_format import ParseDict
 from google.cloud.logging_v2._helpers import entry_from_resource
 from google.cloud.logging_v2.sink import Sink
+from 
google.cloud.logging_v2.entries import LogEntry from google.cloud.logging_v2.metric import Metric @@ -51,7 +60,7 @@ def __init__(self, gapic_api, client): self._client = client def list_entries( - self, projects, filter_="", order_by="", page_size=0, page_token=None + self, projects, *, filter_="", order_by="", page_size=0, page_token=None ): """Return a page of log entry resources. @@ -81,62 +90,65 @@ def list_entries( :returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry` accessible to the current API. """ - page_iter = self._gapic_api.list_log_entries( - [], - project_ids=projects, - filter_=filter_, + # full resource names are expected by the API + projects = [f"projects/{p}" for p in projects] + request = ListLogEntriesRequest( + resource_names=projects, + filter=filter_, order_by=order_by, page_size=page_size, + page_token=page_token ) - page_iter.client = self._client - page_iter.next_page_token = page_token + + response = self._gapic_api.list_log_entries(request=request) + page_iter = iter(response) # We attach a mutable loggers dictionary so that as Logger # objects are created by entry_from_resource, they can be # re-used by other log entries from the same logger. loggers = {} - page_iter.item_to_value = functools.partial(_item_to_entry, loggers=loggers) - return page_iter - - def write_entries(self, entries, logger_name=None, resource=None, labels=None): - """API call: log an entry resource via a POST request - - :type entries: sequence of mapping - :param entries: the log entry resources to log. - :type logger_name: str - :param logger_name: name of default logger to which to log the entries; - individual entries may override. - - :type resource: mapping - :param resource: default resource to associate with entries; - individual entries may override. - - :type labels: mapping - :param labels: default labels to associate with entries; - individual entries may override. + def log_entries_pager(page_iter): + for page in page_iter: + log_entry_dict = _parse_log_entry(LogEntryPB.pb(page)) + yield entry_from_resource(log_entry_dict, self._client, loggers) + + return log_entries_pager(page_iter) + + def write_entries(self, entries, *, logger_name=None, resource=None, labels=None): + """Log an entry resource via a POST request + + Args: + entries (Sequence[Mapping[str, ...]]): sequence of mappings representing + the log entry resources to log. + logger_name (Optional[str]): name of default logger to which to log the entries; + individual entries may override. + resource(Optional[Mapping[str, ...]]): default resource to associate with entries; + individual entries may override. + labels (Optional[Mapping[str, ...]]): default labels to associate with entries; + individual entries may override. """ partial_success = False - entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries] - self._gapic_api.write_log_entries( - entry_pbs, + log_entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries] + + request = WriteLogEntriesRequest( log_name=logger_name, resource=resource, labels=labels, + entries=log_entry_pbs, partial_success=partial_success, ) + self._gapic_api.write_log_entries(request=request) def logger_delete(self, project, logger_name): - """API call: delete all entries in a logger via a DELETE request + """Delete all entries in a logger. 
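+
+ Example (illustrative sketch only; ``api`` is an ``_LoggingAPI``
+ instance and the names are hypothetical):
+
+ api.logger_delete("my-project", "my-log")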
- :type project: str
- :param project: ID of project containing the log entries to delete
-
- :type logger_name: str
- :param logger_name: name of logger containing the log entries to delete
+ Args:
+ project (str): ID of project containing the log entries to delete
+ logger_name (str): name of logger containing the log entries to delete
 """
- path = "projects/%s/logs/%s" % (project, logger_name)
- self._gapic_api.delete_log(path)
+ path = f"projects/{project}/logs/{logger_name}"
+ self._gapic_api.delete_log(log_name=path)

 class _SinksAPI(object):
@@ -157,135 +169,130 @@ def __init__(self, gapic_api, client):
 def list_sinks(self, project, page_size=0, page_token=None):
 """List sinks for the project associated with this client.
- :type project: str
- :param project: ID of the project whose sinks are to be listed.
-
- :type page_size: int
- :param page_size: maximum number of sinks to return, If not passed,
- defaults to a value set by the API.
+ Args:
+ project (str): ID of the project whose sinks are to be listed.
+ page_size (int): Maximum number of sinks to return. If not passed,
+ defaults to a value set by the API.
+ page_token (str): Opaque marker for the next "page" of sinks. If not
+ passed, the API will return the first page of
+ sinks.
+
+ Returns:
+ Iterable[logging_v2.Sink]: Iterable of sinks.
+ """
+ path = f"projects/{project}"
+ request = ListSinksRequest(
+ parent=path, page_size=page_size, page_token=page_token
+ )
+ response = self._gapic_api.list_sinks(request)
+ page_iter = iter(response)
- :type page_token: str
- :param page_token: opaque marker for the next "page" of sinks. If not
- passed, the API will return the first page of
- sinks.
+ def sinks_pager(page_iter):
+ for page in page_iter:
+ # Convert the GAPIC sink type into the handwritten `Sink` type
+ yield Sink.from_api_repr(LogSink.to_dict(page), client=self._client)
- :rtype: tuple, (list, str)
- :returns: list of mappings, plus a "next page token" string:
- if not None, indicates that more sinks can be retrieved
- with another call (pass that value as ``page_token``).
- """
- path = "projects/%s" % (project,)
- page_iter = self._gapic_api.list_sinks(path, page_size=page_size)
- page_iter.client = self._client
- page_iter.next_page_token = page_token
- page_iter.item_to_value = _item_to_sink
- return page_iter
+ return sinks_pager(page_iter)

 def sink_create(
- self, project, sink_name, filter_, destination, unique_writer_identity=False
+ self, project, sink_name, filter_, destination, *, unique_writer_identity=False
 ):
- """API call: create a sink resource.
+ """Create a sink resource.

 See
 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create

- :type project: str
- :param project: ID of the project in which to create the sink.
-
- :type sink_name: str
- :param sink_name: the name of the sink
-
- :type filter_: str
- :param filter_: the advanced logs filter expression defining the
- entries exported by the sink.
-
- :type destination: str
- :param destination: destination URI for the entries exported by
- the sink.
-
- :type unique_writer_identity: bool
- :param unique_writer_identity: (Optional) determines the kind of
- IAM identity returned as
- writer_identity in the new sink.
-
- :rtype: dict
- :returns: The sink resource returned from the API (converted from a
- protobuf to a dictionary).
+ Args:
+ project (str): ID of the project in which to create the sink.
+ sink_name (str): The name of the sink. 
+ filter_ (str): The advanced logs filter expression defining the
+ entries exported by the sink.
+ destination (str): Destination URI for the entries exported by
+ the sink.
+ unique_writer_identity (Optional[bool]): determines the kind of
+ IAM identity returned as writer_identity in the new sink.
+
+ Returns:
+ dict: The sink resource returned from the API (converted from a
+ protobuf to a dictionary).
 """
- parent = "projects/%s" % (project,)
+ parent = f"projects/{project}"
 sink_pb = LogSink(name=sink_name, filter=filter_, destination=destination)
- created_pb = self._gapic_api.create_sink(
- parent, sink_pb, unique_writer_identity=unique_writer_identity
+ request = CreateSinkRequest(
+ parent=parent, sink=sink_pb, unique_writer_identity=unique_writer_identity
+ )
+ created_pb = self._gapic_api.create_sink(request=request)
+ return MessageToDict(
+ LogSink.pb(created_pb),
+ preserving_proto_field_name=False,
+ including_default_value_fields=False,
 )
- return MessageToDict(created_pb)

 def sink_get(self, project, sink_name):
- """API call: retrieve a sink resource.
-
- :type project: str
- :param project: ID of the project containing the sink.
+ """Retrieve a sink resource.
- :type sink_name: str
- :param sink_name: the name of the sink
+ Args:
+ project (str): ID of the project containing the sink.
+ sink_name (str): the name of the sink
- :rtype: dict
- :returns: The sink object returned from the API (converted from a
+ Returns:
+ dict: The sink object returned from the API (converted from a
 protobuf to a dictionary).
 """
- path = "projects/%s/sinks/%s" % (project, sink_name)
- sink_pb = self._gapic_api.get_sink(path)
+ path = f"projects/{project}/sinks/{sink_name}"
+ sink_pb = self._gapic_api.get_sink(sink_name=path)
 # NOTE: LogSink message type does not have an ``Any`` field
 # so `MessageToDict`` can safely be used.
- return MessageToDict(sink_pb)
+ return MessageToDict(
+ LogSink.pb(sink_pb),
+ preserving_proto_field_name=False,
+ including_default_value_fields=False,
+ )

 def sink_update(
- self, project, sink_name, filter_, destination, unique_writer_identity=False
+ self, project, sink_name, filter_, destination, *, unique_writer_identity=False
 ):
- """API call: update a sink resource.
-
- :type project: str
- :param project: ID of the project containing the sink.
-
- :type sink_name: str
- :param sink_name: the name of the sink
-
- :type filter_: str
- :param filter_: the advanced logs filter expression defining the
- entries exported by the sink.
-
- :type destination: str
- :param destination: destination URI for the entries exported by
- the sink.
-
- :type unique_writer_identity: bool
- :param unique_writer_identity: (Optional) determines the kind of
- IAM identity returned as
- writer_identity in the new sink.
-
- :rtype: dict
- :returns: The sink resource returned from the API (converted from a
+ """Update a sink resource.
+
+ Args:
+ project (str): ID of the project containing the sink.
+ sink_name (str): The name of the sink
+ filter_ (str): The advanced logs filter expression defining the
+ entries exported by the sink.
+ destination (str): destination URI for the entries exported by
+ the sink.
+ unique_writer_identity (Optional[bool]): determines the kind of
+ IAM identity returned as writer_identity in the new sink.
+
+ Returns:
+ dict: The sink resource returned from the API (converted from a
 protobuf to a dictionary). 
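+
+ Example (illustrative sketch only; ``api`` is a ``_SinksAPI``
+ instance and all names are hypothetical):
+
+ api.sink_update(
+ "my-project", "my-sink", "severity>=ERROR",
+ "storage.googleapis.com/my-bucket",
+ )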
""" - path = "projects/%s/sinks/%s" % (project, sink_name) + path = f"projects/{project}/sinks/{sink_name}" sink_pb = LogSink(name=path, filter=filter_, destination=destination) - sink_pb = self._gapic_api.update_sink( - path, sink_pb, unique_writer_identity=unique_writer_identity + + request = UpdateSinkRequest( + sink_name=path, sink=sink_pb, unique_writer_identity=unique_writer_identity ) + sink_pb = self._gapic_api.update_sink(request=request) # NOTE: LogSink message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. - return MessageToDict(sink_pb) + return MessageToDict( + LogSink.pb(sink_pb), + preserving_proto_field_name=False, + including_default_value_fields=False, + ) def sink_delete(self, project, sink_name): - """API call: delete a sink resource. - - :type project: str - :param project: ID of the project containing the sink. + """Delete a sink resource. - :type sink_name: str - :param sink_name: the name of the sink + Args: + project (str): ID of the project containing the sink. + sink_name (str): The name of the sink """ - path = "projects/%s/sinks/%s" % (project, sink_name) - self._gapic_api.delete_sink(path) + path = f"projects/{project}/sinks/{sink_name}" + self._gapic_api.delete_sink(sink_name=path) class _MetricsAPI(object): @@ -307,110 +314,105 @@ def __init__(self, gapic_api, client): def list_metrics(self, project, page_size=0, page_token=None): """List metrics for the project associated with this client. - :type project: str - :param project: ID of the project whose metrics are to be listed. - - :type page_size: int - :param page_size: maximum number of metrics to return, If not passed, - defaults to a value set by the API. + Args: + project (str): ID of the project whose metrics are to be listed. + page_size (int): Maximum number of metrics to return, If not passed, + defaults to a value set by the API. + page_token (str): Opaque marker for the next "page" of metrics. If not + passed, the API will return the first page of + sinks. + + Returns: + Iterable[logging_v2.Metric]: Iterable of metrics. + """ + path = f"projects/{project}" + request = ListLogMetricsRequest( + parent=path, page_size=page_size, page_token=page_token, + ) + response = self._gapic_api.list_log_metrics(request=request) + page_iter = iter(response) - :type page_token: str - :param page_token: opaque marker for the next "page" of metrics. If not - passed, the API will return the first page of - metrics. + def metrics_pager(page_iter): + for page in page_iter: + # Convert GAPIC metrics type into handwritten `Metric` type + yield Metric.from_api_repr(LogMetric.to_dict(page), client=self._client) - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of - :class:`~google.cloud.logging.metric.Metric` - accessible to the current API. - """ - path = "projects/%s" % (project,) - page_iter = self._gapic_api.list_log_metrics(path, page_size=page_size) - page_iter.client = self._client - page_iter.next_page_token = page_token - page_iter.item_to_value = _item_to_metric - return page_iter + return metrics_pager(page_iter) def metric_create(self, project, metric_name, filter_, description): - """API call: create a metric resource. + """Create a metric resource. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create - :type project: str - :param project: ID of the project in which to create the metric. 
- - :type metric_name: str - :param metric_name: the name of the metric - - :type filter_: str - :param filter_: the advanced logs filter expression defining the - entries exported by the metric. - - :type description: str - :param description: description of the metric. + Args: + project (str): ID of the project in which to create the metric. + metric_name (str): The name of the metric + filter_ (str): The advanced logs filter expression defining the + entries exported by the metric. + description (str): description of the metric. """ - parent = "projects/%s" % (project,) + parent = f"projects/{project}" metric_pb = LogMetric(name=metric_name, filter=filter_, description=description) - self._gapic_api.create_log_metric(parent, metric_pb) + self._gapic_api.create_log_metric(parent=parent, metric=metric_pb) def metric_get(self, project, metric_name): """API call: retrieve a metric resource. - :type project: str - :param project: ID of the project containing the metric. + Args: + project (str): ID of the project containing the metric. + metric_name (str): The name of the metric - :type metric_name: str - :param metric_name: the name of the metric - - :rtype: dict - :returns: The metric object returned from the API (converted from a + Returns: + dict: The metric object returned from the API (converted from a protobuf to a dictionary). """ - path = "projects/%s/metrics/%s" % (project, metric_name) - metric_pb = self._gapic_api.get_log_metric(path) + path = f"projects/{project}/metrics/{metric_name}" + metric_pb = self._gapic_api.get_log_metric(metric_name=path) # NOTE: LogMetric message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. - return MessageToDict(metric_pb) + return MessageToDict( + LogMetric.pb(metric_pb), + preserving_proto_field_name=False, + including_default_value_fields=False, + ) def metric_update(self, project, metric_name, filter_, description): - """API call: update a metric resource. - - :type project: str - :param project: ID of the project containing the metric. - - :type metric_name: str - :param metric_name: the name of the metric - - :type filter_: str - :param filter_: the advanced logs filter expression defining the - entries exported by the metric. + """Update a metric resource. - :type description: str - :param description: description of the metric. + Args: + project (str): ID of the project containing the metric. + metric_name (str): the name of the metric + filter_ (str): the advanced logs filter expression defining the + entries exported by the metric. + description (str): description of the metric. - :rtype: dict - :returns: The metric object returned from the API (converted from a + Returns: + The metric object returned from the API (converted from a protobuf to a dictionary). """ - path = "projects/%s/metrics/%s" % (project, metric_name) + path = f"projects/{project}/metrics/{metric_name}" metric_pb = LogMetric(name=path, filter=filter_, description=description) - metric_pb = self._gapic_api.update_log_metric(path, metric_pb) + metric_pb = self._gapic_api.update_log_metric( + metric_name=path, metric=metric_pb + ) # NOTE: LogMetric message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. - return MessageToDict(metric_pb) + return MessageToDict( + LogMetric.pb(metric_pb), + preserving_proto_field_name=False, + including_default_value_fields=False, + ) def metric_delete(self, project, metric_name): - """API call: delete a metric resource. + """Delete a metric resource. 
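+
+ Example (illustrative sketch only; ``api`` is a ``_MetricsAPI``
+ instance and the names are hypothetical):
+
+ api.metric_delete("my-project", "my-metric")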
- :type project: str - :param project: ID of the project containing the metric. - - :type metric_name: str - :param metric_name: the name of the metric + Args: + project (str): ID of the project containing the metric. + metric_name (str): The name of the metric """ - path = "projects/%s/metrics/%s" % (project, metric_name) - self._gapic_api.delete_log_metric(path) + path = f"projects/{project}/metrics/{metric_name}" + self._gapic_api.delete_log_metric(metric_name=path) def _parse_log_entry(entry_pb): @@ -421,21 +423,27 @@ def _parse_log_entry(entry_pb): ``google.protobuf`` registry. To help with parsing unregistered types, this function will remove ``proto_payload`` before parsing. - :type entry_pb: :class:`.log_entry_pb2.LogEntry` - :param entry_pb: Log entry protobuf. + Args: + entry_pb (LogEntry): Log entry protobuf. - :rtype: dict - :returns: The parsed log entry. The ``protoPayload`` key may contain + Returns: + dict: The parsed log entry. The ``protoPayload`` key may contain the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if it could not be parsed. """ try: - return MessageToDict(entry_pb) + return MessageToDict(entry_pb, + preserving_proto_field_name=False, + including_default_value_fields=False, + ) except TypeError: if entry_pb.HasField("proto_payload"): proto_payload = entry_pb.proto_payload entry_pb.ClearField("proto_payload") - entry_mapping = MessageToDict(entry_pb) + entry_mapping = MessageToDict(entry_pb, + preserving_proto_field_name=False, + including_default_value_fields=False, + ) entry_mapping["protoPayload"] = proto_payload return entry_mapping else: @@ -448,7 +456,7 @@ def _log_entry_mapping_to_pb(mapping): Performs "impedance matching" between the protobuf attrs and the keys expected in the JSON API. """ - entry_pb = LogEntry() + entry_pb = LogEntryPB.pb(LogEntryPB()) # NOTE: We assume ``mapping`` was created in ``Batch.commit`` # or ``Logger._make_entry_resource``. In either case, if # the ``protoPayload`` key is present, we assume that the @@ -457,89 +465,22 @@ def _log_entry_mapping_to_pb(mapping): # of the corresponding ``proto_payload`` in the log entry # (it is an ``Any`` field). ParseDict(mapping, entry_pb) - return entry_pb - - -def _item_to_entry(iterator, entry_pb, loggers): - """Convert a log entry protobuf to the native object. - - .. note:: - - This method does not have the correct signature to be used as - the ``item_to_value`` argument to - :class:`~google.api_core.page_iterator.Iterator`. It is intended to be - patched with a mutable ``loggers`` argument that can be updated - on subsequent calls. For an example, see how the method is - used above in :meth:`_LoggingAPI.list_entries`. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type entry_pb: :class:`.log_entry_pb2.LogEntry` - :param entry_pb: Log entry protobuf returned from the API. - - :type loggers: dict - :param loggers: - A mapping of logger fullnames -> loggers. If the logger - that owns the entry is not in ``loggers``, the entry - will have a newly-created logger. - - :rtype: :class:`~google.cloud.logging.entries._BaseEntry` - :returns: The next log entry in the page. - """ - resource = _parse_log_entry(entry_pb) - return entry_from_resource(resource, iterator.client, loggers) - - -def _item_to_sink(iterator, log_sink_pb): - """Convert a sink protobuf to the native object. - - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. 
-
- :type log_sink_pb:
- :class:`.logging_config_pb2.LogSink`
- :param log_sink_pb: Sink protobuf returned from the API.
-
- :rtype: :class:`~google.cloud.logging.sink.Sink`
- :returns: The next sink in the page.
- """
- # NOTE: LogSink message type does not have an ``Any`` field
- # so `MessageToDict`` can safely be used.
- resource = MessageToDict(log_sink_pb)
- return Sink.from_api_repr(resource, iterator.client)
-
-
-def _item_to_metric(iterator, log_metric_pb):
- """Convert a metric protobuf to the native object.
-
- :type iterator: :class:`~google.api_core.page_iterator.Iterator`
- :param iterator: The iterator that is currently in use.
-
- :type log_metric_pb:
- :class:`.logging_metrics_pb2.LogMetric`
- :param log_metric_pb: Metric protobuf returned from the API.
-
- :rtype: :class:`~google.cloud.logging.metric.Metric`
- :returns: The next metric in the page.
- """
- # NOTE: LogMetric message type does not have an ``Any`` field
- # so `MessageToDict`` can safely be used.
- resource = MessageToDict(log_metric_pb)
- return Metric.from_api_repr(resource, iterator.client)
+ return LogEntryPB(entry_pb)

 def make_logging_api(client):
 """Create an instance of the Logging API adapter.
-
- :type client: :class:`~google.cloud.logging.client.Client`
- :param client: The client that holds configuration details.
-
- :rtype: :class:`_LoggingAPI`
- :returns: A metrics API instance with the proper credentials.
+
+ Args:
+ client (google.cloud.logging_v2.client.Client): The client
+ that holds configuration details.
+
+ Returns:
+ _LoggingAPI: A logging API instance with the proper credentials.
 """
 generated = LoggingServiceV2Client(
- credentials=client._credentials, client_info=client._client_info
+ credentials=client._credentials, client_info=client._client_info,
+ client_options=client._client_options
 )
 return _LoggingAPI(generated, client)
@@ -547,14 +488,17 @@ def make_logging_api(client):
 def make_metrics_api(client):
 """Create an instance of the Metrics API adapter.

- :type client: :class:`~google.cloud.logging.client.Client`
- :param client: The client that holds configuration details.
+ Args:
+ client (google.cloud.logging_v2.client.Client): The client
+ that holds configuration details.

- :rtype: :class:`_MetricsAPI`
- :returns: A metrics API instance with the proper credentials.
+ Returns:
+ _MetricsAPI: A metrics API instance with the proper credentials.
 """
 generated = MetricsServiceV2Client(
- credentials=client._credentials, client_info=client._client_info
+ credentials=client._credentials,
+ client_info=client._client_info,
+ client_options=client._client_options
 )
 return _MetricsAPI(generated, client)
@@ -562,13 +506,16 @@ def make_metrics_api(client):
 def make_sinks_api(client):
 """Create an instance of the Sinks API adapter.

- :type client: :class:`~google.cloud.logging.client.Client`
- :param client: The client that holds configuration details.
-
- :rtype: :class:`_SinksAPI`
- :returns: A metrics API instance with the proper credentials.
+ Args:
+ client (google.cloud.logging_v2.client.Client): The client
+ that holds configuration details.
+
+ Returns:
+ _SinksAPI: A sinks API instance with the proper credentials. 
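+
+ Example (illustrative sketch; ``client`` is an already-configured
+ ``google.cloud.logging_v2.client.Client``):
+
+ sinks_api = make_sinks_api(client)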
""" generated = ConfigServiceV2Client( - credentials=client._credentials, client_info=client._client_info + credentials=client._credentials, + client_info=client._client_info, + client_options=client._client_options ) return _SinksAPI(generated, client) diff --git a/google/cloud/logging_v2/client.py b/google/cloud/logging_v2/client.py index 4d2442a7b..6883acee7 100644 --- a/google/cloud/logging_v2/client.py +++ b/google/cloud/logging_v2/client.py @@ -144,6 +144,7 @@ def __init__( self._connection = Connection(self, **kw_args) self._client_info = client_info + self._client_options = client_options if _use_grpc is None: self._use_grpc = _USE_GRPC else: diff --git a/setup.py b/setup.py index ebd73c131..69b2b439b 100644 --- a/setup.py +++ b/setup.py @@ -29,8 +29,9 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.15.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.22.0, < 2.0.0dev", "google-cloud-core >= 1.4.1, < 2.0dev", + "proto-plus >= 1.11.0", ] extras = {} diff --git a/synth.py b/synth.py index 1c87eac1f..ed90592e7 100644 --- a/synth.py +++ b/synth.py @@ -35,6 +35,8 @@ excludes=[ "setup.py", "README.rst", + "google/cloud/logging/__init__.py", # generated types are hidden from users + "google/cloud/logging_v2/__init__.py", "docs/index.rst", "docs/multiprocessing.rst", "docs/logging_v2", # Don't include gapic library docs. Users should use the hand-written layer instead @@ -42,6 +44,13 @@ ], ) +# Fix generated unit tests +s.replace( + "tests/unit/gapic/logging_v2/test_logging_service_v2.py", + "MonitoredResource\(\s*type_", + "MonitoredResource(type" +) + # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- diff --git a/tests/unit/gapic/logging_v2/test_logging_service_v2.py b/tests/unit/gapic/logging_v2/test_logging_service_v2.py index 9aed4e4ce..5e318e02c 100644 --- a/tests/unit/gapic/logging_v2/test_logging_service_v2.py +++ b/tests/unit/gapic/logging_v2/test_logging_service_v2.py @@ -729,7 +729,7 @@ def test_write_log_entries_flattened(): # using the keyword arguments to the method. client.write_log_entries( log_name="log_name_value", - resource=monitored_resource.MonitoredResource(type_="type__value"), + resource=monitored_resource.MonitoredResource(type="type__value"), labels={"key_value": "value_value"}, entries=[log_entry.LogEntry(log_name="log_name_value")], ) @@ -741,8 +741,7 @@ def test_write_log_entries_flattened(): assert args[0].log_name == "log_name_value" - assert args[0].resource == monitored_resource.MonitoredResource( - type_="type__value" + assert args[0].resource == monitored_resource.MonitoredResource(type="type__value" ) assert args[0].labels == {"key_value": "value_value"} @@ -759,7 +758,7 @@ def test_write_log_entries_flattened_error(): client.write_log_entries( logging.WriteLogEntriesRequest(), log_name="log_name_value", - resource=monitored_resource.MonitoredResource(type_="type__value"), + resource=monitored_resource.MonitoredResource(type="type__value"), labels={"key_value": "value_value"}, entries=[log_entry.LogEntry(log_name="log_name_value")], ) @@ -785,7 +784,7 @@ async def test_write_log_entries_flattened_async(): # using the keyword arguments to the method. 
response = await client.write_log_entries( log_name="log_name_value", - resource=monitored_resource.MonitoredResource(type_="type__value"), + resource=monitored_resource.MonitoredResource(type="type__value"), labels={"key_value": "value_value"}, entries=[log_entry.LogEntry(log_name="log_name_value")], ) @@ -797,8 +796,7 @@ async def test_write_log_entries_flattened_async(): assert args[0].log_name == "log_name_value" - assert args[0].resource == monitored_resource.MonitoredResource( - type_="type__value" + assert args[0].resource == monitored_resource.MonitoredResource(type="type__value" ) assert args[0].labels == {"key_value": "value_value"} @@ -818,7 +816,7 @@ async def test_write_log_entries_flattened_error_async(): await client.write_log_entries( logging.WriteLogEntriesRequest(), log_name="log_name_value", - resource=monitored_resource.MonitoredResource(type_="type__value"), + resource=monitored_resource.MonitoredResource(type="type__value"), labels={"key_value": "value_value"}, entries=[log_entry.LogEntry(log_name="log_name_value")], ) diff --git a/tests/unit/test__gapic.py b/tests/unit/test__gapic.py index d7d5dd7a6..95934f6b3 100644 --- a/tests/unit/test__gapic.py +++ b/tests/unit/test__gapic.py @@ -20,106 +20,113 @@ import mock import google.cloud.logging_v2 +from google.cloud import logging_v2 from google.cloud.logging_v2 import _gapic -from google.cloud.logging_v2.gapic import config_service_v2_client -from google.cloud.logging_v2.gapic import logging_service_v2_client -from google.cloud.logging_v2.gapic import metrics_service_v2_client -from google.cloud.logging_v2.proto import log_entry_pb2 -from google.cloud.logging_v2.proto import logging_pb2 -from google.cloud.logging_v2.proto import logging_config_pb2 -from google.cloud.logging_v2.proto import logging_metrics_pb2 +from google.cloud.logging_v2.services.config_service_v2 import ConfigServiceV2Client +from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client +from google.cloud.logging_v2.services.metrics_service_v2 import MetricsServiceV2Client +from google.cloud.logging_v2.types import LogSink +from google.cloud.logging_v2.types import LogMetric +from google.cloud.logging_v2.types import LogEntry as LogEntryPB PROJECT = "PROJECT" -PROJECT_PATH = "projects/%s" % (PROJECT,) +PROJECT_PATH = f"projects/{PROJECT}" FILTER = "logName:syslog AND severity>=ERROR" class Test_LoggingAPI(object): LOG_NAME = "log_name" - LOG_PATH = "projects/%s/logs/%s" % (PROJECT, LOG_NAME) + LOG_PATH = f"projects/{PROJECT}/logs/{LOG_NAME}" @staticmethod def make_logging_api(): - channel = grpc_helpers.ChannelStub() - gapic_client = logging_service_v2_client.LoggingServiceV2Client(channel=channel) + gapic_client = LoggingServiceV2Client() handwritten_client = mock.Mock() api = _gapic._LoggingAPI(gapic_client, handwritten_client) - return channel, api + return api def test_ctor(self): - channel = grpc_helpers.ChannelStub() - gapic_client = logging_service_v2_client.LoggingServiceV2Client(channel=channel) + gapic_client = LoggingServiceV2Client() api = _gapic._LoggingAPI(gapic_client, mock.sentinel.client) assert api._gapic_api is gapic_client assert api._client is mock.sentinel.client def test_list_entries(self): - channel, api = self.make_logging_api() + client = self.make_logging_api() - log_entry_msg = log_entry_pb2.LogEntry( - log_name=self.LOG_PATH, text_payload="text" - ) - channel.ListLogEntries.response = logging_pb2.ListLogEntriesResponse( - entries=[log_entry_msg] - ) - result = api.list_entries([PROJECT], FILTER, 
google.cloud.logging_v2.DESCENDING) + log_entry_msg = LogEntryPB(log_name=self.LOG_PATH, text_payload="text") + + with mock.patch.object( + type(client._gapic_api.transport.list_log_entries), "__call__" + ) as call: + call.return_value = logging_v2.types.ListLogEntriesResponse( + entries=[log_entry_msg] + ) + result = client.list_entries( + [PROJECT], filter_=FILTER, order_by=logging_v2.DESCENDING + ) entries = list(result) # Check the response assert len(entries) == 1 entry = entries[0] - assert isinstance(entry, google.cloud.logging_v2.entries.TextEntry) + + assert isinstance(entry, logging_v2.entries.TextEntry) assert entry.payload == "text" # Check the request - assert len(channel.ListLogEntries.requests) == 1 - request = channel.ListLogEntries.requests[0] - assert request.project_ids == [PROJECT] + call.assert_called_once() + request = call.call_args.args[0] + assert request.resource_names == [PROJECT_PATH] assert request.filter == FILTER - assert request.order_by == google.cloud.logging_v2.DESCENDING + assert request.order_by == logging_v2.DESCENDING def test_list_entries_with_options(self): - channel, api = self.make_logging_api() + client = self.make_logging_api() - channel.ListLogEntries.response = logging_pb2.ListLogEntriesResponse(entries=[]) + with mock.patch.object( + type(client._gapic_api.transport.list_log_entries), "__call__" + ) as call: + call.return_value = logging_v2.types.ListLogEntriesResponse(entries=[]) - result = api.list_entries( - [PROJECT], - FILTER, - google.cloud.logging_v2.ASCENDING, - page_size=42, - page_token="token", - ) + result = client.list_entries( + [PROJECT], + filter_=FILTER, + order_by=google.cloud.logging_v2.ASCENDING, + page_size=42, + page_token="token", + ) list(result) # Check the request - assert len(channel.ListLogEntries.requests) == 1 - request = channel.ListLogEntries.requests[0] - assert request.project_ids == [PROJECT] + call.assert_called_once() + request = call.call_args.args[0] + assert request.resource_names == [PROJECT_PATH] assert request.filter == FILTER assert request.order_by == google.cloud.logging_v2.ASCENDING assert request.page_size == 42 assert request.page_token == "token" def test_write_entries_single(self): - channel, api = self.make_logging_api() - - channel.WriteLogEntries.response = empty_pb2.Empty() - - entry = { - "logName": self.LOG_PATH, - "resource": {"type": "global"}, - "textPayload": "text", - } - - api.write_entries([entry]) + client = self.make_logging_api() + + with mock.patch.object( + type(client._gapic_api.transport.write_log_entries), "__call__" + ) as call: + call.return_value = logging_v2.types.WriteLogEntriesResponse() + entry = { + "logName": self.LOG_PATH, + "resource": {"type": "global"}, + "textPayload": "text", + } + result = client.write_entries([entry]) # Check the request - assert len(channel.WriteLogEntries.requests) == 1 - request = channel.WriteLogEntries.requests[0] + call.assert_called_once() + request = call.call_args.args[0] assert request.partial_success is False assert len(request.entries) == 1 assert request.entries[0].log_name == entry["logName"] @@ -127,49 +134,48 @@ def test_write_entries_single(self): assert request.entries[0].text_payload == "text" def test_logger_delete(self): - channel, api = self.make_logging_api() + client = self.make_logging_api() - channel.DeleteLog.response = empty_pb2.Empty() - - api.logger_delete(PROJECT, self.LOG_NAME) - - assert len(channel.DeleteLog.requests) == 1 - request = channel.DeleteLog.requests[0] - assert request.log_name == 
self.LOG_PATH + with mock.patch.object( + type(client._gapic_api.transport.delete_log), "__call__" + ) as call: + client.logger_delete(PROJECT, self.LOG_NAME) + call.assert_called_once() + assert call.call_args.args[0].log_name == self.LOG_PATH class Test_SinksAPI(object): SINK_NAME = "sink_name" - SINK_PATH = "projects/%s/sinks/%s" % (PROJECT, SINK_NAME) + SINK_PATH = f"projects/{PROJECT}/sinks/{SINK_NAME}" DESTINATION_URI = "faux.googleapis.com/destination" SINK_WRITER_IDENTITY = "serviceAccount:project-123@example.com" @staticmethod def make_sinks_api(): - channel = grpc_helpers.ChannelStub() - gapic_client = config_service_v2_client.ConfigServiceV2Client(channel=channel) + gapic_client = ConfigServiceV2Client() handwritten_client = mock.Mock() api = _gapic._SinksAPI(gapic_client, handwritten_client) - return channel, api + return api def test_ctor(self): - channel = grpc_helpers.ChannelStub() - gapic_client = config_service_v2_client.ConfigServiceV2Client(channel=channel) + gapic_client = ConfigServiceV2Client() api = _gapic._SinksAPI(gapic_client, mock.sentinel.client) assert api._gapic_api is gapic_client assert api._client is mock.sentinel.client def test_list_sinks(self): - channel, api = self.make_sinks_api() + client = self.make_sinks_api() - sink_msg = logging_config_pb2.LogSink( + sink_msg = LogSink( name=self.SINK_PATH, destination=self.DESTINATION_URI, filter=FILTER ) - channel.ListSinks.response = logging_config_pb2.ListSinksResponse( - sinks=[sink_msg] - ) + with mock.patch.object( + type(client._gapic_api.transport.list_sinks), "__call__" + ) as call: + call.return_value = logging_v2.types.ListSinksResponse(sinks=[sink_msg]) + + result = client.list_sinks(PROJECT) - result = api.list_sinks(PROJECT) sinks = list(result) # Check the response @@ -181,54 +187,57 @@ def test_list_sinks(self): assert sink.filter_ == FILTER # Check the request - assert len(channel.ListSinks.requests) == 1 - request = channel.ListSinks.requests[0] + call.assert_called_once() + request = call.call_args.args[0] assert request.parent == PROJECT_PATH def test_list_sinks_with_options(self): - channel, api = self.make_sinks_api() - - channel.ListSinks.response = logging_config_pb2.ListSinksResponse(sinks=[]) + client = self.make_sinks_api() - result = api.list_sinks(PROJECT, page_size=42, page_token="token") + with mock.patch.object( + type(client._gapic_api.transport.list_sinks), "__call__" + ) as call: + call.return_value = logging_v2.types.ListSinksResponse(sinks=[]) + result = client.list_sinks(PROJECT, page_size=42, page_token="token") list(result) # Check the request - assert len(channel.ListSinks.requests) == 1 - request = channel.ListSinks.requests[0] - assert request.parent == "projects/%s" % PROJECT + call.assert_called_once() + request = call.call_args.args[0] + assert request.parent == f"projects/{PROJECT}" assert request.page_size == 42 assert request.page_token == "token" def test_sink_create(self): - channel, api = self.make_sinks_api() - - channel.CreateSink.response = logging_config_pb2.LogSink( - name=self.SINK_NAME, - destination=self.DESTINATION_URI, - filter=FILTER, - writer_identity=self.SINK_WRITER_IDENTITY, - ) - - result = api.sink_create( - PROJECT, - self.SINK_NAME, - FILTER, - self.DESTINATION_URI, - unique_writer_identity=True, - ) + client = self.make_sinks_api() + with mock.patch.object( + type(client._gapic_api.transport.create_sink), "__call__" + ) as call: + call.return_value = logging_v2.types.LogSink( + name=self.SINK_NAME, + destination=self.DESTINATION_URI, + 
+                filter=FILTER,
+                writer_identity=self.SINK_WRITER_IDENTITY,
+            )
+
+            result = client.sink_create(
+                PROJECT,
+                self.SINK_NAME,
+                FILTER,
+                self.DESTINATION_URI,
+                unique_writer_identity=True,
+            )

         # Check response
-        assert result == {
-            "name": self.SINK_NAME,
-            "filter": FILTER,
-            "destination": self.DESTINATION_URI,
-            "writerIdentity": self.SINK_WRITER_IDENTITY,
-        }
+        # TODO: the response has extra (blank) fields; is this OK?
+        assert result["name"] == self.SINK_NAME
+        assert result["filter"] == FILTER
+        assert result["destination"] == self.DESTINATION_URI
+        assert result["writerIdentity"] == self.SINK_WRITER_IDENTITY

         # Check request
-        assert len(channel.CreateSink.requests) == 1
-        request = channel.CreateSink.requests[0]
+        call.assert_called_once()
+        request = call.call_args.args[0]
         assert request.parent == PROJECT_PATH
         assert request.unique_writer_identity is True
         assert request.sink.name == self.SINK_NAME
@@ -236,13 +245,15 @@ def test_sink_create(self):
         assert request.sink.destination == self.DESTINATION_URI

     def test_sink_get(self):
-        channel, api = self.make_sinks_api()
+        client = self.make_sinks_api()
+        with mock.patch.object(
+            type(client._gapic_api.transport.get_sink), "__call__"
+        ) as call:
+            call.return_value = logging_v2.types.LogSink(
+                name=self.SINK_PATH, destination=self.DESTINATION_URI, filter=FILTER
+            )

-        channel.GetSink.response = logging_config_pb2.LogSink(
-            name=self.SINK_PATH, destination=self.DESTINATION_URI, filter=FILTER
-        )
-
-        response = api.sink_get(PROJECT, self.SINK_NAME)
+            response = client.sink_get(PROJECT, self.SINK_NAME)

         # Check response
         assert response == {
@@ -252,27 +263,29 @@ def test_sink_get(self):
         }

         # Check request
-        assert len(channel.GetSink.requests) == 1
-        request = channel.GetSink.requests[0]
+        call.assert_called_once()
+        request = call.call_args.args[0]
         assert request.sink_name == self.SINK_PATH

     def test_sink_update(self):
-        channel, api = self.make_sinks_api()
-
-        channel.UpdateSink.response = logging_config_pb2.LogSink(
-            name=self.SINK_NAME,
-            destination=self.DESTINATION_URI,
-            filter=FILTER,
-            writer_identity=self.SINK_WRITER_IDENTITY,
-        )
-
-        result = api.sink_update(
-            PROJECT,
-            self.SINK_NAME,
-            FILTER,
-            self.DESTINATION_URI,
-            unique_writer_identity=True,
-        )
+        client = self.make_sinks_api()
+        with mock.patch.object(
+            type(client._gapic_api.transport.update_sink), "__call__"
+        ) as call:
+            call.return_value = logging_v2.types.LogSink(
+                name=self.SINK_NAME,
+                destination=self.DESTINATION_URI,
+                filter=FILTER,
+                writer_identity=self.SINK_WRITER_IDENTITY,
+            )
+
+            result = client.sink_update(
+                PROJECT,
+                self.SINK_NAME,
+                FILTER,
+                self.DESTINATION_URI,
+                unique_writer_identity=True,
+            )

         # Check response
         assert result == {
@@ -283,8 +296,8 @@ def test_sink_update(self):

         # Check request
-        assert len(channel.UpdateSink.requests) == 1
-        request = channel.UpdateSink.requests[0]
+        call.assert_called_once()
+        request = call.call_args.args[0]
         assert request.sink_name == self.SINK_PATH
         assert request.unique_writer_identity is True
         assert request.sink.name == self.SINK_PATH
@@ -292,48 +305,48 @@ def test_sink_update(self):
         assert request.sink.destination == self.DESTINATION_URI

     def test_sink_delete(self):
-        channel, api = self.make_sinks_api()
-
-        channel.DeleteSink.response = empty_pb2.Empty()
-
-        api.sink_delete(PROJECT, self.SINK_NAME)
-
-        assert len(channel.DeleteSink.requests) == 1
-        request = channel.DeleteSink.requests[0]
+        client = self.make_sinks_api()
+        with mock.patch.object(
+            type(client._gapic_api.transport.delete_sink),
"__call__" + ) as call: + client.sink_delete(PROJECT, self.SINK_NAME) + + call.assert_called_once() + request = call.call_args.args[0] assert request.sink_name == self.SINK_PATH class Test_MetricsAPI(object): METRIC_NAME = "metric_name" - METRIC_PATH = "projects/%s/metrics/%s" % (PROJECT, METRIC_NAME) + METRIC_PATH = f"projects/{PROJECT}/metrics/{METRIC_NAME}" DESCRIPTION = "Description" @staticmethod def make_metrics_api(): - channel = grpc_helpers.ChannelStub() - gapic_client = metrics_service_v2_client.MetricsServiceV2Client(channel=channel) + gapic_client = MetricsServiceV2Client() handwritten_client = mock.Mock() api = _gapic._MetricsAPI(gapic_client, handwritten_client) - return channel, api + return api def test_ctor(self): - channel = grpc_helpers.ChannelStub() - gapic_client = metrics_service_v2_client.MetricsServiceV2Client(channel=channel) + gapic_client = MetricsServiceV2Client() api = _gapic._MetricsAPI(gapic_client, mock.sentinel.client) assert api._gapic_api is gapic_client assert api._client is mock.sentinel.client def test_list_metrics(self): - channel, api = self.make_metrics_api() + client = self.make_metrics_api() - sink_msg = logging_metrics_pb2.LogMetric( + metric = logging_v2.types.LogMetric( name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER ) - channel.ListLogMetrics.response = logging_metrics_pb2.ListLogMetricsResponse( - metrics=[sink_msg] - ) - - result = api.list_metrics(PROJECT) + with mock.patch.object( + type(client._gapic_api.transport.list_log_metrics), "__call__" + ) as call: + call.return_value = logging_v2.types.ListLogMetricsResponse( + metrics=[metric] + ) + result = client.list_metrics(PROJECT) metrics = list(result) # Check the response @@ -345,50 +358,59 @@ def test_list_metrics(self): assert metric.filter_ == FILTER # Check the request - assert len(channel.ListLogMetrics.requests) == 1 - request = channel.ListLogMetrics.requests[0] + call.assert_called_once() + request = call.call_args.args[0] assert request.parent == PROJECT_PATH def test_list_metrics_options(self): - channel, api = self.make_metrics_api() + client = self.make_metrics_api() - channel.ListLogMetrics.response = logging_metrics_pb2.ListLogMetricsResponse( - metrics=[] + metric = logging_v2.types.LogMetric( + name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER ) + with mock.patch.object( + type(client._gapic_api.transport.list_log_metrics), "__call__" + ) as call: + call.return_value = logging_v2.types.ListLogMetricsResponse(metrics=[]) - result = api.list_metrics(PROJECT, page_size=42, page_token="token") + result = client.list_metrics(PROJECT, page_size=42, page_token="token") list(result) # Check the request - assert len(channel.ListLogMetrics.requests) == 1 - request = channel.ListLogMetrics.requests[0] + call.assert_called_once() + request = call.call_args.args[0] assert request.parent == PROJECT_PATH assert request.page_size == 42 assert request.page_token == "token" def test_metric_create(self): - channel, api = self.make_metrics_api() + client = self.make_metrics_api() - channel.CreateLogMetric.response = empty_pb2.Empty() - - api.metric_create(PROJECT, self.METRIC_NAME, FILTER, self.DESCRIPTION) + with mock.patch.object( + type(client._gapic_api.transport.create_log_metric), "__call__" + ) as call: + result = client.metric_create( + PROJECT, self.METRIC_NAME, FILTER, self.DESCRIPTION + ) # Check the request - assert len(channel.CreateLogMetric.requests) == 1 - request = channel.CreateLogMetric.requests[0] + call.assert_called_once() + request = 
call.call_args.args[0] assert request.parent == PROJECT_PATH assert request.metric.name == self.METRIC_NAME assert request.metric.filter == FILTER assert request.metric.description == self.DESCRIPTION def test_metric_get(self): - channel, api = self.make_metrics_api() - - channel.GetLogMetric.response = logging_metrics_pb2.LogMetric( - name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER - ) + client = self.make_metrics_api() - response = api.metric_get(PROJECT, self.METRIC_NAME) + with mock.patch.object( + type(client._gapic_api.transport.get_log_metric), "__call__" + ) as call: + call.return_value = logging_v2.types.LogMetric( + name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER + ) + response = client.metric_get(PROJECT, self.METRIC_NAME) # Check the response assert response == { @@ -398,20 +420,23 @@ def test_metric_get(self): } # Check the request - assert len(channel.GetLogMetric.requests) == 1 - request = channel.GetLogMetric.requests[0] + call.assert_called_once() + request = call.call_args.args[0] assert request.metric_name == self.METRIC_PATH def test_metric_update(self): - channel, api = self.make_metrics_api() + client = self.make_metrics_api() - channel.UpdateLogMetric.response = logging_metrics_pb2.LogMetric( - name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER - ) + with mock.patch.object( + type(client._gapic_api.transport.update_log_metric), "__call__" + ) as call: + call.return_value = logging_v2.types.LogMetric( + name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER + ) - response = api.metric_update( - PROJECT, self.METRIC_NAME, FILTER, self.DESCRIPTION - ) + response = client.metric_update( + PROJECT, self.METRIC_NAME, FILTER, self.DESCRIPTION + ) # Check the response assert response == { @@ -421,22 +446,22 @@ def test_metric_update(self): } # Check the request - assert len(channel.UpdateLogMetric.requests) == 1 - request = channel.UpdateLogMetric.requests[0] + call.assert_called_once() + request = call.call_args.args[0] assert request.metric_name == self.METRIC_PATH assert request.metric.name == self.METRIC_PATH assert request.metric.filter == FILTER assert request.metric.description == self.DESCRIPTION def test_metric_delete(self): - channel, api = self.make_metrics_api() - - channel.DeleteLogMetric.response = empty_pb2.Empty() - - api.metric_delete(PROJECT, self.METRIC_NAME) - - assert len(channel.DeleteLogMetric.requests) == 1 - request = channel.DeleteLogMetric.requests[0] + client = self.make_metrics_api() + with mock.patch.object( + type(client._gapic_api.transport.delete_log_metric), "__call__" + ) as call: + client.metric_delete(PROJECT, self.METRIC_NAME) + + call.assert_called_once() + request = call.call_args.args[0] assert request.metric_name == self.METRIC_PATH @@ -448,10 +473,8 @@ def _call_fut(*args, **kwargs): return _parse_log_entry(*args, **kwargs) def test_simple(self): - from google.cloud.logging_v2.proto.log_entry_pb2 import LogEntry - - entry_pb = LogEntry(log_name=u"lol-jk", text_payload=u"bah humbug") - result = self._call_fut(entry_pb) + entry_pb = LogEntryPB(log_name="lol-jk", text_payload="bah humbug") + result = self._call_fut(LogEntryPB.pb(entry_pb)) expected = {"logName": entry_pb.log_name, "textPayload": entry_pb.text_payload} self.assertEqual(result, expected) @@ -463,10 +486,9 @@ def test_non_registry_failure(self, msg_to_dict_mock): self._call_fut(entry_pb) entry_pb.HasField.assert_called_once_with("proto_payload") - msg_to_dict_mock.assert_called_once_with(entry_pb) + 
msg_to_dict_mock.assert_called_once_with(entry_pb, preserving_proto_field_name=False, including_default_value_fields=False) def test_unregistered_type(self): - from google.cloud.logging_v2.proto.log_entry_pb2 import LogEntry from google.protobuf import any_pb2 from google.protobuf import descriptor_pool from google.protobuf.timestamp_pb2 import Timestamp @@ -482,8 +504,8 @@ def test_unregistered_type(self): any_pb = any_pb2.Any(type_url=type_url, value=metadata_bytes) timestamp = Timestamp(seconds=61, nanos=1234000) - entry_pb = LogEntry(proto_payload=any_pb, timestamp=timestamp) - result = self._call_fut(entry_pb) + entry_pb = LogEntryPB(proto_payload=any_pb, timestamp=timestamp) + result = self._call_fut(LogEntryPB.pb(entry_pb)) self.assertEqual(len(result), 2) self.assertEqual(result["timestamp"], "1970-01-01T00:01:01.001234Z") # NOTE: This "hack" is needed on Windows, where the equality check @@ -492,7 +514,6 @@ def test_unregistered_type(self): self.assertEqual(result["protoPayload"].value, metadata_bytes) def test_registered_type(self): - from google.cloud.logging_v2.proto.log_entry_pb2 import LogEntry from google.protobuf import any_pb2 from google.protobuf import descriptor_pool from google.protobuf.struct_pb2 import Struct @@ -506,12 +527,12 @@ def test_registered_type(self): type_url = "type.googleapis.com/" + type_name field_name = "foo" - field_value = u"Bar" + field_value = "Bar" struct_pb = Struct(fields={field_name: Value(string_value=field_value)}) any_pb = any_pb2.Any(type_url=type_url, value=struct_pb.SerializeToString()) - entry_pb = LogEntry(proto_payload=any_pb, log_name=u"all-good") - result = self._call_fut(entry_pb) + entry_pb = LogEntryPB(proto_payload=any_pb, log_name="all-good") + result = self._call_fut(LogEntryPB.pb(entry_pb)) expected_proto = { "logName": entry_pb.log_name, "protoPayload": {"@type": type_url, "value": {field_name: field_value}}, @@ -527,10 +548,8 @@ def _call_fut(*args, **kwargs): return _log_entry_mapping_to_pb(*args, **kwargs) def test_simple(self): - from google.cloud.logging_v2.proto.log_entry_pb2 import LogEntry - result = self._call_fut({}) - self.assertEqual(result, LogEntry()) + self.assertEqual(result, LogEntryPB()) def test_unregistered_type(self): from google.protobuf import descriptor_pool @@ -554,7 +573,6 @@ def test_unregistered_type(self): self._call_fut(json_mapping) def test_registered_type(self): - from google.cloud.logging_v2.proto.log_entry_pb2 import LogEntry from google.protobuf import any_pb2 from google.protobuf import descriptor_pool @@ -566,14 +584,14 @@ def test_registered_type(self): type_url = "type.googleapis.com/" + type_name field_name = "foo" - field_value = u"Bar" + field_value = "Bar" json_mapping = { - "logName": u"hi-everybody", + "logName": "hi-everybody", "protoPayload": {"@type": type_url, "value": {field_name: field_value}}, } # Convert to a valid LogEntry. 
result = self._call_fut(json_mapping) - entry_pb = LogEntry( + entry_pb = LogEntryPB( log_name=json_mapping["logName"], proto_payload=any_pb2.Any( type_url=type_url, value=b"\n\014\n\003foo\022\005\032\003Bar" @@ -584,32 +602,35 @@ def test_registered_type(self): @mock.patch("google.cloud.logging_v2._gapic.LoggingServiceV2Client", autospec=True) def test_make_logging_api(gapic_client): - client = mock.Mock(spec=["_credentials", "_client_info"]) + client = mock.Mock(spec=["_credentials", "_client_info", "_client_options"]) api = _gapic.make_logging_api(client) assert api._client == client assert api._gapic_api == gapic_client.return_value gapic_client.assert_called_once_with( - credentials=client._credentials, client_info=client._client_info + credentials=client._credentials, client_info=client._client_info, + client_options=client._client_options ) @mock.patch("google.cloud.logging_v2._gapic.MetricsServiceV2Client", autospec=True) def test_make_metrics_api(gapic_client): - client = mock.Mock(spec=["_credentials", "_client_info"]) + client = mock.Mock(spec=["_credentials", "_client_info", "_client_options"]) api = _gapic.make_metrics_api(client) assert api._client == client assert api._gapic_api == gapic_client.return_value gapic_client.assert_called_once_with( - credentials=client._credentials, client_info=client._client_info + credentials=client._credentials, client_info=client._client_info, + client_options=client._client_options ) @mock.patch("google.cloud.logging_v2._gapic.ConfigServiceV2Client", autospec=True) def test_make_sinks_api(gapic_client): - client = mock.Mock(spec=["_credentials", "_client_info"]) + client = mock.Mock(spec=["_credentials", "_client_info", "_client_options"]) api = _gapic.make_sinks_api(client) assert api._client == client assert api._gapic_api == gapic_client.return_value gapic_client.assert_called_once_with( - credentials=client._credentials, client_info=client._client_info + credentials=client._credentials, client_info=client._client_info, + client_options=client._client_options ) From dd52ae8a86a55be370c15dcfb5b1849fb63cbc35 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Wed, 11 Nov 2020 05:26:43 +0000 Subject: [PATCH 52/58] fix: allow non-project resources --- google/cloud/logging_v2/_gapic.py | 284 +++++----- google/cloud/logging_v2/_helpers.py | 71 ++- google/cloud/logging_v2/_http.py | 489 +++++++++--------- google/cloud/logging_v2/client.py | 347 ++++++------- google/cloud/logging_v2/entries.py | 128 ++--- .../cloud/logging_v2/handlers/app_engine.py | 2 +- .../logging_v2/handlers/container_engine.py | 2 +- google/cloud/logging_v2/handlers/handlers.py | 6 +- .../logging_v2/handlers/middleware/request.py | 11 +- .../handlers/transports/background_thread.py | 4 +- .../logging_v2/handlers/transports/sync.py | 24 +- google/cloud/logging_v2/logger.py | 330 ++++++------ google/cloud/logging_v2/metric.py | 128 +++-- google/cloud/logging_v2/resource.py | 16 +- google/cloud/logging_v2/sink.py | 213 ++++---- samples/snippets/export.py | 5 +- samples/snippets/export_test.py | 4 +- .../v2/test_system_logging_service_v2_v2.py | 6 +- tests/system/test_system.py | 22 +- .../logging_v2/test_logging_service_v2.py | 6 +- tests/unit/handlers/transports/test_sync.py | 2 +- tests/unit/test__gapic.py | 69 ++- tests/unit/test__http.py | 78 +-- tests/unit/test_client.py | 33 +- tests/unit/test_logger.py | 22 +- tests/unit/test_metric.py | 39 +- tests/unit/test_sink.py | 85 +-- 27 files changed, 1195 insertions(+), 1231 deletions(-) diff --git 
a/google/cloud/logging_v2/_gapic.py b/google/cloud/logging_v2/_gapic.py index 01facccf9..31bbfcbb5 100644 --- a/google/cloud/logging_v2/_gapic.py +++ b/google/cloud/logging_v2/_gapic.py @@ -15,13 +15,6 @@ """Wrapper for adapting the autogenerated gapic client to the hand-written client.""" -import functools -import json -import types - -from typing import Iterable - -from google.api import monitored_resource_pb2 from google.cloud.logging_v2.services.config_service_v2 import ConfigServiceV2Client from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client from google.cloud.logging_v2.services.metrics_service_v2 import MetricsServiceV2Client @@ -40,64 +33,53 @@ from google.cloud.logging_v2._helpers import entry_from_resource from google.cloud.logging_v2.sink import Sink -from google.cloud.logging_v2.entries import LogEntry from google.cloud.logging_v2.metric import Metric class _LoggingAPI(object): - """Helper mapping logging-related APIs. - - :type gapic_api: - :class:`.logging_service_v2_client.LoggingServiceV2Client` - :param gapic_api: API object used to make RPCs. - - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The client that owns this API object. - """ + """Helper mapping logging-related APIs.""" def __init__(self, gapic_api, client): self._gapic_api = gapic_api self._client = client def list_entries( - self, projects, *, filter_="", order_by="", page_size=0, page_token=None + self, resource_names, *, filter_="", order_by="", page_size=0, page_token=None ): """Return a page of log entry resources. - :type projects: list of strings - :param projects: project IDs to include. If not passed, - defaults to the project bound to the API's client. - - :type filter_: str - :param filter_: - a filter expression. See - https://cloud.google.com/logging/docs/view/advanced_filters + Args: + resource_names (Sequence[str]): Names of one or more parent resources + from which to retrieve log entries: - :type order_by: str - :param order_by: One of :data:`~google.cloud.logging.ASCENDING` - or :data:`~google.cloud.logging.DESCENDING`. + :: - :type page_size: int - :param page_size: maximum number of entries to return, If not passed, - defaults to a value set by the API. + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" - :type page_token: str - :param page_token: opaque marker for the next "page" of entries. If not - passed, the API will return the first page of - entries. + filter_ (str): a filter expression. See + https://cloud.google.com/logging/docs/view/advanced_filters + order_by (str) One of :data:`~google.cloud.logging_v2.ASCENDING` + or :data:`~google.cloud.logging_v2.DESCENDING`. + page_size (int): maximum number of entries to return, If not passed, + defaults to a value set by the API. + page_token (str): opaque marker for the next "page" of entries. If not + passed, the API will return the first page of + entries. - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry` - accessible to the current API. 
+ Returns: + Iterator[google.cloud.logging_v2.LogEntry] """ # full resource names are expected by the API - projects = [f"projects/{p}" for p in projects] + resource_names = resource_names request = ListLogEntriesRequest( - resource_names=projects, + resource_names=resource_names, filter=filter_, order_by=order_by, page_size=page_size, - page_token=page_token + page_token=page_token, ) response = self._gapic_api.list_log_entries(request=request) @@ -111,11 +93,20 @@ def list_entries( def log_entries_pager(page_iter): for page in page_iter: log_entry_dict = _parse_log_entry(LogEntryPB.pb(page)) - yield entry_from_resource(log_entry_dict, self._client, loggers) + yield entry_from_resource(log_entry_dict, self._client, loggers=loggers) return log_entries_pager(page_iter) - def write_entries(self, entries, *, logger_name=None, resource=None, labels=None): + def write_entries( + self, + entries, + *, + logger_name=None, + resource=None, + labels=None, + partial_success=False, + dry_run=False, + ): """Log an entry resource via a POST request Args: @@ -127,8 +118,19 @@ def write_entries(self, entries, *, logger_name=None, resource=None, labels=None individual entries may override. labels (Optional[Mapping[str, ...]]): default labels to associate with entries; individual entries may override. + partial_success (Optional[bool]): Whether valid entries should be written even if + some other entries fail due to INVALID_ARGUMENT or + PERMISSION_DENIED errors. If any entry is not written, then + the response status is the error associated with one of the + failed entries and the response includes error details keyed + by the entries' zero-based index in the ``entries.write`` + method. + dry_run (Optional[bool]): + If true, the request should expect normal response, + but the entries won't be persisted nor exported. + Useful for checking whether the logging API endpoints are working + properly before sending valuable data. """ - partial_success = False log_entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries] request = WriteLogEntriesRequest( @@ -140,49 +142,56 @@ def write_entries(self, entries, *, logger_name=None, resource=None, labels=None ) self._gapic_api.write_log_entries(request=request) - def logger_delete(self, project, logger_name): + def logger_delete(self, logger_name): """Delete all entries in a logger. Args: - project (str): ID of project containing the log entries to delete - logger_name (str): name of logger containing the log entries to delete - """ - path = f"projects/{project}/logs/{logger_name}" - self._gapic_api.delete_log(log_name=path) + logger_name (str): The resource name of the log to delete: + :: -class _SinksAPI(object): - """Helper mapping sink-related APIs. + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" - :type gapic_api: - :class:`.config_service_v2_client.ConfigServiceV2Client` - :param gapic_api: API object used to make RPCs. + ``[LOG_ID]`` must be URL-encoded. For example, + ``"projects/my-project-id/logs/syslog"``, + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + """ + self._gapic_api.delete_log(log_name=logger_name) - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The client that owns this API object. 
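Taken together, the changes above mean callers pass full resource names straight
through to the API instead of bare project IDs. A usage sketch against the
handwritten client (hedged: assumes ``Client``, ``DESCENDING``, and the entry
classes are re-exported by ``google.cloud.logging_v2`` as elsewhere in this PR)::

    from google.cloud import logging_v2

    client = logging_v2.Client(project="my-project")
    # Any parent resource is accepted now, not only projects:
    entries = client.list_entries(
        resource_names=["projects/my-project", "folders/1234567890"],
        filter_="severity>=ERROR",
        order_by=logging_v2.DESCENDING,
    )
    for entry in entries:
        print(entry.payload)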
- """ + +class _SinksAPI(object): + """Helper mapping sink-related APIs.""" def __init__(self, gapic_api, client): self._gapic_api = gapic_api self._client = client - def list_sinks(self, project, page_size=0, page_token=None): - """List sinks for the project associated with this client. + def list_sinks(self, parent, *, page_size=0, page_token=None): + """List sinks for the parent resource. Args: - project (str): ID of the project whose sinks are to be listed. - page_size (int): Maximum number of sinks to return, If not passed, + parent (str): The parent resource whose sinks are to be listed: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + page_size (Optional[int]): Maximum number of sinks to return, If not passed, defaults to a value set by the API. - page_token (str): Opaque marker for the next "page" of sinks. If not + page_token (Optional[str]): Opaque marker for the next "page" of sinks. If not passed, the API will return the first page of sinks. - + Returns: - Iterable[logging_v2.Sink]: Iterable of sinks. + Iterator[google.cloud.logging_v2.Sink] """ - path = f"projects/{project}" request = ListSinksRequest( - parent=path, page_size=page_size, page_token=page_token + parent=parent, page_size=page_size, page_token=page_token ) response = self._gapic_api.list_sinks(request) page_iter = iter(response) @@ -195,7 +204,7 @@ def sinks_pager(page_iter): return sinks_pager(page_iter) def sink_create( - self, project, sink_name, filter_, destination, *, unique_writer_identity=False + self, parent, sink_name, filter_, destination, *, unique_writer_identity=False ): """Create a sink resource. @@ -203,7 +212,15 @@ def sink_create( https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create Args: - project (str): ID of the project in which to create the sink. + parent(str): The resource in which to create the sink, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". sink_name (str): The name of the sink. filter_ (str): The advanced logs filter expression defining the entries exported by the sink. @@ -216,7 +233,6 @@ def sink_create( dict: The sink resource returned from the API (converted from a protobuf to a dictionary). """ - parent = f"projects/{project}" sink_pb = LogSink(name=sink_name, filter=filter_, destination=destination) request = CreateSinkRequest( parent=parent, sink=sink_pb, unique_writer_identity=unique_writer_identity @@ -228,19 +244,25 @@ def sink_create( including_default_value_fields=False, ) - def sink_get(self, project, sink_name): + def sink_get(self, sink_name): """Retrieve a sink resource. Args: - project (str): ID of the project containing the sink. - sink_name (str): the name of the sink + sink_name (str): The resource name of the sink, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" - Returns: + Returns: dict: The sink object returned from the API (converted from a - protobuf to a dictionary). + protobuf to a dictionary). 
""" - path = f"projects/{project}/sinks/{sink_name}" - sink_pb = self._gapic_api.get_sink(sink_name=path) + sink_pb = self._gapic_api.get_sink(sink_name=sink_name) # NOTE: LogSink message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. return MessageToDict( @@ -250,30 +272,48 @@ def sink_get(self, project, sink_name): ) def sink_update( - self, project, sink_name, filter_, destination, *, unique_writer_identity=False + self, + sink_name, + filter_, + destination, + *, + unique_writer_identity=False, ): """Update a sink resource. Args: - project (str): ID of the project containing the sink. - sink_name (str): The name of the sink + sink_name (str): Required. The resource name of the sink, + including the parent resource and the sink identifier: + + :: + + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" filter_ (str): The advanced logs filter expression defining the entries exported by the sink. - destination (str): - destionation (str): destination URI for the entries exported by + destination (str): destination URI for the entries exported by the sink. unique_writer_identity (Optional[bool]): determines the kind of IAM identity returned as writer_identity in the new sink. - returns: + + Returns: dict: The sink resource returned from the API (converted from a protobuf to a dictionary). """ - path = f"projects/{project}/sinks/{sink_name}" - sink_pb = LogSink(name=path, filter=filter_, destination=destination) + name = sink_name.split("/")[-1] # parse name out of full resoure name + sink_pb = LogSink( + name=name, + filter=filter_, + destination=destination, + ) request = UpdateSinkRequest( - sink_name=path, sink=sink_pb, unique_writer_identity=unique_writer_identity + sink_name=sink_name, + sink=sink_pb, + unique_writer_identity=unique_writer_identity, ) sink_pb = self._gapic_api.update_sink(request=request) # NOTE: LogSink message type does not have an ``Any`` field @@ -284,34 +324,33 @@ def sink_update( including_default_value_fields=False, ) - def sink_delete(self, project, sink_name): + def sink_delete(self, sink_name): """Delete a sink resource. Args: - project (str): ID of the project containing the sink. - sink_name (str): The name of the sink - """ - path = f"projects/{project}/sinks/{sink_name}" - self._gapic_api.delete_sink(sink_name=path) + sink_name (str): Required. The full resource name of the sink to delete, + including the parent resource and the sink identifier: + :: -class _MetricsAPI(object): - """Helper mapping sink-related APIs. + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" - :type gapic_api: - :class:`.metrics_service_v2_client.MetricsServiceV2Client` + Example: ``"projects/my-project-id/sinks/my-sink-id"``. + """ + self._gapic_api.delete_sink(sink_name=sink_name) - :param gapic_api: API object used to make RPCs. - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The client that owns this API object. - """ +class _MetricsAPI(object): + """Helper mapping sink-related APIs. 
""" def __init__(self, gapic_api, client): self._gapic_api = gapic_api self._client = client - def list_metrics(self, project, page_size=0, page_token=None): + def list_metrics(self, project, *, page_size=0, page_token=None): """List metrics for the project associated with this client. Args: @@ -321,7 +360,7 @@ def list_metrics(self, project, page_size=0, page_token=None): page_token (str): Opaque marker for the next "page" of metrics. If not passed, the API will return the first page of sinks. - + Returns: Iterable[logging_v2.Metric]: Iterable of metrics. """ @@ -357,7 +396,7 @@ def metric_create(self, project, metric_name, filter_, description): self._gapic_api.create_log_metric(parent=parent, metric=metric_pb) def metric_get(self, project, metric_name): - """API call: retrieve a metric resource. + """Retrieve a metric resource. Args: project (str): ID of the project containing the metric. @@ -377,7 +416,13 @@ def metric_get(self, project, metric_name): including_default_value_fields=False, ) - def metric_update(self, project, metric_name, filter_, description): + def metric_update( + self, + project, + metric_name, + filter_, + description, + ): """Update a metric resource. Args: @@ -386,13 +431,17 @@ def metric_update(self, project, metric_name, filter_, description): filter_ (str): the advanced logs filter expression defining the entries exported by the metric. description (str): description of the metric. - + Returns: The metric object returned from the API (converted from a protobuf to a dictionary). """ path = f"projects/{project}/metrics/{metric_name}" - metric_pb = LogMetric(name=path, filter=filter_, description=description) + metric_pb = LogMetric( + name=path, + filter=filter_, + description=description, + ) metric_pb = self._gapic_api.update_log_metric( metric_name=path, metric=metric_pb ) @@ -432,7 +481,8 @@ def _parse_log_entry(entry_pb): it could not be parsed. """ try: - return MessageToDict(entry_pb, + return MessageToDict( + entry_pb, preserving_proto_field_name=False, including_default_value_fields=False, ) @@ -440,7 +490,8 @@ def _parse_log_entry(entry_pb): if entry_pb.HasField("proto_payload"): proto_payload = entry_pb.proto_payload entry_pb.ClearField("proto_payload") - entry_mapping = MessageToDict(entry_pb, + entry_mapping = MessageToDict( + entry_pb, preserving_proto_field_name=False, including_default_value_fields=False, ) @@ -470,17 +521,18 @@ def _log_entry_mapping_to_pb(mapping): def make_logging_api(client): """Create an instance of the Logging API adapter. - + Args: client (google.cloud.logging_v2.client.Client): The client that holds configuration details. - + Returns: _LoggingAPI: A metrics API instance with the proper credentials. """ generated = LoggingServiceV2Client( - credentials=client._credentials, client_info=client._client_info, - client_options=client._client_options + credentials=client._credentials, + client_info=client._client_info, + client_options=client._client_options, ) return _LoggingAPI(generated, client) @@ -498,7 +550,7 @@ def make_metrics_api(client): generated = MetricsServiceV2Client( credentials=client._credentials, client_info=client._client_info, - client_options=client._client_options + client_options=client._client_options, ) return _MetricsAPI(generated, client) @@ -509,13 +561,13 @@ def make_sinks_api(client): Args: client (google.cloud.logging_v2.client.Client): The client that holds configuration details. - + Returns: _SinksAPI: A metrics API instance with the proper credentials. 
""" generated = ConfigServiceV2Client( credentials=client._credentials, client_info=client._client_info, - client_options=client._client_options + client_options=client._client_options, ) return _SinksAPI(generated, client) diff --git a/google/cloud/logging_v2/_helpers.py b/google/cloud/logging_v2/_helpers.py index a9e979dc6..0023c7a06 100644 --- a/google/cloud/logging_v2/_helpers.py +++ b/google/cloud/logging_v2/_helpers.py @@ -64,31 +64,29 @@ class LogSeverity(object): def entry_from_resource(resource, client, loggers): """Detect correct entry type from resource and instantiate. - :type resource: dict - :param resource: One entry resource from API response. - - :type client: :class:`~google.cloud.logging.client.Client` - :param client: Client that owns the log entry. - - :type loggers: dict - :param loggers: - A mapping of logger fullnames -> loggers. If the logger - that owns the entry is not in ``loggers``, the entry - will have a newly-created logger. - - :rtype: :class:`~google.cloud.logging.entries._BaseEntry` - :returns: The entry instance, constructed via the resource + Args: + resource (dict): One entry resource from API response. + client (google.cloud.logging_v2.client.Client): + Client that owns the log entry. + loggers (dict): + A mapping of logger fullnames -> loggers. If the logger + that owns the entry is not in ``loggers``, the entry + will have a newly-created logger. + + Returns: + google.cloud.logging_v2.entries._BaseEntry: + The entry instance, constructed via the resource """ if "textPayload" in resource: - return TextEntry.from_api_repr(resource, client, loggers) + return TextEntry.from_api_repr(resource, client, loggers=loggers) if "jsonPayload" in resource: - return StructEntry.from_api_repr(resource, client, loggers) + return StructEntry.from_api_repr(resource, client, loggers=loggers) if "protoPayload" in resource: - return ProtobufEntry.from_api_repr(resource, client, loggers) + return ProtobufEntry.from_api_repr(resource, client, loggers=loggers) - return LogEntry.from_api_repr(resource, client, loggers) + return LogEntry.from_api_repr(resource, client, loggers=loggers) def retrieve_metadata_server(metadata_key): @@ -96,13 +94,14 @@ def retrieve_metadata_server(metadata_key): See: https://cloud.google.com/compute/docs/storing-retrieving-metadata - :type metadata_key: str - :param metadata_key: Key of the metadata which will form the url. You can - also supply query parameters after the metadata key. - e.g. "tags?alt=json" + Args: + metadata_key (str): + Key of the metadata which will form the url. You can + also supply query parameters after the metadata key. + e.g. "tags?alt=json" - :rtype: str - :returns: The value of the metadata key returned by the metadata server. + Returns: + str: The value of the metadata key returned by the metadata server. """ url = METADATA_URL + metadata_key @@ -123,11 +122,11 @@ def retrieve_metadata_server(metadata_key): def _normalize_severity(stdlib_level): """Normalize a Python stdlib severity to LogSeverity enum. - :type stdlib_level: int - :param stdlib_level: 'levelno' from a :class:`logging.LogRecord` - - :rtype: int - :returns: Corresponding Stackdriver severity. + Args: + stdlib_level (int): 'levelno' from a :class:`logging.LogRecord` + + Returns: + int: Corresponding Stackdriver severity. """ return _NORMALIZED_SEVERITIES.get(stdlib_level, stdlib_level) @@ -135,18 +134,18 @@ def _normalize_severity(stdlib_level): def _add_defaults_to_filter(filter_): """Modify the input filter expression to add sensible defaults. 
- :type filter_: str - :param filter_: The original filter expression - - :rtype: str - :returns: sensible default filter string + Args: + filter_ (str): The original filter expression + + Returns: + str: sensible default filter string """ # By default, requests should only return logs in the last 24 hours yesterday = datetime.now(timezone.utc) - timedelta(days=1) - time_filter = 'timestamp>="%s"' % yesterday.strftime(_TIME_FORMAT) + time_filter = f'timestamp>="{yesterday.strftime(_TIME_FORMAT)}"' if filter_ is None: filter_ = time_filter elif "timestamp" not in filter_.lower(): - filter_ = "%s AND %s" % (filter_, time_filter) + filter_ = f"{filter_} AND {time_filter}" return filter_ diff --git a/google/cloud/logging_v2/_http.py b/google/cloud/logging_v2/_http.py index 30e45b238..4c446e587 100644 --- a/google/cloud/logging_v2/_http.py +++ b/google/cloud/logging_v2/_http.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Interact with Stackdriver Logging via JSON-over-HTTP.""" +"""Interact with Cloud Logging via JSON-over-HTTP.""" import functools @@ -26,22 +26,21 @@ class Connection(_http.JSONConnection): - """A connection to Google Stackdriver Logging via the JSON REST API. - - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The client that owns the current connection. - - :type client_info: :class:`~google.api_core.client_info.ClientInfo` - :param client_info: (Optional) instance used to generate user agent. - - :type client_options: :class:`~google.api_core.client_options.ClientOptions` - :param client_options (Optional) Client options used to set user options - on the client. API Endpoint should be set through client_options. - """ DEFAULT_API_ENDPOINT = "https://logging.googleapis.com" - def __init__(self, client, client_info=None, api_endpoint=DEFAULT_API_ENDPOINT): + def __init__(self, client, *, client_info=None, api_endpoint=DEFAULT_API_ENDPOINT): + """A connection to Google Cloud Logging via the JSON REST API. + + Args: + client (google.cloud.logging_v2.cliet.Client): + The client that owns the current connection. + client_info (Optional[google.api_core.client_info.ClientInfo]): + Instance used to generate user agent. + client_options (Optional[google.api_core.client_options.ClientOptions]): + Client options used to set user options + on the client. API Endpoint should be set through client_options. + """ super(Connection, self).__init__(client, client_info) self.API_BASE_URL = api_endpoint self._client_info.gapic_version = __version__ @@ -70,40 +69,35 @@ def __init__(self, client): self.api_request = client._connection.api_request def list_entries( - self, projects, filter_=None, order_by=None, page_size=None, page_token=None + self, resource_names, *, filter_=None, order_by=None, page_size=None, page_token=None ): """Return a page of log entry resources. - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list - - :type projects: list of strings - :param projects: project IDs to include. If not passed, - defaults to the project bound to the client. - - :type filter_: str - :param filter_: - a filter expression. See - https://cloud.google.com/logging/docs/view/advanced_filters - - :type order_by: str - :param order_by: One of :data:`~google.cloud.logging.ASCENDING` - or :data:`~google.cloud.logging.DESCENDING`. - - :type page_size: int - :param page_size: maximum number of entries to return, If not passed, - defaults to a value set by the API. 
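For reference, the defaulting behavior implemented by ``_add_defaults_to_filter``
above (timestamps shortened here; the exact value is 24 hours before "now")::

    _add_defaults_to_filter(None)
    # -> 'timestamp>="2020-11-10T..."'

    _add_defaults_to_filter("severity>=ERROR")
    # -> 'severity>=ERROR AND timestamp>="2020-11-10T..."'

    # Filters that already mention "timestamp" pass through unchanged:
    _add_defaults_to_filter('timestamp>="2020-01-01T00:00:00Z"')
    # -> 'timestamp>="2020-01-01T00:00:00Z"'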
- - :type page_token: str - :param page_token: opaque marker for the next "page" of entries. If not - passed, the API will return the first page of - entries. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry` - accessible to the current API. + Args: + resource_names (Sequence[str]): Names of one or more parent resources + from which to retrieve log entries: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + filter_ (str): a filter expression. See + https://cloud.google.com/logging/docs/view/advanced_filters + order_by (str) One of :data:`~google.cloud.logging_v2.ASCENDING` + or :data:`~google.cloud.logging_v2.DESCENDING`. + page_size (int): maximum number of entries to return, If not passed, + defaults to a value set by the API. + page_token (str): opaque marker for the next "page" of entries. If not + passed, the API will return the first page of + entries. + + Returns: + Iterator[google.cloud.logging_v2.LogEntry] """ - extra_params = {"projectIds": projects} + extra_params = {"resourceNames": resource_names} if filter_ is not None: extra_params["filter"] = filter_ @@ -133,28 +127,35 @@ def list_entries( iterator._HTTP_METHOD = "POST" return iterator - def write_entries(self, entries, logger_name=None, resource=None, labels=None): - """API call: log an entry resource via a POST request - + def write_entries(self, entries, *, logger_name=None, resource=None, labels=None, partial_success=False, dry_run=False): + """Log an entry resource via a POST request + See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write - :type entries: sequence of mapping - :param entries: the log entry resources to log. - - :type logger_name: str - :param logger_name: name of default logger to which to log the entries; - individual entries may override. - - :type resource: mapping - :param resource: default resource to associate with entries; - individual entries may override. - - :type labels: mapping - :param labels: default labels to associate with entries; - individual entries may override. + Args: + entries (Sequence[Mapping[str, ...]]): sequence of mappings representing + the log entry resources to log. + logger_name (Optional[str]): name of default logger to which to log the entries; + individual entries may override. + resource(Optional[Mapping[str, ...]]): default resource to associate with entries; + individual entries may override. + labels (Optional[Mapping[str, ...]]): default labels to associate with entries; + individual entries may override. + partial_success (Optional[bool]): Whether valid entries should be written even if + some other entries fail due to INVALID_ARGUMENT or + PERMISSION_DENIED errors. If any entry is not written, then + the response status is the error associated with one of the + failed entries and the response includes error details keyed + by the entries' zero-based index in the ``entries.write`` + method. + dry_run (Optional[bool]): + If true, the request should expect normal response, + but the entries won't be persisted nor exported. + Useful for checking whether the logging API endpoints are working + properly before sending valuable data. 
""" - data = {"entries": list(entries)} + data = {"entries": list(entries), "partialSuccess": partial_success, "dry_run": dry_run} if logger_name is not None: data["logName"] = logger_name @@ -167,19 +168,24 @@ def write_entries(self, entries, logger_name=None, resource=None, labels=None): self.api_request(method="POST", path="/entries:write", data=data) - def logger_delete(self, project, logger_name): - """API call: delete all entries in a logger via a DELETE request + def logger_delete(self, logger_name): + """Delete all entries in a logger. - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete + Args: + logger_name (str): The resource name of the log to delete: - :type project: str - :param project: ID of project containing the log entries to delete + :: - :type logger_name: str - :param logger_name: name of logger containing the log entries to delete + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example, + ``"projects/my-project-id/logs/syslog"``, + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. """ - path = "/projects/%s/logs/%s" % (project, logger_name) + path = f"/{logger_name}" self.api_request(method="DELETE", path=path) @@ -188,44 +194,42 @@ class _SinksAPI(object): See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks - - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The client used to make API requests. """ def __init__(self, client): self._client = client self.api_request = client._connection.api_request - def list_sinks(self, project, page_size=None, page_token=None): - """List sinks for the project associated with this client. + def list_sinks(self, parent, *, page_size=None, page_token=None): + """List sinks for the parent resource. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list - :type project: str - :param project: ID of the project whose sinks are to be listed. + Args: + parent (str): The parent resource whose sinks are to be listed: - :type page_size: int - :param page_size: maximum number of sinks to return, If not passed, - defaults to a value set by the API. + :: - :type page_token: str - :param page_token: opaque marker for the next "page" of sinks. If not - passed, the API will return the first page of - sinks. + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + page_size (Optional[int]): Maximum number of sinks to return, If not passed, + defaults to a value set by the API. + page_token (Optional[str]): Opaque marker for the next "page" of sinks. If not + passed, the API will return the first page of + sinks. - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of - :class:`~google.cloud.logging.sink.Sink` - accessible to the current API. 
+ Returns: + Iterator[google.cloud.logging_v2.Sink] """ extra_params = {} if page_size is not None: extra_params["pageSize"] = page_size - path = "/projects/%s/sinks" % (project,) + path = f"/{parent}/sinks" return page_iterator.HTTPIterator( client=self._client, api_request=self._client._connection.api_request, @@ -237,110 +241,109 @@ def list_sinks(self, project, page_size=None, page_token=None): ) def sink_create( - self, project, sink_name, filter_, destination, unique_writer_identity=False + self, parent, sink_name, filter_, destination, *, unique_writer_identity=False ): - """API call: create a sink resource. + """Create a sink resource. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create - :type project: str - :param project: ID of the project in which to create the sink. - - :type sink_name: str - :param sink_name: the name of the sink - - :type filter_: str - :param filter_: the advanced logs filter expression defining the - entries exported by the sink. - - :type destination: str - :param destination: destination URI for the entries exported by - the sink. - - :type unique_writer_identity: bool - :param unique_writer_identity: (Optional) determines the kind of - IAM identity returned as - writer_identity in the new sink. - - :rtype: dict - :returns: The returned (created) resource. + Args: + parent(str): The resource in which to create the sink: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + sink_name (str): The name of the sink. + filter_ (str): The advanced logs filter expression defining the + entries exported by the sink. + destination (str): Destination URI for the entries exported by + the sink. + unique_writer_identity (Optional[bool]): determines the kind of + IAM identity returned as writer_identity in the new sink. + + Returns: + dict: The sink resource returned from the API. """ - target = "/projects/%s/sinks" % (project,) + target = f"/{parent}/sinks" data = {"name": sink_name, "filter": filter_, "destination": destination} query_params = {"uniqueWriterIdentity": unique_writer_identity} return self.api_request( method="POST", path=target, data=data, query_params=query_params ) - def sink_get(self, project, sink_name): - """API call: retrieve a sink resource. + def sink_get(self, sink_name): + """Retrieve a sink resource. - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get + Args: + sink_name (str): The resource name of the sink: - :type project: str - :param project: ID of the project containing the sink. + :: - :type sink_name: str - :param sink_name: the name of the sink + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" - :rtype: dict - :returns: The JSON sink object returned from the API. + Returns: + dict: The JSON sink object returned from the API. """ - target = "/projects/%s/sinks/%s" % (project, sink_name) + target = f"/{sink_name}" return self.api_request(method="GET", path=target) def sink_update( - self, project, sink_name, filter_, destination, unique_writer_identity=False + self, sink_name, filter_, destination, *, unique_writer_identity=False ): - """API call: update a sink resource. + """Update a sink resource. 
- See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update - - :type project: str - :param project: ID of the project containing the sink. + Args: + sink_name (str): Required. The resource name of the sink: - :type sink_name: str - :param sink_name: the name of the sink + :: - :type filter_: str - :param filter_: the advanced logs filter expression defining the - entries exported by the sink. + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" + filter_ (str): The advanced logs filter expression defining the + entries exported by the sink. + destination (str): destination URI for the entries exported by + the sink. + unique_writer_identity (Optional[bool]): determines the kind of + IAM identity returned as writer_identity in the new sink. - :type destination: str - :param destination: destination URI for the entries exported by - the sink. - :type unique_writer_identity: bool - :param unique_writer_identity: (Optional) determines the kind of - IAM identity returned as - writer_identity in the new sink. - - :rtype: dict - :returns: The returned (updated) resource. + Returns: + dict: The returned (updated) resource. """ - target = "/projects/%s/sinks/%s" % (project, sink_name) - data = {"name": sink_name, "filter": filter_, "destination": destination} + target = f"/{sink_name}" + name = sink_name.split("/")[-1] # parse name out of full resoure name + data = {"name": name, "filter": filter_, "destination": destination} query_params = {"uniqueWriterIdentity": unique_writer_identity} return self.api_request( method="PUT", path=target, query_params=query_params, data=data ) - def sink_delete(self, project, sink_name): - """API call: delete a sink resource. + def sink_delete(self, sink_name): + """Delete a sink resource. - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete + Args: + sink_name (str): Required. The full resource name of the sink to delete, + including the parent resource and the sink identifier: + + :: - :type project: str - :param project: ID of the project containing the sink. + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" - :type sink_name: str - :param sink_name: the name of the sink + Example: ``"projects/my-project-id/sinks/my-sink-id"``. """ - target = "/projects/%s/sinks/%s" % (project, sink_name) + target = f"/{sink_name}" self.api_request(method="DELETE", path=target) @@ -358,35 +361,31 @@ def __init__(self, client): self._client = client self.api_request = client._connection.api_request - def list_metrics(self, project, page_size=None, page_token=None): + def list_metrics(self, project, *, page_size=None, page_token=None): """List metrics for the project associated with this client. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list - :type project: str - :param project: ID of the project whose metrics are to be listed. - - :type page_size: int - :param page_size: maximum number of metrics to return, If not passed, - defaults to a value set by the API. - - :type page_token: str - :param page_token: opaque marker for the next "page" of metrics. If not - passed, the API will return the first page of - metrics. 
@@ -358,35 +361,31 @@ def __init__(self, client):
        self._client = client
        self.api_request = client._connection.api_request

-    def list_metrics(self, project, page_size=None, page_token=None):
+    def list_metrics(self, project, *, page_size=None, page_token=None):
        """List metrics for the project associated with this client.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list

-        :type project: str
-        :param project: ID of the project whose metrics are to be listed.
-
-        :type page_size: int
-        :param page_size: maximum number of metrics to return, If not passed,
-                          defaults to a value set by the API.
-
-        :type page_token: str
-        :param page_token: opaque marker for the next "page" of metrics. If not
-                           passed, the API will return the first page of
-                           metrics.
-
-        :rtype: :class:`~google.api_core.page_iterator.Iterator`
-        :returns: Iterator of
-                  :class:`~google.cloud.logging.metric.Metric`
-                  accessible to the current API.
+        Args:
+            project (str): ID of the project whose metrics are to be listed.
+            page_size (Optional[int]): The maximum number of metrics in each
+                page of results from this request. Non-positive values are ignored. Defaults to a
+                sensible value set by the API.
+            page_token (Optional[str]): If present, return the next batch of metrics, using the
+                value, which must correspond to the ``nextPageToken`` value
+                returned in the previous response. Deprecated: use the ``pages``
+                property of the returned iterator instead of manually passing the
+                token.
+
+        Returns:
+            Iterator[google.cloud.logging_v2.metric.Metric]
        """
        extra_params = {}

        if page_size is not None:
            extra_params["pageSize"] = page_size

-        path = "/projects/%s/metrics" % (project,)
+        path = f"/projects/{project}/metrics"
        return page_iterator.HTTPIterator(
            client=self._client,
            api_request=self._client._connection.api_request,
@@ -397,86 +396,64 @@ def list_metrics(self, project, page_size=None, page_token=None):
            extra_params=extra_params,
        )

-    def metric_create(self, project, metric_name, filter_, description=None):
-        """API call: create a metric resource.
+    def metric_create(self, project, metric_name, filter_, description):
+        """Create a metric resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create

-        :type project: str
-        :param project: ID of the project in which to create the metric.
-
-        :type metric_name: str
-        :param metric_name: the name of the metric
-
-        :type filter_: str
-        :param filter_: the advanced logs filter expression defining the
-                        entries exported by the metric.
-
-        :type description: str
-        :param description: description of the metric.
+        Args:
+            project (str): ID of the project in which to create the metric.
+            metric_name (str): The name of the metric
+            filter_ (str): The advanced logs filter expression defining the
+                entries exported by the metric.
+            description (str): description of the metric.
        """
-        target = "/projects/%s/metrics" % (project,)
+        target = f"/projects/{project}/metrics"
        data = {"name": metric_name, "filter": filter_, "description": description}
        self.api_request(method="POST", path=target, data=data)

    def metric_get(self, project, metric_name):
-        """API call: retrieve a metric resource.
-
-        See
-        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get
+        """Retrieve a metric resource.

-        :type project: str
-        :param project: ID of the project containing the metric.
+        Args:
+            project (str): ID of the project containing the metric.
+            metric_name (str): The name of the metric

-        :type metric_name: str
-        :param metric_name: the name of the metric
-
-        :rtype: dict
-        :returns: The JSON metric object returned from the API.
+        Returns:
+            dict: The JSON metric object returned from the API.
        """
-        target = "/projects/%s/metrics/%s" % (project, metric_name)
+        target = f"/projects/{project}/metrics/{metric_name}"
        return self.api_request(method="GET", path=target)

    def metric_update(self, project, metric_name, filter_, description):
-        """API call: update a metric resource.
+        """Update a metric resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/update

-        :type project: str
-        :param project: ID of the project containing the metric.
-
-        :type metric_name: str
-        :param metric_name: the name of the metric
-
-        :type filter_: str
-        :param filter_: the advanced logs filter expression defining the
-                        entries exported by the metric.
- - :type description: str - :param description: description of the metric. - - :rtype: dict - :returns: The returned (updated) resource. + Args: + project (str): ID of the project containing the metric. + metric_name (str): the name of the metric + filter_ (str): the advanced logs filter expression defining the + entries exported by the metric. + description (str): description of the metric. + + Returns: + dict: The returned (updated) resource. """ - target = "/projects/%s/metrics/%s" % (project, metric_name) + target = f"/projects/{project}/metrics/{metric_name}" data = {"name": metric_name, "filter": filter_, "description": description} return self.api_request(method="PUT", path=target, data=data) def metric_delete(self, project, metric_name): - """API call: delete a metric resource. - - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete + """Delete a metric resource. - :type project: str - :param project: ID of the project containing the metric. - - :type metric_name: str - :param metric_name: the name of the metric. + Args: + project (str): ID of the project containing the metric. + metric_name (str): The name of the metric """ - target = "/projects/%s/metrics/%s" % (project, metric_name) + target = f"/projects/{project}/metrics/{metric_name}" self.api_request(method="DELETE", path=target) @@ -492,20 +469,17 @@ def _item_to_entry(iterator, resource, loggers): on subsequent calls. For an example, see how the method is used above in :meth:`_LoggingAPI.list_entries`. - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type resource: dict - :param resource: Log entry JSON resource returned from the API. - - :type loggers: dict - :param loggers: - A mapping of logger fullnames -> loggers. If the logger - that owns the entry is not in ``loggers``, the entry - will have a newly-created logger. - - :rtype: :class:`~google.cloud.logging.entries._BaseEntry` - :returns: The next log entry in the page. + Args: + iterator (google.api_core.page_iterator.Iterator): The iterator that + is currently in use. + resource (dict): Log entry JSON resource returned from the API. + loggers (Mapping[str, google.cloud.logging_v2.logger.Logger]): + A mapping of logger fullnames -> loggers. If the logger + that owns the entry is not in ``loggers``, the entry + will have a newly-created logger. + + Returns: + google.cloud.logging_v2.entries._BaseEntry: The next log entry in the page. """ return entry_from_resource(resource, iterator.client, loggers) @@ -513,14 +487,13 @@ def _item_to_entry(iterator, resource, loggers): def _item_to_sink(iterator, resource): """Convert a sink resource to the native object. - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. - - :type resource: dict - :param resource: Sink JSON resource returned from the API. + Args: + iterator (google.api_core.page_iterator.Iterator): The iterator that + is currently in use. + resource (dict): Sink JSON resource returned from the API. - :rtype: :class:`~google.cloud.logging.sink.Sink` - :returns: The next sink in the page. + Returns: + google.cloud.logging_v2.sink.Sink: The next sink in the page. """ return Sink.from_api_repr(resource, iterator.client) @@ -528,13 +501,13 @@ def _item_to_sink(iterator, resource): def _item_to_metric(iterator, resource): """Convert a metric resource to the native object. 
-    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
-    :param iterator: The iterator that is currently in use.
-
-    :type resource: dict
-    :param resource: Metric JSON resource returned from the API.
+    Args:
+        iterator (google.api_core.page_iterator.Iterator): The iterator that
+            is currently in use.
+        resource (dict): Metric JSON resource returned from the API.

-    :rtype: :class:`~google.cloud.logging.metric.Metric`
-    :returns: The next metric in the page.
+    Returns:
+        google.cloud.logging_v2.metric.Metric:
+            The next metric in the page.
    """
    return Metric.from_api_repr(resource, iterator.client)
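These ``_item_to_*`` helpers are the ``item_to_value`` callbacks that ``google.api_core.page_iterator.HTTPIterator`` invokes for each raw JSON item in a page. A simplified sketch of the pattern (hypothetical path; not the library's exact wiring)::

    from google.api_core import page_iterator

    def _item_to_dict(iterator, resource):
        # Called once per JSON item in each fetched page.
        return dict(resource)

    iterator = page_iterator.HTTPIterator(
        client=client,  # assumes a configured client object
        api_request=client._connection.api_request,
        path="/projects/my-project/metrics",  # hypothetical path
        item_to_value=_item_to_dict,
        items_key="metrics",
    )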
diff --git a/google/cloud/logging_v2/client.py b/google/cloud/logging_v2/client.py
index 6883acee7..9633d0b7a 100644
--- a/google/cloud/logging_v2/client.py
+++ b/google/cloud/logging_v2/client.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Client for interacting with the Google Stackdriver Logging API."""
+"""Client for interacting with the Google Cloud Logging API."""

 import logging
 import os
@@ -59,49 +59,7 @@

 class Client(ClientWithProject):
-    """Client to bundle configuration needed for API requests.
-
-    :type project: str
-    :param project: the project which the client acts on behalf of.
-                    If not passed, falls back to the default inferred
-                    from the environment.
-
-    :type credentials: :class:`~google.auth.credentials.Credentials`
-    :param credentials: (Optional) The OAuth2 Credentials to use for this
-                        client. If not passed (and if no ``_http`` object is
-                        passed), falls back to the default inferred from the
-                        environment.
-
-    :type _http: :class:`~requests.Session`
-    :param _http: (Optional) HTTP object to make requests. Can be any object
-                  that defines ``request()`` with the same interface as
-                  :meth:`requests.Session.request`. If not passed, an
-                  ``_http`` object is created that is bound to the
-                  ``credentials`` for the current object.
-                  This parameter should be considered private, and could
-                  change in the future.
-
-    :type _use_grpc: bool
-    :param _use_grpc: (Optional) Explicitly specifies whether
-                      to use the gRPC transport or HTTP. If unset,
-                      falls back to the ``GOOGLE_CLOUD_DISABLE_GRPC``
-                      environment variable
-                      This parameter should be considered private, and could
-                      change in the future.
-
-    :type client_info:
-        :class:`google.api_core.client_info.ClientInfo` or
-        :class:`google.api_core.gapic_v1.client_info.ClientInfo`
-    :param client_info:
-        The client info used to send a user-agent string along with API
-        requests. If ``None``, then default info will be used. Generally,
-        you only need to set this if you're developing your own library
-        or partner tool.
-    :type client_options: :class:`~google.api_core.client_options.ClientOptions`
-        or :class:`dict`
-    :param client_options: (Optional) Client options used to set user options
-        on the client. API Endpoint should be set through client_options.
-    """
+    """Client to bundle configuration needed for API requests."""

    _logging_api = None
    _sinks_api = None
@@ -117,6 +75,7 @@ class Client(ClientWithProject):

    def __init__(
        self,
+        *,
        project=None,
        credentials=None,
        _http=None,
@@ -124,6 +83,38 @@ def __init__(
        client_info=None,
        client_options=None,
    ):
+        """
+        Args:
+            project (Optional[str]): the project which the client acts on behalf of.
+                If not passed, falls back to the default inferred
+                from the environment.
+            credentials (Optional[google.auth.credentials.Credentials]):
+                The OAuth2 Credentials to use for this
+                client. If not passed (and if no ``_http`` object is
+                passed), falls back to the default inferred from the
+                environment.
+            _http (Optional[requests.Session]): HTTP object to make requests.
+                Can be any object that defines ``request()`` with the same interface as
+                :meth:`requests.Session.request`. If not passed, an
+                ``_http`` object is created that is bound to the
+                ``credentials`` for the current object.
+                This parameter should be considered private, and could
+                change in the future.
+            _use_grpc (Optional[bool]): Explicitly specifies whether
+                to use the gRPC transport or HTTP. If unset,
+                falls back to the ``GOOGLE_CLOUD_DISABLE_GRPC``
+                environment variable.
+                This parameter should be considered private, and could
+                change in the future.
+            client_info (Optional[Union[google.api_core.client_info.ClientInfo, google.api_core.gapic_v1.client_info.ClientInfo]]):
+                The client info used to send a user-agent string along with API
+                requests. If ``None``, then default info will be used. Generally,
+                you only need to set this if you're developing your own library
+                or partner tool.
+            client_options (Optional[Union[dict, google.api_core.client_options.ClientOptions]]):
+                Client options used to set user options
+                on the client. API Endpoint should be set through client_options.
+        """
        super(Client, self).__init__(
            project=project,
            credentials=credentials,
@@ -196,178 +187,155 @@ def metrics_api(self):

    def logger(self, name):
        """Creates a logger bound to the current client.

-        :type name: str
-        :param name: the name of the logger to be constructed.
-
-        :rtype: :class:`google.cloud.logging.logger.Logger`
-        :returns: Logger created with the current client.
+        Args:
+            name (str): The name of the logger to be constructed.
+
+        Returns:
+            google.cloud.logging_v2.logger.Logger: Logger created with the current client.
        """
        return Logger(name, client=self)
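A short construction sketch (hypothetical project ID) showing that the constructor arguments become keyword-only with this change::

    from google.cloud.logging_v2.client import Client

    # All constructor arguments are keyword-only after this change.
    client = Client(project="my-project")

    # Positional use now fails fast rather than silently misbinding:
    # Client("my-project")  -> TypeError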
- """ - if projects is None: - projects = [self.project] + Args: + resource_names (Sequence[str]): Names of one or more parent resources + from which to retrieve log entries: + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + + If not passed, defaults to the project bound to the API's client. + + filter_ (str): a filter expression. See + https://cloud.google.com/logging/docs/view/advanced_filters + order_by (str) One of :data:`~google.cloud.logging_v2.ASCENDING` + or :data:`~google.cloud.logging_v2.DESCENDING`. + page_size (int): maximum number of entries to return, If not passed, + defaults to a value set by the API. + page_token (str): opaque marker for the next "page" of entries. If not + passed, the API will return the first page of + entries. + + Returns: + Iterable[LogEntry] + """ + if resource_names is None: + resource_names = [f"projects/{self.project}"] filter_ = _add_defaults_to_filter(filter_) return self.logging_api.list_entries( - projects=projects, + resource_names=resource_names, filter_=filter_, order_by=order_by, page_size=page_size, page_token=page_token, ) - def sink(self, name, filter_=None, destination=None): + def sink(self, name, *, filter_=None, destination=None): """Creates a sink bound to the current client. - :type name: str - :param name: the name of the sink to be constructed. - - :type filter_: str - :param filter_: (optional) the advanced logs filter expression - defining the entries exported by the sink. If not - passed, the instance should already exist, to be - refreshed via :meth:`Sink.reload`. - - :type destination: str - :param destination: destination URI for the entries exported by - the sink. If not passed, the instance should - already exist, to be refreshed via - :meth:`Sink.reload`. - - :rtype: :class:`google.cloud.logging.sink.Sink` - :returns: Sink created with the current client. + Args: + name (str): the name of the sink to be constructed. + filter_ (Optional[str]): the advanced logs filter expression + defining the entries exported by the sink. If not + passed, the instance should already exist, to be + refreshed via :meth:`Sink.reload`. + destination (str): destination URI for the entries exported by + the sink. If not passed, the instance should + already exist, to be refreshed via + :meth:`Sink.reload`. + + Returns: + google.cloud.logging_v2.sink.Sink: Sink created with the current client. """ - return Sink(name, filter_, destination, client=self) + return Sink(name, filter_=filter_, destination=destination, client=self) - def list_sinks(self, page_size=None, page_token=None): - """List sinks for the project associated with this client. + def list_sinks(self, *, parent=None, page_size=None, page_token=None): + """List sinks for the a parent resource. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list - :type page_size: int - :param page_size: - Optional. The maximum number of sinks in each page of results from - this request. Non-positive values are ignored. Defaults to a - sensible value set by the API. - - :type page_token: str - :param page_token: - Optional. If present, return the next batch of sinks, using the - value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing the - token. 
-    def list_sinks(self, page_size=None, page_token=None):
-        """List sinks for the project associated with this client.
+    def list_sinks(self, *, parent=None, page_size=None, page_token=None):
+        """List sinks for a parent resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list

-        :type page_size: int
-        :param page_size:
-            Optional. The maximum number of sinks in each page of results from
-            this request. Non-positive values are ignored. Defaults to a
-            sensible value set by the API.
-
-        :type page_token: str
-        :param page_token:
-            Optional. If present, return the next batch of sinks, using the
-            value, which must correspond to the ``nextPageToken`` value
-            returned in the previous response. Deprecated: use the ``pages``
-            property of the returned iterator instead of manually passing the
-            token.
-
-        :rtype: :class:`~google.api_core.page_iterator.Iterator`
-        :returns: Iterator of
-                  :class:`~google.cloud.logging.sink.Sink`
-                  accessible to the current client.
+        Args:
+            parent (Optional[str]): The parent resource whose sinks are to be listed:
+
+                ::
+
+                    "projects/[PROJECT_ID]"
+                    "organizations/[ORGANIZATION_ID]"
+                    "billingAccounts/[BILLING_ACCOUNT_ID]"
+                    "folders/[FOLDER_ID]".
+
+                If not passed, defaults to the project bound to the API's client.
+            page_size (Optional[int]): The maximum number of sinks in each
+                page of results from this request. Non-positive values are ignored. Defaults to a
+                sensible value set by the API.
+            page_token (Optional[str]): If present, return the next batch of sinks, using the
+                value, which must correspond to the ``nextPageToken`` value
+                returned in the previous response. Deprecated: use the ``pages``
+                property of the returned iterator instead of manually passing the
+                token.
+
+        Returns:
+            Iterator[google.cloud.logging_v2.sink.Sink]
        """
-        return self.sinks_api.list_sinks(self.project, page_size, page_token)
+        if parent is None:
+            parent = f"projects/{self.project}"
+        return self.sinks_api.list_sinks(parent=parent, page_size=page_size, page_token=page_token)

-    def metric(self, name, filter_=None, description=""):
+    def metric(self, name, *, filter_=None, description=""):
        """Creates a metric bound to the current client.

-        :type name: str
-        :param name: the name of the metric to be constructed.
-
-        :type filter_: str
-        :param filter_: the advanced logs filter expression defining the
-                        entries tracked by the metric. If not
-                        passed, the instance should already exist, to be
-                        refreshed via :meth:`Metric.reload`.
-
-        :type description: str
-        :param description: the description of the metric to be constructed.
-                            If not passed, the instance should already exist,
-                            to be refreshed via :meth:`Metric.reload`.
-
-        :rtype: :class:`google.cloud.logging.metric.Metric`
-        :returns: Metric created with the current client.
+        Args:
+            name (str): The name of the metric to be constructed.
+            filter_ (Optional[str]): The advanced logs filter expression defining the
+                entries tracked by the metric. If not
+                passed, the instance should already exist, to be
+                refreshed via :meth:`Metric.reload`.
+            description (Optional[str]): The description of the metric to be constructed.
+                If not passed, the instance should already exist,
+                to be refreshed via :meth:`Metric.reload`.
+
+        Returns:
+            google.cloud.logging_v2.metric.Metric: Metric created with the current client.
        """
-        return Metric(name, filter_, client=self, description=description)
+        return Metric(name, filter_=filter_, client=self, description=description)
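A minimal sketch of ``list_sinks`` with the new ``parent`` argument shown earlier in this hunk (hypothetical organization ID, assuming a configured client)::

    # Sinks can now be listed under any parent resource, not just a project.
    for sink in client.list_sinks(parent="organizations/1234567890"):
        print(sink.name, sink.destination)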
-    def list_metrics(self, page_size=None, page_token=None):
+    def list_metrics(self, *, page_size=None, page_token=None):
        """List metrics for the project associated with this client.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list

-        :type page_size: int
-        :param page_size:
-            Optional. The maximum number of metrics in each page of results
-            from this request. Non-positive values are ignored. Defaults to a
-            sensible value set by the API.
-
-        :type page_token: str
-        :param page_token:
-            Optional. If present, return the next batch of metrics, using the
-            value, which must correspond to the ``nextPageToken`` value
-            returned in the previous response. Deprecated: use the ``pages``
-            property of the returned iterator instead of manually passing the
-            token.
-
-        :rtype: :class:`~google.api_core.page_iterator.Iterator`
-        :returns: Iterator of :class:`~google.cloud.logging.metric.Metric`
-                  accessible to the current client.
+        Args:
+            page_size (Optional[int]): The maximum number of metrics in each
+                page of results from this request. Non-positive values are ignored. Defaults to a
+                sensible value set by the API.
+            page_token (Optional[str]): If present, return the next batch of metrics, using the
+                value, which must correspond to the ``nextPageToken`` value
+                returned in the previous response. Deprecated: use the ``pages``
+                property of the returned iterator instead of manually passing the
+                token.
+
+        Returns:
+            Iterator[google.cloud.logging_v2.metric.Metric]
        """
-        return self.metrics_api.list_metrics(self.project, page_size, page_token)
+        return self.metrics_api.list_metrics(self.project, page_size=page_size, page_token=page_token)

    def get_default_handler(self, **kw):
        """Return the default logging handler based on the local environment.

-        :type kw: dict
-        :param kw: keyword args passed to handler constructor
+        Args:
+            kw (dict): keyword args passed to handler constructor

-        :rtype: :class:`logging.Handler`
-        :returns: The default log handler based on the environment
+        Returns:
+            logging.Handler: The default log handler based on the environment
        """
        gke_cluster_name = retrieve_metadata_server(_GKE_CLUSTER_NAME)
@@ -382,27 +350,24 @@ def get_default_handler(self, **kw):
        return CloudLoggingHandler(self, **kw)

    def setup_logging(
-        self, log_level=logging.INFO, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, **kw
+        self, *, log_level=logging.INFO, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, **kw
    ):
-        """Attach default Stackdriver logging handler to the root logger.
+        """Attach default Cloud Logging handler to the root logger.

        This method uses the default log handler, obtained by
        :meth:`~get_default_handler`, and attaches it to the root Python
        logger, so that a call such as ``logging.warn``, as well as all child
-        loggers, will report to Stackdriver logging.
-
-        :type log_level: int
-        :param log_level: (Optional) Python logging log level. Defaults to
-                          :const:`logging.INFO`.
-
-        :type excluded_loggers: tuple
-        :param excluded_loggers: (Optional) The loggers to not attach the
-                                 handler to. This will always include the
-                                 loggers in the path of the logging client
-                                 itself.
-
-        :type kw: dict
-        :param kw: keyword args passed to handler constructor
+        loggers, will report to Cloud Logging.
+
+        Args:
+            log_level (Optional[int]): Python logging log level. Defaults to
+                :const:`logging.INFO`.
+            excluded_loggers (Optional[Tuple[str]]): The loggers to not attach the
+                handler to. This will always include the
+                loggers in the path of the logging client
+                itself.
+            kw (dict): keyword args passed to handler constructor
        """
        handler = self.get_default_handler(**kw)
        setup_logging(handler, log_level=log_level, excluded_loggers=excluded_loggers)
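A short usage sketch of ``setup_logging`` with the now keyword-only arguments (hypothetical project ID)::

    import logging
    from google.cloud.logging_v2.client import Client

    client = Client(project="my-project")
    client.setup_logging(log_level=logging.DEBUG)  # log_level is keyword-only now

    logging.warning("this record is routed to Cloud Logging")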
diff --git a/google/cloud/logging_v2/entries.py b/google/cloud/logging_v2/entries.py
index 6d3b0eef3..d2faf0d04 100644
--- a/google/cloud/logging_v2/entries.py
+++ b/google/cloud/logging_v2/entries.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Log entries within the Google Stackdriver Logging API."""
+"""Log entries within the Google Cloud Logging API."""

 import collections
 import json
@@ -45,14 +45,15 @@ def logger_name_from_path(path):
     """Validate a logger URI path and get the logger name.

-    :type path: str
-    :param path: URI path for a logger API request.
+    Args:
+        path (str): URI path for a logger API request

-    :rtype: str
-    :returns: Logger name parsed from ``path``.
-    :raises: :class:`ValueError` if the ``path`` is ill-formed or if
-             the project from the ``path`` does not agree with the
-             ``project`` passed in.
+    Returns:
+        str: Logger name parsed from ``path``.
+
+    Raises:
+        ValueError: If the ``path`` is ill-formed or if the project
+            from ``path`` does not agree with the ``project`` passed in.
    """
    return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
@@ -91,50 +92,28 @@ def _int_or_none(value):

 _LOG_ENTRY_PARAM_DOCSTRING = """\
-    :type log_name: str
-    :param log_name: the name of the logger used to post the entry.
-
-    :type labels: dict
-    :param labels: (optional) mapping of labels for the entry
-
-    :type insert_id: text
-    :param insert_id: (optional) the ID used to identify an entry uniquely.
-
-    :type severity: str
-    :param severity: (optional) severity of event being logged.
-
-    :type http_request: dict
-    :param http_request: (optional) info about HTTP request associated with
-                         the entry.
-
-    :type timestamp: :class:`datetime.datetime`
-    :param timestamp: (optional) timestamp for the entry
-
-    :type resource: :class:`~google.cloud.logging.resource.Resource`
-    :param resource: (Optional) Monitored resource of the entry
-
-    :type trace: str
-    :param trace: (optional) traceid to apply to the entry.
-
-    :type span_id: str
-    :param span_id: (optional) span_id within the trace for the log entry.
-                    Specify the trace parameter if span_id is set.
-
-    :type trace_sampled: bool
-    :param trace_sampled: (optional) the sampling decision of the trace
-                          associated with the log entry.
-
-    :type source_location: dict
-    :param source_location: (optional) location in source code from which
-                            the entry was emitted.
-
-    :type operation: dict
-    :param operation: (optional) additional information about a potentially
-                      long-running operation associated with the log entry.
-
-    :type logger: :class:`google.cloud.logging.logger.Logger`
-    :param logger: the logger used to write the entry.
-
+    Args:
+        log_name (str): The name of the logger used to post the entry.
+        labels (Optional[dict]): Mapping of labels for the entry
+        insert_id (Optional[str]): The ID used to identify an entry
+            uniquely.
+        severity (Optional[str]): The severity of the event being logged.
+        http_request (Optional[dict]): Info about HTTP request associated
+            with the entry.
+        timestamp (Optional[datetime.datetime]): Timestamp for the entry.
+        resource (Optional[google.cloud.logging_v2.resource.Resource]):
+            Monitored resource of the entry.
+        trace (Optional[str]): Trace ID to apply to the entry.
+        span_id (Optional[str]): Span ID within the trace for the log
+            entry. Specify the trace parameter if ``span_id`` is set.
+        trace_sampled (Optional[bool]): The sampling decision of the trace
+            associated with the log entry.
+        source_location (Optional[dict]): Location in source code from which
+            the entry was emitted.
+        operation (Optional[dict]): Additional information about a potentially
+            long-running operation associated with the log entry.
+        logger (google.cloud.logging_v2.logger.Logger): the logger used
+            to write the entry.
"""
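A quick sketch of the ``logger_name_from_path`` helper documented above (hypothetical project ID)::

    from google.cloud.logging_v2.entries import logger_name_from_path

    name = logger_name_from_path("projects/my-project/logs/syslog")
    # name == "syslog"; an ill-formed path raises ValueError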
""" _LOG_ENTRY_SEE_ALSO_DOCSTRING = """\ @@ -162,24 +141,20 @@ def _extract_payload(cls, resource): return None @classmethod - def from_api_repr(cls, resource, client, loggers=None): - """Factory: construct an entry given its API representation - - :type resource: dict - :param resource: text entry resource representation returned from - the API - - :type client: :class:`google.cloud.logging.client.Client` - :param client: Client which holds credentials and project - configuration. - - :type loggers: dict - :param loggers: - (Optional) A mapping of logger fullnames -> loggers. If not - passed, the entry will have a newly-created logger. - - :rtype: :class:`google.cloud.logging.entries.LogEntry` - :returns: Log entry parsed from ``resource``. + def from_api_repr(cls, resource, client, *, loggers=None): + """Construct an entry given its API representation + + Args: + resource (dict): text entry resource representation returned from + the API + client (google.cloud.logging_v2.client.Client): + Client which holds credentials and project configuration. + loggers (Optional[dict]): + A mapping of logger fullnames -> loggers. If not + passed, the entry will have a newly-created logger. + + Returns: + google.cloud.logging.entries.LogEntry: Log entry parsed from ``resource``. """ if loggers is None: loggers = {} @@ -272,8 +247,7 @@ class TextEntry(LogEntry): + _LOG_ENTRY_PARAM_DOCSTRING + """ - :type payload: str | unicode - :param payload: payload for the log entry. + payload (str): payload for the log entry. """ + _LOG_ENTRY_SEE_ALSO_DOCSTRING ) @@ -299,8 +273,7 @@ class StructEntry(LogEntry): + _LOG_ENTRY_PARAM_DOCSTRING + """ - :type payload: dict - :param payload: payload for the log entry. + payload (dict): payload for the log entry. """ + _LOG_ENTRY_SEE_ALSO_DOCSTRING ) @@ -326,8 +299,7 @@ class ProtobufEntry(LogEntry): + _LOG_ENTRY_PARAM_DOCSTRING + """ - :type payload: protobuf message - :param payload: payload for the log entry. + payload (google.protobuf.Message): payload for the log entry. """ + _LOG_ENTRY_SEE_ALSO_DOCSTRING ) @@ -358,8 +330,8 @@ def parse_message(self, message): Mutates the passed-in ``message`` in place. - :type message: Protobuf message - :param message: the message to be logged + Args: + message (google.protobuf.Message): the message to be logged """ # NOTE: This assumes that ``payload`` is already a deserialized # ``Any`` field and ``message`` has come from an imported diff --git a/google/cloud/logging_v2/handlers/app_engine.py b/google/cloud/logging_v2/handlers/app_engine.py index ceef46f59..a269b3e61 100644 --- a/google/cloud/logging_v2/handlers/app_engine.py +++ b/google/cloud/logging_v2/handlers/app_engine.py @@ -14,7 +14,7 @@ """Logging handler for App Engine Flexible -Sends logs to the Stackdriver Logging API with the appropriate resource +Sends logs to the Cloud Logging API with the appropriate resource and labels for App Engine logs. """ diff --git a/google/cloud/logging_v2/handlers/container_engine.py b/google/cloud/logging_v2/handlers/container_engine.py index 823de0285..f2356fe96 100644 --- a/google/cloud/logging_v2/handlers/container_engine.py +++ b/google/cloud/logging_v2/handlers/container_engine.py @@ -31,7 +31,7 @@ class ContainerEngineHandler(logging.StreamHandler): (GKE) fluentd plugin, so that metadata such as log level are properly set. :type name: str - :param name: (optional) the name of the custom log in Stackdriver Logging. + :param name: (optional) the name of the custom log in Cloud Logging. 
    :type stream: file-like object
    :param stream: (optional) stream to be used by the handler.
diff --git a/google/cloud/logging_v2/handlers/handlers.py b/google/cloud/logging_v2/handlers/handlers.py
index 3721f9050..e7c781d05 100644
--- a/google/cloud/logging_v2/handlers/handlers.py
+++ b/google/cloud/logging_v2/handlers/handlers.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Python :mod:`logging` handlers for Stackdriver Logging."""
+"""Python :mod:`logging` handlers for Cloud Logging."""

 import logging

@@ -25,7 +25,7 @@

 class CloudLoggingHandler(logging.StreamHandler):
-    """Handler that directly makes Stackdriver logging API calls.
+    """Handler that directly makes Cloud Logging API calls.

     This is a Python standard ``logging`` handler using that can be used to
     route Python standard logging messages directly to the Stackdriver
@@ -40,7 +40,7 @@ class CloudLoggingHandler(logging.StreamHandler):
         handler to use

     :type name: str
-    :param name: the name of the custom log in Stackdriver Logging. Defaults
+    :param name: the name of the custom log in Cloud Logging. Defaults
         to 'python'. The name of the Python logger will be represented
         in the ``python_logger`` field.

diff --git a/google/cloud/logging_v2/handlers/middleware/request.py b/google/cloud/logging_v2/handlers/middleware/request.py
index 0229e4c8e..da361b967 100644
--- a/google/cloud/logging_v2/handlers/middleware/request.py
+++ b/google/cloud/logging_v2/handlers/middleware/request.py
@@ -27,8 +27,8 @@
 def _get_django_request():
     """Get Django request from thread local.

-    :rtype: str
-    :returns: Django request.
+    Returns:
+        Optional[django.http.request.HttpRequest]: Django request, if set.
    """
    return getattr(_thread_locals, "request", None)
@@ -42,13 +42,14 @@ def _get_django_request():

 class RequestMiddleware(MiddlewareMixin):
     """Saves the request in thread local"""

    def __init__(self, get_response=None):
        self.get_response = get_response

    def process_request(self, request):
        """Called on each request, before Django decides which view to execute.

-        :type request: :class:`~django.http.request.HttpRequest`
-        :param request: Django http request.
+        Args:
+            request (django.http.request.HttpRequest):
+                Django http request.
        """
        _thread_locals.request = request
diff --git a/google/cloud/logging_v2/handlers/transports/background_thread.py b/google/cloud/logging_v2/handlers/transports/background_thread.py
index 406c6c258..986f88f43 100644
--- a/google/cloud/logging_v2/handlers/transports/background_thread.py
+++ b/google/cloud/logging_v2/handlers/transports/background_thread.py
@@ -14,7 +14,7 @@

 """Transport for Python logging handler

-Uses a background worker to log to Stackdriver Logging asynchronously.
+Uses a background worker to log to Cloud Logging asynchronously.
 """

 from __future__ import print_function
@@ -218,7 +218,7 @@ def _main_thread_terminated(self):
        if not self._queue.empty():
            print(
                "Program shutting down, attempting to send %d queued log "
-                "entries to Stackdriver Logging..." % (self._queue.qsize(),),
+                "entries to Cloud Logging..." % (self._queue.qsize(),),
                file=sys.stderr,
            )
diff --git a/google/cloud/logging_v2/handlers/transports/sync.py b/google/cloud/logging_v2/handlers/transports/sync.py
index 1e73c7a8e..f8e4a05bd 100644
--- a/google/cloud/logging_v2/handlers/transports/sync.py
+++ b/google/cloud/logging_v2/handlers/transports/sync.py
@@ -14,7 +14,7 @@

 """Transport for Python logging handler.
-Logs directly to the the Stackdriver Logging API with a synchronous call.
+Logs directly to the Cloud Logging API with a synchronous call.
 """

 from google.cloud.logging_v2 import _helpers
@@ -31,22 +31,18 @@ def __init__(self, client, name):
        self.logger = client.logger(name)

    def send(
-        self, record, message, resource=None, labels=None, trace=None, span_id=None
+        self, record, message, *, resource=None, labels=None, trace=None, span_id=None
    ):
        """Overrides transport.send().

-        :type record: :class:`logging.LogRecord`
-        :param record: Python log record that the handler was called with.
-
-        :type message: str
-        :param message: The message from the ``LogRecord`` after being
-                        formatted by the associated log formatters.
-
-        :type resource: :class:`~google.cloud.logging.resource.Resource`
-        :param resource: (Optional) Monitored resource of the entry.
-
-        :type labels: dict
-        :param labels: (Optional) Mapping of labels for the entry.
+        Args:
+            record (logging.LogRecord):
+                Python log record that the handler was called with.
+            message (str): The message from the ``LogRecord`` after being
+                formatted by the associated log formatters.
+            resource (Optional[google.cloud.logging_v2.resource.Resource]):
+                Monitored resource of the entry.
+            labels (Optional[dict]): Mapping of labels for the entry.
+            trace (Optional[str]): Trace ID to apply to the entry.
+            span_id (Optional[str]): Span ID within the trace for the log
+                entry.
        """
        info = {"message": message, "python_logger": record.name}
        self.logger.log_struct(
diff --git a/google/cloud/logging_v2/logger.py b/google/cloud/logging_v2/logger.py
index 8621558b3..d60e5f58e 100644
--- a/google/cloud/logging_v2/logger.py
+++ b/google/cloud/logging_v2/logger.py
@@ -43,24 +43,23 @@

 class Logger(object):
-    """Loggers represent named targets for log entries.

-    See
-    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs
+    def __init__(self, name, client, *, labels=None):
+        """Loggers represent named targets for log entries.

-    :type name: str
-    :param name: the name of the logger
+        See
+        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs

-    :type client: :class:`google.cloud.logging.client.Client`
-    :param client: A client which holds credentials and project configuration
-                   for the logger (which requires a project).
+        Args:
+            name (str): The name of the logger.
+            client (google.cloud.logging_v2.client.Client):
+                A client which holds credentials and project configuration
+                for the logger (which requires a project).
+            labels (Optional[dict]): Mapping of default labels for entries written
+                via this logger.

-    :type labels: dict
-    :param labels: (optional) mapping of default labels for entries written
-                   via this logger.
-    """

-    def __init__(self, name, client, labels=None):
+        """
        self.name = name
        self._client = client
        self.labels = labels
@@ -78,38 +77,39 @@ def project(self):
    @property
    def full_name(self):
        """Fully-qualified name used in logging APIs"""
-        return "projects/%s/logs/%s" % (self.project, self.name)
+        return f"projects/{self.project}/logs/{self.name}"

    @property
    def path(self):
        """URI path for use in logging APIs"""
-        return "/%s" % (self.full_name,)
+        return f"/{self.full_name}"
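A small sketch of the f-string-based naming properties above (hypothetical project and log IDs, assuming a configured client)::

    logger = client.logger("my-log")
    print(logger.full_name)  # "projects/my-project/logs/my-log"
    print(logger.path)       # "/projects/my-project/logs/my-log"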
    def _require_client(self, client):
-        """Check client or verify over-ride.
-
-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current logger.
-
-        :rtype: :class:`google.cloud.logging.client.Client`
-        :returns: The client passed in or the currently bound client.
+        """Check client or verify over-ride.
+
+        Args:
+            client (Union[None, google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current logger.
+
+        Returns:
+            google.cloud.logging_v2.client.Client: The client passed in
+                or the currently bound client.
        """
        if client is None:
            client = self._client
        return client

-    def batch(self, client=None):
+    def batch(self, *, client=None):
        """Return a batch to use as a context manager.

-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current topic.
+        Args:
+            client (Union[None, google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current logger.

-        :rtype: :class:`Batch`
-        :returns: A batch to use as a context manager.
+        Returns:
+            Batch: A batch to use as a context manager.
        """
        client = self._require_client(client)
        return Batch(self, client)
@@ -131,100 +131,103 @@ def _do_log(self, client, _entry_class, payload=None, **kw):
        api_repr = entry.to_api_repr()
        client.logging_api.write_entries([api_repr])

-    def log_empty(self, client=None, **kw):
-        """API call: log an empty message via a POST request
+    def log_empty(self, *, client=None, **kw):
+        """Log an empty message via a POST request

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write

-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current logger.
-
-        :type kw: dict
-        :param kw: (optional) additional keyword arguments for the entry.
-                   See :class:`~google.cloud.logging.entries.LogEntry`.
+        Args:
+            client (Optional[google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current logger.
+            kw (Optional[dict]): additional keyword arguments for the entry.
+                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
        """
        self._do_log(client, LogEntry, **kw)

-    def log_text(self, text, client=None, **kw):
-        """API call: log a text message via a POST request
+    def log_text(self, text, *, client=None, **kw):
+        """Log a text message via a POST request

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write

-        :type text: str
-        :param text: the log message.
-
-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current logger.
-
-        :type kw: dict
-        :param kw: (optional) additional keyword arguments for the entry.
-                   See :class:`~google.cloud.logging.entries.LogEntry`.
+        Args:
+            text (str): the log message
+            client (Optional[google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current logger.
+            kw (Optional[dict]): additional keyword arguments for the entry.
+                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
""" self._do_log(client, TextEntry, text, **kw) - def log_struct(self, info, client=None, **kw): - """API call: log a structured message via a POST request + def log_struct(self, info, *, client=None, **kw): + """Log a structured message via a POST request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write - :type info: dict - :param info: the log entry information - - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - - :type kw: dict - :param kw: (optional) additional keyword arguments for the entry. - See :class:`~google.cloud.logging.entries.LogEntry`. + Args: + info (dict): the log entry information + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. + kw (Optional[dict]): additional keyword arguments for the entry. + See :class:`~google.cloud.logging_v2.entries.LogEntry`. """ self._do_log(client, StructEntry, info, **kw) - def log_proto(self, message, client=None, **kw): - """API call: log a protobuf message via a POST request + def log_proto(self, message, *, client=None, **kw): + """Log a protobuf message via a POST request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list - :type message: :class:`~google.protobuf.message.Message` - :param message: The protobuf message to be logged. - - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. - - :type kw: dict - :param kw: (optional) additional keyword arguments for the entry. - See :class:`~google.cloud.logging.entries.LogEntry`. + Args: + message (google.protobuf.message.Message): + The protobuf message to be logged. + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. + kw (Optional[dict]): additional keyword arguments for the entry. + See :class:`~google.cloud.logging_v2.entries.LogEntry`. """ self._do_log(client, ProtobufEntry, message, **kw) - def delete(self, client=None): - """API call: delete all entries in a logger via a DELETE request + def delete(self, logger_name=None, client=None): + """Delete all entries in a logger via a DELETE request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current logger. + Args: + logger_name (Optional[str]): The resource name of the log to delete: + + :: + + "projects/[PROJECT_ID]/logs/[LOG_ID]" + "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + "folders/[FOLDER_ID]/logs/[LOG_ID]" + + ``[LOG_ID]`` must be URL-encoded. For example, + ``"projects/my-project-id/logs/syslog"``, + ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. + If not passed, defaults to the project bound to the client. + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current logger. 
""" client = self._require_client(client) - client.logging_api.logger_delete(self.project, self.name) + if logger_name is None: + logger_name = self.full_name + client.logging_api.logger_delete(logger_name) def list_entries( self, - projects=None, + *, + resource_names=None, filter_=None, order_by=None, page_size=None, @@ -235,46 +238,48 @@ def list_entries( See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list - :type projects: list of strings - :param projects: project IDs to include. If not passed, - defaults to the project bound to the client. - - :type filter_: str - :param filter_: - a filter expression. See - https://cloud.google.com/logging/docs/view/advanced_filters - By default, a 24 hour filter is applied. - - :type order_by: str - :param order_by: One of :data:`~google.cloud.logging.ASCENDING` - or :data:`~google.cloud.logging.DESCENDING`. - - :type page_size: int - :param page_size: - Optional. The maximum number of entries in each page of results - from this request. Non-positive values are ignored. Defaults - to a sensible value set by the API. - - :type page_token: str - :param page_token: - Optional. If present, return the next batch of entries, using - the value, which must correspond to the ``nextPageToken`` value - returned in the previous response. Deprecated: use the ``pages`` - property of the returned iterator instead of manually passing - the token. - - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of log entries accessible to the current logger. - See :class:`~google.cloud.logging.entries.LogEntry`. + Args: + resource_names (Optional[Sequence[str]]): Names of one or more parent resources + from which to retrieve log entries: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + If not passed, defaults to the project bound to the client. + filter_ (Optional[str]): a filter expression. See + https://cloud.google.com/logging/docs/view/advanced_filters + By default, a 24 hour filter is applied. + order_by (Optional[str]): One of :data:`~google.cloud.logging_v2.ASCENDING` + or :data:`~google.cloud.logging_v2.DESCENDING`. + page_size (Optional[int]): + Optional. The maximum number of entries in each page of results + from this request. Non-positive values are ignored. Defaults + to a sensible value set by the API. + page_token (Optional[str]): + Optional. If present, return the next batch of entries, using + the value, which must correspond to the ``nextPageToken`` value + returned in the previous response. Deprecated: use the ``pages`` + property of the returned iterator instead of manually passing + the token. + + Returns: + Iterator[google.cloud.logging_v2.entries.LogEntry] """ - log_filter = "logName=%s" % (self.full_name,) + + if resource_names is None: + resource_names = [f"projects/{self.project}"] + + log_filter = f"logName={self.full_name}" if filter_ is not None: - filter_ = "%s AND %s" % (filter_, log_filter) + filter_ = f"{filter_} AND {log_filter}" else: filter_ = log_filter filter_ = _add_defaults_to_filter(filter_) return self.client.list_entries( - projects=projects, + resource_names=resource_names, filter_=filter_, order_by=order_by, page_size=page_size, @@ -283,27 +288,26 @@ def list_entries( class Batch(object): - """Context manager: collect entries to log via a single API call. 
@@ -283,27 +288,26 @@


 class Batch(object):
-    """Context manager: collect entries to log via a single API call.
-
-    Helper returned by :meth:`Logger.batch`
-
-    :type logger: :class:`google.cloud.logging.logger.Logger`
-    :param logger: the logger to which entries will be logged.
-
-    :type client: :class:`google.cloud.logging.client.Client`
-    :param client: The client to use.
-
-    :type resource: :class:`~google.cloud.logging.resource.Resource`
-    :param resource: (Optional) Monitored resource of the batch, defaults
-                     to None, which requires that every entry should have a
-                     resource specified. Since the methods used to write
-                     entries default the entry's resource to the global
-                     resource type, this parameter is only required
-                     if explicitly set to None. If no entries' resource are
-                     set to None, this parameter will be ignored on the server.
-    """

    def __init__(self, logger, client, resource=None):
+        """Context manager: collect entries to log via a single API call.
+
+        Helper returned by :meth:`Logger.batch`
+
+        Args:
+            logger (google.cloud.logging_v2.logger.Logger):
+                the logger to which entries will be logged.
+            client (google.cloud.logging_v2.client.Client):
+                The client to use.
+            resource (Optional[google.cloud.logging_v2.resource.Resource]):
+                Monitored resource of the batch, defaults
+                to None, which requires that every entry should have a
+                resource specified. Since the methods used to write
+                entries default the entry's resource to the global
+                resource type, this parameter is only required
+                if explicitly set to None. If no entries' resource are
+                set to None, this parameter will be ignored on the server.
+        """
        self.logger = logger
        self.entries = []
        self.client = client
@@ -319,55 +323,49 @@ def __exit__(self, exc_type, exc_val, exc_tb):

    def log_empty(self, **kw):
        """Add a entry without payload to be logged during :meth:`commit`.

-        :type kw: dict
-        :param kw: (optional) additional keyword arguments for the entry.
-                   See :class:`~google.cloud.logging.entries.LogEntry`.
+        Args:
+            kw (Optional[dict]): Additional keyword arguments for the entry.
+                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
        """
        self.entries.append(LogEntry(**kw))

    def log_text(self, text, **kw):
        """Add a text entry to be logged during :meth:`commit`.

-        :type text: str
-        :param text: the text entry
-
-        :type kw: dict
-        :param kw: (optional) additional keyword arguments for the entry.
-                   See :class:`~google.cloud.logging.entries.LogEntry`.
+        Args:
+            text (str): the text entry
+            kw (Optional[dict]): Additional keyword arguments for the entry.
+                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
        """
        self.entries.append(TextEntry(payload=text, **kw))

    def log_struct(self, info, **kw):
        """Add a struct entry to be logged during :meth:`commit`.

-        :type info: dict
-        :param info: the struct entry
-
-        :type kw: dict
-        :param kw: (optional) additional keyword arguments for the entry.
-                   See :class:`~google.cloud.logging.entries.LogEntry`.
+        Args:
+            info (dict): The struct entry.
+            kw (Optional[dict]): Additional keyword arguments for the entry.
+                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
        """
        self.entries.append(StructEntry(payload=info, **kw))
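A usage sketch of the batch context manager documented above; entries accumulate locally and are sent in a single ``write_entries`` call on exit (hypothetical payloads)::

    with logger.batch() as batch:
        batch.log_text("starting job")
        batch.log_struct({"job_id": "abc123"})
        batch.log_text("done", severity="INFO")
    # exiting the block commits all three entries in one API call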
    def log_proto(self, message, **kw):
        """Add a protobuf entry to be logged during :meth:`commit`.

-        :type message: protobuf message
-        :param message: the protobuf entry
-
-        :type kw: dict
-        :param kw: (optional) additional keyword arguments for the entry.
-                   See :class:`~google.cloud.logging.entries.LogEntry`.
+        Args:
+            message (google.protobuf.Message): The protobuf entry.
+            kw (Optional[dict]): Additional keyword arguments for the entry.
+                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
        """
        self.entries.append(ProtobufEntry(payload=message, **kw))

    def commit(self, client=None):
        """Send saved log entries as a single API call.

-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current batch.
+        Args:
+            client (Optional[google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current batch.
        """
        if client is None:
            client = self.client
diff --git a/google/cloud/logging_v2/metric.py b/google/cloud/logging_v2/metric.py
index 3fb91bb52..fd3e3b62c 100644
--- a/google/cloud/logging_v2/metric.py
+++ b/google/cloud/logging_v2/metric.py
@@ -12,34 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Define Stackdriver Logging API Metrics."""
+"""Define Cloud Logging API Metrics."""

 from google.cloud.exceptions import NotFound


 class Metric(object):
-    """Metrics represent named filters for log entries.

-    See
-    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
+    def __init__(self, name, *, filter_=None, client=None, description=""):
+        """Metrics represent named filters for log entries.

-    :type name: str
-    :param name: the name of the metric
+        See
+        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics

-    :type filter_: str
-    :param filter_: the advanced logs filter expression defining the entries
+        Args:
+            name (str): The name of the metric.
+            filter_ (Optional[str]): the advanced logs filter expression defining the entries
                tracked by the metric. If not
                passed, the instance should already exist, to be
                refreshed via :meth:`reload`.
+            client (Optional[google.cloud.logging_v2.client.Client]): A client which holds
+                credentials and project configuration for the metric (which requires a project).
+            description (Optional[str]): An optional description of the metric.

-    :type client: :class:`google.cloud.logging.client.Client`
-    :param client: A client which holds credentials and project configuration
-                   for the metric (which requires a project).
-
-    :type description: str
-    :param description: an optional description of the metric.
-    """
-
-    def __init__(self, name, filter_=None, client=None, description=""):
+        """
        self.name = name
        self._client = client
        self.filter_ = filter_
@@ -58,76 +53,75 @@ def project(self):
    @property
    def full_name(self):
        """Fully-qualified name used in metric APIs"""
-        return "projects/%s/metrics/%s" % (self.project, self.name)
+        return f"projects/{self.project}/metrics/{self.name}"

    @property
    def path(self):
        """URL path for the metric's APIs"""
-        return "/%s" % (self.full_name,)
+        return f"/{self.full_name}"

    @classmethod
    def from_api_repr(cls, resource, client):
-        """Factory: construct a metric given its API representation
+        """Construct a metric given its API representation

-        :type resource: dict
-        :param resource: metric resource representation returned from the API
+        Args:
+            resource (dict): metric resource representation returned from the API
+            client (google.cloud.logging_v2.client.Client): Client which holds
+                credentials and project configuration for the metric.

-        :type client: :class:`google.cloud.logging.client.Client`
-        :param client: Client which holds credentials and project
-                       configuration for the metric.
-
-        :rtype: :class:`google.cloud.logging.metric.Metric`
-        :returns: Metric parsed from ``resource``.
+        Returns:
+            google.cloud.logging_v2.metric.Metric
        """
        metric_name = resource["name"]
        filter_ = resource["filter"]
        description = resource.get("description", "")
-        return cls(metric_name, filter_, client=client, description=description)
+        return cls(metric_name, filter_=filter_, client=client, description=description)

    def _require_client(self, client):
-        """Check client or verify over-ride.
-
-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current metric.
-
-        :rtype: :class:`google.cloud.logging.client.Client`
-        :returns: The client passed in or the currently bound client.
+        """Check client or verify over-ride.
+
+        Args:
+            client (Union[None, google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current metric.
+
+        Returns:
+            google.cloud.logging_v2.client.Client: The client passed in
+                or the currently bound client.
        """
        if client is None:
            client = self._client
        return client

-    def create(self, client=None):
-        """API call: create the metric via a PUT request
+    def create(self, *, client=None):
+        """Create the metric via a PUT request

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create

-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current metric.
+        Args:
+            client (Optional[google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current metric.
        """
        client = self._require_client(client)
        client.metrics_api.metric_create(
            self.project, self.name, self.filter_, self.description
        )

-    def exists(self, client=None):
-        """API call: test for the existence of the metric via a GET request
+    def exists(self, *, client=None):
+        """Test for the existence of the metric via a GET request

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get

-        :type client: :class:`~google.cloud.logging.client.Client` or
-                      ``NoneType``
-        :param client: the client to use.  If not passed, falls back to the
-                       ``client`` stored on the current metric.
+        Args:
+            client (Optional[google.cloud.logging_v2.client.Client]):
+                The client to use. If not passed, falls back to the
+                ``client`` stored on the current metric.

-        :rtype: bool
-        :returns: Boolean indicating existence of the metric.
+        Returns:
+            bool: Boolean indicating existence of the metric.
        """
        client = self._require_client(client)
@@ -138,48 +132,48 @@ def exists(self, client=None):
        else:
            return True
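A lifecycle sketch of the keyword-only metric methods above (hypothetical metric ID and filter, assuming a configured client)::

    metric = client.metric(
        "error-count",
        filter_="severity>=ERROR",
        description="Count of error entries",
    )
    if not metric.exists():   # client is keyword-only on these methods now
        metric.create()
    metric.reload(client=client)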
""" client = self._require_client(client) data = client.metrics_api.metric_get(self.project, self.name) self.description = data.get("description", "") self.filter_ = data["filter"] - def update(self, client=None): + def update(self, *, client=None): """API call: update metric configuration via a PUT request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/update - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. + Args: + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. """ client = self._require_client(client) client.metrics_api.metric_update( self.project, self.name, self.filter_, self.description ) - def delete(self, client=None): + def delete(self, *, client=None): """API call: delete a metric via a DELETE request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current metric. + Args: + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. """ client = self._require_client(client) client.metrics_api.metric_delete(self.project, self.name) diff --git a/google/cloud/logging_v2/resource.py b/google/cloud/logging_v2/resource.py index dda59ca09..637b795f7 100644 --- a/google/cloud/logging_v2/resource.py +++ b/google/cloud/logging_v2/resource.py @@ -34,20 +34,20 @@ class Resource(collections.namedtuple("Resource", "type labels")): def _from_dict(cls, info): """Construct a resource object from the parsed JSON representation. - :type info: dict - :param info: - A ``dict`` parsed from the JSON wire-format representation. + Args: + info (dict): A ``dict`` parsed from the JSON wire-format representation. - :rtype: :class:`Resource` - :returns: A resource object. + Returns: + Resource: A resource object. """ return cls(type=info["type"], labels=info.get("labels", {})) def _to_dict(self): """Build a dictionary ready to be serialized to the JSON format. - :rtype: dict - :returns: A dict representation of the object that can be written to - the API. + Returns: + dict: + A dict representation of the object that can be written to + the API. """ return {"type": self.type, "labels": self.labels} diff --git a/google/cloud/logging_v2/sink.py b/google/cloud/logging_v2/sink.py index 2a7d46fdb..caa243746 100644 --- a/google/cloud/logging_v2/sink.py +++ b/google/cloud/logging_v2/sink.py @@ -12,39 +12,43 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Define Stackdriver Logging API Sinks.""" +"""Define Cloud Logging API Sinks.""" from google.cloud.exceptions import NotFound class Sink(object): - """Sinks represent filtered exports for log entries. - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks + def __init__(self, name, *, filter_=None, parent=None, destination=None, client=None): + """Sinks represent filtered exports for log entries. - :type name: str - :param name: the name of the sink - - :type filter_: str - :param filter_: (optional) the advanced logs filter expression defining - the entries exported by the sink. 
- - :type destination: str - :param destination: destination URI for the entries exported by the sink. - If not passed, the instance should already exist, to - be refreshed via :meth:`reload`. - - :type client: :class:`google.cloud.logging.client.Client` - :param client: A client which holds credentials and project configuration - for the sink (which requires a project). - """ - - def __init__(self, name, filter_=None, destination=None, client=None): + See + https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks + + Args: + name (str): The name of the sink. + parent(Optional[str]): The resource in which to create the sink: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + Defaults to the project stored on the client. + filter_ (Optional[str]): The advanced logs filter expression defining + the entries exported by the sink. + destination (Optional[str]): Destination URI for the entries exported by the sink. + If not passed, the instance should already exist, to + be refreshed via :meth:`reload`. + client (Optional[google.cloud.logging_v2.client.Client]): A client which holds + credentials and project configuration for the sink (which requires a project). + """ self.name = name self.filter_ = filter_ self.destination = destination self._client = client + self._parent = parent self._writer_identity = None @property @@ -53,19 +57,21 @@ def client(self): return self._client @property - def project(self): - """Project bound to the sink.""" - return self._client.project - + def parent(self): + """Parent resource of the sink (project, organization, billingAccount, or folder).""" + if self._parent is None: + self._parent = f"projects/{self.client.project}" + return self._parent + @property def full_name(self): """Fully-qualified name used in sink APIs""" - return "projects/%s/sinks/%s" % (self.project, self.name) + return f"{self.parent}/sinks/{self.name}" @property def path(self): """URL path for the sink's APIs""" - return "/%s" % (self.full_name) + return f"/{self.full_name}" @property def writer_identity(self): @@ -79,61 +85,68 @@ def _update_from_api_repr(self, resource): self._writer_identity = resource.get("writerIdentity") @classmethod - def from_api_repr(cls, resource, client): - """Factory: construct a sink given its API representation - - :type resource: dict - :param resource: sink resource representation returned from the API - - :type client: :class:`google.cloud.logging.client.Client` - :param client: Client which holds credentials and project - configuration for the sink. - - :rtype: :class:`google.cloud.logging.sink.Sink` - :returns: Sink parsed from ``resource``. - :raises: :class:`ValueError` if ``client`` is not ``None`` and the - project from the resource does not agree with the project - from the client. + def from_api_repr(cls, resource, client, *, parent=None): + """Construct a sink given its API representation + + Args: + resource (dict): sink resource representation returned from the API + client (google.cloud.logging_v2.client.Client): Client which holds + credentials and project configuration for the sink. + parent(Optional[str]): The resource in which to create the sink: + + :: + + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]". + Defaults to the project stored on the client. + + Returns: + google.cloud.logging_v2.sink.Sink: Sink parsed from ``resource``. 
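The new ``parent`` keyword lets a sink live outside the client's project. A sketch under those assumptions (the resource names are placeholders, and ``client`` is a configured ``logging_v2`` client)::

    from google.cloud.logging_v2.sink import Sink

    sink = Sink(
        "my-sink",
        parent="folders/1234567890",  # omit to default to f"projects/{client.project}"
        filter_="severity>=WARNING",
        destination="storage.googleapis.com/my-bucket",
        client=client,
    )
    sink.create()  # created under the folder, not the client's project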
+ + Raises: + ValueError: if ``client`` is not ``None`` and the + project from the resource does not agree with the project + from the client. """ sink_name = resource["name"] - instance = cls(sink_name, client=client) + instance = cls(sink_name, client=client, parent=parent) instance._update_from_api_repr(resource) return instance def _require_client(self, client): - """Check client or verify over-ride. - - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - - :rtype: :class:`google.cloud.logging.client.Client` - :returns: The client passed in or the currently bound client. + """Check client or verify override. + + Args: + client (Union[None, google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. + + Returns: + google.cloud.logging_v2.client.Client: The client passed in + or the currently bound client. """ if client is None: client = self._client return client - def create(self, client=None, unique_writer_identity=False): - """API call: create the sink via a PUT request + def create(self, *, client=None, unique_writer_identity=False): + """Create the sink via a PUT request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - - :type unique_writer_identity: bool - :param unique_writer_identity: (Optional) determines the kind of - IAM identity returned as - writer_identity in the new sink. + Args: + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. + unique_writer_identity (Optional[bool]): Determines the kind of + IAM identity returned as writer_identity in the new sink. """ client = self._require_client(client) resource = client.sinks_api.sink_create( - self.project, + self.parent, self.name, self.filter_, self.destination,
""" client = self._require_client(client) try: - client.sinks_api.sink_get(self.project, self.name) + client.sinks_api.sink_get(self.full_name) except NotFound: return False else: return True - def reload(self, client=None): - """API call: sync local sink configuration via a GET request + def reload(self, *, client=None): + """Sync local sink configuration via a GET request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. + Args: + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. """ client = self._require_client(client) - resource = client.sinks_api.sink_get(self.project, self.name) + resource = client.sinks_api.sink_get(self.full_name) self._update_from_api_repr(resource) - def update(self, client=None, unique_writer_identity=False): - """API call: update sink configuration via a PUT request + def update(self, *, client=None, unique_writer_identity=False): + """Update sink configuration via a PUT request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. - - :type unique_writer_identity: bool - :param unique_writer_identity: (Optional) determines the kind of - IAM identity returned as - writer_identity in the new sink. + Args: + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. + unique_writer_identity (Optional[bool]): Determines the kind of + IAM identity returned as writer_identity in the new sink. """ client = self._require_client(client) resource = client.sinks_api.sink_update( - self.project, - self.name, + self.full_name, self.filter_, self.destination, unique_writer_identity=unique_writer_identity, ) self._update_from_api_repr(resource) - def delete(self, client=None): - """API call: delete a sink via a DELETE request + def delete(self, *, client=None): + """Delete a sink via a DELETE request See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete - :type client: :class:`~google.cloud.logging.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current sink. + Args: + client (Optional[google.cloud.logging_v2.client.Client]): + The client to use. If not passed, falls back to the + ``client`` stored on the current sink. 
""" client = self._require_client(client) - client.sinks_api.sink_delete(self.project, self.name) + client.sinks_api.sink_delete(self.full_name) diff --git a/samples/snippets/export.py b/samples/snippets/export.py index f7606ba6c..63e31abb3 100644 --- a/samples/snippets/export.py +++ b/samples/snippets/export.py @@ -55,8 +55,9 @@ def create_sink(sink_name, destination_bucket, filter_): sink = logging_client.sink( sink_name, - filter_, - destination) + filter_=filter_, + destination=destination + ) if sink.exists(): print('Sink {} already exists.'.format(sink.name)) diff --git a/samples/snippets/export_test.py b/samples/snippets/export_test.py index b787c066a..aea9889dc 100644 --- a/samples/snippets/export_test.py +++ b/samples/snippets/export_test.py @@ -40,8 +40,8 @@ def example_sink(): sink = client.sink( TEST_SINK_NAME_TMPL.format(_random_id()), - TEST_SINK_FILTER, - 'storage.googleapis.com/{bucket}'.format(bucket=BUCKET)) + filter_=TEST_SINK_FILTER, + destination='storage.googleapis.com/{bucket}'.format(bucket=BUCKET)) sink.create() diff --git a/tests/system/gapic/v2/test_system_logging_service_v2_v2.py b/tests/system/gapic/v2/test_system_logging_service_v2_v2.py index 6373f70b0..dfd81a5e8 100644 --- a/tests/system/gapic/v2/test_system_logging_service_v2_v2.py +++ b/tests/system/gapic/v2/test_system_logging_service_v2_v2.py @@ -18,19 +18,17 @@ import google.auth from google.api import monitored_resource_pb2 from google.cloud import logging_v2 -from google.cloud.logging_v2.proto import log_entry_pb2 -from google.cloud.logging_v2.proto import logging_pb2 class TestSystemLoggingServiceV2(object): def test_write_log_entries(self): _, project_id = google.auth.default() - client = logging_v2.gapic.logging_service_v2_client.LoggingServiceV2Client() + client = logging_v2.services.logging_service_v2.LoggingServiceV2Client() log_name = client.log_path(project_id, "test-{0}".format(time.time())) resource = {} labels = {} entries = [] response = client.write_log_entries( - entries, log_name=log_name, resource=resource, labels=labels + entries=entries, log_name=log_name, resource=resource, labels=labels ) diff --git a/tests/system/test_system.py b/tests/system/test_system.py index 0dbf5edce..56a3dd7fc 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -23,8 +23,8 @@ from google.api_core.exceptions import ResourceExhausted from google.api_core.exceptions import RetryError from google.api_core.exceptions import ServiceUnavailable -from google.cloud._helpers import UTC import google.cloud.logging +from google.cloud._helpers import UTC from google.cloud.logging_v2.handlers.handlers import CloudLoggingHandler from google.cloud.logging_v2.handlers.transports import SyncTransport from google.cloud.logging_v2 import client @@ -318,7 +318,7 @@ def test_log_root_handler(self): def test_create_metric(self): METRIC_NAME = "test-create-metric%s" % (_RESOURCE_ID,) - metric = Config.CLIENT.metric(METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) self.assertFalse(metric.exists()) retry = RetryErrors(Conflict) @@ -329,7 +329,7 @@ def test_create_metric(self): def test_list_metrics(self): METRIC_NAME = "test-list-metrics%s" % (_RESOURCE_ID,) - metric = Config.CLIENT.metric(METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) self.assertFalse(metric.exists()) before_metrics = 
list(Config.CLIENT.list_metrics()) before_names = set(before.name for before in before_metrics) @@ -347,7 +347,7 @@ def test_list_metrics(self): def test_reload_metric(self): METRIC_NAME = "test-reload-metric%s" % (_RESOURCE_ID,) retry = RetryErrors(Conflict) - metric = Config.CLIENT.metric(METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) self.assertFalse(metric.exists()) retry(metric.create)() self.to_delete.append(metric) @@ -364,7 +364,7 @@ def test_update_metric(self): retry = RetryErrors(Conflict) NEW_FILTER = "logName:other" NEW_DESCRIPTION = "updated" - metric = Config.CLIENT.metric(METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) self.assertFalse(metric.exists()) retry(metric.create)() self.to_delete.append(metric) @@ -405,7 +405,7 @@ def test_create_sink_storage_bucket(self): SINK_NAME = "test-create-sink-bucket%s" % (_RESOURCE_ID,) retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10) - sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, uri) + sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=uri) self.assertFalse(sink.exists()) retry(sink.create)() @@ -433,7 +433,7 @@ def test_create_sink_pubsub_topic(self): TOPIC_URI = "pubsub.googleapis.com/%s" % (topic_path,) retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10) - sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, TOPIC_URI) + sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=TOPIC_URI) self.assertFalse(sink.exists()) retry(sink.create)() @@ -468,7 +468,7 @@ def test_create_sink_bigquery_dataset(self): SINK_NAME = "test-create-sink-dataset%s" % (_RESOURCE_ID,) retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10) uri = self._init_bigquery_dataset() - sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, uri) + sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=uri) self.assertFalse(sink.exists()) retry(sink.create)() @@ -480,7 +480,7 @@ def test_list_sinks(self): SINK_NAME = "test-list-sinks%s" % (_RESOURCE_ID,) uri = self._init_storage_bucket() retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10) - sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, uri) + sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=uri) self.assertFalse(sink.exists()) before_sinks = list(Config.CLIENT.list_sinks()) before_names = set(before.name for before in before_sinks) @@ -498,7 +498,7 @@ def test_reload_sink(self): SINK_NAME = "test-reload-sink%s" % (_RESOURCE_ID,) retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10) uri = self._init_bigquery_dataset() - sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, uri) + sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=uri) self.assertFalse(sink.exists()) retry(sink.create)() self.to_delete.append(sink) @@ -516,7 +516,7 @@ def test_update_sink(self): bucket_uri = self._init_storage_bucket() dataset_uri = self._init_bigquery_dataset() UPDATED_FILTER = "logName:syslog" - sink = Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, bucket_uri) + sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=bucket_uri) self.assertFalse(sink.exists()) retry(sink.create)() self.to_delete.append(sink) diff --git a/tests/unit/gapic/logging_v2/test_logging_service_v2.py b/tests/unit/gapic/logging_v2/test_logging_service_v2.py 
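The system-test updates above show the call-site migration pattern: optional arguments that used to be positional are now keyword-only. A sketch using the same test names::

    # before: Config.CLIENT.sink(SINK_NAME, DEFAULT_FILTER, uri)
    sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=uri)

    # before: Config.CLIENT.metric(METRIC_NAME, DEFAULT_FILTER, DEFAULT_DESCRIPTION)
    metric = Config.CLIENT.metric(
        METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION
    )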
index 5e318e02c..2c08f63b2 100644 --- a/tests/unit/gapic/logging_v2/test_logging_service_v2.py +++ b/tests/unit/gapic/logging_v2/test_logging_service_v2.py @@ -741,7 +741,8 @@ def test_write_log_entries_flattened(): assert args[0].log_name == "log_name_value" - assert args[0].resource == monitored_resource.MonitoredResource(type="type__value" + assert args[0].resource == monitored_resource.MonitoredResource( + type="type__value" ) assert args[0].labels == {"key_value": "value_value"} @@ -796,7 +797,8 @@ async def test_write_log_entries_flattened_async(): assert args[0].log_name == "log_name_value" - assert args[0].resource == monitored_resource.MonitoredResource(type="type__value" + assert args[0].resource == monitored_resource.MonitoredResource( + type="type__value" ) assert args[0].labels == {"key_value": "value_value"} diff --git a/tests/unit/handlers/transports/test_sync.py b/tests/unit/handlers/transports/test_sync.py index da990c43d..7bc2cd46f 100644 --- a/tests/unit/handlers/transports/test_sync.py +++ b/tests/unit/handlers/transports/test_sync.py @@ -49,7 +49,7 @@ def test_send(self): python_logger_name, logging.INFO, None, None, message, None, None ) - transport.send(record, message, _GLOBAL_RESOURCE) + transport.send(record, message, resource=_GLOBAL_RESOURCE) EXPECTED_STRUCT = {"message": message, "python_logger": python_logger_name} EXPECTED_SENT = ( EXPECTED_STRUCT, diff --git a/tests/unit/test__gapic.py b/tests/unit/test__gapic.py index 95934f6b3..59236fa65 100644 --- a/tests/unit/test__gapic.py +++ b/tests/unit/test__gapic.py @@ -14,9 +14,7 @@ import unittest -from google.api_core import grpc_helpers import google.auth.credentials -from google.protobuf import empty_pb2 import mock import google.cloud.logging_v2 @@ -26,7 +24,6 @@ from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client from google.cloud.logging_v2.services.metrics_service_v2 import MetricsServiceV2Client from google.cloud.logging_v2.types import LogSink -from google.cloud.logging_v2.types import LogMetric from google.cloud.logging_v2.types import LogEntry as LogEntryPB @@ -64,7 +61,7 @@ def test_list_entries(self): entries=[log_entry_msg] ) result = client.list_entries( - [PROJECT], filter_=FILTER, order_by=logging_v2.DESCENDING + [PROJECT_PATH], filter_=FILTER, order_by=logging_v2.DESCENDING ) entries = list(result) @@ -92,7 +89,7 @@ def test_list_entries_with_options(self): call.return_value = logging_v2.types.ListLogEntriesResponse(entries=[]) result = client.list_entries( - [PROJECT], + [PROJECT_PATH], filter_=FILTER, order_by=google.cloud.logging_v2.ASCENDING, page_size=42, @@ -122,7 +119,7 @@ def test_write_entries_single(self): "resource": {"type": "global"}, "textPayload": "text", } - result = client.write_entries([entry]) + client.write_entries([entry]) # Check the request call.assert_called_once() @@ -139,13 +136,14 @@ def test_logger_delete(self): with mock.patch.object( type(client._gapic_api.transport.delete_log), "__call__" ) as call: - client.logger_delete(PROJECT, self.LOG_NAME) + client.logger_delete(self.LOG_PATH) call.assert_called_once() assert call.call_args.args[0].log_name == self.LOG_PATH class Test_SinksAPI(object): SINK_NAME = "sink_name" + PARENT_PATH = f"projects/{PROJECT}" SINK_PATH = f"projects/{PROJECT}/sinks/{SINK_NAME}" DESTINATION_URI = "faux.googleapis.com/destination" SINK_WRITER_IDENTITY = "serviceAccount:project-123@example.com" @@ -167,14 +165,14 @@ def test_list_sinks(self): client = self.make_sinks_api() sink_msg = LogSink( - 
name=self.SINK_PATH, destination=self.DESTINATION_URI, filter=FILTER + name=self.SINK_NAME, destination=self.DESTINATION_URI, filter=FILTER ) with mock.patch.object( type(client._gapic_api.transport.list_sinks), "__call__" ) as call: call.return_value = logging_v2.types.ListSinksResponse(sinks=[sink_msg]) - result = client.list_sinks(PROJECT) + result = client.list_sinks(self.PARENT_PATH,) sinks = list(result) @@ -182,14 +180,14 @@ def test_list_sinks(self): assert len(sinks) == 1 sink = sinks[0] assert isinstance(sink, google.cloud.logging_v2.sink.Sink) - assert sink.name == self.SINK_PATH + assert sink.name == self.SINK_NAME assert sink.destination == self.DESTINATION_URI assert sink.filter_ == FILTER # Check the request call.assert_called_once() request = call.call_args.args[0] - assert request.parent == PROJECT_PATH + assert request.parent == self.PARENT_PATH def test_list_sinks_with_options(self): client = self.make_sinks_api() @@ -198,13 +196,13 @@ def test_list_sinks_with_options(self): type(client._gapic_api.transport.list_sinks), "__call__" ) as call: call.return_value = logging_v2.types.ListSinksResponse(sinks=[]) - result = client.list_sinks(PROJECT, page_size=42, page_token="token") + result = client.list_sinks(self.PARENT_PATH, page_size=42, page_token="token") list(result) # Check the request call.assert_called_once() request = call.call_args.args[0] - assert request.parent == f"projects/{PROJECT}" + assert request.parent == self.PARENT_PATH assert request.page_size == 42 assert request.page_token == "token" @@ -221,7 +219,7 @@ def test_sink_create(self): ) result = client.sink_create( - PROJECT, + self.PARENT_PATH, self.SINK_NAME, FILTER, self.DESTINATION_URI, @@ -238,7 +236,7 @@ def test_sink_create(self): # Check request call.assert_called_once() request = call.call_args.args[0] - assert request.parent == PROJECT_PATH + assert request.parent == self.PARENT_PATH assert request.unique_writer_identity is True assert request.sink.name == self.SINK_NAME assert request.sink.filter == FILTER @@ -250,14 +248,14 @@ def test_sink_get(self): type(client._gapic_api.transport.get_sink), "__call__" ) as call: call.return_value = logging_v2.types.LogSink( - name=self.SINK_PATH, destination=self.DESTINATION_URI, filter=FILTER + name=self.SINK_NAME, destination=self.DESTINATION_URI, filter=FILTER ) - response = client.sink_get(PROJECT, self.SINK_NAME) + response = client.sink_get(self.SINK_PATH) # Check response assert response == { - "name": self.SINK_PATH, + "name": self.SINK_NAME, "filter": FILTER, "destination": self.DESTINATION_URI, } @@ -280,8 +278,7 @@ def test_sink_update(self): ) result = client.sink_update( - PROJECT, - self.SINK_NAME, + self.SINK_PATH, FILTER, self.DESTINATION_URI, unique_writer_identity=True, @@ -300,7 +297,7 @@ def test_sink_update(self): request = call.call_args.args[0] assert request.sink_name == self.SINK_PATH assert request.unique_writer_identity is True - assert request.sink.name == self.SINK_PATH + assert request.sink.name == self.SINK_NAME assert request.sink.filter == FILTER assert request.sink.destination == self.DESTINATION_URI @@ -309,7 +306,7 @@ def test_sink_delete(self): with mock.patch.object( type(client._gapic_api.transport.get_sink), "__call__" ) as call: - client.sink_delete(PROJECT, self.SINK_NAME) + client.sink_delete(self.SINK_PATH) call.assert_called_once() request = call.call_args.args[0] @@ -365,9 +362,6 @@ def test_list_metrics(self): def test_list_metrics_options(self): client = self.make_metrics_api() - metric = 
logging_v2.types.LogMetric( - name=self.METRIC_PATH, description=self.DESCRIPTION, filter=FILTER - ) with mock.patch.object( type(client._gapic_api.transport.list_log_metrics), "__call__" ) as call: @@ -389,9 +383,7 @@ def test_metric_create(self): with mock.patch.object( type(client._gapic_api.transport.create_log_metric), "__call__" ) as call: - result = client.metric_create( - PROJECT, self.METRIC_NAME, FILTER, self.DESCRIPTION - ) + client.metric_create(PROJECT, self.METRIC_NAME, FILTER, self.DESCRIPTION) # Check the request call.assert_called_once() @@ -486,7 +478,11 @@ def test_non_registry_failure(self, msg_to_dict_mock): self._call_fut(entry_pb) entry_pb.HasField.assert_called_once_with("proto_payload") - msg_to_dict_mock.assert_called_once_with(entry_pb, preserving_proto_field_name=False, including_default_value_fields=False) + msg_to_dict_mock.assert_called_once_with( + entry_pb, + preserving_proto_field_name=False, + including_default_value_fields=False, + ) def test_unregistered_type(self): from google.protobuf import any_pb2 @@ -607,8 +603,9 @@ def test_make_logging_api(gapic_client): assert api._client == client assert api._gapic_api == gapic_client.return_value gapic_client.assert_called_once_with( - credentials=client._credentials, client_info=client._client_info, - client_options=client._client_options + credentials=client._credentials, + client_info=client._client_info, + client_options=client._client_options, ) @@ -619,8 +616,9 @@ def test_make_metrics_api(gapic_client): assert api._client == client assert api._gapic_api == gapic_client.return_value gapic_client.assert_called_once_with( - credentials=client._credentials, client_info=client._client_info, - client_options=client._client_options + credentials=client._credentials, + client_info=client._client_info, + client_options=client._client_options, ) @@ -631,6 +629,7 @@ def test_make_sinks_api(gapic_client): assert api._client == client assert api._gapic_api == gapic_client.return_value gapic_client.assert_called_once_with( - credentials=client._credentials, client_info=client._client_info, - client_options=client._client_options + credentials=client._credentials, + client_info=client._client_info, + client_options=client._client_options, ) diff --git a/tests/unit/test__http.py b/tests/unit/test__http.py index 6616b0722..1408769a5 100644 --- a/tests/unit/test__http.py +++ b/tests/unit/test__http.py @@ -98,9 +98,11 @@ def test_extra_headers(self): class Test_LoggingAPI(unittest.TestCase): PROJECT = "project" + PROJECT_PATH = "projects/project" LIST_ENTRIES_PATH = "entries:list" WRITE_ENTRIES_PATH = "entries:write" LOGGER_NAME = "LOGGER_NAME" + LOGGER_PATH = "projects/project/logs/LOGGER_NAME" FILTER = "logName:syslog AND severity>=ERROR" @staticmethod @@ -136,7 +138,7 @@ def test_list_entries_no_paging(self): NOW, TIMESTAMP = self._make_timestamp() IID = "IID" TEXT = "TEXT" - SENT = {"projectIds": [self.PROJECT]} + SENT = {"resourceNames": [self.PROJECT_PATH]} TOKEN = "TOKEN" RETURNED = { "entries": [ @@ -145,7 +147,7 @@ def test_list_entries_no_paging(self): "insertId": IID, "resource": {"type": "global"}, "timestamp": TIMESTAMP, - "logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME), + "logName": f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}", } ], "nextPageToken": TOKEN, @@ -156,7 +158,7 @@ def test_list_entries_no_paging(self): client._connection = _Connection(RETURNED) api = self._make_one(client) - iterator = api.list_entries([self.PROJECT]) + iterator = api.list_entries([self.PROJECT_PATH]) 
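As the expected payloads above show, the ``entries:list`` request body now carries ``resourceNames`` with full resource paths instead of bare ``projectIds``. Roughly (illustrative values)::

    body = {
        "resourceNames": ["projects/my-project"],  # was "projectIds": ["my-project"]
        "filter": "severity>=ERROR",
        "orderBy": "timestamp desc",
    }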
page = six.next(iterator.pages) entries = list(page) token = iterator.next_page_token @@ -190,7 +192,9 @@ def test_list_entries_w_paging(self): from google.cloud.logging_v2.entries import StructEntry PROJECT1 = "PROJECT1" + PROJECT1_PATH = f"projects/{PROJECT1}" PROJECT2 = "PROJECT2" + PROJECT2_PATH = f"projects/{PROJECT2}" NOW, TIMESTAMP = self._make_timestamp() IID1 = "IID1" IID2 = "IID2" @@ -200,7 +204,7 @@ def test_list_entries_w_paging(self): TOKEN = "TOKEN" PAGE_SIZE = 42 SENT = { - "projectIds": [PROJECT1, PROJECT2], + "resourceNames": [PROJECT1_PATH, PROJECT2_PATH], "filter": self.FILTER, "orderBy": DESCENDING, "pageSize": PAGE_SIZE, @@ -231,7 +235,7 @@ api = self._make_one(client) iterator = api.list_entries( - projects=[PROJECT1, PROJECT2], + resource_names=[PROJECT1_PATH, PROJECT2_PATH], filter_=self.FILTER, order_by=DESCENDING, page_size=PAGE_SIZE, @@ -277,9 +281,9 @@ def test_write_entries_single(self): ENTRY = { "textPayload": TEXT, "resource": {"type": "global"}, - "logName": "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME), + "logName": f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}", } - SENT = {"entries": [ENTRY]} + SENT = {"entries": [ENTRY], "partialSuccess": False, "dry_run": False} conn = _Connection({}) client = _Client(conn) api = self._make_one(client) @@ -287,13 +291,13 @@ api.write_entries([ENTRY]) self.assertEqual(conn._called_with["method"], "POST") - path = "/%s" % self.WRITE_ENTRIES_PATH + path = f"/{self.WRITE_ENTRIES_PATH}" self.assertEqual(conn._called_with["path"], path) self.assertEqual(conn._called_with["data"], SENT) def test_write_entries_multiple(self): TEXT = "TEXT" - LOG_NAME = "projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME) + LOG_NAME = f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}" RESOURCE = {"type": "global"} LABELS = {"baz": "qux", "spam": "eggs"} ENTRY1 = {"textPayload": TEXT} @@ -303,25 +307,27 @@ "resource": RESOURCE, "labels": LABELS, "entries": [ENTRY1, ENTRY2], + "partialSuccess": False, + "dry_run": False, } conn = _Connection({}) client = _Client(conn) api = self._make_one(client) - api.write_entries([ENTRY1, ENTRY2], LOG_NAME, RESOURCE, LABELS) + api.write_entries([ENTRY1, ENTRY2], logger_name=LOG_NAME, resource=RESOURCE, labels=LABELS) self.assertEqual(conn._called_with["method"], "POST") - path = "/%s" % self.WRITE_ENTRIES_PATH + path = f"/{self.WRITE_ENTRIES_PATH}" self.assertEqual(conn._called_with["path"], path) self.assertEqual(conn._called_with["data"], SENT) def test_logger_delete(self): - path = "/projects/%s/logs/%s" % (self.PROJECT, self.LOGGER_NAME) + path = f"/projects/{self.PROJECT}/logs/{self.LOGGER_NAME}" conn = _Connection({}) client = _Client(conn) api = self._make_one(client) - api.logger_delete(self.PROJECT, self.LOGGER_NAME) + api.logger_delete(self.LOGGER_PATH) self.assertEqual(conn._called_with["method"], "DELETE") self.assertEqual(conn._called_with["path"], path) @@ -330,10 +336,11 @@ def test_logger_delete(self): class Test_SinksAPI(unittest.TestCase): PROJECT = "project" + PROJECT_PATH = "projects/project" FILTER = "logName:syslog AND severity>=ERROR" - LIST_SINKS_PATH = "projects/%s/sinks" % (PROJECT,) + LIST_SINKS_PATH = f"projects/{PROJECT}/sinks" SINK_NAME = "sink_name" - SINK_PATH = "projects/%s/sinks/%s" % (PROJECT, SINK_NAME) + SINK_PATH = f"projects/{PROJECT}/sinks/{SINK_NAME}" DESTINATION_URI = "faux.googleapis.com/destination" WRITER_IDENTITY =
"serviceAccount:project-123@example.com" @@ -372,7 +379,7 @@ def test_list_sinks_no_paging(self): client = _Client(conn) api = self._make_one(client) - iterator = api.list_sinks(self.PROJECT) + iterator = api.list_sinks(self.PROJECT_PATH) page = six.next(iterator.pages) sinks = list(page) token = iterator.next_page_token @@ -389,7 +396,7 @@ def test_list_sinks_no_paging(self): self.assertIs(sink.client, client) called_with = conn._called_with - path = "/%s" % (self.LIST_SINKS_PATH,) + path = f"/{self.LIST_SINKS_PATH}" self.assertEqual( called_with, {"method": "GET", "path": path, "query_params": {}} ) @@ -412,7 +419,7 @@ def test_list_sinks_w_paging(self): client = _Client(conn) api = self._make_one(client) - iterator = api.list_sinks(self.PROJECT, page_size=PAGE_SIZE, page_token=TOKEN) + iterator = api.list_sinks(self.PROJECT_PATH, page_size=PAGE_SIZE, page_token=TOKEN) sinks = list(iterator) token = iterator.next_page_token @@ -428,7 +435,7 @@ def test_list_sinks_w_paging(self): self.assertIs(sink.client, client) called_with = conn._called_with - path = "/%s" % (self.LIST_SINKS_PATH,) + path = f"/{self.LIST_SINKS_PATH}" self.assertEqual( called_with, { @@ -453,10 +460,10 @@ def test_sink_create_conflict(self): with self.assertRaises(Conflict): api.sink_create( - self.PROJECT, self.SINK_NAME, self.FILTER, self.DESTINATION_URI + self.PROJECT_PATH, self.SINK_NAME, self.FILTER, self.DESTINATION_URI ) - path = "/projects/%s/sinks" % (self.PROJECT,) + path = f"/projects/{self.PROJECT}/sinks" expected = { "method": "POST", "path": path, @@ -478,7 +485,7 @@ def test_sink_create_ok(self): api = self._make_one(client) returned = api.sink_create( - self.PROJECT, + self.PROJECT_PATH, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, @@ -486,7 +493,7 @@ def test_sink_create_ok(self): ) self.assertEqual(returned, after_create) - path = "/projects/%s/sinks" % (self.PROJECT,) + path = f"/projects/{self.PROJECT}/sinks" expected = { "method": "POST", "path": path, @@ -503,10 +510,10 @@ def test_sink_get_miss(self): api = self._make_one(client) with self.assertRaises(NotFound): - api.sink_get(self.PROJECT, self.SINK_NAME) + api.sink_get(self.SINK_PATH) self.assertEqual(conn._called_with["method"], "GET") - path = "/projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" self.assertEqual(conn._called_with["path"], path) def test_sink_get_hit(self): @@ -519,11 +526,11 @@ def test_sink_get_hit(self): client = _Client(conn) api = self._make_one(client) - response = api.sink_get(self.PROJECT, self.SINK_NAME) + response = api.sink_get(self.SINK_PATH) self.assertEqual(response, RESPONSE) self.assertEqual(conn._called_with["method"], "GET") - path = "/projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" self.assertEqual(conn._called_with["path"], path) def test_sink_update_miss(self): @@ -540,10 +547,10 @@ def test_sink_update_miss(self): with self.assertRaises(NotFound): api.sink_update( - self.PROJECT, self.SINK_NAME, self.FILTER, self.DESTINATION_URI + self.SINK_PATH, self.FILTER, self.DESTINATION_URI ) - path = "/projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" expected = { "method": "PUT", "path": path, @@ -565,15 +572,14 @@ def test_sink_update_hit(self): api = self._make_one(client) returned = api.sink_update( - self.PROJECT, - self.SINK_NAME, + self.SINK_PATH, self.FILTER, self.DESTINATION_URI, 
unique_writer_identity=True, ) self.assertEqual(returned, after_update) - path = "/projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" expected = { "method": "PUT", "path": path, @@ -590,10 +596,10 @@ def test_sink_delete_miss(self): api = self._make_one(client) with self.assertRaises(NotFound): - api.sink_delete(self.PROJECT, self.SINK_NAME) + api.sink_delete(self.SINK_PATH) self.assertEqual(conn._called_with["method"], "DELETE") - path = "/projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" self.assertEqual(conn._called_with["path"], path) def test_sink_delete_hit(self): @@ -601,10 +607,10 @@ def test_sink_delete_hit(self): client = _Client(conn) api = self._make_one(client) - api.sink_delete(self.PROJECT, self.SINK_NAME) + api.sink_delete(self.SINK_PATH) self.assertEqual(conn._called_with["method"], "DELETE") - path = "/projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" self.assertEqual(conn._called_with["path"], path) diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index cfa1ca5e0..7f762cf77 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -31,6 +31,7 @@ def _make_credentials(): class TestClient(unittest.TestCase): PROJECT = "PROJECT" + PROJECT_PATH = f"projects/{PROJECT}" LOGGER_NAME = "LOGGER_NAME" SINK_NAME = "SINK_NAME" FILTER = "logName:syslog AND severity>=ERROR" @@ -113,7 +114,7 @@ def test_logging_api_wo_gapic(self): from google.cloud.logging_v2._http import _LoggingAPI client = self._make_one( - self.PROJECT, credentials=_make_credentials(), _use_grpc=False + project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False ) conn = client._connection = _Connection() @@ -164,7 +165,7 @@ def test_sinks_api_wo_gapic(self): from google.cloud.logging_v2._http import _SinksAPI client = self._make_one( - self.PROJECT, credentials=_make_credentials(), _use_grpc=False + project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False ) conn = client._connection = _Connection() @@ -202,7 +203,7 @@ def test_metrics_api_wo_gapic(self): from google.cloud.logging_v2._http import _MetricsAPI client = self._make_one( - self.PROJECT, credentials=_make_credentials(), _use_grpc=False + project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False ) conn = client._connection = _Connection() @@ -293,7 +294,7 @@ def test_list_entries_defaults(self): { "path": "/entries:list", "method": "POST", - "data": {"filter": "removed", "projectIds": [self.PROJECT]}, + "data": {"filter": "removed", "resourceNames": [f"projects/{self.PROJECT}"]}, }, ) # verify that default filter is 24 hours @@ -335,13 +336,13 @@ def test_list_entries_explicit(self): }, ] client = self._make_one( - self.PROJECT, credentials=_make_credentials(), _use_grpc=False + project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False ) returned = {"entries": ENTRIES} client._connection = _Connection(returned) iterator = client.list_entries( - projects=[PROJECT1, PROJECT2], + resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"], filter_=INPUT_FILTER, order_by=DESCENDING, page_size=PAGE_SIZE, @@ -388,7 +389,7 @@ def test_list_entries_explicit(self): "orderBy": DESCENDING, "pageSize": PAGE_SIZE, "pageToken": TOKEN, - "projectIds": [PROJECT1, PROJECT2], + "resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"], }, }, ) @@ -431,13 +432,13 @@ def 
test_list_entries_explicit_timestamp(self): }, ] client = self._make_one( - self.PROJECT, credentials=_make_credentials(), _use_grpc=False + project=self.PROJECT, credentials=_make_credentials(), _use_grpc=False ) returned = {"entries": ENTRIES} client._connection = _Connection(returned) iterator = client.list_entries( - projects=[PROJECT1, PROJECT2], + resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"], filter_=INPUT_FILTER, order_by=DESCENDING, page_size=PAGE_SIZE, @@ -483,7 +484,7 @@ def test_list_entries_explicit_timestamp(self): "orderBy": DESCENDING, "pageSize": PAGE_SIZE, "pageToken": TOKEN, - "projectIds": [PROJECT1, PROJECT2], + "resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"], }, }, ) @@ -499,20 +500,20 @@ def test_sink_defaults(self): self.assertIsNone(sink.filter_) self.assertIsNone(sink.destination) self.assertIs(sink.client, client) - self.assertEqual(sink.project, self.PROJECT) + self.assertEqual(sink.parent, self.PROJECT_PATH) def test_sink_explicit(self): from google.cloud.logging_v2.sink import Sink creds = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=creds) - sink = client.sink(self.SINK_NAME, self.FILTER, self.DESTINATION_URI) + sink = client.sink(self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI) self.assertIsInstance(sink, Sink) self.assertEqual(sink.name, self.SINK_NAME) self.assertEqual(sink.filter_, self.FILTER) self.assertEqual(sink.destination, self.DESTINATION_URI) self.assertIs(sink.client, client) - self.assertEqual(sink.project, self.PROJECT) + self.assertEqual(sink.parent, self.PROJECT_PATH) def test_list_sinks_no_paging(self): import six @@ -571,7 +572,7 @@ def test_list_sinks_with_paging(self): returned = {"sinks": SINKS} client._connection = _Connection(returned) - iterator = client.list_sinks(PAGE_SIZE, TOKEN) + iterator = client.list_sinks(page_size=PAGE_SIZE, page_token=TOKEN) sinks = list(iterator) token = iterator.next_page_token @@ -619,7 +620,7 @@ def test_metric_explicit(self): client_obj = self._make_one(project=self.PROJECT, credentials=creds) metric = client_obj.metric( - self.METRIC_NAME, self.FILTER, description=self.DESCRIPTION + self.METRIC_NAME, filter_=self.FILTER, description=self.DESCRIPTION ) self.assertIsInstance(metric, Metric) self.assertEqual(metric.name, self.METRIC_NAME) @@ -685,7 +686,7 @@ def test_list_metrics_with_paging(self): client._connection = _Connection(returned) # Execute request. 
- iterator = client.list_metrics(page_size, token) + iterator = client.list_metrics(page_size=page_size, page_token=token) page = six.next(iterator.pages) metrics = list(page) diff --git a/tests/unit/test_logger.py b/tests/unit/test_logger.py index 74f7c4da3..05be18462 100644 --- a/tests/unit/test_logger.py +++ b/tests/unit/test_logger.py @@ -93,7 +93,7 @@ def test_batch_w_alternate_client(self): client1 = _Client(self.PROJECT, conn1) client2 = _Client(self.PROJECT, conn2) logger = self._make_one(self.LOGGER_NAME, client=client1) - batch = logger.batch(client2) + batch = logger.batch(client=client2) self.assertIsInstance(batch, Batch) self.assertIs(batch.logger, logger) self.assertIs(batch.client, client2) @@ -468,7 +468,7 @@ def test_delete_w_bound_client(self): logger.delete() self.assertEqual( - api._logger_delete_called_with, (self.PROJECT, self.LOGGER_NAME) + api._logger_delete_called_with, (f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}") ) def test_delete_w_alternate_client(self): @@ -480,7 +480,7 @@ def test_delete_w_alternate_client(self): logger.delete(client=client2) self.assertEqual( - api._logger_delete_called_with, (self.PROJECT, self.LOGGER_NAME) + api._logger_delete_called_with, (f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}") ) def test_list_entries_defaults(self): @@ -514,7 +514,7 @@ def test_list_entries_defaults(self): { "path": "/entries:list", "method": "POST", - "data": {"filter": "removed", "projectIds": [self.PROJECT]}, + "data": {"filter": "removed", "resourceNames": [f"projects/{self.PROJECT}"]}, }, ) # verify that default filter is 24 hours @@ -540,7 +540,7 @@ def test_list_entries_explicit(self): client._connection = _Connection({}) logger = self._make_one(self.LOGGER_NAME, client=client) iterator = logger.list_entries( - projects=[PROJECT1, PROJECT2], + resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"], filter_=INPUT_FILTER, order_by=DESCENDING, page_size=PAGE_SIZE, @@ -565,7 +565,7 @@ def test_list_entries_explicit(self): "orderBy": DESCENDING, "pageSize": PAGE_SIZE, "pageToken": TOKEN, - "projectIds": [PROJECT1, PROJECT2], + "resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"], }, }, ) @@ -600,7 +600,7 @@ def test_list_entries_explicit_timestamp(self): client._connection = _Connection({}) logger = self._make_one(self.LOGGER_NAME, client=client) iterator = logger.list_entries( - projects=[PROJECT1, PROJECT2], + resource_names=[f"projects/{PROJECT1}", f"projects/{PROJECT2}"], filter_=INPUT_FILTER, order_by=DESCENDING, page_size=PAGE_SIZE, @@ -625,7 +625,7 @@ def test_list_entries_explicit_timestamp(self): "orderBy": DESCENDING, "pageSize": PAGE_SIZE, "pageToken": TOKEN, - "projectIds": [PROJECT1, PROJECT2], + "resourceNames": [f"projects/{PROJECT1}", f"projects/{PROJECT2}"], }, }, ) @@ -1183,11 +1183,11 @@ class _DummyLoggingAPI(object): _write_entries_called_with = None - def write_entries(self, entries, logger_name=None, resource=None, labels=None): + def write_entries(self, entries, *, logger_name=None, resource=None, labels=None): self._write_entries_called_with = (entries, logger_name, resource, labels) - def logger_delete(self, project, logger_name): - self._logger_delete_called_with = (project, logger_name) + def logger_delete(self, logger_name): + self._logger_delete_called_with = (logger_name) class _Client(object): diff --git a/tests/unit/test_metric.py b/tests/unit/test_metric.py index 416a20044..217e5c46e 100644 --- a/tests/unit/test_metric.py +++ b/tests/unit/test_metric.py @@ -19,6 +19,7 @@ class 
TestMetric(unittest.TestCase): PROJECT = "test-project" METRIC_NAME = "metric-name" + FULL_METRIC_NAME = f"projects/{PROJECT}/metrics/{METRIC_NAME}" FILTER = "logName:syslog AND severity>=ERROR" DESCRIPTION = "DESCRIPTION" @@ -32,7 +33,6 @@ def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor_defaults(self): - FULL = "projects/%s/metrics/%s" % (self.PROJECT, self.METRIC_NAME) client = _Client(self.PROJECT) metric = self._make_one(self.METRIC_NAME, client=client) self.assertEqual(metric.name, self.METRIC_NAME) @@ -40,26 +40,24 @@ def test_ctor_defaults(self): self.assertEqual(metric.description, "") self.assertIs(metric.client, client) self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) - self.assertEqual(metric.path, "/%s" % (FULL,)) + self.assertEqual(metric.full_name, self.FULL_METRIC_NAME) + self.assertEqual(metric.path, f"/{self.FULL_METRIC_NAME}") def test_ctor_explicit(self): - FULL = "projects/%s/metrics/%s" % (self.PROJECT, self.METRIC_NAME) client = _Client(self.PROJECT) metric = self._make_one( - self.METRIC_NAME, self.FILTER, client=client, description=self.DESCRIPTION + self.METRIC_NAME, filter_=self.FILTER, client=client, description=self.DESCRIPTION ) self.assertEqual(metric.name, self.METRIC_NAME) self.assertEqual(metric.filter_, self.FILTER) self.assertEqual(metric.description, self.DESCRIPTION) self.assertIs(metric.client, client) self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) - self.assertEqual(metric.path, "/%s" % (FULL,)) + self.assertEqual(metric.full_name, self.FULL_METRIC_NAME) + self.assertEqual(metric.path, f"/{self.FULL_METRIC_NAME}") def test_from_api_repr_minimal(self): client = _Client(project=self.PROJECT) - FULL = "projects/%s/metrics/%s" % (self.PROJECT, self.METRIC_NAME) RESOURCE = {"name": self.METRIC_NAME, "filter": self.FILTER} klass = self._get_target_class() metric = klass.from_api_repr(RESOURCE, client=client) @@ -68,11 +66,10 @@ def test_from_api_repr_minimal(self): self.assertEqual(metric.description, "") self.assertIs(metric._client, client) self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) + self.assertEqual(metric.full_name, self.FULL_METRIC_NAME) def test_from_api_repr_w_description(self): client = _Client(project=self.PROJECT) - FULL = "projects/%s/metrics/%s" % (self.PROJECT, self.METRIC_NAME) DESCRIPTION = "DESCRIPTION" RESOURCE = { "name": self.METRIC_NAME, @@ -86,12 +83,12 @@ def test_from_api_repr_w_description(self): self.assertEqual(metric.description, DESCRIPTION) self.assertIs(metric._client, client) self.assertEqual(metric.project, self.PROJECT) - self.assertEqual(metric.full_name, FULL) + self.assertEqual(metric.full_name, self.FULL_METRIC_NAME) def test_create_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.metrics_api = _DummyMetricsAPI() - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client) metric.create() @@ -105,7 +102,7 @@ def test_create_w_alternate_client(self): client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() metric = self._make_one( - self.METRIC_NAME, self.FILTER, client=client1, description=self.DESCRIPTION + self.METRIC_NAME, filter_=self.FILTER, client=client1, description=self.DESCRIPTION ) metric.create(client=client2) @@ -118,7 +115,7 @@ def test_create_w_alternate_client(self): def 
test_exists_miss_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.metrics_api = _DummyMetricsAPI() - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client) self.assertFalse(metric.exists()) @@ -130,7 +127,7 @@ def test_exists_hit_w_alternate_client(self): client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() api._metric_get_response = RESOURCE - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client1) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client1) self.assertTrue(metric.exists(client=client2)) @@ -143,7 +140,7 @@ def test_reload_w_bound_client(self): api = client.metrics_api = _DummyMetricsAPI() api._metric_get_response = RESOURCE metric = self._make_one( - self.METRIC_NAME, self.FILTER, client=client, description=self.DESCRIPTION + self.METRIC_NAME, filter_=self.FILTER, client=client, description=self.DESCRIPTION ) metric.reload() @@ -163,7 +160,7 @@ def test_reload_w_alternate_client(self): client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() api._metric_get_response = RESOURCE - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client1) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client1) metric.reload(client=client2) @@ -174,7 +171,7 @@ def test_reload_w_alternate_client(self): def test_update_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.metrics_api = _DummyMetricsAPI() - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client) metric.update() @@ -188,7 +185,7 @@ def test_update_w_alternate_client(self): client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() metric = self._make_one( - self.METRIC_NAME, self.FILTER, client=client1, description=self.DESCRIPTION + self.METRIC_NAME, filter_=self.FILTER, client=client1, description=self.DESCRIPTION ) metric.update(client=client2) @@ -201,7 +198,7 @@ def test_update_w_alternate_client(self): def test_delete_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.metrics_api = _DummyMetricsAPI() - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client) metric.delete() @@ -213,7 +210,7 @@ def test_delete_w_alternate_client(self): client1 = _Client(project=self.PROJECT) client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() - metric = self._make_one(self.METRIC_NAME, self.FILTER, client=client1) + metric = self._make_one(self.METRIC_NAME, filter_=self.FILTER, client=client1) metric.delete(client=client2) diff --git a/tests/unit/test_sink.py b/tests/unit/test_sink.py index b51d0da44..390fd2142 100644 --- a/tests/unit/test_sink.py +++ b/tests/unit/test_sink.py @@ -18,7 +18,9 @@ class TestSink(unittest.TestCase): PROJECT = "test-project" + PROJECT_PATH = f"projects/{PROJECT}" SINK_NAME = "sink-name" + FULL_NAME = f"projects/{PROJECT}/sinks/{SINK_NAME}" FILTER = "logName:syslog AND severity>=INFO" DESTINATION_URI = "faux.googleapis.com/destination" WRITER_IDENTITY = "serviceAccount:project-123@example.com" @@ -33,34 +35,33 @@ def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_ctor_defaults(self): - FULL = "projects/%s/sinks/%s" % (self.PROJECT, 
self.SINK_NAME) client = _Client(self.PROJECT) sink = self._make_one(self.SINK_NAME, client=client) self.assertEqual(sink.name, self.SINK_NAME) self.assertIsNone(sink.filter_) self.assertIsNone(sink.destination) self.assertIs(sink.client, client) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) - self.assertEqual(sink.path, "/%s" % (FULL,)) + self.assertEqual(sink.parent, self.PROJECT_PATH) + self.assertEqual(sink.full_name, self.FULL_NAME) + self.assertEqual(sink.path, f"/{self.FULL_NAME}") def test_ctor_explicit(self): - FULL = "projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) client = _Client(self.PROJECT) + parent = "folders/testFolder" sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client + self.SINK_NAME, filter_=self.FILTER, parent=parent, destination=self.DESTINATION_URI, client=client ) self.assertEqual(sink.name, self.SINK_NAME) self.assertEqual(sink.filter_, self.FILTER) self.assertEqual(sink.destination, self.DESTINATION_URI) self.assertIs(sink.client, client) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) - self.assertEqual(sink.path, "/%s" % (FULL,)) + self.assertEqual(sink.parent, parent) + self.assertEqual(sink.full_name, f"{parent}/sinks/{self.SINK_NAME}") + self.assertEqual(sink.path, f"/{parent}/sinks/{self.SINK_NAME}" ) def test_from_api_repr_minimal(self): client = _Client(project=self.PROJECT) - FULL = "projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + RESOURCE = {"name": self.SINK_NAME, "destination": self.DESTINATION_URI} klass = self._get_target_class() sink = klass.from_api_repr(RESOURCE, client=client) @@ -69,12 +70,12 @@ def test_from_api_repr_minimal(self): self.assertIsNone(sink.filter_) self.assertIsNone(sink.writer_identity) self.assertIs(sink._client, client) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) + self.assertEqual(sink.parent, self.PROJECT_PATH) + self.assertEqual(sink.full_name, self.FULL_NAME) def test_from_api_repr_full(self): client = _Client(project=self.PROJECT) - FULL = "projects/%s/sinks/%s" % (self.PROJECT, self.SINK_NAME) + parent="organizations/my_organization" RESOURCE = { "name": self.SINK_NAME, "destination": self.DESTINATION_URI, @@ -82,14 +83,14 @@ def test_from_api_repr_full(self): "writerIdentity": self.WRITER_IDENTITY, } klass = self._get_target_class() - sink = klass.from_api_repr(RESOURCE, client=client) + sink = klass.from_api_repr(RESOURCE, client=client, parent=parent) self.assertEqual(sink.name, self.SINK_NAME) self.assertEqual(sink.filter_, self.FILTER) self.assertEqual(sink.destination, self.DESTINATION_URI) self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertIs(sink._client, client) - self.assertEqual(sink.project, self.PROJECT) - self.assertEqual(sink.full_name, FULL) + self.assertEqual(sink.parent, parent) + self.assertEqual(sink.full_name, f"{parent}/sinks/{self.SINK_NAME}") def test_create_w_bound_client(self): client = _Client(project=self.PROJECT) @@ -101,7 +102,7 @@ def test_create_w_bound_client(self): "writerIdentity": self.WRITER_IDENTITY, } sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client ) sink.create() @@ -112,14 +113,14 @@ def test_create_w_bound_client(self): self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertEqual( api._sink_create_called_with, - (self.PROJECT, 
self.SINK_NAME, self.FILTER, self.DESTINATION_URI, False), + (self.PROJECT_PATH, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, False), ) def test_create_w_alternate_client(self): client1 = _Client(project=self.PROJECT) client2 = _Client(project=self.PROJECT) sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client1 + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 ) api = client2.sinks_api = _DummySinksAPI() api._sink_create_response = { @@ -137,19 +138,19 @@ def test_create_w_alternate_client(self): self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertEqual( api._sink_create_called_with, - (self.PROJECT, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, True), + (self.PROJECT_PATH, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, True), ) def test_exists_miss_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.sinks_api = _DummySinksAPI() sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client ) self.assertFalse(sink.exists()) - self.assertEqual(api._sink_get_called_with, (self.PROJECT, self.SINK_NAME)) + self.assertEqual(api._sink_get_called_with, (self.FULL_NAME)) def test_exists_hit_w_alternate_client(self): RESOURCE = { @@ -162,12 +163,12 @@ def test_exists_hit_w_alternate_client(self): api = client2.sinks_api = _DummySinksAPI() api._sink_get_response = RESOURCE sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client1 + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 ) self.assertTrue(sink.exists(client=client2)) - self.assertEqual(api._sink_get_called_with, (self.PROJECT, self.SINK_NAME)) + self.assertEqual(api._sink_get_called_with, (self.FULL_NAME)) def test_reload_w_bound_client(self): NEW_DESTINATION_URI = "faux.googleapis.com/other" @@ -182,7 +183,7 @@ def test_reload_w_bound_client(self): self.assertEqual(sink.destination, NEW_DESTINATION_URI) self.assertIsNone(sink.filter_) self.assertIsNone(sink.writer_identity) - self.assertEqual(api._sink_get_called_with, (self.PROJECT, self.SINK_NAME)) + self.assertEqual(api._sink_get_called_with, (self.FULL_NAME)) def test_reload_w_alternate_client(self): NEW_FILTER = "logName:syslog AND severity>=INFO" @@ -204,7 +205,7 @@ def test_reload_w_alternate_client(self): self.assertEqual(sink.destination, NEW_DESTINATION_URI) self.assertEqual(sink.filter_, NEW_FILTER) self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) - self.assertEqual(api._sink_get_called_with, (self.PROJECT, self.SINK_NAME)) + self.assertEqual(api._sink_get_called_with, (self.FULL_NAME)) def test_update_w_bound_client(self): client = _Client(project=self.PROJECT) @@ -216,7 +217,7 @@ def test_update_w_bound_client(self): "writerIdentity": self.WRITER_IDENTITY, } sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client ) sink.update() @@ -227,7 +228,7 @@ def test_update_w_bound_client(self): self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertEqual( api._sink_update_called_with, - (self.PROJECT, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, False), + (self.FULL_NAME, self.FILTER, self.DESTINATION_URI, False), ) def test_update_w_alternate_client(self): @@ -241,7 +242,7 @@ def test_update_w_alternate_client(self): 
"writerIdentity": self.WRITER_IDENTITY, } sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client1 + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 ) sink.update(client=client2, unique_writer_identity=True) @@ -252,31 +253,31 @@ def test_update_w_alternate_client(self): self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertEqual( api._sink_update_called_with, - (self.PROJECT, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, True), + (self.FULL_NAME, self.FILTER, self.DESTINATION_URI, True), ) def test_delete_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.sinks_api = _DummySinksAPI() sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client ) sink.delete() - self.assertEqual(api._sink_delete_called_with, (self.PROJECT, self.SINK_NAME)) + self.assertEqual(api._sink_delete_called_with, (self.FULL_NAME)) def test_delete_w_alternate_client(self): client1 = _Client(project=self.PROJECT) client2 = _Client(project=self.PROJECT) api = client2.sinks_api = _DummySinksAPI() sink = self._make_one( - self.SINK_NAME, self.FILTER, self.DESTINATION_URI, client=client1 + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 ) sink.delete(client=client2) - self.assertEqual(api._sink_delete_called_with, (self.PROJECT, self.SINK_NAME)) + self.assertEqual(api._sink_delete_called_with, (self.FULL_NAME)) class _Client(object): @@ -286,10 +287,10 @@ def __init__(self, project): class _DummySinksAPI(object): def sink_create( - self, project, sink_name, filter_, destination, unique_writer_identity=False + self, parent, sink_name, filter_, destination, *, unique_writer_identity=False ): self._sink_create_called_with = ( - project, + parent, sink_name, filter_, destination, @@ -297,20 +298,19 @@ def sink_create( ) return self._sink_create_response - def sink_get(self, project, sink_name): + def sink_get(self, sink_name): from google.cloud.exceptions import NotFound - self._sink_get_called_with = (project, sink_name) + self._sink_get_called_with = (sink_name) try: return self._sink_get_response except AttributeError: raise NotFound("miss") def sink_update( - self, project, sink_name, filter_, destination, unique_writer_identity=False + self, sink_name, filter_, destination, *, unique_writer_identity=False ): self._sink_update_called_with = ( - project, sink_name, filter_, destination, @@ -318,5 +318,6 @@ def sink_update( ) return self._sink_update_response - def sink_delete(self, project, sink_name): - self._sink_delete_called_with = (project, sink_name) + def sink_delete(self, sink_name): + self._sink_delete_called_with = (sink_name) + From 425c9ce31593c12dd21e7593a413ee6e0b33abdf Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Wed, 11 Nov 2020 05:36:14 +0000 Subject: [PATCH 53/58] fix: fix formatting --- google/cloud/logging_v2/_gapic.py | 27 ++--------- google/cloud/logging_v2/_helpers.py | 10 ++-- google/cloud/logging_v2/_http.py | 37 +++++++++++---- google/cloud/logging_v2/client.py | 21 +++++++-- google/cloud/logging_v2/entries.py | 6 +-- google/cloud/logging_v2/logger.py | 10 ++-- google/cloud/logging_v2/metric.py | 5 +- google/cloud/logging_v2/sink.py | 17 +++---- tests/system/test_system.py | 24 +++++++--- tests/unit/test__gapic.py | 4 +- tests/unit/test__http.py | 12 +++-- tests/unit/test_client.py | 9 +++- tests/unit/test_logger.py | 15 
++++-- tests/unit/test_metric.py | 20 ++++-- tests/unit/test_sink.py | 73 ++++++++++++++++++++++------- 15 files changed, 189 insertions(+), 101 deletions(-) diff --git a/google/cloud/logging_v2/_gapic.py b/google/cloud/logging_v2/_gapic.py index 31bbfcbb5..7b033a89b 100644 --- a/google/cloud/logging_v2/_gapic.py +++ b/google/cloud/logging_v2/_gapic.py @@ -272,12 +272,7 @@ def sink_get(self, sink_name): ) def sink_update( - self, - sink_name, - filter_, - destination, - *, - unique_writer_identity=False, + self, sink_name, filter_, destination, *, unique_writer_identity=False, ): """Update a sink resource. @@ -304,11 +299,7 @@ def sink_update( protobuf to a dictionary). """ name = sink_name.split("/")[-1] # parse name out of full resource name - sink_pb = LogSink( - name=name, - filter=filter_, - destination=destination, - ) + sink_pb = LogSink(name=name, filter=filter_, destination=destination,) request = UpdateSinkRequest( sink_name=sink_name, @@ -417,11 +408,7 @@ def metric_update( - self, - project, - metric_name, - filter_, - description, + self, project, metric_name, filter_, description, ): """Update a metric resource. @@ -431,17 +418,13 @@ def metric_update( filter_ (str): the advanced logs filter expression defining the entries exported by the metric. description (str): description of the metric. - + Returns: The metric object returned from the API (converted from a protobuf to a dictionary). """ path = f"projects/{project}/metrics/{metric_name}" - metric_pb = LogMetric( - name=path, - filter=filter_, - description=description, - ) + metric_pb = LogMetric(name=path, filter=filter_, description=description,) metric_pb = self._gapic_api.update_log_metric( metric_name=path, metric=metric_pb ) diff --git a/google/cloud/logging_v2/_helpers.py b/google/cloud/logging_v2/_helpers.py index 0023c7a06..bed80d07e 100644 --- a/google/cloud/logging_v2/_helpers.py +++ b/google/cloud/logging_v2/_helpers.py @@ -68,7 +68,7 @@ def entry_from_resource(resource, client, loggers): resource (dict): One entry resource from API response. client (google.cloud.logging_v2.client.Client): Client that owns the log entry. - loggers (dict): + loggers (dict): A mapping of logger fullnames -> loggers. If the logger that owns the entry is not in ``loggers``, the entry will have a newly-created logger. @@ -95,12 +95,12 @@ def retrieve_metadata_server(metadata_key): See: https://cloud.google.com/compute/docs/storing-retrieving-metadata Args: - metadata_key (str): + metadata_key (str): Key of the metadata which will form the url. You can also supply query parameters after the metadata key. e.g. "tags?alt=json" - Returns: + Returns: str: The value of the metadata key returned by the metadata server. """ url = METADATA_URL + metadata_key @@ -124,7 +124,7 @@ def _normalize_severity(stdlib_level): Args: stdlib_level (int): 'levelno' from a :class:`logging.LogRecord` - + Returns: int: Corresponding Stackdriver severity.
""" @@ -136,7 +136,7 @@ def _add_defaults_to_filter(filter_): Args: filter_ (str): The original filter expression - + Returns: str: sensible default filter string """ diff --git a/google/cloud/logging_v2/_http.py b/google/cloud/logging_v2/_http.py index 4c446e587..b7ee9c3aa 100644 --- a/google/cloud/logging_v2/_http.py +++ b/google/cloud/logging_v2/_http.py @@ -69,7 +69,13 @@ def __init__(self, client): self.api_request = client._connection.api_request def list_entries( - self, resource_names, *, filter_=None, order_by=None, page_size=None, page_token=None + self, + resource_names, + *, + filter_=None, + order_by=None, + page_size=None, + page_token=None, ): """Return a page of log entry resources. @@ -127,9 +133,18 @@ def list_entries( iterator._HTTP_METHOD = "POST" return iterator - def write_entries(self, entries, *, logger_name=None, resource=None, labels=None, partial_success=False, dry_run=False): + def write_entries( + self, + entries, + *, + logger_name=None, + resource=None, + labels=None, + partial_success=False, + dry_run=False, + ): """Log an entry resource via a POST request - + See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write @@ -155,7 +170,11 @@ def write_entries(self, entries, *, logger_name=None, resource=None, labels=None Useful for checking whether the logging API endpoints are working properly before sending valuable data. """ - data = {"entries": list(entries), "partialSuccess": partial_success, "dry_run": dry_run} + data = { + "entries": list(entries), + "partialSuccess": partial_success, + "dry_run": dry_run, + } if logger_name is not None: data["logName"] = logger_name @@ -438,7 +457,7 @@ def metric_update(self, project, metric_name, filter_, description): filter_ (str): the advanced logs filter expression defining the entries exported by the metric. description (str): description of the metric. - + Returns: dict: The returned (updated) resource. """ @@ -473,12 +492,12 @@ def _item_to_entry(iterator, resource, loggers): iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. resource (dict): Log entry JSON resource returned from the API. - loggers (Mapping[str, google.cloud.logging_v2.logger.Logger]): + loggers (Mapping[str, google.cloud.logging_v2.logger.Logger]): A mapping of logger fullnames -> loggers. If the logger that owns the entry is not in ``loggers``, the entry will have a newly-created logger. - Returns: + Returns: google.cloud.logging_v2.entries._BaseEntry: The next log entry in the page. """ return entry_from_resource(resource, iterator.client, loggers) @@ -492,7 +511,7 @@ def _item_to_sink(iterator, resource): is currently in use. resource (dict): Sink JSON resource returned from the API. - Returns: + Returns: google.cloud.logging_v2.sink.Sink: The next sink in the page. """ return Sink.from_api_repr(resource, iterator.client) @@ -506,7 +525,7 @@ def _item_to_metric(iterator, resource): is currently in use. resource (dict): Sink JSON resource returned from the API. - Returns: + Returns: google.cloud.logging_v2.metric.Metric: The next metric in the page. """ diff --git a/google/cloud/logging_v2/client.py b/google/cloud/logging_v2/client.py index 9633d0b7a..a74545b9d 100644 --- a/google/cloud/logging_v2/client.py +++ b/google/cloud/logging_v2/client.py @@ -189,14 +189,20 @@ def logger(self, name): Args: name (str): The name of the logger to be constructed. - - Returns: + + Returns: google.cloud.logging_v2.logger.Logger: Logger created with the current client. 
""" return Logger(name, client=self) def list_entries( - self, *, resource_names=None, filter_=None, order_by=None, page_size=None, page_token=None + self, + *, + resource_names=None, + filter_=None, + order_by=None, + page_size=None, + page_token=None, ): """Return a page of log entry resources. @@ -272,6 +278,7 @@ def list_sinks(self, *, parent=None, page_size=None, page_token=None): "organizations/[ORGANIZATION_ID]" "billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]". + If not passed, defaults to the project bound to the API's client. page_size (Optional[int]): The maximum number of sinks in each page of results from this request. Non-positive values are ignored. Defaults to a @@ -287,7 +294,9 @@ def list_sinks(self, *, parent=None, page_size=None, page_token=None): """ if parent is None: parent = f"projects/{self.project}" - return self.sinks_api.list_sinks(parent=parent, page_size=page_size, page_token=page_token) + return self.sinks_api.list_sinks( + parent=parent, page_size=page_size, page_token=page_token + ) def metric(self, name, *, filter_=None, description=""): """Creates a metric bound to the current client. @@ -326,7 +335,9 @@ def list_metrics(self, *, page_size=None, page_token=None): Returns: Iterator[google.cloud.logging_v2.metric.Metric] """ - return self.metrics_api.list_metrics(self.project, page_size=page_size, page_token=page_token) + return self.metrics_api.list_metrics( + self.project, page_size=page_size, page_token=page_token + ) def get_default_handler(self, **kw): """Return the default logging handler based on the local environment. diff --git a/google/cloud/logging_v2/entries.py b/google/cloud/logging_v2/entries.py index d2faf0d04..bfed14d1d 100644 --- a/google/cloud/logging_v2/entries.py +++ b/google/cloud/logging_v2/entries.py @@ -50,7 +50,7 @@ def logger_name_from_path(path): Returns: str: Logger name parsed from ``path``. - + Raises: ValueError: If the ``path`` is ill-formed of if the project from ``path`` does not agree with the ``project`` passed in. @@ -147,12 +147,12 @@ def from_api_repr(cls, resource, client, *, loggers=None): Args: resource (dict): text entry resource representation returned from the API - client (google.cloud.logging_v2.client.Client): + client (google.cloud.logging_v2.client.Client): Client which holds credentials and project configuration. loggers (Optional[dict]): A mapping of logger fullnames -> loggers. If not passed, the entry will have a newly-created logger. - + Returns: google.cloud.logging.entries.LogEntry: Log entry parsed from ``resource``. """ diff --git a/google/cloud/logging_v2/logger.py b/google/cloud/logging_v2/logger.py index d60e5f58e..6f6234061 100644 --- a/google/cloud/logging_v2/logger.py +++ b/google/cloud/logging_v2/logger.py @@ -43,7 +43,6 @@ class Logger(object): - def __init__(self, name, client, *, labels=None): """Loggers represent named targets for log entries. @@ -91,7 +90,7 @@ def _require_client(self, client): client (Union[None, google.cloud.logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. - + Returns: google.cloud.logging_v2.client.Client: The client passed in or the currently bound client. @@ -254,7 +253,7 @@ def list_entries( By default, a 24 hour filter is applied. order_by (Optional[str]): One of :data:`~google.cloud.logging_v2.ASCENDING` or :data:`~google.cloud.logging_v2.DESCENDING`. - page_size (Optional[int]): + page_size (Optional[int]): Optional. 
The maximum number of entries in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. @@ -264,7 +263,7 @@ returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. - + Returns: Iterator[google.cloud.logging_v2.entries.LogEntry] """ @@ -288,7 +287,6 @@ class Batch(object): - def __init__(self, logger, client, resource=None): """Context manager: collect entries to log via a single API call. @@ -299,7 +297,7 @@ the logger to which entries will be logged. client (google.cloud.logging_v2.client.Client): The client to use. - resource (Optional[google.cloud.logging_v2.resource.Resource]): + resource (Optional[google.cloud.logging_v2.resource.Resource]): Monitored resource of the batch, defaults to None, which requires that every entry should have a resource specified. Since the methods used to write diff --git a/google/cloud/logging_v2/metric.py b/google/cloud/logging_v2/metric.py index fd3e3b62c..8f9268ea7 100644 --- a/google/cloud/logging_v2/metric.py +++ b/google/cloud/logging_v2/metric.py @@ -18,7 +18,6 @@ class Metric(object): - def __init__(self, name, *, filter_=None, client=None, description=""): """Metrics represent named filters for log entries. @@ -84,7 +83,7 @@ def _require_client(self, client): client (Union[None, google.cloud.logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. - + Returns: google.cloud.logging_v2.client.Client: The client passed in or the currently bound client. @@ -132,7 +131,7 @@ def exists(self, *, client=None): else: return True - def reload(self,*, client=None): + def reload(self, *, client=None): """API call: sync local metric configuration via a GET request See diff --git a/google/cloud/logging_v2/sink.py b/google/cloud/logging_v2/sink.py index caa243746..83841c3dd 100644 --- a/google/cloud/logging_v2/sink.py +++ b/google/cloud/logging_v2/sink.py @@ -18,8 +18,9 @@ class Sink(object): - - def __init__(self, name, *, filter_=None, parent=None, destination=None, client=None): + def __init__( + self, name, *, filter_=None, parent=None, destination=None, client=None + ): """Sinks represent filtered exports for log entries. See @@ -43,7 +44,7 @@ def __init__(self, name, *, filter_=None, parent=None, destination=None, client= be refreshed via :meth:`reload`. client (Optional[google.cloud.logging_v2.client.Client]): A client which holds credentials and project configuration for the sink (which requires a project). - """ + """ self.name = name self.filter_ = filter_ self.destination = destination @@ -62,7 +63,7 @@ def parent(self): if self._parent is None: self._parent = f"projects/{self.client.project}" return self._parent - + @property def full_name(self): """Fully-qualified name used in sink APIs""" @@ -101,10 +102,10 @@ def from_api_repr(cls, resource, client, *, parent=None): "billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]". Defaults to the project stored on the client. - + Returns: google.cloud.logging_v2.sink.Sink: Sink parsed from ``resource``. - + Raises: ValueError: if ``client`` is not ``None`` and the project from the resource does not agree with the project @@ -122,7 +123,7 @@ def _require_client(self, client): client (Union[None, google.cloud.logging_v2.client.Client]): The client to use.
If not passed, falls back to the ``client`` stored on the current sink. - + Returns: google.cloud.logging_v2.client.Client: The client passed in or the currently bound client. @@ -164,7 +165,7 @@ def exists(self, *, client=None): client (Optional[google.cloud.logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. - + Returns: bool: Boolean indicating existence of the sink. """ diff --git a/tests/system/test_system.py b/tests/system/test_system.py index 56a3dd7fc..10896adf7 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -318,7 +318,9 @@ def test_log_root_handler(self): def test_create_metric(self): METRIC_NAME = "test-create-metric%s" % (_RESOURCE_ID,) - metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric( + METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION + ) self.assertFalse(metric.exists()) retry = RetryErrors(Conflict) @@ -329,7 +331,9 @@ def test_create_metric(self): def test_list_metrics(self): METRIC_NAME = "test-list-metrics%s" % (_RESOURCE_ID,) - metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric( + METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION + ) self.assertFalse(metric.exists()) before_metrics = list(Config.CLIENT.list_metrics()) before_names = set(before.name for before in before_metrics) @@ -347,7 +351,9 @@ def test_list_metrics(self): def test_reload_metric(self): METRIC_NAME = "test-reload-metric%s" % (_RESOURCE_ID,) retry = RetryErrors(Conflict) - metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric( + METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION + ) self.assertFalse(metric.exists()) retry(metric.create)() self.to_delete.append(metric) @@ -364,7 +370,9 @@ def test_update_metric(self): retry = RetryErrors(Conflict) NEW_FILTER = "logName:other" NEW_DESCRIPTION = "updated" - metric = Config.CLIENT.metric(METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION) + metric = Config.CLIENT.metric( + METRIC_NAME, filter_=DEFAULT_FILTER, description=DEFAULT_DESCRIPTION + ) self.assertFalse(metric.exists()) retry(metric.create)() self.to_delete.append(metric) @@ -433,7 +441,9 @@ def test_create_sink_pubsub_topic(self): TOPIC_URI = "pubsub.googleapis.com/%s" % (topic_path,) retry = RetryErrors((Conflict, ServiceUnavailable), max_tries=10) - sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=TOPIC_URI) + sink = Config.CLIENT.sink( + SINK_NAME, filter_=DEFAULT_FILTER, destination=TOPIC_URI + ) self.assertFalse(sink.exists()) retry(sink.create)() @@ -516,7 +526,9 @@ def test_update_sink(self): bucket_uri = self._init_storage_bucket() dataset_uri = self._init_bigquery_dataset() UPDATED_FILTER = "logName:syslog" - sink = Config.CLIENT.sink(SINK_NAME, filter_=DEFAULT_FILTER, destination=bucket_uri) + sink = Config.CLIENT.sink( + SINK_NAME, filter_=DEFAULT_FILTER, destination=bucket_uri + ) self.assertFalse(sink.exists()) retry(sink.create)() self.to_delete.append(sink) diff --git a/tests/unit/test__gapic.py b/tests/unit/test__gapic.py index 59236fa65..75aa20d46 100644 --- a/tests/unit/test__gapic.py +++ b/tests/unit/test__gapic.py @@ -196,7 +196,9 @@ def test_list_sinks_with_options(self): type(client._gapic_api.transport.list_sinks), "__call__" ) as call: 
call.return_value = logging_v2.types.ListSinksResponse(sinks=[]) - result = client.list_sinks(self.PARENT_PATH, page_size=42, page_token="token") + result = client.list_sinks( + self.PARENT_PATH, page_size=42, page_token="token" + ) list(result) # Check the request diff --git a/tests/unit/test__http.py b/tests/unit/test__http.py index 1408769a5..23e018cab 100644 --- a/tests/unit/test__http.py +++ b/tests/unit/test__http.py @@ -314,7 +314,9 @@ def test_write_entries_multiple(self): client = _Client(conn) api = self._make_one(client) - api.write_entries([ENTRY1, ENTRY2], logger_name=LOG_NAME, resource=RESOURCE, labels=LABELS) + api.write_entries( + [ENTRY1, ENTRY2], logger_name=LOG_NAME, resource=RESOURCE, labels=LABELS + ) self.assertEqual(conn._called_with["method"], "POST") path = f"/{self.WRITE_ENTRIES_PATH}" @@ -419,7 +421,9 @@ def test_list_sinks_w_paging(self): client = _Client(conn) api = self._make_one(client) - iterator = api.list_sinks(self.PROJECT_PATH, page_size=PAGE_SIZE, page_token=TOKEN) + iterator = api.list_sinks( + self.PROJECT_PATH, page_size=PAGE_SIZE, page_token=TOKEN + ) sinks = list(iterator) token = iterator.next_page_token @@ -546,9 +550,7 @@ def test_sink_update_miss(self): api = self._make_one(client) with self.assertRaises(NotFound): - api.sink_update( - self.SINK_PATH, self.FILTER, self.DESTINATION_URI - ) + api.sink_update(self.SINK_PATH, self.FILTER, self.DESTINATION_URI) path = f"/projects/{self.PROJECT}/sinks/{self.SINK_NAME}" expected = { diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 7f762cf77..2d20b71c2 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -294,7 +294,10 @@ def test_list_entries_defaults(self): { "path": "/entries:list", "method": "POST", - "data": {"filter": "removed", "resourceNames": [f"projects/{self.PROJECT}"]}, + "data": { + "filter": "removed", + "resourceNames": [f"projects/{self.PROJECT}"], + }, }, ) # verify that default filter is 24 hours @@ -507,7 +510,9 @@ def test_sink_explicit(self): creds = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=creds) - sink = client.sink(self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI) + sink = client.sink( + self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI + ) self.assertIsInstance(sink, Sink) self.assertEqual(sink.name, self.SINK_NAME) self.assertEqual(sink.filter_, self.FILTER) diff --git a/tests/unit/test_logger.py b/tests/unit/test_logger.py index 05be18462..a96df5464 100644 --- a/tests/unit/test_logger.py +++ b/tests/unit/test_logger.py @@ -187,7 +187,7 @@ def test_log_text_defaults(self): self.assertEqual(api._write_entries_called_with, (ENTRIES, None, None, None)) def test_log_text_w_unicode_and_default_labels(self): - TEXT = u"TEXT" + TEXT = "TEXT" DEFAULT_LABELS = {"foo": "spam"} ENTRIES = [ { @@ -468,7 +468,8 @@ def test_delete_w_bound_client(self): logger.delete() self.assertEqual( - api._logger_delete_called_with, (f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}") + api._logger_delete_called_with, + (f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}"), ) def test_delete_w_alternate_client(self): @@ -480,7 +481,8 @@ def test_delete_w_alternate_client(self): logger.delete(client=client2) self.assertEqual( - api._logger_delete_called_with, (f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}") + api._logger_delete_called_with, + (f"projects/{self.PROJECT}/logs/{self.LOGGER_NAME}"), ) def test_list_entries_defaults(self): @@ -514,7 +516,10 @@ def 
test_list_entries_defaults(self): { "path": "/entries:list", "method": "POST", - "data": {"filter": "removed", "resourceNames": [f"projects/{self.PROJECT}"]}, + "data": { + "filter": "removed", + "resourceNames": [f"projects/{self.PROJECT}"], + }, }, ) # verify that default filter is 24 hours @@ -1187,7 +1192,7 @@ def write_entries(self, entries, *, logger_name=None, resource=None, labels=None self._write_entries_called_with = (entries, logger_name, resource, labels) def logger_delete(self, logger_name): - self._logger_delete_called_with = (logger_name) + self._logger_delete_called_with = logger_name class _Client(object): diff --git a/tests/unit/test_metric.py b/tests/unit/test_metric.py index 217e5c46e..a71fd763f 100644 --- a/tests/unit/test_metric.py +++ b/tests/unit/test_metric.py @@ -46,7 +46,10 @@ def test_ctor_defaults(self): def test_ctor_explicit(self): client = _Client(self.PROJECT) metric = self._make_one( - self.METRIC_NAME, filter_=self.FILTER, client=client, description=self.DESCRIPTION + self.METRIC_NAME, + filter_=self.FILTER, + client=client, + description=self.DESCRIPTION, ) self.assertEqual(metric.name, self.METRIC_NAME) self.assertEqual(metric.filter_, self.FILTER) @@ -102,7 +105,10 @@ def test_create_w_alternate_client(self): client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() metric = self._make_one( - self.METRIC_NAME, filter_=self.FILTER, client=client1, description=self.DESCRIPTION + self.METRIC_NAME, + filter_=self.FILTER, + client=client1, + description=self.DESCRIPTION, ) metric.create(client=client2) @@ -140,7 +146,10 @@ def test_reload_w_bound_client(self): api = client.metrics_api = _DummyMetricsAPI() api._metric_get_response = RESOURCE metric = self._make_one( - self.METRIC_NAME, filter_=self.FILTER, client=client, description=self.DESCRIPTION + self.METRIC_NAME, + filter_=self.FILTER, + client=client, + description=self.DESCRIPTION, ) metric.reload() @@ -185,7 +194,10 @@ def test_update_w_alternate_client(self): client2 = _Client(project=self.PROJECT) api = client2.metrics_api = _DummyMetricsAPI() metric = self._make_one( - self.METRIC_NAME, filter_=self.FILTER, client=client1, description=self.DESCRIPTION + self.METRIC_NAME, + filter_=self.FILTER, + client=client1, + description=self.DESCRIPTION, ) metric.update(client=client2) diff --git a/tests/unit/test_sink.py b/tests/unit/test_sink.py index 390fd2142..cac604058 100644 --- a/tests/unit/test_sink.py +++ b/tests/unit/test_sink.py @@ -49,7 +49,11 @@ def test_ctor_explicit(self): client = _Client(self.PROJECT) parent = "folders/testFolder" sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, parent=parent, destination=self.DESTINATION_URI, client=client + self.SINK_NAME, + filter_=self.FILTER, + parent=parent, + destination=self.DESTINATION_URI, + client=client, ) self.assertEqual(sink.name, self.SINK_NAME) self.assertEqual(sink.filter_, self.FILTER) @@ -57,11 +61,11 @@ def test_ctor_explicit(self): self.assertIs(sink.client, client) self.assertEqual(sink.parent, parent) self.assertEqual(sink.full_name, f"{parent}/sinks/{self.SINK_NAME}") - self.assertEqual(sink.path, f"/{parent}/sinks/{self.SINK_NAME}" ) + self.assertEqual(sink.path, f"/{parent}/sinks/{self.SINK_NAME}") def test_from_api_repr_minimal(self): client = _Client(project=self.PROJECT) - + RESOURCE = {"name": self.SINK_NAME, "destination": self.DESTINATION_URI} klass = self._get_target_class() sink = klass.from_api_repr(RESOURCE, client=client) @@ -75,7 +79,7 @@ def test_from_api_repr_minimal(self): 
def test_from_api_repr_full(self): client = _Client(project=self.PROJECT) - parent="organizations/my_organization" + parent = "organizations/my_organization" RESOURCE = { "name": self.SINK_NAME, "destination": self.DESTINATION_URI, @@ -102,7 +106,10 @@ def test_create_w_bound_client(self): "writerIdentity": self.WRITER_IDENTITY, } sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client, ) sink.create() @@ -113,14 +120,23 @@ def test_create_w_bound_client(self): self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertEqual( api._sink_create_called_with, - (self.PROJECT_PATH, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, False), + ( + self.PROJECT_PATH, + self.SINK_NAME, + self.FILTER, + self.DESTINATION_URI, + False, + ), ) def test_create_w_alternate_client(self): client1 = _Client(project=self.PROJECT) client2 = _Client(project=self.PROJECT) sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client1, ) api = client2.sinks_api = _DummySinksAPI() api._sink_create_response = { @@ -138,14 +154,23 @@ def test_create_w_alternate_client(self): self.assertEqual(sink.writer_identity, self.WRITER_IDENTITY) self.assertEqual( api._sink_create_called_with, - (self.PROJECT_PATH, self.SINK_NAME, self.FILTER, self.DESTINATION_URI, True), + ( + self.PROJECT_PATH, + self.SINK_NAME, + self.FILTER, + self.DESTINATION_URI, + True, + ), ) def test_exists_miss_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.sinks_api = _DummySinksAPI() sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client, ) self.assertFalse(sink.exists()) @@ -163,7 +188,10 @@ def test_exists_hit_w_alternate_client(self): api = client2.sinks_api = _DummySinksAPI() api._sink_get_response = RESOURCE sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client1, ) self.assertTrue(sink.exists(client=client2)) @@ -217,7 +245,10 @@ def test_update_w_bound_client(self): "writerIdentity": self.WRITER_IDENTITY, } sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client, ) sink.update() @@ -242,7 +273,10 @@ def test_update_w_alternate_client(self): "writerIdentity": self.WRITER_IDENTITY, } sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client1, ) sink.update(client=client2, unique_writer_identity=True) @@ -260,7 +294,10 @@ def test_delete_w_bound_client(self): client = _Client(project=self.PROJECT) api = client.sinks_api = _DummySinksAPI() sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client, ) sink.delete() @@ -272,7 +309,10 @@ def test_delete_w_alternate_client(self): client2 = 
_Client(project=self.PROJECT) api = client2.sinks_api = _DummySinksAPI() sink = self._make_one( - self.SINK_NAME, filter_=self.FILTER, destination=self.DESTINATION_URI, client=client1 + self.SINK_NAME, + filter_=self.FILTER, + destination=self.DESTINATION_URI, + client=client1, ) sink.delete(client=client2) @@ -301,7 +341,7 @@ def sink_create( def sink_get(self, sink_name): from google.cloud.exceptions import NotFound - self._sink_get_called_with = (sink_name) + self._sink_get_called_with = sink_name try: return self._sink_get_response except AttributeError: @@ -319,5 +359,4 @@ def sink_update( return self._sink_update_response def sink_delete(self, sink_name): - self._sink_delete_called_with = (sink_name) - + self._sink_delete_called_with = sink_name From 8466c62f459af6c2d89b411297df06988e45b522 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Wed, 11 Nov 2020 21:09:32 +0000 Subject: [PATCH 54/58] docs: make docstring fixes --- README.rst | 8 +- UPGRADING.md | 361 ++++++++++++++++++ docs/resource.rst | 6 + docs/snippets.py | 4 +- docs/stdlib-usage.rst | 10 +- docs/v2.rst | 1 + google/cloud/logging_v2/_gapic.py | 22 +- google/cloud/logging_v2/_helpers.py | 2 +- google/cloud/logging_v2/_http.py | 25 +- google/cloud/logging_v2/client.py | 18 +- google/cloud/logging_v2/entries.py | 4 +- google/cloud/logging_v2/handlers/_helpers.py | 16 +- .../cloud/logging_v2/handlers/app_engine.py | 40 +- .../logging_v2/handlers/container_engine.py | 22 +- google/cloud/logging_v2/handlers/handlers.py | 72 ++-- .../handlers/transports/background_thread.py | 174 ++++----- .../logging_v2/handlers/transports/base.py | 21 +- .../logging_v2/handlers/transports/sync.py | 2 +- google/cloud/logging_v2/logger.py | 54 +-- google/cloud/logging_v2/metric.py | 22 +- google/cloud/logging_v2/resource.py | 10 +- google/cloud/logging_v2/sink.py | 33 +- setup.py | 3 +- .../transports/test_background_thread.py | 29 +- tests/unit/handlers/transports/test_base.py | 2 +- tests/unit/test__http.py | 13 +- tests/unit/test_client.py | 9 +- tests/unit/test_logger.py | 3 +- 28 files changed, 658 insertions(+), 328 deletions(-) create mode 100644 UPGRADING.md create mode 100644 docs/resource.rst diff --git a/README.rst b/README.rst index a81f28e85..e5017619e 100644 --- a/README.rst +++ b/README.rst @@ -50,11 +50,11 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 +Python >= 3.6 -Deprecated Python Versions -^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support was removed on January 1, 2020. +Unsupported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python == 2.7. The last version of the library compatible with Python 2.7 is `google-cloud-logging==1.15.1`. Mac/Linux diff --git a/UPGRADING.md b/UPGRADING.md new file mode 100644 index 000000000..2e50d1d53 --- /dev/null +++ b/UPGRADING.md @@ -0,0 +1,361 @@ +# 2.0.0 Migration Guide + +The 2.0 release of the `google-cloud-logging` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. + +If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-logging/issues). + +## Supported Python Versions + +> **WARNING**: Breaking change + +The 2.0.0 release requires Python 3.6+. 
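+
+If you cannot move to Python 3.6 yet, pin the final 1.x release instead of upgrading. A minimal sketch, assuming (per the README note above) that `google-cloud-logging==1.15.1` is the last release supporting your interpreter:
+
+```sh
+# Stay on the 1.x series; the 2.x series requires Python 3.6+.
+python -m pip install "google-cloud-logging==1.15.1"
+```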
+ + +## Primary Changes + +This section lists the most relevant breaking changes in `google.cloud.logging`. +See 'Changes in GAPIC Layer' if you were directly using `google.cloud.logging_v2.proto` or `google.cloud.logging_v2.gapic`. + + +### Optional arguments *must* be passed as keyword arguments. + +Optional arguments are keyword-only arguments and *must* be passed by name. +See [PEP 3102](https://www.python.org/dev/peps/pep-3102/). + +**Before:** +```py +from google.cloud import logging + +logging_client = logging.Client() +logging_client.sink("my-sink") + +``` + +In 2.0.0, the optional `filter_` and `destination` arguments of `sink` must be spelled out as keywords, e.g. `logging_client.sink("my-sink", filter_=FILTER, destination=DESTINATION)`. + +### Support for non-project resources + +Where appropriate, sinks, entries, and metrics can be associated with non-project resources like folders and organizations. + +Methods generally default to the project bound to the client. + +This resulted in breaking changes to some methods which now expect full resource paths instead of just the name, to +disambiguate the location of a resource. + + +#### `google.cloud.logging_v2.client.Client` + +`list_entries` accepts `resource_names`. + + +**After**: +```py +from google.cloud import logging_v2 + +client = logging_v2.Client() +client.list_entries(resource_names=["folders/myFolder", "projects/myProject"]) +client.list_entries() # defaults to project bound to client +``` + +`list_sinks` accepts a `parent` parameter which is expected to be a single resource path. + + + +**After**: +```py +from google.cloud import logging_v2 + +client = logging_v2.Client() +client.list_sinks(parent="folders/myFolder") +client.list_sinks() # defaults to project bound to client +``` + +#### `google.cloud.logging_v2.logger.Logger` + +`list_entries` accepts `resource_names`. + +**After**: +```py +from google.cloud import logging_v2 + +client = logging_v2.Client() +logger = logging_v2.Logger("myLog", client) +logger.list_entries(resource_names=["folders/myFolder", "projects/myProject"]) +logger.list_entries() # defaults to project bound to client +``` + + + +#### `google.cloud.logging_v2.sink.Sink` + + +> **WARNING**: Breaking change +* Sinks no longer have a `project` property. The attribute is replaced by `parent` (e.g. `projects/my-project`) + + +### `google.cloud.logging` is an alias for `google.cloud.logging_v2` + +`google.cloud.logging` serves as a default alias for `google.cloud.logging_v2`. + +All library code has been moved to `google.cloud.logging_v2`. + + + +## Changes in GAPIC Layer + +This section describes changes in the GAPIC layer (produced by the generator) that previously lived in `google.cloud.logging_v2.proto` / `google.cloud.logging_v2.gapic`. +Most users are unlikely to have been using this layer directly. + +## Method Calls + +> **WARNING**: Breaking change + +Methods expect request objects. We provide a script that will convert most common use cases. + +* Install the library and `libcst`. `libcst` is required to run the fixup script. + +```sh +python3 -m pip install google-cloud-logging libcst +``` + +* The script `fixup_logging_{version}_keywords.py` is shipped with the library. It expects +an input directory (with the code to convert) and an empty destination directory. + +```sh +$ fixup_logging_v2_keywords.py --input-directory .samples/ --output-directory samples/ +``` + +**Before:** +```py +from google.cloud import automl + +project_id = "YOUR_PROJECT_ID" +model_id = "YOUR_MODEL_ID" + +client = automl.AutoMlClient() +# Get the full path of the model.
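+# In 1.x the resource name could be passed positionally; 2.0.0 requires the
+# request object or keyword arguments (compare the "After" example below).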
+model_full_id = client.model_path(project_id, "us-central1", model_id) +response = client.deploy_model(model_full_id) +``` + + +**After:** +```py +from google.cloud import automl + +project_id = "YOUR_PROJECT_ID" +model_id = "YOUR_MODEL_ID" + +client = automl.AutoMlClient() +# Get the full path of the model. +model_full_id = client.model_path(project_id, "us-central1", model_id) +response = client.deploy_model(name=model_full_id) +``` + +### More Details + +In `google-cloud-logging<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. + +**Before:** +```py + def batch_predict( + self, + name, + input_config, + output_config, + params=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): +``` + +In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. + +Some methods have additional keyword only parameters. The available parameters depend on the [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/2db5725bf898b544a0cf951e1694d3b0fce5eda3/google/cloud/automl/v1/prediction_service.proto#L86) specified by the API producer. + + +**After:** +```py +def batch_predict( + self, + request: prediction_service.BatchPredictRequest = None, + *, + name: str = None, + input_config: io.BatchPredictInputConfig = None, + output_config: io.BatchPredictOutputConfig = None, + params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: +``` + +> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. +> Passing both will result in an error. + + +Both of these calls are valid: + +```py +response = client.batch_predict( + request={ + "name": name, + "input_config": input_config, + "output_config": output_config, + "params": params, + } +) +``` + +```py +response = client.batch_predict( + name=name, + input_config=input_config, + output_config=output_config, + params=params, +) +``` + +This call is invalid because it mixes `request` with a keyword argument `params`. Executing this code +will result in an error. + +```py +response = client.batch_predict( + request={ + "name": name, + "input_config": input_config, + "output_config": output_config, + }, + params=params, +) +``` + + +The method `list_datasets` takes an argument `filter` instead of `filter_`. + +**Before** +```py +from google.cloud import automl + +project_id = "PROJECT_ID" + +client = automl.AutoMlClient() +project_location = client.location_path(project_id, "us-central1") + +# List all the datasets available in the region. +response = client.list_datasets(project_location, filter_="") +``` + +**After** +```py +from google.cloud import automl + +project_id = "PROJECT_ID" +client = automl.AutoMlClient() +# A resource that represents Google Cloud Platform location. +project_location = f"projects/{project_id}/locations/us-central1" + +# List all the datasets available in the region. +response = client.list_datasets(parent=project_location, filter="") +``` + +### Changes to v1beta1 Tables Client + +Optional arguments are now keyword-only arguments and *must* be passed by name. +See [PEP 3102](https://www.python.org/dev/peps/pep-3102/). 
+ +**Before** +```py + def predict( + self, + inputs, + model=None, + model_name=None, + model_display_name=None, + feature_importance=False, + project=None, + region=None, + **kwargs + ): +``` + +**After** +```py + def predict( + self, + inputs, + *, + model=None, + model_name=None, + model_display_name=None, + feature_importance=False, + project=None, + region=None, + **kwargs, + ): +``` + +`**kwargs` passed to methods must be either (1) kwargs on the underlying method (`retry`, `timeout`, or `metadata`) or (2) attributes of the request object. + +The following call is valid because `filter` is an attribute of `automl_v1beta1.ListDatasetsRequest`. + +```py +from google.cloud import automl_v1beta1 as automl + +client = automl.TablesClient(project=project_id, region=compute_region) + +# List all the datasets available in the region by applying filter. +response = client.list_datasets(filter=filter) +``` + + + +## Enums and types + + +> **WARNING**: Breaking change + +The submodules `enums` and `types` have been removed. + +**Before:** +```py + +from google.cloud import automl + +gcs_source = automl.types.GcsSource(input_uris=["gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"]) +deployment_state = automl.enums.Model.DeploymentState.DEPLOYED +``` + + +**After:** +```py +from google.cloud import automl + +gcs_source = automl.GcsSource(input_uris=["gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"]) +deployment_state = automl.Model.DeploymentState.DEPLOYED +``` + + +## Resource Path Helper Methods + +The following resource name helpers have been removed. Please construct the strings manually. + +```py +from google.cloud import automl + +project = "my-project" +location = "us-central1" +dataset = "my-dataset" +model = "my-model" +annotation_spec = "test-annotation" +model_evaluation = "test-evaluation" + +# AutoMlClient +annotation_spec_path = f"projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}" +location_path = f"projects/{project}/locations/{location}" +model_evaluation_path = f"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}" + +# PredictionServiceClient +model_path = f"projects/{project}/locations/{location}/models/{model}" +# alternatively you can use `model_path` from AutoMlClient +model_path = automl.AutoMlClient.model_path(project, location, model) + +``` \ No newline at end of file diff --git a/docs/resource.rst b/docs/resource.rst new file mode 100644 index 000000000..c5de1a540 --- /dev/null +++ b/docs/resource.rst @@ -0,0 +1,6 @@ +Resource +========= + +..
automodule:: google.cloud.logging_v2.resource + :members: + :show-inheritance: diff --git a/docs/snippets.py b/docs/snippets.py index 7a8621347..da9ba9b2d 100644 --- a/docs/snippets.py +++ b/docs/snippets.py @@ -107,8 +107,8 @@ def client_list_entries_multi_project( """List entries via client across multiple projects.""" # [START client_list_entries_multi_project] - PROJECT_IDS = ["one-project", "another-project"] - for entry in client.list_entries(projects=PROJECT_IDS): # API call(s) + resource_names = ["projects/one-project", "projects/another-project"] + for entry in client.list_entries(resource_names=resource_names): # API call(s) do_something_with(entry) # [END client_list_entries_multi_project] diff --git a/docs/stdlib-usage.rst b/docs/stdlib-usage.rst index cba4080b5..375b41ddf 100644 --- a/docs/stdlib-usage.rst +++ b/docs/stdlib-usage.rst @@ -3,7 +3,7 @@ Integration with Python logging module It's possible to tie the Python :mod:`logging` module directly into Google Cloud Logging. To use it, -create a :class:`CloudLoggingHandler <google.cloud.logging.handlers.CloudLoggingHandler>` instance from your +create a :class:`CloudLoggingHandler <google.cloud.logging_v2.handlers.CloudLoggingHandler>` instance from your Logging client. .. code-block:: python @@ -35,7 +35,7 @@ change it by providing a name to the handler: It is also possible to attach the handler to the root Python logger, so that for example a plain `logging.warn` call would be sent to Cloud Logging, as well as any other loggers created. However, you must avoid infinite recursion from the logging calls the client itself makes. A helper -method :meth:`setup_logging <google.cloud.logging.client.Client.setup_logging>` is provided to configure +method :meth:`setup_logging <google.cloud.logging_v2.client.Client.setup_logging>` is provided to configure this automatically: .. code-block:: python @@ -61,10 +61,10 @@ Python logging handler transports ================================== The Python logging handler can use different transports. The default is -:class:`google.cloud.logging.handlers.BackgroundThreadTransport`. +:class:`google.cloud.logging_v2.handlers.BackgroundThreadTransport`. - 1. :class:`google.cloud.logging.handlers.BackgroundThreadTransport` this is the default. It writes + 1. :class:`google.cloud.logging_v2.handlers.BackgroundThreadTransport` this is the default. It writes entries on a background :class:`python.threading.Thread`. - 1. :class:`google.cloud.logging.handlers.SyncTransport` this handler does a direct API call on each + 1. :class:`google.cloud.logging_v2.handlers.SyncTransport` this handler does a direct API call on each logging statement to write the entry. diff --git a/docs/v2.rst b/docs/v2.rst index 567eabd7a..823097bd7 100644 --- a/docs/v2.rst +++ b/docs/v2.rst @@ -8,6 +8,7 @@ v2 logger entries metric + resource sink stdlib-usage handlers diff --git a/google/cloud/logging_v2/_gapic.py b/google/cloud/logging_v2/_gapic.py index 7b033a89b..7a6d70650 100644 --- a/google/cloud/logging_v2/_gapic.py +++ b/google/cloud/logging_v2/_gapic.py @@ -44,7 +44,13 @@ def __init__(self, gapic_api, client): self._client = client def list_entries( - self, resource_names, *, filter_="", order_by="", page_size=0, page_token=None + self, + resource_names, + *, + filter_=None, + order_by=None, + page_size=None, + page_token=None, ): """Return a page of log entry resources. @@ -61,8 +67,8 @@ def list_entries( filter_ (str): a filter expression. See https://cloud.google.com/logging/docs/view/advanced_filters - order_by (str) One of :data:`~google.cloud.logging_v2.ASCENDING` - or :data:`~google.cloud.logging_v2.DESCENDING`. + order_by (str): One of :data:`~logging_v2.ASCENDING` + or :data:`~logging_v2.DESCENDING`.
page_size (int): maximum number of entries to return. If not passed, defaults to a value set by the API. page_token (str): opaque marker for the next "page" of entries. If not @@ -70,7 +76,7 @@ entries. Returns: - Iterator[google.cloud.logging_v2.LogEntry] + Iterator[~logging_v2.LogEntry] """ # full resource names are expected by the API resource_names = resource_names @@ -188,7 +194,7 @@ def list_sinks(self, parent, *, page_size=0, page_token=None): sinks. Returns: - Iterator[google.cloud.logging_v2.Sink] + Iterator[~logging_v2.Sink] """ request = ListSinksRequest( parent=parent, page_size=page_size, page_token=page_token @@ -506,7 +512,7 @@ def make_logging_api(client): """Create an instance of the Logging API adapter. Args: - client (google.cloud.logging_v2.client.Client): The client + client (~logging_v2.client.Client): The client that holds configuration details. Returns: @@ -524,7 +530,7 @@ def make_metrics_api(client): """Create an instance of the Metrics API adapter. Args: - client (google.cloud.logging_v2.client.Client): The client + client (~logging_v2.client.Client): The client that holds configuration details. Returns: @@ -542,7 +548,7 @@ def make_sinks_api(client): """Create an instance of the Sinks API adapter. Args: - client (google.cloud.logging_v2.client.Client): The client + client (~logging_v2.client.Client): The client that holds configuration details. Returns: diff --git a/google/cloud/logging_v2/_helpers.py b/google/cloud/logging_v2/_helpers.py index bed80d07e..555d8ed8c 100644 --- a/google/cloud/logging_v2/_helpers.py +++ b/google/cloud/logging_v2/_helpers.py @@ -66,7 +66,7 @@ def entry_from_resource(resource, client, loggers): Args: resource (dict): One entry resource from API response. - client (google.cloud.logging_v2.client.Client): + client (~logging_v2.client.Client): Client that owns the log entry. loggers (dict): A mapping of logger fullnames -> loggers. If the logger diff --git a/google/cloud/logging_v2/_http.py b/google/cloud/logging_v2/_http.py index b7ee9c3aa..68bde346a 100644 --- a/google/cloud/logging_v2/_http.py +++ b/google/cloud/logging_v2/_http.py @@ -92,8 +92,8 @@ def list_entries( filter_ (str): a filter expression. See https://cloud.google.com/logging/docs/view/advanced_filters - order_by (str) One of :data:`~google.cloud.logging_v2.ASCENDING` - or :data:`~google.cloud.logging_v2.DESCENDING`. + order_by (str): One of :data:`~logging_v2.ASCENDING` + or :data:`~logging_v2.DESCENDING`. page_size (int): maximum number of entries to return. If not passed, defaults to a value set by the API. page_token (str): opaque marker for the next "page" of entries. If not @@ -101,7 +101,7 @@ entries. Returns: - Iterator[google.cloud.logging_v2.LogEntry] + Iterator[~logging_v2.LogEntry] """ extra_params = {"resourceNames": resource_names} @@ -241,7 +241,7 @@ def list_sinks(self, parent, *, page_size=None, page_token=None): sinks. Returns: - Iterator[google.cloud.logging_v2.Sink] + Iterator[~logging_v2.Sink] """ extra_params = {} @@ -367,14 +367,7 @@ def sink_delete(self, sink_name): class _MetricsAPI(object): - """Helper mapping sink-related APIs. - - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics - - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The client used to make API requests.
- """ + """Helper mapping sink-related APIs.""" def __init__(self, client): self._client = client @@ -492,13 +485,13 @@ def _item_to_entry(iterator, resource, loggers): iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. resource (dict): Log entry JSON resource returned from the API. - loggers (Mapping[str, google.cloud.logging_v2.logger.Logger]): + loggers (Mapping[str, logging_v2.logger.Logger]): A mapping of logger fullnames -> loggers. If the logger that owns the entry is not in ``loggers``, the entry will have a newly-created logger. Returns: - google.cloud.logging_v2.entries._BaseEntry: The next log entry in the page. + ~logging_v2.entries._BaseEntry: The next log entry in the page. """ return entry_from_resource(resource, iterator.client, loggers) @@ -512,7 +505,7 @@ def _item_to_sink(iterator, resource): resource (dict): Sink JSON resource returned from the API. Returns: - google.cloud.logging_v2.sink.Sink: The next sink in the page. + ~logging_v2.sink.Sink: The next sink in the page. """ return Sink.from_api_repr(resource, iterator.client) @@ -526,7 +519,7 @@ def _item_to_metric(iterator, resource): resource (dict): Sink JSON resource returned from the API. Returns: - google.cloud.logging_v2.metric.Metric: + ~logging_v2.metric.Metric: The next metric in the page. """ return Metric.from_api_repr(resource, iterator.client) diff --git a/google/cloud/logging_v2/client.py b/google/cloud/logging_v2/client.py index a74545b9d..ee65d288a 100644 --- a/google/cloud/logging_v2/client.py +++ b/google/cloud/logging_v2/client.py @@ -191,7 +191,7 @@ def logger(self, name): name (str): The name of the logger to be constructed. Returns: - google.cloud.logging_v2.logger.Logger: Logger created with the current client. + ~logging_v2.logger.Logger: Logger created with the current client. """ return Logger(name, client=self) @@ -221,8 +221,8 @@ def list_entries( filter_ (str): a filter expression. See https://cloud.google.com/logging/docs/view/advanced_filters - order_by (str) One of :data:`~google.cloud.logging_v2.ASCENDING` - or :data:`~google.cloud.logging_v2.DESCENDING`. + order_by (str) One of :data:`~logging_v2.ASCENDING` + or :data:`~logging_v2.DESCENDING`. page_size (int): maximum number of entries to return, If not passed, defaults to a value set by the API. page_token (str): opaque marker for the next "page" of entries. If not @@ -230,7 +230,7 @@ def list_entries( entries. Returns: - Iterable[LogEntry] + Iterator[~logging_v2.LogEntry] """ if resource_names is None: resource_names = [f"projects/{self.project}"] @@ -259,7 +259,7 @@ def sink(self, name, *, filter_=None, destination=None): :meth:`Sink.reload`. Returns: - google.cloud.logging_v2.sink.Sink: Sink created with the current client. + ~logging_v2.sink.Sink: Sink created with the current client. """ return Sink(name, filter_=filter_, destination=destination, client=self) @@ -290,7 +290,7 @@ def list_sinks(self, *, parent=None, page_size=None, page_token=None): token. Returns: - Iterator[google.cloud.logging_v2.sink.Sink] + Iterator[~logging_v2.sink.Sink] """ if parent is None: parent = f"projects/{self.project}" @@ -312,7 +312,7 @@ def metric(self, name, *, filter_=None, description=""): to be refreshed via :meth:`Metric.reload`. Returns: - google.cloud.logging.metric.Metric: Metric created with the current client. + ~logging_v2.metric.Metric: Metric created with the current client. 
""" return Metric(name, filter_=filter_, client=self, description=description) @@ -333,7 +333,7 @@ def list_metrics(self, *, page_size=None, page_token=None): token. Returns: - Iterator[google.cloud.logging_v2.metric.Metric] + Iterator[~logging_v2.metric.Metric] """ return self.metrics_api.list_metrics( self.project, page_size=page_size, page_token=page_token @@ -373,7 +373,7 @@ def setup_logging( Args: log_level (Optional[int]): Python logging log level. Defaults to :const:`logging.INFO`. - excluded_loggers (Optional[Tuple[]]): The loggers to not attach the + excluded_loggers (Optional[Tuple[str]]): The loggers to not attach the handler to. This will always include the loggers in the path of the logging client itself. diff --git a/google/cloud/logging_v2/entries.py b/google/cloud/logging_v2/entries.py index bfed14d1d..87e042018 100644 --- a/google/cloud/logging_v2/entries.py +++ b/google/cloud/logging_v2/entries.py @@ -112,7 +112,7 @@ def _int_or_none(value): the entry was emitted. operation (Optional[dict]): Additional information about a potentially long-running operation associated with the log entry. - logger (google.cloud.logging_v2.logger.Logger): the logger used + logger (logging_v2.logger.Logger): the logger used to write the entry. """ @@ -147,7 +147,7 @@ def from_api_repr(cls, resource, client, *, loggers=None): Args: resource (dict): text entry resource representation returned from the API - client (google.cloud.logging_v2.client.Client): + client (~logging_v2.client.Client): Client which holds credentials and project configuration. loggers (Optional[dict]): A mapping of logger fullnames -> loggers. If not diff --git a/google/cloud/logging_v2/handlers/_helpers.py b/google/cloud/logging_v2/handlers/_helpers.py index e4408a448..3150e46c3 100644 --- a/google/cloud/logging_v2/handlers/_helpers.py +++ b/google/cloud/logging_v2/handlers/_helpers.py @@ -31,8 +31,8 @@ def format_stackdriver_json(record, message): """Helper to format a LogRecord in in Stackdriver fluentd format. - :rtype: str - :returns: JSON str to be written to the log file. + Returns: + str: JSON str to be written to the log file. """ subsecond, second = math.modf(record.created) @@ -49,8 +49,8 @@ def format_stackdriver_json(record, message): def get_trace_id_from_flask(): """Get trace_id from flask request headers. - :rtype: str - :returns: TraceID in HTTP request headers. + Returns: + str: TraceID in HTTP request headers. """ if flask is None or not flask.request: return None @@ -68,8 +68,8 @@ def get_trace_id_from_flask(): def get_trace_id_from_django(): """Get trace_id from django request headers. - :rtype: str - :returns: TraceID in HTTP request headers. + Returns: + str: TraceID in HTTP request headers. """ request = _get_django_request() @@ -88,8 +88,8 @@ def get_trace_id_from_django(): def get_trace_id(): """Helper to get trace_id from web application request header. - :rtype: str - :returns: TraceID in HTTP request headers. + Returns: + str: TraceID in HTTP request headers. """ checkers = ( get_trace_id_from_django, diff --git a/google/cloud/logging_v2/handlers/app_engine.py b/google/cloud/logging_v2/handlers/app_engine.py index a269b3e61..fed9bd205 100644 --- a/google/cloud/logging_v2/handlers/app_engine.py +++ b/google/cloud/logging_v2/handlers/app_engine.py @@ -36,28 +36,28 @@ class AppEngineHandler(logging.StreamHandler): - """A logging handler that sends App Engine-formatted logs to Stackdriver. 
- - :type client: :class:`~google.cloud.logging.client.Client` - :param client: The authenticated Google Cloud Logging client for this - handler to use. - - :type transport: :class:`type` - :param transport: The transport class. It should be a subclass - of :class:`.Transport`. If unspecified, - :class:`.BackgroundThreadTransport` will be used. - - :type stream: file-like object - :param stream: (optional) stream to be used by the handler. - """ + """A logging handler that sends App Engine-formatted logs to Stackdriver.""" def __init__( self, client, + *, name=_DEFAULT_GAE_LOGGER_NAME, transport=BackgroundThreadTransport, stream=None, ): + """ + Args: + client (~logging_v2.client.Client): The authenticated + Google Cloud Logging client for this handler to use. + name (Optional[str]): Name for the logger. + transport (Optional[~logging_v2.transports.Transport]): + The transport class. It should be a subclass + of :class:`.Transport`. If unspecified, + :class:`.BackgroundThreadTransport` will be used. + stream (Optional[IO]): Stream to be used by the handler. + + """ super(AppEngineHandler, self).__init__(stream) self.name = name self.client = client @@ -72,8 +72,8 @@ def __init__( def get_gae_resource(self): """Return the GAE resource using the environment variables. - :rtype: :class:`~google.cloud.logging.resource.Resource` - :returns: Monitored resource for GAE. + Returns: + google.cloud.logging_v2.resource.Resource: Monitored resource for GAE. """ gae_resource = Resource( type="gae_app", @@ -91,8 +91,8 @@ def get_gae_labels(self): If the trace ID can be detected, it will be included as a label. Currently, no other labels are included. - :rtype: dict - :returns: Labels for GAE app. + Returns: + dict: Labels for GAE app. """ gae_labels = {} @@ -109,8 +109,8 @@ def emit(self, record): See https://docs.python.org/2/library/logging.html#handler-objects - :type record: :class:`logging.LogRecord` - :param record: The record to be logged. + Args: + record (logging.LogRecord): The record to be logged. """ message = super(AppEngineHandler, self).format(record) gae_labels = self.get_gae_labels() diff --git a/google/cloud/logging_v2/handlers/container_engine.py b/google/cloud/logging_v2/handlers/container_engine.py index f2356fe96..a4bd0f848 100644 --- a/google/cloud/logging_v2/handlers/container_engine.py +++ b/google/cloud/logging_v2/handlers/container_engine.py @@ -29,26 +29,26 @@ class ContainerEngineHandler(logging.StreamHandler): This handler is written to format messages for the Google Container Engine (GKE) fluentd plugin, so that metadata such as log level are properly set. - - :type name: str - :param name: (optional) the name of the custom log in Cloud Logging. - - :type stream: file-like object - :param stream: (optional) stream to be used by the handler. """ - def __init__(self, name=None, stream=None): + def __init__(self, *, name=None, stream=None): + """ + Args: + name (Optional[str]): The name of the custom log in Cloud Logging. + stream (Optional[IO]): Stream to be used by the handler. + + """ super(ContainerEngineHandler, self).__init__(stream=stream) self.name = name def format(self, record): """Format the message into JSON expected by fluentd. - :type record: :class:`~logging.LogRecord` - :param record: the log record + Args: + record (logging.LogRecord): The log record. - :rtype: str - :returns: A JSON string formatted for GKE fluentd. + Returns: + str: A JSON string formatted for GKE fluentd. 
""" message = super(ContainerEngineHandler, self).format(record) return format_stackdriver_json(record, message) diff --git a/google/cloud/logging_v2/handlers/handlers.py b/google/cloud/logging_v2/handlers/handlers.py index e7c781d05..2d79c7f8a 100644 --- a/google/cloud/logging_v2/handlers/handlers.py +++ b/google/cloud/logging_v2/handlers/handlers.py @@ -35,32 +35,6 @@ class CloudLoggingHandler(logging.StreamHandler): This handler supports both an asynchronous and synchronous transport. - :type client: :class:`google.cloud.logging.client.Client` - :param client: the authenticated Google Cloud Logging client for this - handler to use - - :type name: str - :param name: the name of the custom log in Cloud Logging. Defaults - to 'python'. The name of the Python logger will be represented - in the ``python_logger`` field. - - :type transport: :class:`type` - :param transport: Class for creating new transport objects. It should - extend from the base :class:`.Transport` type and - implement :meth`.Transport.send`. Defaults to - :class:`.BackgroundThreadTransport`. The other - option is :class:`.SyncTransport`. - - :type resource: :class:`~google.cloud.logging.resource.Resource` - :param resource: (Optional) Monitored resource of the entry, defaults - to the global resource type. - - :type labels: dict - :param labels: (Optional) Mapping of labels for the entry. - - :type stream: file-like object - :param stream: (optional) stream to be used by the handler. - Example: .. code-block:: python @@ -82,12 +56,33 @@ class CloudLoggingHandler(logging.StreamHandler): def __init__( self, client, + *, name=DEFAULT_LOGGER_NAME, transport=BackgroundThreadTransport, resource=_GLOBAL_RESOURCE, labels=None, stream=None, ): + """ + Args: + client (~logging_v2.client.Client): + The authenticated Google Cloud Logging client for this + handler to use. + name (str): the name of the custom log in Cloud Logging. + Defaults to 'python'. The name of the Python logger will be represented + in the ``python_logger`` field. + transport (~logging_v2.transports.Transport): + Class for creating new transport objects. It should + extend from the base :class:`.Transport` type and + implement :meth`.Transport.send`. Defaults to + :class:`.BackgroundThreadTransport`. The other + option is :class:`.SyncTransport`. + resource (~logging_v2.resource.Resource): + Resource for this Handler. Defaults to ``GLOBAL_RESOURCE``. + labels (Optional[dict]): Monitored resource of the entry, defaults + to the global resource type. + stream (Optional[IO]): Stream to be used by the handler. + """ super(CloudLoggingHandler, self).__init__(stream) self.name = name self.client = client @@ -102,33 +97,21 @@ def emit(self, record): See https://docs.python.org/2/library/logging.html#handler-objects - :type record: :class:`logging.LogRecord` - :param record: The record to be logged. + Args: + record (logging.LogRecord): The record to be logged. """ message = super(CloudLoggingHandler, self).format(record) self.transport.send(record, message, resource=self.resource, labels=self.labels) def setup_logging( - handler, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, log_level=logging.INFO + handler, *, excluded_loggers=EXCLUDED_LOGGER_DEFAULTS, log_level=logging.INFO ): """Attach a logging handler to the Python root logger Excludes loggers that this library itself uses to avoid infinite recursion. 
-    :type handler: :class:`logging.handler`
-    :param handler: the handler to attach to the global handler
-
-    :type excluded_loggers: tuple
-    :param excluded_loggers: (Optional) The loggers to not attach the handler
-                             to. This will always include the loggers in the
-                             path of the logging client itself.
-
-    :type log_level: int
-    :param log_level: (Optional) Python logging log level. Defaults to
-                      :const:`logging.INFO`.
-
    Example:

    .. code-block:: python

@@ -144,6 +127,13 @@ def setup_logging(
        logging.error('bad news')  # API call

+    Args:
+        handler (logging.Handler): the handler to attach to the root logger
+        excluded_loggers (Optional[Tuple[str]]): The loggers to not attach the handler
+            to. This will always include the loggers in the
+            path of the logging client itself.
+        log_level (Optional[int]): Python logging log level. Defaults to
+            :const:`logging.INFO`.
    """
    all_excluded_loggers = set(excluded_loggers + EXCLUDED_LOGGER_DEFAULTS)
    logger = logging.getLogger()
diff --git a/google/cloud/logging_v2/handlers/transports/background_thread.py b/google/cloud/logging_v2/handlers/transports/background_thread.py
index 986f88f43..873fa452d 100644
--- a/google/cloud/logging_v2/handlers/transports/background_thread.py
+++ b/google/cloud/logging_v2/handlers/transports/background_thread.py
@@ -39,26 +39,22 @@
 _LOGGER = logging.getLogger(__name__)


-def _get_many(queue_, max_items=None, max_latency=0):
+def _get_many(queue_, *, max_items=None, max_latency=0):
    """Get multiple items from a Queue.

    Gets at least one (blocking) and at most ``max_items`` items
    (non-blocking) from a given Queue. Does not mark the items as done.

-    :type queue_: :class:`~queue.Queue`
-    :param queue_: The Queue to get items from.
+    Args:
+        queue_ (queue.Queue): The Queue to get items from.
+        max_items (Optional[int]): The maximum number of items to get.
+            If ``None``, then all available items in the queue are returned.
+        max_latency (Optional[float]): The maximum number of seconds to wait
+            for more than one item from a queue. This number includes
+            the time required to retrieve the first item.

-    :type max_items: int
-    :param max_items: The maximum number of items to get. If ``None``, then all
-        available items in the queue are returned.
-
-    :type max_latency: float
-    :param max_latency: The maximum number of seconds to wait for more than one
-        item from a queue. This number includes the time required to retrieve
-        the first item.
-
-    :rtype: list
-    :returns: items retrieved from the queue.
+    Returns:
+        list: items retrieved from the queue
    """
    start = time.time()
    # Always return at least one item.
@@ -74,34 +70,30 @@


 class _Worker(object):
-    """A background thread that writes batches of log entries.
-
-    :type cloud_logger: :class:`~google.cloud.logging.logger.Logger`
-    :param cloud_logger: The logger to send entries to.
-
-    :type grace_period: float
-    :param grace_period: The amount of time to wait for pending logs to
-        be submitted when the process is shutting down.
-
-    :type max_batch_size: int
-    :param max_batch_size: The maximum number of items to send at a time
-        in the background thread.
-
-    :type max_latency: float
-    :param max_latency: The amount of time to wait for new logs before
-        sending a new batch. It is strongly recommended to keep this smaller
-        than the grace_period. This means this is effectively the longest
-        amount of time the background thread will hold onto log entries
-        before sending them to the server.
- """ + """A background thread that writes batches of log entries.""" def __init__( self, cloud_logger, + *, grace_period=_DEFAULT_GRACE_PERIOD, max_batch_size=_DEFAULT_MAX_BATCH_SIZE, max_latency=_DEFAULT_MAX_LATENCY, ): + """ + Args: + cloud_logger (logging_v2.logger.Logger): + The logger to send entries to. + grace_period (Optional[float]): The amount of time to wait for pending logs to + be submitted when the process is shutting down. + max_batch (Optional[int]): The maximum number of items to send at a time + in the background thread. + max_latency (Optional[float]): The amount of time to wait for new logs before + sending a new batch. It is strongly recommended to keep this smaller + than the grace_period. This means this is effectively the longest + amount of time the background thread will hold onto log entries + before sending them to the server. + """ self._cloud_logger = cloud_logger self._grace_period = grace_period self._max_batch_size = max_batch_size @@ -172,7 +164,7 @@ def start(self): self._thread.start() atexit.register(self._main_thread_terminated) - def stop(self, grace_period=None): + def stop(self, *, grace_period=None): """Signals the background thread to stop. This does not terminate the background thread. It simply queues the @@ -181,13 +173,13 @@ def stop(self, grace_period=None): work. The ``grace_period`` parameter will give the background thread some time to finish processing before this function returns. - :type grace_period: float - :param grace_period: If specified, this method will block up to this - many seconds to allow the background thread to finish work before - returning. + Args: + grace_period (Optional[float]): If specified, this method will + block up to this many seconds to allow the background thread + to finish work before returning. - :rtype: bool - :returns: True if the thread terminated. False if the thread is still + Returns: + bool: True if the thread terminated. False if the thread is still running. """ if not self.is_alive: @@ -222,7 +214,7 @@ def _main_thread_terminated(self): file=sys.stderr, ) - if self.stop(self._grace_period): + if self.stop(grace_period=self._grace_period): print("Sent all pending logs.", file=sys.stderr) else: print( @@ -231,29 +223,20 @@ def _main_thread_terminated(self): ) def enqueue( - self, record, message, resource=None, labels=None, trace=None, span_id=None + self, record, message, *, resource=None, labels=None, trace=None, span_id=None ): """Queues a log entry to be written by the background thread. - :type record: :class:`logging.LogRecord` - :param record: Python log record that the handler was called with. - - :type message: str - :param message: The message from the ``LogRecord`` after being + Args: + record (logging.LogRecord): Python log record that the handler was called with. + message (str): The message from the ``LogRecord`` after being formatted by the associated log formatters. - - :type resource: :class:`~google.cloud.logging.resource.Resource` - :param resource: (Optional) Monitored resource of the entry - - :type labels: dict - :param labels: (Optional) Mapping of labels for the entry. - - :type trace: str - :param trace: (optional) traceid to apply to the logging entry. - - :type span_id: str - :param span_id: (optional) span_id within the trace for the log entry. - Specify the trace parameter if span_id is set. + resource (Optional[google.cloud.logging_v2.resource.Resource]): + Monitored resource of the entry + labels (Optional[dict]): Mapping of labels for the entry. 
+            trace (Optional[str]): TraceID to apply to the logging entry.
+            span_id (Optional[str]): Span_id within the trace for the log entry.
+                Specify the trace parameter if span_id is set.
        """
        queue_entry = {
            "info": {"message": message, "python_logger": record.name},
@@ -272,38 +255,32 @@ def flush(self):


 class BackgroundThreadTransport(Transport):
-    """Asynchronous transport that uses a background thread.
-
-    :type client: :class:`~google.cloud.logging.client.Client`
-    :param client: The Logging client.
-
-    :type name: str
-    :param name: the name of the logger.
-
-    :type grace_period: float
-    :param grace_period: The amount of time to wait for pending logs to
-        be submitted when the process is shutting down.
-
-    :type batch_size: int
-    :param batch_size: The maximum number of items to send at a time in the
-        background thread.
-
-    :type max_latency: float
-    :param max_latency: The amount of time to wait for new logs before
-        sending a new batch. It is strongly recommended to keep this smaller
-        than the grace_period. This means this is effectively the longest
-        amount of time the background thread will hold onto log entries
-        before sending them to the server.
-    """
+    """Asynchronous transport that uses a background thread."""

    def __init__(
        self,
        client,
        name,
+        *,
        grace_period=_DEFAULT_GRACE_PERIOD,
        batch_size=_DEFAULT_MAX_BATCH_SIZE,
        max_latency=_DEFAULT_MAX_LATENCY,
    ):
+        """
+        Args:
+            client (~logging_v2.client.Client):
+                The Logging client.
+            name (str): The name of the logger.
+            grace_period (Optional[float]): The amount of time to wait for pending logs to
+                be submitted when the process is shutting down.
+            batch_size (Optional[int]): The maximum number of items to send at a time in the
+                background thread.
+            max_latency (Optional[float]): The amount of time to wait for new logs before
+                sending a new batch. It is strongly recommended to keep this smaller
+                than the grace_period. This means this is effectively the longest
+                amount of time the background thread will hold onto log entries
+                before sending them to the server.
+        """
        self.client = client
        logger = self.client.logger(name)
        self.worker = _Worker(
@@ -319,25 +296,16 @@ def send(
    ):
        """Overrides Transport.send().

-        :type record: :class:`logging.LogRecord`
-        :param record: Python log record that the handler was called with.
-
-        :type message: str
-        :param message: The message from the ``LogRecord`` after being
-            formatted by the associated log formatters.
-
-        :type resource: :class:`~google.cloud.logging.resource.Resource`
-        :param resource: (Optional) Monitored resource of the entry.
-
-        :type labels: dict
-        :param labels: (Optional) Mapping of labels for the entry.
-
-        :type trace: str
-        :param trace: (optional) traceid to apply to the logging entry.
-
-        :type span_id: str
-        :param span_id: (optional) span_id within the trace for the log entry.
-            Specify the trace parameter if span_id is set.
+        Args:
+            record (logging.LogRecord): Python log record that the handler was called with.
+            message (str): The message from the ``LogRecord`` after being
+                formatted by the associated log formatters.
+            resource (Optional[google.cloud.logging_v2.resource.Resource]):
+                Monitored resource of the entry.
+            labels (Optional[dict]): Mapping of labels for the entry.
+            trace (Optional[str]): TraceID to apply to the logging entry.
+            span_id (Optional[str]): span_id within the trace for the log entry.
+                Specify the trace parameter if span_id is set.
""" self.worker.enqueue( record, diff --git a/google/cloud/logging_v2/handlers/transports/base.py b/google/cloud/logging_v2/handlers/transports/base.py index 7e24cc020..c94c7ad70 100644 --- a/google/cloud/logging_v2/handlers/transports/base.py +++ b/google/cloud/logging_v2/handlers/transports/base.py @@ -23,22 +23,17 @@ class Transport(object): """ def send( - self, record, message, resource=None, labels=None, trace=None, span_id=None + self, record, message, *, resource=None, labels=None, trace=None, span_id=None ): """Transport send to be implemented by subclasses. - :type record: :class:`logging.LogRecord` - :param record: Python log record that the handler was called with. - - :type message: str - :param message: The message from the ``LogRecord`` after being - formatted by the associated log formatters. - - :type resource: :class:`~google.cloud.logging.resource.Resource` - :param resource: (Optional) Monitored resource of the entry. - - :type labels: dict - :param labels: (Optional) Mapping of labels for the entry. + Args: + record (logging.LogRecord): Python log record that the handler was called with. + message (str): The message from the ``LogRecord`` after being + formatted by the associated log formatters. + resource (Optional[google.cloud.logging_v2.resource.Resource]): + Monitored resource of the entry. + labels (Optional[dict]): Mapping of labels for the entry. """ raise NotImplementedError diff --git a/google/cloud/logging_v2/handlers/transports/sync.py b/google/cloud/logging_v2/handlers/transports/sync.py index f8e4a05bd..550c29391 100644 --- a/google/cloud/logging_v2/handlers/transports/sync.py +++ b/google/cloud/logging_v2/handlers/transports/sync.py @@ -40,7 +40,7 @@ def send( Python log record that the handler was called with. message (str): The message from the ``LogRecord`` after being formatted by the associated log formatters. - resource (Optional[google.cloud.logging_v2.resource.Resource]): + resource (Optional[~logging_v2.resource.Resource]): Monitored resource of the entry. labels (Optional[dict]): Mapping of labels for the entry. """ diff --git a/google/cloud/logging_v2/logger.py b/google/cloud/logging_v2/logger.py index 6f6234061..89202bcbd 100644 --- a/google/cloud/logging_v2/logger.py +++ b/google/cloud/logging_v2/logger.py @@ -51,7 +51,7 @@ def __init__(self, name, client, *, labels=None): Args: name (str): The name of the logger. - client (google.cloud.logging_v2.client.Client): + client (~logging_v2.client.Client): A client which holds credentials and project configuration for the logger (which requires a project). labels (Optional[dict]): Mapping of default labels for entries written @@ -87,12 +87,12 @@ def _require_client(self, client): """Check client or verify over-ride. Also sets ``parent``. Args: - client (Union[None, google.cloud.logging_v2.client.Client]): + client (Union[None, ~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. Returns: - google.cloud.logging_v2.client.Client: The client passed in + ~logging_v2.client.Client: The client passed in or the currently bound client. """ if client is None: @@ -103,7 +103,7 @@ def batch(self, *, client=None): """Return a batch to use as a context manager. Args: - client (Union[None, google.cloud.logging_v2.client.Client]): + client (Union[None, ~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. 
@@ -137,11 +137,11 @@ def log_empty(self, *, client=None, **kw): https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. kw (Optional[dict]): additional keyword arguments for the entry. - See :class:`~google.cloud.logging_v2.entries.LogEntry`. + See :class:`~logging_v2.entries.LogEntry`. """ self._do_log(client, LogEntry, **kw) @@ -153,11 +153,11 @@ def log_text(self, text, *, client=None, **kw): Args: text (str): the log message - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. kw (Optional[dict]): additional keyword arguments for the entry. - See :class:`~google.cloud.logging_v2.entries.LogEntry`. + See :class:`~logging_v2.entries.LogEntry`. """ self._do_log(client, TextEntry, text, **kw) @@ -169,11 +169,11 @@ def log_struct(self, info, *, client=None, **kw): Args: info (dict): the log entry information - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. kw (Optional[dict]): additional keyword arguments for the entry. - See :class:`~google.cloud.logging_v2.entries.LogEntry`. + See :class:`~logging_v2.entries.LogEntry`. """ self._do_log(client, StructEntry, info, **kw) @@ -186,15 +186,15 @@ def log_proto(self, message, *, client=None, **kw): Args: message (google.protobuf.message.Message): The protobuf message to be logged. - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. kw (Optional[dict]): additional keyword arguments for the entry. - See :class:`~google.cloud.logging_v2.entries.LogEntry`. + See :class:`~logging_v2.entries.LogEntry`. """ self._do_log(client, ProtobufEntry, message, **kw) - def delete(self, logger_name=None, client=None): + def delete(self, logger_name=None, *, client=None): """Delete all entries in a logger via a DELETE request See @@ -214,7 +214,7 @@ def delete(self, logger_name=None, client=None): ``"projects/my-project-id/logs/syslog"``, ``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``. If not passed, defaults to the project bound to the client. - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current logger. """ @@ -251,8 +251,8 @@ def list_entries( filter_ (Optional[str]): a filter expression. See https://cloud.google.com/logging/docs/view/advanced_filters By default, a 24 hour filter is applied. - order_by (Optional[str]): One of :data:`~google.cloud.logging_v2.ASCENDING` - or :data:`~google.cloud.logging_v2.DESCENDING`. + order_by (Optional[str]): One of :data:`~logging_v2.ASCENDING` + or :data:`~logging_v2.DESCENDING`. page_size (Optional[int]): Optional. The maximum number of entries in each page of results from this request. Non-positive values are ignored. Defaults @@ -265,7 +265,7 @@ def list_entries( the token. 
Returns:
-            Iterator[google.cloud.logging_v2.entries.LogEntry]
+            Iterator[~logging_v2.entries.LogEntry]
        """
        if resource_names is None:
@@ -287,17 +287,17 @@

 class Batch(object):
-    def __init__(self, logger, client, resource=None):
+    def __init__(self, logger, client, *, resource=None):
        """Context manager: collect entries to log via a single API call.

        Helper returned by :meth:`Logger.batch`

        Args:
-            logger (google.cloud.logging_v2.logger.Logger):
+            logger (logging_v2.logger.Logger):
                the logger to which entries will be logged.
-            client (google.cloud.logging_V2.client.Cilent):
+            client (~logging_v2.client.Client):
                The client to use.
-            resource (Optional[google.cloud.logging_v2.resource.Resource]):
+            resource (Optional[~logging_v2.resource.Resource]):
                Monitored resource of the batch, defaults
                to None, which requires that every entry should have a
                resource specified. Since the methods used to write
@@ -323,7 +323,7 @@ def log_empty(self, **kw):

        Args:
            kw (Optional[dict]): Additional keyword arguments for the entry.
-                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
+                See :class:`~logging_v2.entries.LogEntry`.
        """
        self.entries.append(LogEntry(**kw))
@@ -333,7 +333,7 @@ def log_text(self, text, **kw):

        Args:
            text (str): the text entry
            kw (Optional[dict]): Additional keyword arguments for the entry.
-                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
+                See :class:`~logging_v2.entries.LogEntry`.
        """
        self.entries.append(TextEntry(payload=text, **kw))
@@ -343,7 +343,7 @@ def log_struct(self, info, **kw):

        Args:
            info (dict): The struct entry,
            kw (Optional[dict]): Additional keyword arguments for the entry.
-                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
+                See :class:`~logging_v2.entries.LogEntry`.
        """
        self.entries.append(StructEntry(payload=info, **kw))
@@ -353,15 +353,15 @@ def log_proto(self, message, **kw):

        Args:
            message (google.protobuf.Message): The protobuf entry.
            kw (Optional[dict]): Additional keyword arguments for the entry.
-                See :class:`~google.cloud.logging_v2.entries.LogEntry`.
+                See :class:`~logging_v2.entries.LogEntry`.
        """
        self.entries.append(ProtobufEntry(payload=message, **kw))

-    def commit(self, client=None):
+    def commit(self, *, client=None):
        """Send saved log entries as a single API call.

        Args:
-            client (Optional[google.cloud.logging_v2.client.Client]):
+            client (Optional[~logging_v2.client.Client]):
                The client to use.  If not passed,
                falls back to the ``client`` stored on the current batch.
        """
diff --git a/google/cloud/logging_v2/metric.py b/google/cloud/logging_v2/metric.py
index 8f9268ea7..2959bacc2 100644
--- a/google/cloud/logging_v2/metric.py
+++ b/google/cloud/logging_v2/metric.py
@@ -18,18 +18,20 @@

 class Metric(object):
-    def __init__(self, name, *, filter_=None, client=None, description=""):
-        """Metrics represent named filters for log entries.
+    """Metrics represent named filters for log entries.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics
+    """

+    def __init__(self, name, *, filter_=None, client=None, description=""):
+        """
        Args:
            name (str): The name of the metric.
            filter_ (str): the advanced logs filter expression defining the
                entries tracked by the metric.  If not passed, the instance should
                already exist, to be refreshed via :meth:`reload`.
-            client (Optional[google.cloud.logging_v2.client.Client]): A client which holds
+            client (Optional[~logging_v2.client.Client]): A client which holds
                credentials and project configuration for the sink (which requires a project).
description (Optional[str]): An optional description of the metric. @@ -65,7 +67,7 @@ def from_api_repr(cls, resource, client): Args: resource (dict): metric resource representation returned from the API - client (google.cloud.logging_v2.client.Client): Client which holds + client (~logging_v2.client.Client): Client which holds credentials and project configuration for the sink. Returns: @@ -80,7 +82,7 @@ def _require_client(self, client): """Check client or verify over-ride. Also sets ``parent``. Args: - client (Union[None, google.cloud.logging_v2.client.Client]): + client (Union[None, ~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. @@ -99,7 +101,7 @@ def create(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. """ @@ -115,7 +117,7 @@ def exists(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. @@ -138,7 +140,7 @@ def reload(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. """ @@ -154,7 +156,7 @@ def update(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/update Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. """ @@ -170,7 +172,7 @@ def delete(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. """ diff --git a/google/cloud/logging_v2/resource.py b/google/cloud/logging_v2/resource.py index 637b795f7..eed5ca5fa 100644 --- a/google/cloud/logging_v2/resource.py +++ b/google/cloud/logging_v2/resource.py @@ -20,12 +20,10 @@ class Resource(collections.namedtuple("Resource", "type labels")): """A monitored resource identified by specifying values for all labels. - :type type: str - :param type: The resource type name. - - :type labels: dict - :param labels: A mapping from label names to values for all labels - enumerated in the associated :class:`ResourceDescriptor`. + Attributes: + type (str): The resource type name. + labels (dict): A mapping from label names to values for all labels + enumerated in the associated :class:`ResourceDescriptor`. """ __slots__ = () diff --git a/google/cloud/logging_v2/sink.py b/google/cloud/logging_v2/sink.py index 83841c3dd..43dd2208c 100644 --- a/google/cloud/logging_v2/sink.py +++ b/google/cloud/logging_v2/sink.py @@ -18,14 +18,15 @@ class Sink(object): + """Sinks represent filtered exports for log entries. 
+ + See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks + """ + def __init__( self, name, *, filter_=None, parent=None, destination=None, client=None ): - """Sinks represent filtered exports for log entries. - - See - https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks - + """ Args: name (str): The name of the sink. parent(Optional[str]): The resource in which to create the sink: @@ -36,13 +37,14 @@ def __init__( "organizations/[ORGANIZATION_ID]" "billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]". + Defaults to the project stored on the client. filter_ (Optional[str]): The advanced logs filter expression defining the entries exported by the sink. destination (Optional[str]): Destination URI for the entries exported by the sink. If not passed, the instance should already exist, to be refreshed via :meth:`reload`. - client (Optional[google.cloud.logging_v2.client.Client]): A client which holds + client (Optional[~logging_v2.client.Client]): A client which holds credentials and project configuration for the sink (which requires a project). """ self.name = name @@ -91,7 +93,7 @@ def from_api_repr(cls, resource, client, *, parent=None): Args: resource (dict): sink resource representation returned from the API - client (google.cloud.logging_v2.client.Client): Client which holds + client (~logging_v2.client.Client): Client which holds credentials and project configuration for the sink. parent(Optional[str]): The resource in which to create the sink: @@ -101,10 +103,11 @@ def from_api_repr(cls, resource, client, *, parent=None): "organizations/[ORGANIZATION_ID]" "billingAccounts/[BILLING_ACCOUNT_ID]" "folders/[FOLDER_ID]". + Defaults to the project stored on the client. Returns: - google.cloud.logging_v2.sink.Sink: Sink parsed from ``resource``. + ~logging_v2.sink.Sink: Sink parsed from ``resource``. Raises: ValueError: if ``client`` is not ``None`` and the @@ -120,12 +123,12 @@ def _require_client(self, client): """Check client or verify over-ride. Also sets ``parent``. Args: - client (Union[None, google.cloud.logging_v2.client.Client]): + client (Union[None, ~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. Returns: - google.cloud.logging_v2.client.Client: The client passed in + ~logging_v2.client.Client: The client passed in or the currently bound client. """ if client is None: @@ -139,7 +142,7 @@ def create(self, *, client=None, unique_writer_identity=False): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. unique_writer_identity (Optional[bool]): Determines the kind of @@ -162,7 +165,7 @@ def exists(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. @@ -185,7 +188,7 @@ def reload(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. 
""" @@ -200,7 +203,7 @@ def update(self, *, client=None, unique_writer_identity=False): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. unique_writer_identity (Optional[bool]): Determines the kind of @@ -222,7 +225,7 @@ def delete(self, *, client=None): https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete Args: - client (Optional[google.cloud.logging_v2.client.Client]): + client (Optional[~logging_v2.client.Client]): The client to use. If not passed, falls back to the ``client`` stored on the current sink. """ diff --git a/setup.py b/setup.py index 69b2b439b..87cb8f7f8 100644 --- a/setup.py +++ b/setup.py @@ -71,7 +71,6 @@ "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", @@ -83,7 +82,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=3.5", + python_requires=">=3.6", include_package_data=True, zip_safe=False, ) diff --git a/tests/unit/handlers/transports/test_background_thread.py b/tests/unit/handlers/transports/test_background_thread.py index db431637c..71d868d86 100644 --- a/tests/unit/handlers/transports/test_background_thread.py +++ b/tests/unit/handlers/transports/test_background_thread.py @@ -63,10 +63,15 @@ def test_send(self): python_logger_name, logging.INFO, None, None, message, None, None ) - transport.send(record, message, _GLOBAL_RESOURCE) + transport.send(record, message, resource=_GLOBAL_RESOURCE) transport.worker.enqueue.assert_called_once_with( - record, message, _GLOBAL_RESOURCE, None, trace=None, span_id=None + record, + message, + resource=_GLOBAL_RESOURCE, + labels=None, + trace=None, + span_id=None, ) def test_trace_send(self): @@ -85,10 +90,15 @@ def test_trace_send(self): python_logger_name, logging.INFO, None, None, message, None, None ) - transport.send(record, message, _GLOBAL_RESOURCE, trace=trace) + transport.send(record, message, resource=_GLOBAL_RESOURCE, trace=trace) transport.worker.enqueue.assert_called_once_with( - record, message, _GLOBAL_RESOURCE, None, trace=trace, span_id=None + record, + message, + resource=_GLOBAL_RESOURCE, + labels=None, + trace=trace, + span_id=None, ) def test_span_send(self): @@ -107,10 +117,15 @@ def test_span_send(self): python_logger_name, logging.INFO, None, None, message, None, None ) - transport.send(record, message, _GLOBAL_RESOURCE, span_id=span_id) + transport.send(record, message, resource=_GLOBAL_RESOURCE, span_id=span_id) transport.worker.enqueue.assert_called_once_with( - record, message, _GLOBAL_RESOURCE, None, trace=None, span_id=span_id + record, + message, + resource=_GLOBAL_RESOURCE, + labels=None, + trace=None, + span_id=span_id, ) def test_flush(self): @@ -210,7 +225,7 @@ def test_stop(self): self._start_with_thread_patch(worker) thread = worker._thread - worker.stop(grace_period) + worker.stop(grace_period=grace_period) self.assertEqual(worker._queue.qsize(), 1) self.assertEqual(worker._queue.get(), background_thread._WORKER_TERMINATOR) diff --git a/tests/unit/handlers/transports/test_base.py b/tests/unit/handlers/transports/test_base.py index 
b8977ace5..bff253f94 100644 --- a/tests/unit/handlers/transports/test_base.py +++ b/tests/unit/handlers/transports/test_base.py @@ -31,7 +31,7 @@ def _make_one(self, *args, **kw): def test_send_is_abstract(self): target = self._make_one() with self.assertRaises(NotImplementedError): - target.send(None, None, None) + target.send(None, None, resource=None) def test_flush_is_abstract_and_optional(self): target = self._make_one() diff --git a/tests/unit/test__http.py b/tests/unit/test__http.py index 23e018cab..0cf8dcfdd 100644 --- a/tests/unit/test__http.py +++ b/tests/unit/test__http.py @@ -49,8 +49,8 @@ def test_default_url(self): self.assertIs(conn._client, client) def test_build_api_url_w_custom_endpoint(self): - from six.moves.urllib.parse import parse_qsl - from six.moves.urllib.parse import urlsplit + from urllib.parse import parse_qsl + from urllib.parse import urlsplit custom_endpoint = "https://foo-logging.googleapis.com" conn = self._make_one(object(), api_endpoint=custom_endpoint) @@ -130,7 +130,6 @@ def _make_timestamp(): return NOW, _datetime_to_rfc3339_w_nanos(NOW) def test_list_entries_no_paging(self): - import six from google.cloud.logging_v2.client import Client from google.cloud.logging_v2.entries import TextEntry from google.cloud.logging_v2.logger import Logger @@ -159,7 +158,7 @@ def test_list_entries_no_paging(self): api = self._make_one(client) iterator = api.list_entries([self.PROJECT_PATH]) - page = six.next(iterator.pages) + page = next(iterator.pages) entries = list(page) token = iterator.next_page_token @@ -363,7 +362,6 @@ def test_ctor(self): self.assertEqual(api.api_request, connection.api_request) def test_list_sinks_no_paging(self): - import six from google.cloud.logging_v2.sink import Sink TOKEN = "TOKEN" @@ -382,7 +380,7 @@ def test_list_sinks_no_paging(self): api = self._make_one(client) iterator = api.list_sinks(self.PROJECT_PATH) - page = six.next(iterator.pages) + page = next(iterator.pages) sinks = list(page) token = iterator.next_page_token @@ -635,7 +633,6 @@ def _make_one(self, *args, **kw): return self._get_target_class()(*args, **kw) def test_list_metrics_no_paging(self): - import six from google.cloud.logging_v2.metric import Metric TOKEN = "TOKEN" @@ -648,7 +645,7 @@ def test_list_metrics_no_paging(self): api = self._make_one(client) iterator = api.list_metrics(self.PROJECT) - page = six.next(iterator.pages) + page = next(iterator.pages) metrics = list(page) token = iterator.next_page_token diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 2d20b71c2..29934c389 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -249,7 +249,6 @@ def test_logger(self): self.assertEqual(logger.project, self.PROJECT) def test_list_entries_defaults(self): - import six from google.cloud.logging_v2.entries import TextEntry IID = "IID" @@ -271,7 +270,7 @@ def test_list_entries_defaults(self): client._connection = _Connection(returned) iterator = client.list_entries() - page = six.next(iterator.pages) + page = next(iterator.pages) entries = list(page) token = iterator.next_page_token @@ -521,7 +520,6 @@ def test_sink_explicit(self): self.assertEqual(sink.parent, self.PROJECT_PATH) def test_list_sinks_no_paging(self): - import six from google.cloud.logging_v2.sink import Sink PROJECT = "PROJECT" @@ -538,7 +536,7 @@ def test_list_sinks_no_paging(self): client._connection = _Connection(returned) iterator = client.list_sinks() - page = six.next(iterator.pages) + page = next(iterator.pages) sinks = list(page) token = 
iterator.next_page_token @@ -671,7 +669,6 @@ def test_list_metrics_no_paging(self): ) def test_list_metrics_with_paging(self): - import six from google.cloud.logging_v2.metric import Metric token = "TOKEN" @@ -692,7 +689,7 @@ def test_list_metrics_with_paging(self): # Execute request. iterator = client.list_metrics(page_size=page_size, page_token=token) - page = six.next(iterator.pages) + page = next(iterator.pages) metrics = list(page) # First check the token. diff --git a/tests/unit/test_logger.py b/tests/unit/test_logger.py index a96df5464..853bcce22 100644 --- a/tests/unit/test_logger.py +++ b/tests/unit/test_logger.py @@ -486,7 +486,6 @@ def test_delete_w_alternate_client(self): ) def test_list_entries_defaults(self): - import six from google.cloud.logging_v2.client import Client TOKEN = "TOKEN" @@ -500,7 +499,7 @@ def test_list_entries_defaults(self): logger = self._make_one(self.LOGGER_NAME, client=client) iterator = logger.list_entries() - page = six.next(iterator.pages) + page = next(iterator.pages) entries = list(page) token = iterator.next_page_token From 24a25796c81e4dcc42b0816d3eebe38624fd3504 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Thu, 12 Nov 2020 03:29:33 +0000 Subject: [PATCH 55/58] docs: update migration guide, code-format sample --- UPGRADING.md | 331 ++++++++++------------ docs/UPGRADING.md | 1 + docs/index.rst | 11 + google/cloud/logging_v2/types/__init__.py | 35 +-- samples/snippets/README.rst | 12 +- samples/snippets/README.rst.in | 6 +- samples/snippets/export.py | 80 +++--- samples/snippets/export_test.py | 22 +- samples/snippets/handler.py | 6 +- samples/snippets/handler_test.py | 2 +- samples/snippets/noxfile.py | 47 ++- samples/snippets/quickstart.py | 8 +- samples/snippets/quickstart_test.py | 2 +- samples/snippets/snippets.py | 53 ++-- samples/snippets/snippets_test.py | 4 +- synth.metadata | 10 +- synth.py | 3 +- 17 files changed, 315 insertions(+), 318 deletions(-) create mode 120000 docs/UPGRADING.md diff --git a/UPGRADING.md b/UPGRADING.md index 2e50d1d53..b10c22443 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -13,7 +13,7 @@ The 2.0.0 release requires Python 3.6+. ## Primary Changes -This section lists the most relevant breaking changes in `google.cloud.logging`. +This section lists the most relevant changes in `google.cloud.logging`. See 'Changes in GAPIC Layer' if you were directly using `google.cloud.logging_v2.proto` or `google.cloud.logging_v2.gapic`. @@ -22,88 +22,123 @@ See 'Changes in GAPIC Layer' if you were directly using `google.cloud.logging_v2 Optional arguments are keyword-only arguments and *must* be passed by name. See [PEP 3102](https://www.python.org/dev/peps/pep-3102/). -```Before +```diff from google.cloud import logging +filter_ = "severity>=CRITICAL" +destination = "storage.googleapis.com/{bucket}".format(bucket=destination_bucket) logging_client = logging.Client() -logging_client.sink("my-sink") - +-sink = logging_client.sink(sink_name, filter_, destination) ++sink = logging_client.sink(sink_name, filter_=filter_, destination=destination) ``` ### Support for non-project resources -Where appropriate, sinks, entries, and metrics can be associated with non-project resources like folders and organizations. +Where appropriate, the library supports additional resource names. https://google.aip.dev/122 -Methods generally default to the project bound to the client. 
**Valid Resource Names**:

-This resulted in breaking changes to some methods which now expect full resource paths instead of just the name, to
-disambiguate the location of a resource.
+* `"projects/[PROJECT_ID]"`
+* `"organizations/[ORGANIZATION_ID]"`
+* `"billingAccounts/[BILLING_ACCOUNT_ID]"`
+* `"folders/[FOLDER_ID]"`

 #### `google.cloud.logging_v2.client.Client`

-`list_entries` accepts `resource_names`.
+> **WARNING**: Breaking change

-**After**:
-```py
+`list_entries` accepts an optional `resource_names` parameter. `projects` has been removed.
+
+
+```diff
 from google.cloud import logging_v2

 client = logging_v2.Client()
-client.list_entries(resource_names=["folders/myFolder", "projects/myProject"])
-client.list_entries() # defaults to project bound to client
+-client.list_entries(projects=["myProject"])
++client.list_entries(resource_names=["projects/myProject", "folders/myFolder"])
+client.list_entries() # defaults to project bound to client
 ```

-`list_sinks` accept a `parent` parameter which is expected to be a single resource path.
+`list_sinks` accepts an optional `parent` parameter.

-
-
-**After**:
 ```py
 from google.cloud import logging_v2

-client = loggign_v2.Client()
-client.list_sinks(parent="folder/myFolder")
-client.list_sinks() # defaults to project bound to client
+client = logging_v2.Client()
+client.list_sinks() # lists sinks in current project
+client.list_sinks(parent="folders/myFolder") # specify a different parent resource
 ```

 #### `google.cloud.logging_v2.logger.Logger`

-`list_entries` accepts `resource_names`.
+> **WARNING**: Breaking change

-**After**:
-```py
+`list_entries` accepts an optional `resource_names` parameter. `projects` has been removed.
+
+```diff
 from google.cloud import logging_v2

 client = logging_v2.Client()
 logger = logging_v2.Logger("myLog", client)
-logger.list_entries(resource_names=["folders/myFolder", "projects/myProject"])
-logger.list_entries() # defaults to project bound to client
+- logger.list_entries(projects=["myProject"])
++ logger.list_entries(resource_names=["projects/myProject", "folders/myFolder"])
+logger.list_entries() # defaults to project bound to client
 ```

+#### `google.cloud.logging_v2.sink.Sink`

+> **WARNING**: Breaking change

+* Sinks no longer have a `project` property. The attribute is replaced by `parent`.

-#### `google.cloud.loggign_v2.sinks.Sink`
+```diff
+from google.cloud import logging_v2

-> **WARNING**: Breaking change
-* Sinks no longer have a `project` property. The attribute is replaced by `parent` (e.g. `projects/my-project`)
+client = logging_v2.Client(project="myProject")
+sink = logging_v2.Sink("mySink", client=client)
+-project = sink.project # myProject
++parent = sink.parent # projects/myProject
+```

 ### `google.cloud.logging` is an alias for `google.cloud.logging_v2`

-`google.cloud.logging` serves as a default alias for `google.cloud.logging_v2`.
+> **WARNING**: Breaking change

 All library code has been moved to `google.cloud.logging_v2`.

+`google.cloud.logging` serves as a default alias for `google.cloud.logging_v2`.
+
 ## Changes in GAPIC layer

 This section describes changes in the GAPIC layer (produced by the generator) that previously lived in `google.cloud.logging_v2.proto` / `google.cloud.logging_v2.gapic`.

-Most users are unlikely to have been using this layer directly.
-## Method Calls
+**NOTE**: Most users are unlikely to have been using this layer directly.
+
+### Import path
+
+> **WARNING**: Breaking change
+
+The generated client is no longer exposed at `logging_v2`.
+**Before** +```py +from google.cloud import logging_v2 +logging_client = logging_v2.LoggingServiceV2Client() +``` + +**After** + +```py +from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client +from google.cloud.logging_v2.types import LogSink + +logging_client = LoggingServiceV2Client() +sink = LogSink() +``` +### Method Calls > **WARNING**: Breaking change @@ -115,52 +150,43 @@ Methods expect request objects. We provide a script that will convert most commo python3 -m pip install google-cloud-logging libcst ``` -* The script `fixup_automl_{version}_keywords.py` is shipped with the library. It expects +* The script `fixup_logging_v2_keywords.py` is shipped with the library. It expects an input directory (with the code to convert) and an empty destination directory. ```sh -$ fixup_automl_v1_keywords.py --input-directory .samples/ --output-directory samples/ +$ fixup_logging_v2_keywords.py --input-directory .samples/ --output-directory samples/ ``` **Before:** ```py -from google.cloud import automl - -project_id = "YOUR_PROJECT_ID" -model_id = "YOUR_MODEL_ID" +from google.cloud import logging_v2 -client = automl.AutoMlClient() -# Get the full path of the model. -model_full_id = client.model_path(project_id, "us-central1", model_id) -response = client.deploy_model(model_full_id) +client = logging_v2.LoggingServiceV2Client() +client.list_log_entries(["projects/myProject"], filter_ = "severity>=CRITICAL") ``` **After:** ```py -from google.cloud import automl - -project_id = "YOUR_PROJECT_ID" -model_id = "YOUR_MODEL_ID" +from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client -client = automl.AutoMlClient() -# Get the full path of the model. -model_full_id = client.model_path(project_id, "us-central1", model_id) -response = client.deploy_model(name=model_full_id) +client = LoggingServiceV2Client() +client.list_log_entries({"resource_names": ["projects/myProject"], "filter": "severity>=CRITICAL"}) ``` -### More Details +#### More Details In `google-cloud-logging<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. **Before:** ```py - def batch_predict( + def list_log_entries( self, - name, - input_config, - output_config, - params=None, + resource_names, + project_ids=None, + filter_=None, + order_by=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -174,18 +200,17 @@ Some methods have additional keyword only parameters. The available parameters d **After:** ```py -def batch_predict( + def list_log_entries( self, - request: prediction_service.BatchPredictRequest = None, + request: logging.ListLogEntriesRequest = None, *, - name: str = None, - input_config: io.BatchPredictInputConfig = None, - output_config: io.BatchPredictOutputConfig = None, - params: Sequence[prediction_service.BatchPredictRequest.ParamsEntry] = None, + resource_names: Sequence[str] = None, + filter: str = None, + order_by: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: + ) -> pagers.ListLogEntriesPager: ``` > **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. 
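As an illustration of the request-object style introduced here, the same call can also be built with a typed request from `google.cloud.logging_v2.types` instead of a dict (hypothetical filter value):

```py
from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client
from google.cloud.logging_v2.types import ListLogEntriesRequest

client = LoggingServiceV2Client()

# A typed request object; a dict with the same keys is also accepted,
# since proto-plus coerces both forms.
request = ListLogEntriesRequest(
    resource_names=["projects/myProject"],
    filter="severity>=CRITICAL",
)
response = client.list_log_entries(request=request)
```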
@@ -195,167 +220,115 @@ def batch_predict(

 Both of these calls are valid:

 ```py
-response = client.batch_predict(
+response = client.list_log_entries(
     request={
-        "name": name,
-        "input_config": input_config,
-        "output_config": output_config,
-        "params": params,
+        "resource_names": resource_names,
+        "filter": filter_,
+        "order_by": order_by,
     }
 )
 ```

 ```py
-response = client.batch_predict(
-    name=name,
-    input_config=input_config,
-    output_config=output_config,
-    params=params,
+response = client.list_log_entries(
+    resource_names=resource_names,
+    filter=filter_,
+    order_by=order_by,
 )
 ```

-This call is invalid because it mixes `request` with a keyword argument `params`. Executing this code
-will result in an error.
+This call is invalid because it mixes `request` with a keyword argument `order_by`. Executing this code will result in an error.

 ```py
-response = client.batch_predict(
+response = client.list_log_entries(
     request={
-        "name": name,
-        "input_config": input_config,
-        "output_config": output_config,
-    },
-    params=params,
+        "resource_names": resource_names,
+        "filter": filter_,
+    },
+    order_by=order_by
 )
 ```

+### `filter` parameter

-The method `list_datasets` takes an argument `filter` instead of `filter_`.
+Methods that took parameter `filter_` now expect `filter`.

 **Before**
 ```py
-from google.cloud import automl
-
-project_id = "PROJECT_ID"
-
-client = automl.AutoMlClient()
-project_location = client.location_path(project_id, "us-central1")
-
-# List all the datasets available in the region.
-response = client.list_datasets(project_location, filter_="")
-```
-
-**After**
-```py
-from google.cloud import automl
-
-project_id = "PROJECT_ID"
-client = automl.AutoMlClient()
-# A resource that represents Google Cloud Platform location.
-project_location = f"projects/{project_id}/locations/us-central1"
-
-# List all the datasets available in the region.
-response = client.list_datasets(parent=project_location, filter="")
-```
-
-### Changes to v1beta1 Tables Client
-
-Optional arguments are now keyword-only arguments and *must* be passed by name.
-See [PEP 3102](https://www.python.org/dev/peps/pep-3102/).
-
-***Before**
-```py
-    def predict(
-        self,
-        inputs,
-        model=None,
-        model_name=None,
-        model_display_name=None,
-        feature_importance=False,
-        project=None,
-        region=None,
-        **kwargs
-    ):
-```
-
-**After**
-```py
-    def predict(
-        self,
-        inputs,
-        *,
-        model=None,
-        model_name=None,
-        model_display_name=None,
-        feature_importance=False,
-        project=None,
-        region=None,
-        **kwargs,
-    ):
-```
-
-**kwargs passed to methods must be either (1) kwargs on the underlying method (`retry`, `timeout`, or `metadata`) or (2) attributes of the request object.
-
-The following call is valid because `filter` is an attribute of `automl_v1beta1.ListDatasetsRequest`.
+**Before**

 ```py
-from google.cloud import automl_v1beta1 as automl
-
-client = automl.TablesClient(project=project_id, region=compute_region)
+from google.cloud import logging_v2

-# List all the datasets available in the region by applying filter.
-response = client.list_datasets(filter=filter)
 ```
 
-
-## Enums and types
+### Enums
 
 > **WARNING**: Breaking change
 
-The submodule `enums` and `types` have been removed.
+The submodule `enums` has been removed. Enums can be accessed under `types`.
 
 **Before:**
 ```py
+from google.cloud import logging_v2
 
-from google.cloud import automl
-
-gcs_source = automl.types.GcsSource(input_uris=["gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"])
-deployment_state = automl.enums.Model.DeploymentState.DEPLOYED
+severity = logging_v2.enums.LogSeverity.DEFAULT
 ```
 
 **After:**
```py
-from google.cloud import automl
+from google.cloud import logging_v2
 
-gcs_source = automl.GcsSource(input_uris=["gs://YOUR_BUCKET_ID/path/to/your/input/csv_or_jsonl"])
-deployment_state = automl.Model.DeploymentState.DEPLOYED
+severity = logging_v2.types.LogSeverity.DEFAULT
 ```
 
-## Resource Path Helper Methods
+### Resource Path Helper Methods
 
 The following resource name helpers have been removed. Please construct the strings manually.
 
 ```py
-from google.cloud import automl
-
-project = "my-project"
-location = "us-central1"
-dataset = "my-dataset"
-model = "my-model"
-annotation_spec = "test-annotation"
-model_evaluation = "test-evaluation"
-
-# AutoMlClient
-annotation_spec_path = f"projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
-location_path = f"projects/{project}/locations/{location}"
-model_evaluation_path = f"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}",
-
-# PredictionServiceClient
-model_path = f"projects/{project}/locations/{location}/models/{model}"
-# alternatively you can use `model_path` from AutoMlClient
-model_path = automl.AutoMlClient.model_path(project_id, location, model_id)
-
-```
\ No newline at end of file
+billing_account = "my-billing-account"
+folder = "my-folder"
+organization = "my-organization"
+log = "my-log"
+
+exclusion = "my-exclusion"
+sink = "my-sink"
+
+# LoggingServiceV2Client
+billing_log_path = f"billingAccounts/{billing_account}/logs/{log}"
+folder_log_path = f"folders/{folder}/logs/{log}"
+organization_log_path = f"organizations/{organization}/logs/{log}"
+
+# ConfigServiceV2Client
+billing_exclusion_path = f"billingAccounts/{billing_account}/exclusions/{exclusion}"
+billing_sink_path = f"billingAccounts/{billing_account}/sinks/{sink}"
+folder_exclusion_path = f"folders/{folder}/exclusions/{exclusion}"
+folder_sink_path = f"folders/{folder}/sinks/{sink}"
+organization_exclusion_path = f"organizations/{organization}/exclusions/{exclusion}"
+organization_sink_path = f"organizations/{organization}/sinks/{sink}"
+```
+
+The following resource name helpers have been renamed.
+ +**All Clients** +* `billing_path` -> `common_billing_account_path` +* `folder_path` -> `common_folder_path` +* `organization_path` -> `common_organization_path` +* `project_path` -> `common_project_path` + +**`ConfigServiceV2Client`** +* `sink_path` -> `log_sink_path` +* `exclusion_path` -> `log_exclusion_path` \ No newline at end of file diff --git a/docs/UPGRADING.md b/docs/UPGRADING.md new file mode 120000 index 000000000..01097c8c0 --- /dev/null +++ b/docs/UPGRADING.md @@ -0,0 +1 @@ +../UPGRADING.md \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 347dc9f81..64c2dcd1e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,6 +6,17 @@ Documentation :maxdepth: 3 v2 + + +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. toctree:: + :maxdepth: 2 + + UPGRADING Changelog ~~~~~~~~~ diff --git a/google/cloud/logging_v2/types/__init__.py b/google/cloud/logging_v2/types/__init__.py index ab5f9c8c1..55161ba5f 100644 --- a/google/cloud/logging_v2/types/__init__.py +++ b/google/cloud/logging_v2/types/__init__.py @@ -44,6 +44,16 @@ GetCmekSettingsRequest, UpdateCmekSettingsRequest, CmekSettings, + LifecycleState, +) +from .logging_metrics import ( + LogMetric, + ListLogMetricsRequest, + ListLogMetricsResponse, + GetLogMetricRequest, + CreateLogMetricRequest, + UpdateLogMetricRequest, + DeleteLogMetricRequest, ) from .logging import ( DeleteLogRequest, @@ -57,16 +67,6 @@ ListLogsRequest, ListLogsResponse, ) -from .logging_metrics import ( - LogMetric, - ListLogMetricsRequest, - ListLogMetricsResponse, - GetLogMetricRequest, - CreateLogMetricRequest, - UpdateLogMetricRequest, - DeleteLogMetricRequest, -) - __all__ = ( "LogEntry", @@ -95,6 +95,14 @@ "GetCmekSettingsRequest", "UpdateCmekSettingsRequest", "CmekSettings", + "LifecycleState", + "LogMetric", + "ListLogMetricsRequest", + "ListLogMetricsResponse", + "GetLogMetricRequest", + "CreateLogMetricRequest", + "UpdateLogMetricRequest", + "DeleteLogMetricRequest", "DeleteLogRequest", "WriteLogEntriesRequest", "WriteLogEntriesResponse", @@ -105,11 +113,4 @@ "ListMonitoredResourceDescriptorsResponse", "ListLogsRequest", "ListLogsResponse", - "LogMetric", - "ListLogMetricsRequest", - "ListLogMetricsResponse", - "GetLogMetricRequest", - "CreateLogMetricRequest", - "UpdateLogMetricRequest", - "DeleteLogMetricRequest", ) diff --git a/samples/snippets/README.rst b/samples/snippets/README.rst index d60cd0a3b..6990573d9 100644 --- a/samples/snippets/README.rst +++ b/samples/snippets/README.rst @@ -1,19 +1,19 @@ .. This file is automatically generated. Do not edit this file directly. -Stackdriver Logging Python Samples +Cloud Logging Python Samples =============================================================================== .. image:: https://gstatic.com/cloudssh/images/open-btn.png :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=logging/cloud-client/README.rst -This directory contains samples for Stackdriver Logging. `Stackdriver Logging`_ allows you to store, search, analyze, monitor, and alert on log data and events from Google Cloud Platform and Amazon Web Services. +This directory contains samples for Cloud Logging. `Cloud Logging`_ allows you to store, search, analyze, monitor, and alert on log data and events from Google Cloud Platform and Amazon Web Services. -.. _Stackdriver Logging: https://cloud.google.com/logging/docs +.. 
_Cloud Logging: https://cloud.google.com/logging/docs Setup @@ -37,11 +37,11 @@ credentials for applications. Install Dependencies ++++++++++++++++++++ -#. Clone python-docs-samples and change directory to the sample directory you want to use. +#. Clone python-logging and change directory to the sample directory you want to use. .. code-block:: bash - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + $ git clone https://github.com/googleapis/python-logging.git #. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. @@ -110,7 +110,7 @@ To run this sample: usage: snippets.py [-h] logger_name {list,write,delete} ... This application demonstrates how to perform basic operations on logs and - log entries with Stackdriver Logging. + log entries with Cloud Logging. For more information, see the README.md under /logging and the documentation at https://cloud.google.com/logging/docs. diff --git a/samples/snippets/README.rst.in b/samples/snippets/README.rst.in index 00fa4b6b8..ff243c1ce 100644 --- a/samples/snippets/README.rst.in +++ b/samples/snippets/README.rst.in @@ -1,11 +1,11 @@ # This file is used to generate README.rst product: - name: Stackdriver Logging - short_name: Stackdriver Logging + name: Cloud Logging + short_name: Cloud Logging url: https://cloud.google.com/logging/docs description: > - `Stackdriver Logging`_ allows you to store, search, analyze, monitor, + `Cloud Logging`_ allows you to store, search, analyze, monitor, and alert on log data and events from Google Cloud Platform and Amazon Web Services. diff --git a/samples/snippets/export.py b/samples/snippets/export.py index 63e31abb3..9a0673ee7 100644 --- a/samples/snippets/export.py +++ b/samples/snippets/export.py @@ -27,10 +27,12 @@ def list_sinks(): sinks = list(logging_client.list_sinks()) if not sinks: - print('No sinks.') + print("No sinks.") for sink in sinks: - print('{}: {} -> {}'.format(sink.name, sink.filter_, sink.destination)) + print("{}: {} -> {}".format(sink.name, sink.filter_, sink.destination)) + + # [END logging_list_sinks] @@ -50,21 +52,18 @@ def create_sink(sink_name, destination_bucket, filter_): # or a BigQuery dataset. In this case, it is a Cloud Storage Bucket. # See https://cloud.google.com/logging/docs/api/tasks/exporting-logs for # information on the destination format. 
-    destination = 'storage.googleapis.com/{bucket}'.format(
-        bucket=destination_bucket)
+    destination = "storage.googleapis.com/{bucket}".format(bucket=destination_bucket)
 
-    sink = logging_client.sink(
-        sink_name,
-        filter_=filter_,
-        destination=destination
-    )
+    sink = logging_client.sink(sink_name, filter_=filter_, destination=destination)
 
     if sink.exists():
-        print('Sink {} already exists.'.format(sink.name))
+        print("Sink {} already exists.".format(sink.name))
         return
 
     sink.create()
-    print('Created sink {}'.format(sink.name))
+    print("Created sink {}".format(sink.name))
+
+
 # [END logging_create_sink]
 
 
@@ -84,8 +83,10 @@ def update_sink(sink_name, filter_):
     sink.reload()
 
     sink.filter_ = filter_
-    print('Updated sink {}'.format(sink.name))
+    print("Updated sink {}".format(sink.name))
     sink.update()
+
+
 # [END logging_update_sink]
 
 
@@ -97,50 +98,41 @@ def delete_sink(sink_name):
 
     sink.delete()
 
-    print('Deleted sink {}'.format(sink.name))
+    print("Deleted sink {}".format(sink.name))
+
+
 # [END logging_delete_sink]
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
     )
 
-    subparsers = parser.add_subparsers(dest='command')
-    subparsers.add_parser('list', help=list_sinks.__doc__)
+    subparsers = parser.add_subparsers(dest="command")
+    subparsers.add_parser("list", help=list_sinks.__doc__)
 
-    create_parser = subparsers.add_parser('create', help=list_sinks.__doc__)
-    create_parser.add_argument(
-        'sink_name',
-        help='Name of the log export sink.')
-    create_parser.add_argument(
-        'destination_bucket',
-        help='Cloud Storage bucket where logs will be exported.')
+    create_parser = subparsers.add_parser("create", help=create_sink.__doc__)
+    create_parser.add_argument("sink_name", help="Name of the log export sink.")
     create_parser.add_argument(
-        'filter',
-        help='The filter used to match logs.')
-
-    update_parser = subparsers.add_parser('update', help=update_sink.__doc__)
-    update_parser.add_argument(
-        'sink_name',
-        help='Name of the log export sink.')
-    update_parser.add_argument(
-        'filter',
-        help='The filter used to match logs.')
-
-    delete_parser = subparsers.add_parser('delete', help=delete_sink.__doc__)
-    delete_parser.add_argument(
-        'sink_name',
-        help='Name of the log export sink.')
+        "destination_bucket", help="Cloud Storage bucket where logs will be exported."
+ ) + create_parser.add_argument("filter", help="The filter used to match logs.") + + update_parser = subparsers.add_parser("update", help=update_sink.__doc__) + update_parser.add_argument("sink_name", help="Name of the log export sink.") + update_parser.add_argument("filter", help="The filter used to match logs.") + + delete_parser = subparsers.add_parser("delete", help=delete_sink.__doc__) + delete_parser.add_argument("sink_name", help="Name of the log export sink.") args = parser.parse_args() - if args.command == 'list': + if args.command == "list": list_sinks() - elif args.command == 'create': + elif args.command == "create": create_sink(args.sink_name, args.destination_bucket, args.filter) - elif args.command == 'update': + elif args.command == "update": update_sink(args.sink_name, args.filter) - elif args.command == 'delete': + elif args.command == "delete": delete_sink(args.sink_name) diff --git a/samples/snippets/export_test.py b/samples/snippets/export_test.py index aea9889dc..b1ecf4923 100644 --- a/samples/snippets/export_test.py +++ b/samples/snippets/export_test.py @@ -23,15 +23,15 @@ import export -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -TEST_SINK_NAME_TMPL = 'example_sink_{}' -TEST_SINK_FILTER = 'severity>=CRITICAL' +BUCKET = os.environ["CLOUD_STORAGE_BUCKET"] +TEST_SINK_NAME_TMPL = "example_sink_{}" +TEST_SINK_FILTER = "severity>=CRITICAL" def _random_id(): - return ''.join( - random.choice(string.ascii_uppercase + string.digits) - for _ in range(6)) + return "".join( + random.choice(string.ascii_uppercase + string.digits) for _ in range(6) + ) @pytest.yield_fixture @@ -41,7 +41,8 @@ def example_sink(): sink = client.sink( TEST_SINK_NAME_TMPL.format(_random_id()), filter_=TEST_SINK_FILTER, - destination='storage.googleapis.com/{bucket}'.format(bucket=BUCKET)) + destination="storage.googleapis.com/{bucket}".format(bucket=BUCKET), + ) sink.create() @@ -67,10 +68,7 @@ def test_create(capsys): sink_name = TEST_SINK_NAME_TMPL.format(_random_id()) try: - export.create_sink( - sink_name, - BUCKET, - TEST_SINK_FILTER) + export.create_sink(sink_name, BUCKET, TEST_SINK_FILTER) # Clean-up the temporary sink. finally: try: @@ -83,7 +81,7 @@ def test_create(capsys): def test_update(example_sink, capsys): - updated_filter = 'severity>=INFO' + updated_filter = "severity>=INFO" export.update_sink(example_sink.name, updated_filter) example_sink.reload() diff --git a/samples/snippets/handler.py b/samples/snippets/handler.py index d59458425..9a63d022f 100644 --- a/samples/snippets/handler.py +++ b/samples/snippets/handler.py @@ -36,14 +36,14 @@ def use_logging_handler(): import logging # The data to log - text = 'Hello, world!' + text = "Hello, world!" 
# Emits the data using the standard logging module logging.warning(text) # [END logging_handler_usage] - print('Logged: {}'.format(text)) + print("Logged: {}".format(text)) -if __name__ == '__main__': +if __name__ == "__main__": use_logging_handler() diff --git a/samples/snippets/handler_test.py b/samples/snippets/handler_test.py index d48ee2e20..9d635806a 100644 --- a/samples/snippets/handler_test.py +++ b/samples/snippets/handler_test.py @@ -19,4 +19,4 @@ def test_handler(capsys): handler.use_logging_handler() out, _ = capsys.readouterr() - assert 'Logged' in out + assert "Logged" in out diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py index 01686e4a0..ab2c49227 100644 --- a/samples/snippets/noxfile.py +++ b/samples/snippets/noxfile.py @@ -37,24 +37,25 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": ["2.7"], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -69,12 +70,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -83,7 +84,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -132,17 +133,33 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - session.install("flake8", "flake8-import-order") + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session): + session.install("black") + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -182,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/samples/snippets/quickstart.py b/samples/snippets/quickstart.py index 19409c776..7c38ea6fa 100644 --- a/samples/snippets/quickstart.py +++ b/samples/snippets/quickstart.py @@ -24,19 +24,19 @@ def run_quickstart(): logging_client = logging.Client() # The name of the log to write to - log_name = 'my-log' + log_name = "my-log" # Selects the log to write to logger = logging_client.logger(log_name) # The data to log - text = 'Hello, world!' + text = "Hello, world!" # Writes the log entry logger.log_text(text) - print('Logged: {}'.format(text)) + print("Logged: {}".format(text)) # [END logging_quickstart] -if __name__ == '__main__': +if __name__ == "__main__": run_quickstart() diff --git a/samples/snippets/quickstart_test.py b/samples/snippets/quickstart_test.py index 1b49cd126..d8ace2cbc 100644 --- a/samples/snippets/quickstart_test.py +++ b/samples/snippets/quickstart_test.py @@ -19,4 +19,4 @@ def test_quickstart(capsys): quickstart.run_quickstart() out, _ = capsys.readouterr() - assert 'Logged' in out + assert "Logged" in out diff --git a/samples/snippets/snippets.py b/samples/snippets/snippets.py index 78f67e8a9..af52c462c 100644 --- a/samples/snippets/snippets.py +++ b/samples/snippets/snippets.py @@ -35,19 +35,23 @@ def write_entry(logger_name): logger = logging_client.logger(logger_name) # Make a simple text log - logger.log_text('Hello, world!') + logger.log_text("Hello, world!") # Simple text log with severity. - logger.log_text('Goodbye, world!', severity='ERROR') + logger.log_text("Goodbye, world!", severity="ERROR") # Struct log. The struct can be any JSON-serializable dictionary. 
- logger.log_struct({ - 'name': 'King Arthur', - 'quest': 'Find the Holy Grail', - 'favorite_color': 'Blue' - }) + logger.log_struct( + { + "name": "King Arthur", + "quest": "Find the Holy Grail", + "favorite_color": "Blue", + } + ) + + print("Wrote logs to {}.".format(logger.name)) + - print('Wrote logs to {}.'.format(logger.name)) # [END logging_write_log_entry] @@ -57,12 +61,13 @@ def list_entries(logger_name): logging_client = logging.Client() logger = logging_client.logger(logger_name) - print('Listing entries for logger {}:'.format(logger.name)) + print("Listing entries for logger {}:".format(logger.name)) for entry in logger.list_entries(): timestamp = entry.timestamp.isoformat() - print('* {}: {}'.format - (timestamp, entry.payload)) + print("* {}: {}".format(timestamp, entry.payload)) + + # [END logging_list_log_entries] @@ -77,27 +82,27 @@ def delete_logger(logger_name): logger.delete() - print('Deleted all logging entries for {}'.format(logger.name)) + print("Deleted all logging entries for {}".format(logger.name)) + + # [END logging_delete_log] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter ) - parser.add_argument( - 'logger_name', help='Logger name', default='example_log') - subparsers = parser.add_subparsers(dest='command') - subparsers.add_parser('list', help=list_entries.__doc__) - subparsers.add_parser('write', help=write_entry.__doc__) - subparsers.add_parser('delete', help=delete_logger.__doc__) + parser.add_argument("logger_name", help="Logger name", default="example_log") + subparsers = parser.add_subparsers(dest="command") + subparsers.add_parser("list", help=list_entries.__doc__) + subparsers.add_parser("write", help=write_entry.__doc__) + subparsers.add_parser("delete", help=delete_logger.__doc__) args = parser.parse_args() - if args.command == 'list': + if args.command == "list": list_entries(args.logger_name) - elif args.command == 'write': + elif args.command == "write": write_entry(args.logger_name) - elif args.command == 'delete': + elif args.command == "delete": delete_logger(args.logger_name) diff --git a/samples/snippets/snippets_test.py b/samples/snippets/snippets_test.py index 1d1d01972..479f742ae 100644 --- a/samples/snippets/snippets_test.py +++ b/samples/snippets/snippets_test.py @@ -22,14 +22,14 @@ import snippets -TEST_LOGGER_NAME = 'example_log_{}'.format(uuid.uuid4().hex) +TEST_LOGGER_NAME = "example_log_{}".format(uuid.uuid4().hex) @pytest.fixture def example_log(): client = logging.Client() logger = client.logger(TEST_LOGGER_NAME) - text = 'Hello, world.' + text = "Hello, world." 
logger.log_text(text) return text diff --git a/synth.metadata b/synth.metadata index 8c0cf4d5f..493b1a2b5 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,29 +4,29 @@ "git": { "name": ".", "remote": "git@github.com:googleapis/python-logging", - "sha": "231474afcd5a84549d68e8b7ae9c0c912b43431b" + "sha": "8466c62f459af6c2d89b411297df06988e45b522" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "7f31f40209008ad24058579e7112e45fc9d5715e", - "internalRef": "339939234" + "sha": "4b0ad15b0ff483486ae90d73092e7be00f8c1848", + "internalRef": "341842584" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "b19b401571e77192f8dd38eab5fb2300a0de9324" + "sha": "7db8a6c5ffb12a6e4c2f799c18f00f7f3d60e279" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "b19b401571e77192f8dd38eab5fb2300a0de9324" + "sha": "7db8a6c5ffb12a6e4c2f799c18f00f7f3d60e279" } } ], diff --git a/synth.py b/synth.py index ed90592e7..cbfc901d4 100644 --- a/synth.py +++ b/synth.py @@ -38,7 +38,6 @@ "google/cloud/logging/__init__.py", # generated types are hidden from users "google/cloud/logging_v2/__init__.py", "docs/index.rst", - "docs/multiprocessing.rst", "docs/logging_v2", # Don't include gapic library docs. Users should use the hand-written layer instead "scripts/fixup_logging_v2_keywords.py", # don't include script since it only works for generated layer ], @@ -67,7 +66,7 @@ unit_test_external_dependencies=["flask", "webob", "django"], samples=True, ) -s.move(templated_files, excludes=[".coveragerc"]) +s.move(templated_files, excludes=[".coveragerc", "docs/multiprocessing.rst"]) # -------------------------------------------------------------------------- # Samples templates From c45712e67c372694dc9cb6480df520d9a0560304 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim Date: Thu, 12 Nov 2020 03:35:49 +0000 Subject: [PATCH 56/58] docs: minor tweaks --- UPGRADING.md | 7 ++++--- google/cloud/logging_v2/_helpers.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/UPGRADING.md b/UPGRADING.md index b10c22443..f9fa75cfd 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -116,13 +116,14 @@ All library code has been moved to `google.cloud.logging_v2`. This section describes changes in the GAPIC layer (produced by the generator) that previously lived in `google.cloud.logging_v2.proto` / `google.cloud.logging_v2.gapic`. -**NOTE**: Most users are unlikely to have been using this layer directly. +> **NOTE**: Most users are unlikely to have been using this layer directly. ### Import path > **WARNING**: Breaking change The generated client is no longer exposed at `logging_v2`. + **Before** ```py from google.cloud import logging_v2 @@ -281,7 +282,7 @@ The submodule `enums` has been removed. Enums can be accessed under `types`. 
```py
 from google.cloud import logging_v2
 
-severity = logging_v2.enums.LogSeverity.DEFAULT
+state = logging_v2.enums.LifecycleState.ACTIVE
 ```
 
@@ -289,7 +290,7 @@ severity = logging_v2.enums.LogSeverity.DEFAULT
 ```py
 from google.cloud import logging_v2
 
-severity = logging_v2.types.LogSeverity.DEFAULT
+state = logging_v2.types.LifecycleState.ACTIVE
 ```
 
diff --git a/google/cloud/logging_v2/_helpers.py b/google/cloud/logging_v2/_helpers.py
index 555d8ed8c..51cc64868 100644
--- a/google/cloud/logging_v2/_helpers.py
+++ b/google/cloud/logging_v2/_helpers.py
@@ -28,7 +28,7 @@
 from google.cloud.logging_v2.entries import TextEntry
 
 try:
-    from google.cloud.logging_v2.gapic.enums import LogSeverity
+    from google.cloud.logging_v2.types import LogSeverity
 except ImportError:  # pragma: NO COVER
 
     class LogSeverity(object):

From 8fccca85d2ce9bed5479839af7b72968f7b5e57d Mon Sep 17 00:00:00 2001
From: Bu Sun Kim
Date: Tue, 17 Nov 2020 21:39:47 +0000
Subject: [PATCH 57/58] docs: address reviewer feedback for migration doc

---
 UPGRADING.md | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/UPGRADING.md b/UPGRADING.md
index f9fa75cfd..af7461dda 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -122,7 +122,9 @@ This section describes changes in the GAPIC layer (produced by the generator) th
 
 > **WARNING**: Breaking change
 
-The generated client is no longer exposed at `logging_v2`.
+The generated client is no longer exposed at the top of the `google.cloud.logging_v2` namespace; that namespace now exposes the handwritten surface, which we expect most users to use. See the [Cloud Logging How-to Guides](https://cloud.google.com/logging/docs/how-to).
+
+If you would like to continue using the generated surface, adjust your imports:
 
 **Before**
 ```py
@@ -143,7 +145,7 @@ sink = LogSink()
 
 > **WARNING**: Breaking change
 
-Methods expect request objects. We provide a script that will convert most common use cases.
+Methods expect request objects. We provide a script that will convert most common use cases. This script will *only* convert code written for the generated clients previously exposed at `google.cloud.logging_v2`, such as `LoggingServiceV2Client`.
 
 * Install the library and `libcst`. `libcst` is required to run the fixup script.
 
@@ -263,7 +265,7 @@ client.list_log_entries(["projects/myProject"], filter_="severity>=CRITICAL")
 ```
 
-**Before**
+**After**
 ```py
 from google.cloud.logging_v2.services.logging_service_v2 import LoggingServiceV2Client
 
From bcea58faf14998c2115f40348df787bf192fdf4a Mon Sep 17 00:00:00 2001
From: Bu Sun Kim
Date: Tue, 17 Nov 2020 21:55:41 +0000
Subject: [PATCH 58/58] chore: fix lint in export.py

---
 samples/snippets/export.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/samples/snippets/export.py b/samples/snippets/export.py
index 9826ff51b..9a0673ee7 100644
--- a/samples/snippets/export.py
+++ b/samples/snippets/export.py
@@ -56,7 +56,6 @@ def create_sink(sink_name, destination_bucket, filter_):
 
     sink = logging_client.sink(sink_name, filter_=filter_, destination=destination)
 
-
     if sink.exists():
         print("Sink {} already exists.".format(sink.name))
         return
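
As a quick check of the path-helper renames listed in `UPGRADING.md` above, the sketch below maps two 1.x helpers to their 2.x names. The resource names (`my-project`, `my-sink`) are placeholders; the helpers are static string formatters, so the snippet runs without credentials or API calls.

```py
# Sketch only: 1.x -> 2.x path-helper renames from UPGRADING.md.
# "my-project" and "my-sink" are placeholder resource names.
from google.cloud.logging_v2.services.config_service_v2 import ConfigServiceV2Client

# 1.x: ConfigServiceV2Client.project_path("my-project")
print(ConfigServiceV2Client.common_project_path("my-project"))
# projects/my-project

# 1.x: ConfigServiceV2Client.sink_path("my-project", "my-sink")
print(ConfigServiceV2Client.log_sink_path("my-project", "my-sink"))
# projects/my-project/sinks/my-sink
```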