From 913253d3883f01be63876f44b7cbdccc2d0654b8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 12 Jul 2019 13:17:53 -0400 Subject: [PATCH] Remove face detection feature from V1 client (via synth). Closes #8661. --- .../cloud/videointelligence_v1/gapic/enums.py | 2 - ...ideo_intelligence_service_client_config.py | 2 +- .../proto/video_intelligence.proto | 227 +++--- .../proto/video_intelligence_pb2.py | 667 +++++------------- videointelligence/synth.metadata | 10 +- videointelligence/synth.py | 2 +- 6 files changed, 273 insertions(+), 637 deletions(-) diff --git a/videointelligence/google/cloud/videointelligence_v1/gapic/enums.py b/videointelligence/google/cloud/videointelligence_v1/gapic/enums.py index 269d8829128a..3f675dbb84e9 100644 --- a/videointelligence/google/cloud/videointelligence_v1/gapic/enums.py +++ b/videointelligence/google/cloud/videointelligence_v1/gapic/enums.py @@ -28,7 +28,6 @@ class Feature(enum.IntEnum): LABEL_DETECTION (int): Label detection. Detect objects, such as dog or flower. SHOT_CHANGE_DETECTION (int): Shot change detection. EXPLICIT_CONTENT_DETECTION (int): Explicit content detection. - FACE_DETECTION (int): Human face detection and tracking. SPEECH_TRANSCRIPTION (int): Speech transcription. TEXT_DETECTION (int): OCR text detection and tracking. OBJECT_TRACKING (int): Object detection and tracking. @@ -38,7 +37,6 @@ class Feature(enum.IntEnum): LABEL_DETECTION = 1 SHOT_CHANGE_DETECTION = 2 EXPLICIT_CONTENT_DETECTION = 3 - FACE_DETECTION = 4 SPEECH_TRANSCRIPTION = 6 TEXT_DETECTION = 7 OBJECT_TRACKING = 9 diff --git a/videointelligence/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py b/videointelligence/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py index b9eef5e2a6a7..74dc2121caff 100644 --- a/videointelligence/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py +++ b/videointelligence/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py @@ -18,7 +18,7 @@ }, "methods": { "AnnotateVideo": { - "timeout_millis": 600000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default", } diff --git a/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto b/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto index ce3d8f8c2d6b..ef530364c35e 100644 --- a/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto +++ b/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; +import "google/api/client.proto"; option csharp_namespace = "Google.Cloud.VideoIntelligence.V1"; option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence"; @@ -33,12 +34,14 @@ option ruby_package = "Google::Cloud::VideoIntelligence::V1"; // Service that implements Google Cloud Video Intelligence API. 
service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + // Performs asynchronous video annotation. Progress and results can be // retrieved through the `google.longrunning.Operations` interface. // `Operation.metadata` contains `AnnotateVideoProgress` (progress). // `Operation.response` contains `AnnotateVideoResponse` (results). - rpc AnnotateVideo(AnnotateVideoRequest) - returns (google.longrunning.Operation) { + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/videos:annotate" body: "*" @@ -52,10 +55,10 @@ message AnnotateVideoRequest { // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are // supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). A video - // URI may include wildcards in `object-id`, and thus identify multiple - // videos. Supported wildcards: '*' to match 0 or more characters; + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded // in the request as `input_content`. If set, `input_content` should be unset. string input_uri = 1; @@ -75,8 +78,8 @@ message AnnotateVideoRequest { // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return - // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For - // more information, see [Request URIs](/storage/docs/reference-uris). + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](/storage/docs/reference-uris). string output_uri = 4; // Optional cloud region where annotation should take place. Supported cloud @@ -101,9 +104,6 @@ message VideoContext { // Config for EXPLICIT_CONTENT_DETECTION. ExplicitContentDetectionConfig explicit_content_detection_config = 4; - // Config for FACE_DETECTION. - FaceDetectionConfig face_detection_config = 5; - // Config for SPEECH_TRANSCRIPTION. SpeechTranscriptionConfig speech_transcription_config = 6; @@ -114,6 +114,66 @@ message VideoContext { ObjectTrackingConfig object_tracking_config = 13; } +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. 
+ FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} + // Config for LABEL_DETECTION. message LabelDetectionConfig { // What labels should be detected with LABEL_DETECTION, in addition to @@ -156,28 +216,17 @@ message ShotChangeDetectionConfig { string model = 1; } -// Config for EXPLICIT_CONTENT_DETECTION. -message ExplicitContentDetectionConfig { - // Model to use for explicit content detection. - // Supported values: "builtin/stable" (the default if unset) and - // "builtin/latest". - string model = 1; -} - -// Config for FACE_DETECTION. -message FaceDetectionConfig { - // Model to use for face detection. +// Config for OBJECT_TRACKING. +message ObjectTrackingConfig { + // Model to use for object tracking. // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". string model = 1; - - // Whether bounding boxes be included in the face annotation output. - bool include_bounding_boxes = 2; } -// Config for OBJECT_TRACKING. -message ObjectTrackingConfig { - // Model to use for object tracking. +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". string model = 1; @@ -295,47 +344,17 @@ message NormalizedBoundingBox { float bottom = 4; } -// Video segment level annotation results for face detection. -message FaceSegment { - // Video segment where a face was detected. - VideoSegment segment = 1; -} - -// Video frame level annotation results for face detection. -message FaceFrame { - // Normalized Bounding boxes in a frame. - // There can be more than one boxes if the same face is detected in multiple - // locations within the current frame. - repeated NormalizedBoundingBox normalized_bounding_boxes = 1; - - // Time-offset, relative to the beginning of the video, - // corresponding to the video frame for this location. - google.protobuf.Duration time_offset = 2; -} - -// Face annotation. -message FaceAnnotation { - // Thumbnail of a representative face view (in JPEG format). - bytes thumbnail = 1; - - // All video segments where a face was detected. - repeated FaceSegment segments = 2; - - // All video frames where a face was detected. - repeated FaceFrame frames = 3; -} - // Annotation results for a single video. message VideoAnnotationResults { // Video file location in // [Google Cloud Storage](https://cloud.google.com/storage/). string input_uri = 1; - // Label annotations on video level or user specified segment level. + // Topical label annotations on video level or user specified segment level. // There is exactly one element for each unique label. repeated LabelAnnotation segment_label_annotations = 2; - // Label annotations on shot level. + // Topical label annotations on shot level. // There is exactly one element for each unique label. repeated LabelAnnotation shot_label_annotations = 3; @@ -343,9 +362,6 @@ message VideoAnnotationResults { // There is exactly one element for each unique label. repeated LabelAnnotation frame_label_annotations = 4; - // Face annotations. There is exactly one element for each unique face. 
- repeated FaceAnnotation face_annotations = 5; - // Shot annotations. Each shot is represented as a video segment. repeated VideoSegment shot_annotations = 6; @@ -391,6 +407,14 @@ message VideoAnnotationProgress { // Time of the most recent update. google.protobuf.Timestamp update_time = 4; + + // Specifies which feature is being tracked if the request contains more than + // one features. + Feature feature = 5; + + // Specifies which segment is being tracked if the request contains more than + // one segments. + VideoSegment segment = 6; } // Video annotation progress. Included in the `metadata` @@ -491,15 +515,17 @@ message SpeechRecognitionAlternative { // Transcript text representing the words that the user spoke. string transcript = 1; - // The confidence estimate between 0.0 and 1.0. A higher number + // Output only. The confidence estimate between 0.0 and 1.0. A higher number // indicates an estimated greater likelihood that the recognized words are - // correct. This field is typically provided only for the top hypothesis, and - // only for `is_final=true` results. Clients should not rely on the - // `confidence` field as it is not guaranteed to be accurate or consistent. + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. // The default of 0.0 is a sentinel value indicating `confidence` was not set. float confidence = 2; - // A list of word-specific information for each recognized word. + // Output only. A list of word-specific information for each recognized word. + // Note: When `enable_speaker_diarization` is true, you will see all the words + // from the beginning of the audio. repeated WordInfo words = 3; } @@ -645,66 +671,3 @@ message ObjectTrackingAnnotation { // Streaming mode: it can only be one ObjectTrackingFrame message in frames. repeated ObjectTrackingFrame frames = 2; } - -// Video annotation feature. -enum Feature { - // Unspecified. - FEATURE_UNSPECIFIED = 0; - - // Label detection. Detect objects, such as dog or flower. - LABEL_DETECTION = 1; - - // Shot change detection. - SHOT_CHANGE_DETECTION = 2; - - // Explicit content detection. - EXPLICIT_CONTENT_DETECTION = 3; - - // Human face detection and tracking. - FACE_DETECTION = 4; - - // Speech transcription. - SPEECH_TRANSCRIPTION = 6; - - // OCR text detection and tracking. - TEXT_DETECTION = 7; - - // Object detection and tracking. - OBJECT_TRACKING = 9; -} - -// Label detection mode. -enum LabelDetectionMode { - // Unspecified. - LABEL_DETECTION_MODE_UNSPECIFIED = 0; - - // Detect shot-level labels. - SHOT_MODE = 1; - - // Detect frame-level labels. - FRAME_MODE = 2; - - // Detect both shot-level and frame-level labels. - SHOT_AND_FRAME_MODE = 3; -} - -// Bucketized representation of likelihood. -enum Likelihood { - // Unspecified likelihood. - LIKELIHOOD_UNSPECIFIED = 0; - - // Very unlikely. - VERY_UNLIKELY = 1; - - // Unlikely. - UNLIKELY = 2; - - // Possible. - POSSIBLE = 3; - - // Likely. - LIKELY = 4; - - // Very likely. 
- VERY_LIKELY = 5; -} diff --git a/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py b/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py index 2d2122a4afb6..1db072b6f507 100644 --- a/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py +++ b/videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py @@ -23,6 +23,7 @@ from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 DESCRIPTOR = _descriptor.FileDescriptor( @@ -33,7 +34,7 @@ "\n%com.google.cloud.videointelligence.v1B\035VideoIntelligenceServiceProtoP\001ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\252\002!Google.Cloud.VideoIntelligence.V1\312\002!Google\\Cloud\\VideoIntelligence\\V1\352\002$Google::Cloud::VideoIntelligence::V1" ), serialized_pb=_b( - '\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\xef\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12<\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x12\n\noutput_uri\x18\x04 \x01(\t\x12\x13\n\x0blocation_id\x18\x05 \x01(\t"\xe6\x05\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12U\n\x15\x66\x61\x63\x65_detection_config\x18\x05 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.FaceDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"D\n\x13\x46\x61\x63\x65\x44\x65tectionConfig\x12\r\n\x05model\x18\x01 \x01(\t\x12\x1e\n\x16include_bounding_boxes\x18\x02 \x01(\x08"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 
\x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 \x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\x94\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"d\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"O\n\x0b\x46\x61\x63\x65Segment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"\x98\x01\n\tFaceFrame\x12[\n\x19normalized_bounding_boxes\x18\x01 \x03(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\xa3\x01\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x11\n\tthumbnail\x18\x01 \x01(\x0c\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.FaceSegment\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.FaceFrame"\xbe\x06\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12K\n\x10\x66\x61\x63\x65_annotations\x18\x05 \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.FaceAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa7\x01\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\xd4\x02\n\x19SpeechTranscriptionConfig\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x18\n\x10max_alternatives\x18\x02 \x01(\x05\x12\x18\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x12I\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContext\x12$\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x12\x14\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x12"\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x12!\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x12\x1e\n\x16\x65nable_word_confidence\x18\t \x01(\x08" \n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t"\x83\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 \x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x15\n\rlanguage_code\x18\x02 \x01(\t"\x82\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12:\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfo"\x9d\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x13\n\x0bspeaker_tag\x18\x05 \x01(\x05"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"`\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x97\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrameB\x0c\n\ntrack_info*\xc9\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x04\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n 
LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xa4\x01\n\x18VideoIntelligenceService\x12\x87\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"\x1e\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*B\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3' + '\n@google/cloud/videointelligence_v1/proto/video_intelligence.proto\x12!google.cloud.videointelligence.v1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x17google/api/client.proto"\xef\x01\n\x14\x41nnotateVideoRequest\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x15\n\rinput_content\x18\x06 \x01(\x0c\x12<\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12\x46\n\rvideo_context\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoContext\x12\x12\n\noutput_uri\x18\x04 \x01(\t\x12\x13\n\x0blocation_id\x18\x05 \x01(\t"\x8f\x05\n\x0cVideoContext\x12\x41\n\x08segments\x18\x01 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12W\n\x16label_detection_config\x18\x02 \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.LabelDetectionConfig\x12\x62\n\x1cshot_change_detection_config\x18\x03 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ShotChangeDetectionConfig\x12l\n!explicit_content_detection_config\x18\x04 \x01(\x0b\x32\x41.google.cloud.videointelligence.v1.ExplicitContentDetectionConfig\x12\x61\n\x1bspeech_transcription_config\x18\x06 \x01(\x0b\x32<.google.cloud.videointelligence.v1.SpeechTranscriptionConfig\x12U\n\x15text_detection_config\x18\x08 \x01(\x0b\x32\x36.google.cloud.videointelligence.v1.TextDetectionConfig\x12W\n\x16object_tracking_config\x18\r \x01(\x0b\x32\x37.google.cloud.videointelligence.v1.ObjectTrackingConfig"\xdd\x01\n\x14LabelDetectionConfig\x12S\n\x14label_detection_mode\x18\x01 \x01(\x0e\x32\x35.google.cloud.videointelligence.v1.LabelDetectionMode\x12\x19\n\x11stationary_camera\x18\x02 \x01(\x08\x12\r\n\x05model\x18\x03 \x01(\t\x12"\n\x1a\x66rame_confidence_threshold\x18\x04 \x01(\x02\x12"\n\x1avideo_confidence_threshold\x18\x05 \x01(\x02"*\n\x19ShotChangeDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"%\n\x14ObjectTrackingConfig\x12\r\n\x05model\x18\x01 \x01(\t"/\n\x1e\x45xplicitContentDetectionConfig\x12\r\n\x05model\x18\x01 \x01(\t"<\n\x13TextDetectionConfig\x12\x16\n\x0elanguage_hints\x18\x01 \x03(\t\x12\r\n\x05model\x18\x02 \x01(\t"x\n\x0cVideoSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"d\n\x0cLabelSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02"P\n\nLabelFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x12\n\nconfidence\x18\x02 
\x01(\x02"G\n\x06\x45ntity\x12\x11\n\tentity_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t"\x94\x02\n\x0fLabelAnnotation\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x44\n\x11\x63\x61tegory_entities\x18\x02 \x03(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x41\n\x08segments\x18\x03 \x03(\x0b\x32/.google.cloud.videointelligence.v1.LabelSegment\x12=\n\x06\x66rames\x18\x04 \x03(\x0b\x32-.google.cloud.videointelligence.v1.LabelFrame"\x95\x01\n\x14\x45xplicitContentFrame\x12.\n\x0btime_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12M\n\x16pornography_likelihood\x18\x02 \x01(\x0e\x32-.google.cloud.videointelligence.v1.Likelihood"d\n\x19\x45xplicitContentAnnotation\x12G\n\x06\x66rames\x18\x01 \x03(\x0b\x32\x37.google.cloud.videointelligence.v1.ExplicitContentFrame"Q\n\x15NormalizedBoundingBox\x12\x0c\n\x04left\x18\x01 \x01(\x02\x12\x0b\n\x03top\x18\x02 \x01(\x02\x12\r\n\x05right\x18\x03 \x01(\x02\x12\x0e\n\x06\x62ottom\x18\x04 \x01(\x02"\xf1\x05\n\x16VideoAnnotationResults\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12U\n\x19segment_label_annotations\x18\x02 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12R\n\x16shot_label_annotations\x18\x03 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12S\n\x17\x66rame_label_annotations\x18\x04 \x03(\x0b\x32\x32.google.cloud.videointelligence.v1.LabelAnnotation\x12I\n\x10shot_annotations\x18\x06 \x03(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12Y\n\x13\x65xplicit_annotation\x18\x07 \x01(\x0b\x32<.google.cloud.videointelligence.v1.ExplicitContentAnnotation\x12U\n\x15speech_transcriptions\x18\x0b \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.SpeechTranscription\x12K\n\x10text_annotations\x18\x0c \x03(\x0b\x32\x31.google.cloud.videointelligence.v1.TextAnnotation\x12W\n\x12object_annotations\x18\x0e \x03(\x0b\x32;.google.cloud.videointelligence.v1.ObjectTrackingAnnotation\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status"n\n\x15\x41nnotateVideoResponse\x12U\n\x12\x61nnotation_results\x18\x01 \x03(\x0b\x32\x39.google.cloud.videointelligence.v1.VideoAnnotationResults"\xa6\x02\n\x17VideoAnnotationProgress\x12\x11\n\tinput_uri\x18\x01 \x01(\t\x12\x18\n\x10progress_percent\x18\x02 \x01(\x05\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12;\n\x07\x66\x65\x61ture\x18\x05 \x01(\x0e\x32*.google.cloud.videointelligence.v1.Feature\x12@\n\x07segment\x18\x06 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment"p\n\x15\x41nnotateVideoProgress\x12W\n\x13\x61nnotation_progress\x18\x01 \x03(\x0b\x32:.google.cloud.videointelligence.v1.VideoAnnotationProgress"\xd4\x02\n\x19SpeechTranscriptionConfig\x12\x15\n\rlanguage_code\x18\x01 \x01(\t\x12\x18\n\x10max_alternatives\x18\x02 \x01(\x05\x12\x18\n\x10\x66ilter_profanity\x18\x03 \x01(\x08\x12I\n\x0fspeech_contexts\x18\x04 \x03(\x0b\x32\x30.google.cloud.videointelligence.v1.SpeechContext\x12$\n\x1c\x65nable_automatic_punctuation\x18\x05 \x01(\x08\x12\x14\n\x0c\x61udio_tracks\x18\x06 \x03(\x05\x12"\n\x1a\x65nable_speaker_diarization\x18\x07 \x01(\x08\x12!\n\x19\x64iarization_speaker_count\x18\x08 \x01(\x05\x12\x1e\n\x16\x65nable_word_confidence\x18\t \x01(\x08" \n\rSpeechContext\x12\x0f\n\x07phrases\x18\x01 \x03(\t"\x83\x01\n\x13SpeechTranscription\x12U\n\x0c\x61lternatives\x18\x01 
\x03(\x0b\x32?.google.cloud.videointelligence.v1.SpeechRecognitionAlternative\x12\x15\n\rlanguage_code\x18\x02 \x01(\t"\x82\x01\n\x1cSpeechRecognitionAlternative\x12\x12\n\ntranscript\x18\x01 \x01(\t\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12:\n\x05words\x18\x03 \x03(\x0b\x32+.google.cloud.videointelligence.v1.WordInfo"\x9d\x01\n\x08WordInfo\x12-\n\nstart_time\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12+\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0c\n\x04word\x18\x03 \x01(\t\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x13\n\x0bspeaker_tag\x18\x05 \x01(\x05"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"_\n\x16NormalizedBoundingPoly\x12\x45\n\x08vertices\x18\x01 \x03(\x0b\x32\x33.google.cloud.videointelligence.v1.NormalizedVertex"\xa1\x01\n\x0bTextSegment\x12@\n\x07segment\x18\x01 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegment\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12<\n\x06\x66rames\x18\x03 \x03(\x0b\x32,.google.cloud.videointelligence.v1.TextFrame"\x94\x01\n\tTextFrame\x12W\n\x14rotated_bounding_box\x18\x01 \x01(\x0b\x32\x39.google.cloud.videointelligence.v1.NormalizedBoundingPoly\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"`\n\x0eTextAnnotation\x12\x0c\n\x04text\x18\x01 \x01(\t\x12@\n\x08segments\x18\x02 \x03(\x0b\x32..google.cloud.videointelligence.v1.TextSegment"\xa0\x01\n\x13ObjectTrackingFrame\x12Y\n\x17normalized_bounding_box\x18\x01 \x01(\x0b\x32\x38.google.cloud.videointelligence.v1.NormalizedBoundingBox\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration"\x97\x02\n\x18ObjectTrackingAnnotation\x12\x42\n\x07segment\x18\x03 \x01(\x0b\x32/.google.cloud.videointelligence.v1.VideoSegmentH\x00\x12\x12\n\x08track_id\x18\x05 \x01(\x03H\x00\x12\x39\n\x06\x65ntity\x18\x01 \x01(\x0b\x32).google.cloud.videointelligence.v1.Entity\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\x46\n\x06\x66rames\x18\x02 \x03(\x0b\x32\x36.google.cloud.videointelligence.v1.ObjectTrackingFrameB\x0c\n\ntrack_info*\xb5\x01\n\x07\x46\x65\x61ture\x12\x17\n\x13\x46\x45\x41TURE_UNSPECIFIED\x10\x00\x12\x13\n\x0fLABEL_DETECTION\x10\x01\x12\x19\n\x15SHOT_CHANGE_DETECTION\x10\x02\x12\x1e\n\x1a\x45XPLICIT_CONTENT_DETECTION\x10\x03\x12\x18\n\x14SPEECH_TRANSCRIPTION\x10\x06\x12\x12\n\x0eTEXT_DETECTION\x10\x07\x12\x13\n\x0fOBJECT_TRACKING\x10\t*r\n\x12LabelDetectionMode\x12$\n LABEL_DETECTION_MODE_UNSPECIFIED\x10\x00\x12\r\n\tSHOT_MODE\x10\x01\x12\x0e\n\nFRAME_MODE\x10\x02\x12\x17\n\x13SHOT_AND_FRAME_MODE\x10\x03*t\n\nLikelihood\x12\x1a\n\x16LIKELIHOOD_UNSPECIFIED\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xfa\x01\n\x18VideoIntelligenceService\x12\x87\x01\n\rAnnotateVideo\x12\x37.google.cloud.videointelligence.v1.AnnotateVideoRequest\x1a\x1d.google.longrunning.Operation"\x1e\x82\xd3\xe4\x93\x02\x18"\x13/v1/videos:annotate:\x01*\x1aT\xca\x41 videointelligence.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x8b\x02\n%com.google.cloud.videointelligence.v1B\x1dVideoIntelligenceServiceProtoP\x01ZRgoogle.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence\xaa\x02!Google.Cloud.VideoIntelligence.V1\xca\x02!Google\\Cloud\\VideoIntelligence\\V1\xea\x02$Google::Cloud::VideoIntelligence::V1b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -41,6 +42,7 @@ 
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, google_dot_rpc_dot_status__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, ], ) @@ -78,22 +80,19 @@ serialized_options=None, type=None, ), - _descriptor.EnumValueDescriptor( - name="FACE_DETECTION", index=4, number=4, serialized_options=None, type=None - ), _descriptor.EnumValueDescriptor( name="SPEECH_TRANSCRIPTION", - index=5, + index=4, number=6, serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( - name="TEXT_DETECTION", index=6, number=7, serialized_options=None, type=None + name="TEXT_DETECTION", index=5, number=7, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( name="OBJECT_TRACKING", - index=7, + index=6, number=9, serialized_options=None, type=None, @@ -101,8 +100,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=6163, - serialized_end=6364, + serialized_start=5679, + serialized_end=5860, ) _sym_db.RegisterEnumDescriptor(_FEATURE) @@ -136,8 +135,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=6366, - serialized_end=6480, + serialized_start=5862, + serialized_end=5976, ) _sym_db.RegisterEnumDescriptor(_LABELDETECTIONMODE) @@ -173,8 +172,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=6482, - serialized_end=6598, + serialized_start=5978, + serialized_end=6094, ) _sym_db.RegisterEnumDescriptor(_LIKELIHOOD) @@ -183,7 +182,6 @@ LABEL_DETECTION = 1 SHOT_CHANGE_DETECTION = 2 EXPLICIT_CONTENT_DETECTION = 3 -FACE_DETECTION = 4 SPEECH_TRANSCRIPTION = 6 TEXT_DETECTION = 7 OBJECT_TRACKING = 9 @@ -323,8 +321,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=261, - serialized_end=500, + serialized_start=286, + serialized_end=525, ) @@ -407,28 +405,10 @@ serialized_options=None, file=DESCRIPTOR, ), - _descriptor.FieldDescriptor( - name="face_detection_config", - full_name="google.cloud.videointelligence.v1.VideoContext.face_detection_config", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), _descriptor.FieldDescriptor( name="speech_transcription_config", full_name="google.cloud.videointelligence.v1.VideoContext.speech_transcription_config", - index=5, + index=4, number=6, type=11, cpp_type=10, @@ -446,7 +426,7 @@ _descriptor.FieldDescriptor( name="text_detection_config", full_name="google.cloud.videointelligence.v1.VideoContext.text_detection_config", - index=6, + index=5, number=8, type=11, cpp_type=10, @@ -464,7 +444,7 @@ _descriptor.FieldDescriptor( name="object_tracking_config", full_name="google.cloud.videointelligence.v1.VideoContext.object_tracking_config", - index=7, + index=6, number=13, type=11, cpp_type=10, @@ -488,8 +468,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=503, - serialized_end=1245, + serialized_start=528, + serialized_end=1183, ) @@ -599,8 +579,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1248, - serialized_end=1469, + serialized_start=1186, + serialized_end=1407, ) @@ -638,21 +618,21 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1471, - serialized_end=1513, + serialized_start=1409, + serialized_end=1451, ) -_EXPLICITCONTENTDETECTIONCONFIG = _descriptor.Descriptor( - name="ExplicitContentDetectionConfig", - 
full_name="google.cloud.videointelligence.v1.ExplicitContentDetectionConfig", +_OBJECTTRACKINGCONFIG = _descriptor.Descriptor( + name="ObjectTrackingConfig", + full_name="google.cloud.videointelligence.v1.ObjectTrackingConfig", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="model", - full_name="google.cloud.videointelligence.v1.ExplicitContentDetectionConfig.model", + full_name="google.cloud.videointelligence.v1.ObjectTrackingConfig.model", index=0, number=1, type=9, @@ -677,78 +657,21 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1515, - serialized_end=1562, + serialized_start=1453, + serialized_end=1490, ) -_FACEDETECTIONCONFIG = _descriptor.Descriptor( - name="FaceDetectionConfig", - full_name="google.cloud.videointelligence.v1.FaceDetectionConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="model", - full_name="google.cloud.videointelligence.v1.FaceDetectionConfig.model", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b("").decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="include_bounding_boxes", - full_name="google.cloud.videointelligence.v1.FaceDetectionConfig.include_bounding_boxes", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1564, - serialized_end=1632, -) - - -_OBJECTTRACKINGCONFIG = _descriptor.Descriptor( - name="ObjectTrackingConfig", - full_name="google.cloud.videointelligence.v1.ObjectTrackingConfig", +_EXPLICITCONTENTDETECTIONCONFIG = _descriptor.Descriptor( + name="ExplicitContentDetectionConfig", + full_name="google.cloud.videointelligence.v1.ExplicitContentDetectionConfig", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="model", - full_name="google.cloud.videointelligence.v1.ObjectTrackingConfig.model", + full_name="google.cloud.videointelligence.v1.ExplicitContentDetectionConfig.model", index=0, number=1, type=9, @@ -773,8 +696,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1634, - serialized_end=1671, + serialized_start=1492, + serialized_end=1539, ) @@ -830,8 +753,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1673, - serialized_end=1733, + serialized_start=1541, + serialized_end=1601, ) @@ -887,8 +810,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1735, - serialized_end=1855, + serialized_start=1603, + serialized_end=1723, ) @@ -944,8 +867,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1857, - serialized_end=1957, + serialized_start=1725, + serialized_end=1825, ) @@ -1001,8 +924,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1959, - serialized_end=2039, + serialized_start=1827, + serialized_end=1907, ) @@ -1076,8 +999,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2041, - serialized_end=2112, + 
serialized_start=1909, + serialized_end=1980, ) @@ -1169,8 +1092,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2115, - serialized_end=2391, + serialized_start=1983, + serialized_end=2259, ) @@ -1226,8 +1149,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2394, - serialized_end=2543, + serialized_start=2262, + serialized_end=2411, ) @@ -1265,8 +1188,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2545, - serialized_end=2645, + serialized_start=2413, + serialized_end=2513, ) @@ -1358,179 +1281,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2647, - serialized_end=2728, -) - - -_FACESEGMENT = _descriptor.Descriptor( - name="FaceSegment", - full_name="google.cloud.videointelligence.v1.FaceSegment", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="segment", - full_name="google.cloud.videointelligence.v1.FaceSegment.segment", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ) - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2730, - serialized_end=2809, -) - - -_FACEFRAME = _descriptor.Descriptor( - name="FaceFrame", - full_name="google.cloud.videointelligence.v1.FaceFrame", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="normalized_bounding_boxes", - full_name="google.cloud.videointelligence.v1.FaceFrame.normalized_bounding_boxes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="time_offset", - full_name="google.cloud.videointelligence.v1.FaceFrame.time_offset", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2812, - serialized_end=2964, -) - - -_FACEANNOTATION = _descriptor.Descriptor( - name="FaceAnnotation", - full_name="google.cloud.videointelligence.v1.FaceAnnotation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name="thumbnail", - full_name="google.cloud.videointelligence.v1.FaceAnnotation.thumbnail", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=_b(""), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="segments", - full_name="google.cloud.videointelligence.v1.FaceAnnotation.segments", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - _descriptor.FieldDescriptor( - name="frames", - full_name="google.cloud.videointelligence.v1.FaceAnnotation.frames", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2967, - serialized_end=3130, + serialized_start=2515, + serialized_end=2596, ) @@ -1613,28 +1365,10 @@ serialized_options=None, file=DESCRIPTOR, ), - _descriptor.FieldDescriptor( - name="face_annotations", - full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.face_annotations", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - ), _descriptor.FieldDescriptor( name="shot_annotations", full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.shot_annotations", - index=5, + index=4, number=6, type=11, cpp_type=10, @@ -1652,7 +1386,7 @@ _descriptor.FieldDescriptor( name="explicit_annotation", full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.explicit_annotation", - index=6, + index=5, number=7, type=11, cpp_type=10, @@ -1670,7 +1404,7 @@ _descriptor.FieldDescriptor( name="speech_transcriptions", full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.speech_transcriptions", - index=7, + index=6, number=11, type=11, cpp_type=10, @@ -1688,7 +1422,7 @@ _descriptor.FieldDescriptor( name="text_annotations", full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.text_annotations", - index=8, + index=7, number=12, type=11, cpp_type=10, @@ -1706,7 +1440,7 @@ _descriptor.FieldDescriptor( name="object_annotations", full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.object_annotations", - index=9, + index=8, number=14, type=11, cpp_type=10, @@ -1724,7 +1458,7 @@ _descriptor.FieldDescriptor( name="error", full_name="google.cloud.videointelligence.v1.VideoAnnotationResults.error", - index=10, + index=9, number=9, type=11, cpp_type=10, @@ -1748,8 +1482,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3133, - serialized_end=3963, + serialized_start=2599, + serialized_end=3352, ) @@ -1787,8 +1521,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3965, - serialized_end=4075, + serialized_start=3354, + serialized_end=3464, ) @@ -1871,6 +1605,42 @@ serialized_options=None, file=DESCRIPTOR, ), + _descriptor.FieldDescriptor( + name="feature", + full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.feature", + index=4, + number=5, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="segment", + full_name="google.cloud.videointelligence.v1.VideoAnnotationProgress.segment", + index=5, + number=6, + type=11, + cpp_type=10, + label=1, + 
has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), ], extensions=[], nested_types=[], @@ -1880,8 +1650,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4078, - serialized_end=4245, + serialized_start=3467, + serialized_end=3761, ) @@ -1919,8 +1689,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4247, - serialized_end=4359, + serialized_start=3763, + serialized_end=3875, ) @@ -2102,8 +1872,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4362, - serialized_end=4702, + serialized_start=3878, + serialized_end=4218, ) @@ -2141,8 +1911,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4704, - serialized_end=4736, + serialized_start=4220, + serialized_end=4252, ) @@ -2198,8 +1968,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4739, - serialized_end=4870, + serialized_start=4255, + serialized_end=4386, ) @@ -2273,8 +2043,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=4873, - serialized_end=5003, + serialized_start=4389, + serialized_end=4519, ) @@ -2384,8 +2154,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5006, - serialized_end=5163, + serialized_start=4522, + serialized_end=4679, ) @@ -2441,8 +2211,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5165, - serialized_end=5205, + serialized_start=4681, + serialized_end=4721, ) @@ -2480,8 +2250,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5207, - serialized_end=5302, + serialized_start=4723, + serialized_end=4818, ) @@ -2555,8 +2325,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5305, - serialized_end=5466, + serialized_start=4821, + serialized_end=4982, ) @@ -2612,8 +2382,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5469, - serialized_end=5617, + serialized_start=4985, + serialized_end=5133, ) @@ -2669,8 +2439,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5619, - serialized_end=5715, + serialized_start=5135, + serialized_end=5231, ) @@ -2726,8 +2496,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=5718, - serialized_end=5878, + serialized_start=5234, + serialized_end=5394, ) @@ -2845,8 +2615,8 @@ fields=[], ) ], - serialized_start=5881, - serialized_end=6160, + serialized_start=5397, + serialized_end=5676, ) _ANNOTATEVIDEOREQUEST.fields_by_name["features"].enum_type = _FEATURE @@ -2861,9 +2631,6 @@ _VIDEOCONTEXT.fields_by_name[ "explicit_content_detection_config" ].message_type = _EXPLICITCONTENTDETECTIONCONFIG -_VIDEOCONTEXT.fields_by_name[ - "face_detection_config" -].message_type = _FACEDETECTIONCONFIG _VIDEOCONTEXT.fields_by_name[ "speech_transcription_config" ].message_type = _SPEECHTRANSCRIPTIONCONFIG @@ -2895,15 +2662,6 @@ ].message_type = google_dot_protobuf_dot_duration__pb2._DURATION _EXPLICITCONTENTFRAME.fields_by_name["pornography_likelihood"].enum_type = _LIKELIHOOD _EXPLICITCONTENTANNOTATION.fields_by_name["frames"].message_type = _EXPLICITCONTENTFRAME -_FACESEGMENT.fields_by_name["segment"].message_type = _VIDEOSEGMENT -_FACEFRAME.fields_by_name[ - "normalized_bounding_boxes" -].message_type = _NORMALIZEDBOUNDINGBOX -_FACEFRAME.fields_by_name[ - "time_offset" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION 
-_FACEANNOTATION.fields_by_name["segments"].message_type = _FACESEGMENT -_FACEANNOTATION.fields_by_name["frames"].message_type = _FACEFRAME _VIDEOANNOTATIONRESULTS.fields_by_name[ "segment_label_annotations" ].message_type = _LABELANNOTATION @@ -2913,9 +2671,6 @@ _VIDEOANNOTATIONRESULTS.fields_by_name[ "frame_label_annotations" ].message_type = _LABELANNOTATION -_VIDEOANNOTATIONRESULTS.fields_by_name[ - "face_annotations" -].message_type = _FACEANNOTATION _VIDEOANNOTATIONRESULTS.fields_by_name["shot_annotations"].message_type = _VIDEOSEGMENT _VIDEOANNOTATIONRESULTS.fields_by_name[ "explicit_annotation" @@ -2941,6 +2696,8 @@ _VIDEOANNOTATIONPROGRESS.fields_by_name[ "update_time" ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_VIDEOANNOTATIONPROGRESS.fields_by_name["feature"].enum_type = _FEATURE +_VIDEOANNOTATIONPROGRESS.fields_by_name["segment"].message_type = _VIDEOSEGMENT _ANNOTATEVIDEOPROGRESS.fields_by_name[ "annotation_progress" ].message_type = _VIDEOANNOTATIONPROGRESS @@ -2992,11 +2749,10 @@ DESCRIPTOR.message_types_by_name[ "ShotChangeDetectionConfig" ] = _SHOTCHANGEDETECTIONCONFIG +DESCRIPTOR.message_types_by_name["ObjectTrackingConfig"] = _OBJECTTRACKINGCONFIG DESCRIPTOR.message_types_by_name[ "ExplicitContentDetectionConfig" ] = _EXPLICITCONTENTDETECTIONCONFIG -DESCRIPTOR.message_types_by_name["FaceDetectionConfig"] = _FACEDETECTIONCONFIG -DESCRIPTOR.message_types_by_name["ObjectTrackingConfig"] = _OBJECTTRACKINGCONFIG DESCRIPTOR.message_types_by_name["TextDetectionConfig"] = _TEXTDETECTIONCONFIG DESCRIPTOR.message_types_by_name["VideoSegment"] = _VIDEOSEGMENT DESCRIPTOR.message_types_by_name["LabelSegment"] = _LABELSEGMENT @@ -3008,9 +2764,6 @@ "ExplicitContentAnnotation" ] = _EXPLICITCONTENTANNOTATION DESCRIPTOR.message_types_by_name["NormalizedBoundingBox"] = _NORMALIZEDBOUNDINGBOX -DESCRIPTOR.message_types_by_name["FaceSegment"] = _FACESEGMENT -DESCRIPTOR.message_types_by_name["FaceFrame"] = _FACEFRAME -DESCRIPTOR.message_types_by_name["FaceAnnotation"] = _FACEANNOTATION DESCRIPTOR.message_types_by_name["VideoAnnotationResults"] = _VIDEOANNOTATIONRESULTS DESCRIPTOR.message_types_by_name["AnnotateVideoResponse"] = _ANNOTATEVIDEORESPONSE DESCRIPTOR.message_types_by_name["VideoAnnotationProgress"] = _VIDEOANNOTATIONPROGRESS @@ -3107,8 +2860,6 @@ Config for SHOT\_CHANGE\_DETECTION. explicit_content_detection_config: Config for EXPLICIT\_CONTENT\_DETECTION. - face_detection_config: - Config for FACE\_DETECTION. speech_transcription_config: Config for SPEECH\_TRANSCRIPTION. text_detection_config: @@ -3184,65 +2935,43 @@ ) _sym_db.RegisterMessage(ShotChangeDetectionConfig) -ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( - "ExplicitContentDetectionConfig", - (_message.Message,), - dict( - DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. - - - Attributes: - model: - Model to use for explicit content detection. Supported values: - "builtin/stable" (the default if unset) and "builtin/latest". 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentDetectionConfig) - ), -) -_sym_db.RegisterMessage(ExplicitContentDetectionConfig) - -FaceDetectionConfig = _reflection.GeneratedProtocolMessageType( - "FaceDetectionConfig", +ObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( + "ObjectTrackingConfig", (_message.Message,), dict( - DESCRIPTOR=_FACEDETECTIONCONFIG, + DESCRIPTOR=_OBJECTTRACKINGCONFIG, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for FACE\_DETECTION. + __doc__="""Config for OBJECT\_TRACKING. Attributes: model: - Model to use for face detection. Supported values: + Model to use for object tracking. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". - include_bounding_boxes: - Whether bounding boxes be included in the face annotation - output. """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceDetectionConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingConfig) ), ) -_sym_db.RegisterMessage(FaceDetectionConfig) +_sym_db.RegisterMessage(ObjectTrackingConfig) -ObjectTrackingConfig = _reflection.GeneratedProtocolMessageType( - "ObjectTrackingConfig", +ExplicitContentDetectionConfig = _reflection.GeneratedProtocolMessageType( + "ExplicitContentDetectionConfig", (_message.Message,), dict( - DESCRIPTOR=_OBJECTTRACKINGCONFIG, + DESCRIPTOR=_EXPLICITCONTENTDETECTIONCONFIG, __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Config for OBJECT\_TRACKING. + __doc__="""Config for EXPLICIT\_CONTENT\_DETECTION. Attributes: model: - Model to use for object tracking. Supported values: + Model to use for explicit content detection. Supported values: "builtin/stable" (the default if unset) and "builtin/latest". """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingConfig) + # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ExplicitContentDetectionConfig) ), ) -_sym_db.RegisterMessage(ObjectTrackingConfig) +_sym_db.RegisterMessage(ExplicitContentDetectionConfig) TextDetectionConfig = _reflection.GeneratedProtocolMessageType( "TextDetectionConfig", @@ -3449,69 +3178,6 @@ ) _sym_db.RegisterMessage(NormalizedBoundingBox) -FaceSegment = _reflection.GeneratedProtocolMessageType( - "FaceSegment", - (_message.Message,), - dict( - DESCRIPTOR=_FACESEGMENT, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video segment level annotation results for face detection. - - - Attributes: - segment: - Video segment where a face was detected. - """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceSegment) - ), -) -_sym_db.RegisterMessage(FaceSegment) - -FaceFrame = _reflection.GeneratedProtocolMessageType( - "FaceFrame", - (_message.Message,), - dict( - DESCRIPTOR=_FACEFRAME, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Video frame level annotation results for face detection. - - - Attributes: - normalized_bounding_boxes: - Normalized Bounding boxes in a frame. There can be more than - one boxes if the same face is detected in multiple locations - within the current frame. - time_offset: - Time-offset, relative to the beginning of the video, - corresponding to the video frame for this location. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceFrame) - ), -) -_sym_db.RegisterMessage(FaceFrame) - -FaceAnnotation = _reflection.GeneratedProtocolMessageType( - "FaceAnnotation", - (_message.Message,), - dict( - DESCRIPTOR=_FACEANNOTATION, - __module__="google.cloud.videointelligence_v1.proto.video_intelligence_pb2", - __doc__="""Face annotation. - - - Attributes: - thumbnail: - Thumbnail of a representative face view (in JPEG format). - segments: - All video segments where a face was detected. - frames: - All video frames where a face was detected. - """, - # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.FaceAnnotation) - ), -) -_sym_db.RegisterMessage(FaceAnnotation) - VideoAnnotationResults = _reflection.GeneratedProtocolMessageType( "VideoAnnotationResults", (_message.Message,), @@ -3526,17 +3192,15 @@ Video file location in `Google Cloud Storage `__. segment_label_annotations: - Label annotations on video level or user specified segment - level. There is exactly one element for each unique label. + Topical label annotations on video level or user specified + segment level. There is exactly one element for each unique + label. shot_label_annotations: - Label annotations on shot level. There is exactly one element - for each unique label. + Topical label annotations on shot level. There is exactly one + element for each unique label. frame_label_annotations: Label annotations on frame level. There is exactly one element for each unique label. - face_annotations: - Face annotations. There is exactly one element for each unique - face. shot_annotations: Shot annotations. Each shot is represented as a video segment. explicit_annotation: @@ -3600,6 +3264,12 @@ Time when the request was received. update_time: Time of the most recent update. + feature: + Specifies which feature is being tracked if the request + contains more than one features. + segment: + Specifies which segment is being tracked if the request + contains more than one segments. """, # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.VideoAnnotationProgress) ), @@ -3763,15 +3433,18 @@ transcript: Transcript text representing the words that the user spoke. confidence: - The confidence estimate between 0.0 and 1.0. A higher number - indicates an estimated greater likelihood that the recognized - words are correct. This field is typically provided only for - the top hypothesis, and only for ``is_final=true`` results. - Clients should not rely on the ``confidence`` field as it is - not guaranteed to be accurate or consistent. The default of - 0.0 is a sentinel value indicating ``confidence`` was not set. + Output only. The confidence estimate between 0.0 and 1.0. A + higher number indicates an estimated greater likelihood that + the recognized words are correct. This field is set only for + the top alternative. This field is not guaranteed to be + accurate and users should not rely on it to be always + provided. The default of 0.0 is a sentinel value indicating + ``confidence`` was not set. words: - A list of word-specific information for each recognized word. + Output only. A list of word-specific information for each + recognized word. Note: When ``enable_speaker_diarization`` is + true, you will see all the words from the beginning of the + audio. 
""", # @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.SpeechRecognitionAlternative) ), @@ -4008,9 +3681,11 @@ full_name="google.cloud.videointelligence.v1.VideoIntelligenceService", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=6601, - serialized_end=6765, + serialized_options=_b( + "\312A videointelligence.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" + ), + serialized_start=6097, + serialized_end=6347, methods=[ _descriptor.MethodDescriptor( name="AnnotateVideo", diff --git a/videointelligence/synth.metadata b/videointelligence/synth.metadata index bb3590592027..65aaa3e5130a 100644 --- a/videointelligence/synth.metadata +++ b/videointelligence/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-07-03T12:45:26.994022Z", + "updateTime": "2019-07-12T17:16:14.999577Z", "sources": [ { "generator": { "name": "artman", - "version": "0.29.3", - "dockerImage": "googleapis/artman@sha256:8900f94a81adaab0238965aa8a7b3648791f4f3a95ee65adc6a56cfcc3753101" + "version": "0.29.4", + "dockerImage": "googleapis/artman@sha256:63f21e83cb92680b7001dc381069e962c9e6dee314fd8365ac554c07c89221fb" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "69916b6ffbb7717fa009033351777d0c9909fb79", - "internalRef": "256241904" + "sha": "47bd0c2ba33c28dd624a65dad382e02bb61d1618", + "internalRef": "257690259" } }, { diff --git a/videointelligence/synth.py b/videointelligence/synth.py index 07fbe50e3219..3e5d8389829c 100644 --- a/videointelligence/synth.py +++ b/videointelligence/synth.py @@ -66,6 +66,6 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library(unit_cov_level=97, cov_level=100) -s.move(templated_files) +s.move(templated_files, excludes="noxfile.py") s.shell.run(["nox", "-s", "blacken"], hide_output=False)