diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java
index 0c5d9cdef787..82a170ee4e0a 100644
--- a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/Feature.java
@@ -73,6 +73,26 @@ public enum Feature implements com.google.protobuf.ProtocolMessageEnum {
* SPEECH_TRANSCRIPTION = 6;
*/
SPEECH_TRANSCRIPTION(6),
+ /**
+ *
+ *
+   * <pre>
+   * OCR text detection and tracking.
+   * </pre>
+   *
+   * <code>TEXT_DETECTION = 7;</code>
+ */
+ TEXT_DETECTION(7),
+ /**
+ *
+ *
+   * <pre>
+   * Object detection and tracking.
+   * </pre>
+   *
+   * <code>OBJECT_TRACKING = 9;</code>
+ */
+ OBJECT_TRACKING(9),
UNRECOGNIZED(-1),
;
@@ -136,6 +156,26 @@ public enum Feature implements com.google.protobuf.ProtocolMessageEnum {
* SPEECH_TRANSCRIPTION = 6;
*/
public static final int SPEECH_TRANSCRIPTION_VALUE = 6;
+ /**
+ *
+ *
+   * <pre>
+   * OCR text detection and tracking.
+   * </pre>
+   *
+   * <code>TEXT_DETECTION = 7;</code>
+ */
+ public static final int TEXT_DETECTION_VALUE = 7;
+ /**
+ *
+ *
+   * <pre>
+   * Object detection and tracking.
+   * </pre>
+   *
+   * <code>OBJECT_TRACKING = 9;</code>
+ */
+ public static final int OBJECT_TRACKING_VALUE = 9;
public final int getNumber() {
if (this == UNRECOGNIZED) {
@@ -165,6 +205,10 @@ public static Feature forNumber(int value) {
return FACE_DETECTION;
case 6:
return SPEECH_TRANSCRIPTION;
+ case 7:
+ return TEXT_DETECTION;
+ case 9:
+ return OBJECT_TRACKING;
default:
return null;
}
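
A minimal usage sketch for the two Feature values added above, using the existing generated request builder from the same package; the input URI is a placeholder, not part of this change:

    // Request the new OCR text detection and object tracking features together.
    com.google.cloud.videointelligence.v1.AnnotateVideoRequest request =
        com.google.cloud.videointelligence.v1.AnnotateVideoRequest.newBuilder()
            .setInputUri("gs://example-bucket/example-video.mp4")
            .addFeatures(com.google.cloud.videointelligence.v1.Feature.TEXT_DETECTION)
            .addFeatures(com.google.cloud.videointelligence.v1.Feature.OBJECT_TRACKING)
            .build();
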
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/NormalizedBoundingPoly.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/NormalizedBoundingPoly.java
new file mode 100644
index 000000000000..243db3ce6e4d
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/NormalizedBoundingPoly.java
@@ -0,0 +1,974 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Normalized bounding polygon for text (that might not be aligned with axis). + * Contains list of the corner points in clockwise order starting from + * top-left corner. For example, for a rectangular bounding box: + * When the text is horizontal it might look like: + * 0----1 + * | | + * 3----2 + * When it's clockwise rotated 180 degrees around the top-left corner it + * becomes: + * 2----3 + * | | + * 1----0 + * and the vertex order will still be (0, 1, 2, 3). Note that values can be less + * than 0, or greater than 1 due to trignometric calculations for location of + * the box. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.NormalizedBoundingPoly} + */ +public final class NormalizedBoundingPoly extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.NormalizedBoundingPoly) + NormalizedBoundingPolyOrBuilder { + private static final long serialVersionUID = 0L; + // Use NormalizedBoundingPoly.newBuilder() to construct. + private NormalizedBoundingPoly(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private NormalizedBoundingPoly() { + vertices_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private NormalizedBoundingPoly( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + vertices_ = + new java.util.ArrayList< + com.google.cloud.videointelligence.v1.NormalizedVertex>(); + mutable_bitField0_ |= 0x00000001; + } + vertices_.add( + input.readMessage( + com.google.cloud.videointelligence.v1.NormalizedVertex.parser(), + extensionRegistry)); + break; + } + default: + { + if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + vertices_ = java.util.Collections.unmodifiableList(vertices_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
+                com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.class,
+                com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.Builder.class);
+  }
+
+  public static final int VERTICES_FIELD_NUMBER = 1;
+  private java.util.List<com.google.cloud.videointelligence.v1.NormalizedVertex> vertices_;
+  /**
+   *
+   *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+  public java.util.List<com.google.cloud.videointelligence.v1.NormalizedVertex> getVerticesList() {
+    return vertices_;
+  }
+  /**
+   *
+   *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+  public java.util.List<? extends com.google.cloud.videointelligence.v1.NormalizedVertexOrBuilder>
+ getVerticesOrBuilderList() {
+ return vertices_;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public int getVerticesCount() {
+ return vertices_.size();
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertex getVertices(int index) {
+ return vertices_.get(index);
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertexOrBuilder getVerticesOrBuilder(
+ int index) {
+ return vertices_.get(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ for (int i = 0; i < vertices_.size(); i++) {
+ output.writeMessage(1, vertices_.get(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < vertices_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, vertices_.get(i));
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.NormalizedBoundingPoly)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly other =
+ (com.google.cloud.videointelligence.v1.NormalizedBoundingPoly) obj;
+
+ boolean result = true;
+ result = result && getVerticesList().equals(other.getVerticesList());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (getVerticesCount() > 0) {
+ hash = (37 * hash) + VERTICES_FIELD_NUMBER;
+ hash = (53 * hash) + getVerticesList().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedBoundingPoly parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Normalized bounding polygon for text (that might not be aligned with axis). + * Contains list of the corner points in clockwise order starting from + * top-left corner. For example, for a rectangular bounding box: + * When the text is horizontal it might look like: + * 0----1 + * | | + * 3----2 + * When it's clockwise rotated 180 degrees around the top-left corner it + * becomes: + * 2----3 + * | | + * 1----0 + * and the vertex order will still be (0, 1, 2, 3). Note that values can be less + * than 0, or greater than 1 due to trignometric calculations for location of + * the box. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.NormalizedBoundingPoly} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+    public java.util.List<com.google.cloud.videointelligence.v1.NormalizedVertex> getVerticesList() {
+      if (verticesBuilder_ == null) {
+        return java.util.Collections.unmodifiableList(vertices_);
+      } else {
+        return verticesBuilder_.getMessageList();
+      }
+    }
+    /**
+     *
+     *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public int getVerticesCount() {
+ if (verticesBuilder_ == null) {
+ return vertices_.size();
+ } else {
+ return verticesBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertex getVertices(int index) {
+ if (verticesBuilder_ == null) {
+ return vertices_.get(index);
+ } else {
+ return verticesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder setVertices(
+ int index, com.google.cloud.videointelligence.v1.NormalizedVertex value) {
+ if (verticesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureVerticesIsMutable();
+ vertices_.set(index, value);
+ onChanged();
+ } else {
+ verticesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder setVertices(
+ int index, com.google.cloud.videointelligence.v1.NormalizedVertex.Builder builderForValue) {
+ if (verticesBuilder_ == null) {
+ ensureVerticesIsMutable();
+ vertices_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ verticesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder addVertices(com.google.cloud.videointelligence.v1.NormalizedVertex value) {
+ if (verticesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureVerticesIsMutable();
+ vertices_.add(value);
+ onChanged();
+ } else {
+ verticesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder addVertices(
+ int index, com.google.cloud.videointelligence.v1.NormalizedVertex value) {
+ if (verticesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureVerticesIsMutable();
+ vertices_.add(index, value);
+ onChanged();
+ } else {
+ verticesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder addVertices(
+ com.google.cloud.videointelligence.v1.NormalizedVertex.Builder builderForValue) {
+ if (verticesBuilder_ == null) {
+ ensureVerticesIsMutable();
+ vertices_.add(builderForValue.build());
+ onChanged();
+ } else {
+ verticesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder addVertices(
+ int index, com.google.cloud.videointelligence.v1.NormalizedVertex.Builder builderForValue) {
+ if (verticesBuilder_ == null) {
+ ensureVerticesIsMutable();
+ vertices_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ verticesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder addAllVertices(
+ java.lang.Iterable extends com.google.cloud.videointelligence.v1.NormalizedVertex>
+ values) {
+ if (verticesBuilder_ == null) {
+ ensureVerticesIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, vertices_);
+ onChanged();
+ } else {
+ verticesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder clearVertices() {
+ if (verticesBuilder_ == null) {
+ vertices_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ verticesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public Builder removeVertices(int index) {
+ if (verticesBuilder_ == null) {
+ ensureVerticesIsMutable();
+ vertices_.remove(index);
+ onChanged();
+ } else {
+ verticesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertex.Builder getVerticesBuilder(
+ int index) {
+ return getVerticesFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertexOrBuilder getVerticesOrBuilder(
+ int index) {
+ if (verticesBuilder_ == null) {
+ return vertices_.get(index);
+ } else {
+ return verticesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+    public java.util.List<? extends com.google.cloud.videointelligence.v1.NormalizedVertexOrBuilder>
+ getVerticesOrBuilderList() {
+ if (verticesBuilder_ != null) {
+ return verticesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(vertices_);
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertex.Builder addVerticesBuilder() {
+ return getVerticesFieldBuilder()
+ .addBuilder(com.google.cloud.videointelligence.v1.NormalizedVertex.getDefaultInstance());
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedVertex.Builder addVerticesBuilder(
+ int index) {
+ return getVerticesFieldBuilder()
+ .addBuilder(
+ index, com.google.cloud.videointelligence.v1.NormalizedVertex.getDefaultInstance());
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Normalized vertices of the bounding polygon.
+     * </pre>
+     *
+     * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ public java.util.List+ * Normalized vertices of the bounding polygon. + *+ * + *
repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;
+ */
+  java.util.List<com.google.cloud.videointelligence.v1.NormalizedVertex> getVerticesList();
+  /**
+   *
+   *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ com.google.cloud.videointelligence.v1.NormalizedVertex getVertices(int index);
+ /**
+ *
+ *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ int getVerticesCount();
+ /**
+ *
+ *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+  java.util.List<? extends com.google.cloud.videointelligence.v1.NormalizedVertexOrBuilder>
+ getVerticesOrBuilderList();
+ /**
+ *
+ *
+   * <pre>
+   * Normalized vertices of the bounding polygon.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.NormalizedVertex vertices = 1;</code>
+ */
+ com.google.cloud.videointelligence.v1.NormalizedVertexOrBuilder getVerticesOrBuilder(int index);
+}
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/NormalizedVertex.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/NormalizedVertex.java
new file mode 100644
index 000000000000..702d52c178b8
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/NormalizedVertex.java
@@ -0,0 +1,609 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * A vertex represents a 2D point in the image. + * NOTE: the normalized vertex coordinates are relative to the original image + * and range from 0 to 1. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.NormalizedVertex} + */ +public final class NormalizedVertex extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.NormalizedVertex) + NormalizedVertexOrBuilder { + private static final long serialVersionUID = 0L; + // Use NormalizedVertex.newBuilder() to construct. + private NormalizedVertex(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private NormalizedVertex() { + x_ = 0F; + y_ = 0F; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private NormalizedVertex( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 13: + { + x_ = input.readFloat(); + break; + } + case 21: + { + y_ = input.readFloat(); + break; + } + default: + { + if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_NormalizedVertex_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_NormalizedVertex_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.NormalizedVertex.class, + com.google.cloud.videointelligence.v1.NormalizedVertex.Builder.class); + } + + public static final int X_FIELD_NUMBER = 1; + private float x_; + /** + * + * + *
+   * <pre>
+   * X coordinate.
+   * </pre>
+   *
+   * <code>float x = 1;</code>
+ */
+ public float getX() {
+ return x_;
+ }
+
+ public static final int Y_FIELD_NUMBER = 2;
+ private float y_;
+ /**
+ *
+ *
+   * <pre>
+   * Y coordinate.
+   * </pre>
+   *
+   * <code>float y = 2;</code>
+ */
+ public float getY() {
+ return y_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (x_ != 0F) {
+ output.writeFloat(1, x_);
+ }
+ if (y_ != 0F) {
+ output.writeFloat(2, y_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (x_ != 0F) {
+ size += com.google.protobuf.CodedOutputStream.computeFloatSize(1, x_);
+ }
+ if (y_ != 0F) {
+ size += com.google.protobuf.CodedOutputStream.computeFloatSize(2, y_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.NormalizedVertex)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.NormalizedVertex other =
+ (com.google.cloud.videointelligence.v1.NormalizedVertex) obj;
+
+ boolean result = true;
+ result =
+ result
+ && (java.lang.Float.floatToIntBits(getX())
+ == java.lang.Float.floatToIntBits(other.getX()));
+ result =
+ result
+ && (java.lang.Float.floatToIntBits(getY())
+ == java.lang.Float.floatToIntBits(other.getY()));
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + X_FIELD_NUMBER;
+ hash = (53 * hash) + java.lang.Float.floatToIntBits(getX());
+ hash = (37 * hash) + Y_FIELD_NUMBER;
+ hash = (53 * hash) + java.lang.Float.floatToIntBits(getY());
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.videointelligence.v1.NormalizedVertex prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * A vertex represents a 2D point in the image. + * NOTE: the normalized vertex coordinates are relative to the original image + * and range from 0 to 1. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.NormalizedVertex} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+     * <pre>
+     * X coordinate.
+     * </pre>
+     *
+     * <code>float x = 1;</code>
+ */
+ public float getX() {
+ return x_;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * X coordinate.
+     * </pre>
+     *
+     * <code>float x = 1;</code>
+ */
+ public Builder setX(float value) {
+
+ x_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * X coordinate.
+     * </pre>
+     *
+     * <code>float x = 1;</code>
+ */
+ public Builder clearX() {
+
+ x_ = 0F;
+ onChanged();
+ return this;
+ }
+
+ private float y_;
+ /**
+ *
+ *
+     * <pre>
+     * Y coordinate.
+     * </pre>
+     *
+     * <code>float y = 2;</code>
+ */
+ public float getY() {
+ return y_;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Y coordinate.
+     * </pre>
+     *
+     * <code>float y = 2;</code>
+ */
+ public Builder setY(float value) {
+
+ y_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Y coordinate.
+     * </pre>
+     *
+     * <code>float y = 2;</code>
+ */
+ public Builder clearY() {
+
+ y_ = 0F;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.NormalizedVertex)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.NormalizedVertex)
+ private static final com.google.cloud.videointelligence.v1.NormalizedVertex DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.NormalizedVertex();
+ }
+
+ public static com.google.cloud.videointelligence.v1.NormalizedVertex getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * X coordinate. + *+ * + *
+   * <code>float x = 1;</code>
+ */
+ float getX();
+
+ /**
+ *
+ *
+   * <pre>
+   * Y coordinate.
+   * </pre>
+   *
+   * <code>float y = 2;</code>
+ */
+ float getY();
+}
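
A short construction sketch for the two new message types above, using only the generated builders introduced in this change; the coordinate values are illustrative and the polygon is truncated to two vertices for brevity:

    // Vertices are normalized to the [0, 1] range relative to the original frame.
    com.google.cloud.videointelligence.v1.NormalizedBoundingPoly poly =
        com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.newBuilder()
            .addVertices(
                com.google.cloud.videointelligence.v1.NormalizedVertex.newBuilder()
                    .setX(0.10f)
                    .setY(0.20f))
            .addVertices(
                com.google.cloud.videointelligence.v1.NormalizedVertex.newBuilder()
                    .setX(0.35f)
                    .setY(0.20f))
            .build();
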
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java
new file mode 100644
index 000000000000..d62de0f8a9e4
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingAnnotation.java
@@ -0,0 +1,1868 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Annotations corresponding to one tracked object. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.ObjectTrackingAnnotation} + */ +public final class ObjectTrackingAnnotation extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.ObjectTrackingAnnotation) + ObjectTrackingAnnotationOrBuilder { + private static final long serialVersionUID = 0L; + // Use ObjectTrackingAnnotation.newBuilder() to construct. + private ObjectTrackingAnnotation(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private ObjectTrackingAnnotation() { + confidence_ = 0F; + frames_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ObjectTrackingAnnotation( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.videointelligence.v1.Entity.Builder subBuilder = null; + if (entity_ != null) { + subBuilder = entity_.toBuilder(); + } + entity_ = + input.readMessage( + com.google.cloud.videointelligence.v1.Entity.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(entity_); + entity_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + frames_ = + new java.util.ArrayList< + com.google.cloud.videointelligence.v1.ObjectTrackingFrame>(); + mutable_bitField0_ |= 0x00000010; + } + frames_.add( + input.readMessage( + com.google.cloud.videointelligence.v1.ObjectTrackingFrame.parser(), + extensionRegistry)); + break; + } + case 26: + { + com.google.cloud.videointelligence.v1.VideoSegment.Builder subBuilder = null; + if (trackInfoCase_ == 3) { + subBuilder = + ((com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_).toBuilder(); + } + trackInfo_ = + input.readMessage( + com.google.cloud.videointelligence.v1.VideoSegment.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_); + trackInfo_ = subBuilder.buildPartial(); + } + trackInfoCase_ = 3; + break; + } + case 37: + { + confidence_ = input.readFloat(); + break; + } + case 40: + { + trackInfoCase_ = 5; + trackInfo_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + frames_ = java.util.Collections.unmodifiableList(frames_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor 
getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.class, + com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder.class); + } + + private int bitField0_; + private int trackInfoCase_ = 0; + private java.lang.Object trackInfo_; + + public enum TrackInfoCase implements com.google.protobuf.Internal.EnumLite { + SEGMENT(3), + TRACK_ID(5), + TRACKINFO_NOT_SET(0); + private final int value; + + private TrackInfoCase(int value) { + this.value = value; + } + /** @deprecated Use {@link #forNumber(int)} instead. */ + @java.lang.Deprecated + public static TrackInfoCase valueOf(int value) { + return forNumber(value); + } + + public static TrackInfoCase forNumber(int value) { + switch (value) { + case 3: + return SEGMENT; + case 5: + return TRACK_ID; + case 0: + return TRACKINFO_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public TrackInfoCase getTrackInfoCase() { + return TrackInfoCase.forNumber(trackInfoCase_); + } + + public static final int SEGMENT_FIELD_NUMBER = 3; + /** + * + * + *
+   * <pre>
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * </pre>
+   *
+   * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public boolean hasSegment() {
+ return trackInfoCase_ == 3;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * </pre>
+   *
+   * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegment getSegment() {
+ if (trackInfoCase_ == 3) {
+ return (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_;
+ }
+ return com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance();
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Non-streaming batch mode ONLY.
+   * Each object track corresponds to one video segment where it appears.
+   * </pre>
+   *
+   * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrBuilder() {
+ if (trackInfoCase_ == 3) {
+ return (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_;
+ }
+ return com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance();
+ }
+
+ public static final int TRACK_ID_FIELD_NUMBER = 5;
+ /**
+ *
+ *
+   * <pre>
+   * Streaming mode ONLY.
+   * In streaming mode, we do not know the end time of a tracked object
+   * before it is completed. Hence, there is no VideoSegment info returned.
+   * Instead, we provide a unique identifiable integer track_id so that
+   * the customers can correlate the results of the ongoing
+   * ObjectTrackAnnotation of the same track_id over time.
+   * </pre>
+   *
+   * <code>int64 track_id = 5;</code>
+ */
+ public long getTrackId() {
+ if (trackInfoCase_ == 5) {
+ return (java.lang.Long) trackInfo_;
+ }
+ return 0L;
+ }
+
+ public static final int ENTITY_FIELD_NUMBER = 1;
+ private com.google.cloud.videointelligence.v1.Entity entity_;
+ /**
+ *
+ *
+   * <pre>
+   * Entity to specify the object category that this track is labeled as.
+   * </pre>
+   *
+   * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public boolean hasEntity() {
+ return entity_ != null;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Entity to specify the object category that this track is labeled as.
+   * </pre>
+   *
+   * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.Entity getEntity() {
+ return entity_ == null
+ ? com.google.cloud.videointelligence.v1.Entity.getDefaultInstance()
+ : entity_;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Entity to specify the object category that this track is labeled as.
+   * </pre>
+   *
+   * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.EntityOrBuilder getEntityOrBuilder() {
+ return getEntity();
+ }
+
+ public static final int CONFIDENCE_FIELD_NUMBER = 4;
+ private float confidence_;
+ /**
+ *
+ *
+   * <pre>
+   * Object category's labeling confidence of this track.
+   * </pre>
+   *
+   * <code>float confidence = 4;</code>
+ */
+ public float getConfidence() {
+ return confidence_;
+ }
+
+ public static final int FRAMES_FIELD_NUMBER = 2;
+  private java.util.List<com.google.cloud.videointelligence.v1.ObjectTrackingFrame> frames_;
+  /**
+   *
+   *
+   * <pre>
+   * Information corresponding to all frames where this object track appears.
+   * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+   * messages in frames.
+   * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+  public java.util.List<com.google.cloud.videointelligence.v1.ObjectTrackingFrame> getFramesList() {
+    return frames_;
+  }
+  /**
+   *
+   *
+   * <pre>
+   * Information corresponding to all frames where this object track appears.
+   * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+   * messages in frames.
+   * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public java.util.List<
+ ? extends com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder>
+ getFramesOrBuilderList() {
+ return frames_;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Information corresponding to all frames where this object track appears.
+   * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+   * messages in frames.
+   * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public int getFramesCount() {
+ return frames_.size();
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Information corresponding to all frames where this object track appears.
+   * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+   * messages in frames.
+   * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrame getFrames(int index) {
+ return frames_.get(index);
+ }
+ /**
+ *
+ *
+   * <pre>
+   * Information corresponding to all frames where this object track appears.
+   * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+   * messages in frames.
+   * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+   * </pre>
+   *
+   * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder getFramesOrBuilder(
+ int index) {
+ return frames_.get(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (entity_ != null) {
+ output.writeMessage(1, getEntity());
+ }
+ for (int i = 0; i < frames_.size(); i++) {
+ output.writeMessage(2, frames_.get(i));
+ }
+ if (trackInfoCase_ == 3) {
+ output.writeMessage(3, (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_);
+ }
+ if (confidence_ != 0F) {
+ output.writeFloat(4, confidence_);
+ }
+ if (trackInfoCase_ == 5) {
+ output.writeInt64(5, (long) ((java.lang.Long) trackInfo_));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (entity_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getEntity());
+ }
+ for (int i = 0; i < frames_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, frames_.get(i));
+ }
+ if (trackInfoCase_ == 3) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 3, (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_);
+ }
+ if (confidence_ != 0F) {
+ size += com.google.protobuf.CodedOutputStream.computeFloatSize(4, confidence_);
+ }
+ if (trackInfoCase_ == 5) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeInt64Size(
+ 5, (long) ((java.lang.Long) trackInfo_));
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation other =
+ (com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation) obj;
+
+ boolean result = true;
+ result = result && (hasEntity() == other.hasEntity());
+ if (hasEntity()) {
+ result = result && getEntity().equals(other.getEntity());
+ }
+ result =
+ result
+ && (java.lang.Float.floatToIntBits(getConfidence())
+ == java.lang.Float.floatToIntBits(other.getConfidence()));
+ result = result && getFramesList().equals(other.getFramesList());
+ result = result && getTrackInfoCase().equals(other.getTrackInfoCase());
+ if (!result) return false;
+ switch (trackInfoCase_) {
+ case 3:
+ result = result && getSegment().equals(other.getSegment());
+ break;
+ case 5:
+ result = result && (getTrackId() == other.getTrackId());
+ break;
+ case 0:
+ default:
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasEntity()) {
+ hash = (37 * hash) + ENTITY_FIELD_NUMBER;
+ hash = (53 * hash) + getEntity().hashCode();
+ }
+ hash = (37 * hash) + CONFIDENCE_FIELD_NUMBER;
+ hash = (53 * hash) + java.lang.Float.floatToIntBits(getConfidence());
+ if (getFramesCount() > 0) {
+ hash = (37 * hash) + FRAMES_FIELD_NUMBER;
+ hash = (53 * hash) + getFramesList().hashCode();
+ }
+ switch (trackInfoCase_) {
+ case 3:
+ hash = (37 * hash) + SEGMENT_FIELD_NUMBER;
+ hash = (53 * hash) + getSegment().hashCode();
+ break;
+ case 5:
+ hash = (37 * hash) + TRACK_ID_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getTrackId());
+ break;
+ case 0:
+ default:
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Annotations corresponding to one tracked object. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.ObjectTrackingAnnotation} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+     * <pre>
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
+     * </pre>
+     *
+     * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public boolean hasSegment() {
+ return trackInfoCase_ == 3;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
+     * </pre>
+     *
+     * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegment getSegment() {
+ if (segmentBuilder_ == null) {
+ if (trackInfoCase_ == 3) {
+ return (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_;
+ }
+ return com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance();
+ } else {
+ if (trackInfoCase_ == 3) {
+ return segmentBuilder_.getMessage();
+ }
+ return com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
+     * </pre>
+     *
+     * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public Builder setSegment(com.google.cloud.videointelligence.v1.VideoSegment value) {
+ if (segmentBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ trackInfo_ = value;
+ onChanged();
+ } else {
+ segmentBuilder_.setMessage(value);
+ }
+ trackInfoCase_ = 3;
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
+     * </pre>
+     *
+     * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public Builder setSegment(
+ com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) {
+ if (segmentBuilder_ == null) {
+ trackInfo_ = builderForValue.build();
+ onChanged();
+ } else {
+ segmentBuilder_.setMessage(builderForValue.build());
+ }
+ trackInfoCase_ = 3;
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
+     * </pre>
+     *
+     * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public Builder mergeSegment(com.google.cloud.videointelligence.v1.VideoSegment value) {
+ if (segmentBuilder_ == null) {
+ if (trackInfoCase_ == 3
+ && trackInfo_
+ != com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()) {
+ trackInfo_ =
+ com.google.cloud.videointelligence.v1.VideoSegment.newBuilder(
+ (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ trackInfo_ = value;
+ }
+ onChanged();
+ } else {
+ if (trackInfoCase_ == 3) {
+ segmentBuilder_.mergeFrom(value);
+ }
+ segmentBuilder_.setMessage(value);
+ }
+ trackInfoCase_ = 3;
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * Non-streaming batch mode ONLY.
+     * Each object track corresponds to one video segment where it appears.
+     * </pre>
+     *
+     * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public Builder clearSegment() {
+ if (segmentBuilder_ == null) {
+ if (trackInfoCase_ == 3) {
+ trackInfoCase_ = 0;
+ trackInfo_ = null;
+ onChanged();
+ }
+ } else {
+ if (trackInfoCase_ == 3) {
+ trackInfoCase_ = 0;
+ trackInfo_ = null;
+ }
+ segmentBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Non-streaming batch mode ONLY.
+ * Each object track corresponds to one video segment where it appears.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegment.Builder getSegmentBuilder() {
+ return getSegmentFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Non-streaming batch mode ONLY.
+ * Each object track corresponds to one video segment where it appears.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrBuilder() {
+ if ((trackInfoCase_ == 3) && (segmentBuilder_ != null)) {
+ return segmentBuilder_.getMessageOrBuilder();
+ } else {
+ if (trackInfoCase_ == 3) {
+ return (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_;
+ }
+ return com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Non-streaming batch mode ONLY.
+ * Each object track corresponds to one video segment where it appears.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.VideoSegment,
+ com.google.cloud.videointelligence.v1.VideoSegment.Builder,
+ com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder>
+ getSegmentFieldBuilder() {
+ if (segmentBuilder_ == null) {
+ if (!(trackInfoCase_ == 3)) {
+ trackInfo_ = com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance();
+ }
+ segmentBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.VideoSegment,
+ com.google.cloud.videointelligence.v1.VideoSegment.Builder,
+ com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder>(
+ (com.google.cloud.videointelligence.v1.VideoSegment) trackInfo_,
+ getParentForChildren(),
+ isClean());
+ trackInfo_ = null;
+ }
+ trackInfoCase_ = 3;
+ onChanged();
+ ;
+ return segmentBuilder_;
+ }
+
+ /**
+ *
+ *
+ * <pre>
+ * Streaming mode ONLY.
+ * In streaming mode, we do not know the end time of a tracked object
+ * before it is completed. Hence, there is no VideoSegment info returned.
+ * Instead, we provide a unique identifiable integer track_id so that
+ * the customers can correlate the results of the ongoing
+ * ObjectTrackAnnotation of the same track_id over time.
+ * </pre>
+ *
+ * <code>int64 track_id = 5;</code>
+ */
+ public long getTrackId() {
+ if (trackInfoCase_ == 5) {
+ return (java.lang.Long) trackInfo_;
+ }
+ return 0L;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Streaming mode ONLY.
+ * In streaming mode, we do not know the end time of a tracked object
+ * before it is completed. Hence, there is no VideoSegment info returned.
+ * Instead, we provide a unique identifiable integer track_id so that
+ * the customers can correlate the results of the ongoing
+ * ObjectTrackAnnotation of the same track_id over time.
+ * </pre>
+ *
+ * <code>int64 track_id = 5;</code>
+ */
+ public Builder setTrackId(long value) {
+ trackInfoCase_ = 5;
+ trackInfo_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Streaming mode ONLY.
+ * In streaming mode, we do not know the end time of a tracked object
+ * before it is completed. Hence, there is no VideoSegment info returned.
+ * Instead, we provide a unique identifiable integer track_id so that
+ * the customers can correlate the results of the ongoing
+ * ObjectTrackAnnotation of the same track_id over time.
+ * </pre>
+ *
+ * <code>int64 track_id = 5;</code>
+ */
+ public Builder clearTrackId() {
+ if (trackInfoCase_ == 5) {
+ trackInfoCase_ = 0;
+ trackInfo_ = null;
+ onChanged();
+ }
+ return this;
+ }
+
+ private com.google.cloud.videointelligence.v1.Entity entity_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.Entity,
+ com.google.cloud.videointelligence.v1.Entity.Builder,
+ com.google.cloud.videointelligence.v1.EntityOrBuilder>
+ entityBuilder_;
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public boolean hasEntity() {
+ return entityBuilder_ != null || entity_ != null;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.Entity getEntity() {
+ if (entityBuilder_ == null) {
+ return entity_ == null
+ ? com.google.cloud.videointelligence.v1.Entity.getDefaultInstance()
+ : entity_;
+ } else {
+ return entityBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public Builder setEntity(com.google.cloud.videointelligence.v1.Entity value) {
+ if (entityBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ entity_ = value;
+ onChanged();
+ } else {
+ entityBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public Builder setEntity(com.google.cloud.videointelligence.v1.Entity.Builder builderForValue) {
+ if (entityBuilder_ == null) {
+ entity_ = builderForValue.build();
+ onChanged();
+ } else {
+ entityBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public Builder mergeEntity(com.google.cloud.videointelligence.v1.Entity value) {
+ if (entityBuilder_ == null) {
+ if (entity_ != null) {
+ entity_ =
+ com.google.cloud.videointelligence.v1.Entity.newBuilder(entity_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ entity_ = value;
+ }
+ onChanged();
+ } else {
+ entityBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public Builder clearEntity() {
+ if (entityBuilder_ == null) {
+ entity_ = null;
+ onChanged();
+ } else {
+ entity_ = null;
+ entityBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.Entity.Builder getEntityBuilder() {
+
+ onChanged();
+ return getEntityFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.EntityOrBuilder getEntityOrBuilder() {
+ if (entityBuilder_ != null) {
+ return entityBuilder_.getMessageOrBuilder();
+ } else {
+ return entity_ == null
+ ? com.google.cloud.videointelligence.v1.Entity.getDefaultInstance()
+ : entity_;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.Entity,
+ com.google.cloud.videointelligence.v1.Entity.Builder,
+ com.google.cloud.videointelligence.v1.EntityOrBuilder>
+ getEntityFieldBuilder() {
+ if (entityBuilder_ == null) {
+ entityBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.Entity,
+ com.google.cloud.videointelligence.v1.Entity.Builder,
+ com.google.cloud.videointelligence.v1.EntityOrBuilder>(
+ getEntity(), getParentForChildren(), isClean());
+ entity_ = null;
+ }
+ return entityBuilder_;
+ }
+
+ private float confidence_;
+ /**
+ *
+ *
+ * <pre>
+ * Object category's labeling confidence of this track.
+ * </pre>
+ *
+ * <code>float confidence = 4;</code>
+ */
+ public float getConfidence() {
+ return confidence_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Object category's labeling confidence of this track.
+ * </pre>
+ *
+ * <code>float confidence = 4;</code>
+ */
+ public Builder setConfidence(float value) {
+
+ confidence_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Object category's labeling confidence of this track.
+ * </pre>
+ *
+ * <code>float confidence = 4;</code>
+ */
+ public Builder clearConfidence() {
+
+ confidence_ = 0F;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List+ * Information corresponding to all frames where this object track appears. + * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + * messages in frames. + * Streaming mode: it can only be one ObjectTrackingFrame message in frames. + *+ * + *
repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;
+ */
+ public java.util.List+ * Information corresponding to all frames where this object track appears. + * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + * messages in frames. + * Streaming mode: it can only be one ObjectTrackingFrame message in frames. + *+ * + *
repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;
+ */
+ public int getFramesCount() {
+ if (framesBuilder_ == null) {
+ return frames_.size();
+ } else {
+ return framesBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrame getFrames(int index) {
+ if (framesBuilder_ == null) {
+ return frames_.get(index);
+ } else {
+ return framesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder setFrames(
+ int index, com.google.cloud.videointelligence.v1.ObjectTrackingFrame value) {
+ if (framesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFramesIsMutable();
+ frames_.set(index, value);
+ onChanged();
+ } else {
+ framesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder setFrames(
+ int index,
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder builderForValue) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ framesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder addFrames(com.google.cloud.videointelligence.v1.ObjectTrackingFrame value) {
+ if (framesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFramesIsMutable();
+ frames_.add(value);
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder addFrames(
+ int index, com.google.cloud.videointelligence.v1.ObjectTrackingFrame value) {
+ if (framesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFramesIsMutable();
+ frames_.add(index, value);
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder addFrames(
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder builderForValue) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.add(builderForValue.build());
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder addFrames(
+ int index,
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder builderForValue) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder addAllFrames(
+ java.lang.Iterable extends com.google.cloud.videointelligence.v1.ObjectTrackingFrame>
+ values) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, frames_);
+ onChanged();
+ } else {
+ framesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder clearFrames() {
+ if (framesBuilder_ == null) {
+ frames_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000010);
+ onChanged();
+ } else {
+ framesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public Builder removeFrames(int index) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.remove(index);
+ onChanged();
+ } else {
+ framesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder getFramesBuilder(
+ int index) {
+ return getFramesFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder getFramesOrBuilder(
+ int index) {
+ if (framesBuilder_ == null) {
+ return frames_.get(index);
+ } else {
+ return framesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public java.util.List<
+ ? extends com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder>
+ getFramesOrBuilderList() {
+ if (framesBuilder_ != null) {
+ return framesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(frames_);
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder addFramesBuilder() {
+ return getFramesFieldBuilder()
+ .addBuilder(
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder addFramesBuilder(
+ int index) {
+ return getFramesFieldBuilder()
+ .addBuilder(
+ index,
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ public java.util.List+ * Non-streaming batch mode ONLY. + * Each object track corresponds to one video segment where it appears. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 3;
+ */
+ boolean hasSegment();
+ /**
+ *
+ *
+ * <pre>
+ * Non-streaming batch mode ONLY.
+ * Each object track corresponds to one video segment where it appears.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ com.google.cloud.videointelligence.v1.VideoSegment getSegment();
+ /**
+ *
+ *
+ * <pre>
+ * Non-streaming batch mode ONLY.
+ * Each object track corresponds to one video segment where it appears.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 3;</code>
+ */
+ com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrBuilder();
+
+ /**
+ *
+ *
+ * <pre>
+ * Streaming mode ONLY.
+ * In streaming mode, we do not know the end time of a tracked object
+ * before it is completed. Hence, there is no VideoSegment info returned.
+ * Instead, we provide a unique identifiable integer track_id so that
+ * the customers can correlate the results of the ongoing
+ * ObjectTrackAnnotation of the same track_id over time.
+ * </pre>
+ *
+ * <code>int64 track_id = 5;</code>
+ */
+ long getTrackId();
+
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ boolean hasEntity();
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ com.google.cloud.videointelligence.v1.Entity getEntity();
+ /**
+ *
+ *
+ * <pre>
+ * Entity to specify the object category that this track is labeled as.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.Entity entity = 1;</code>
+ */
+ com.google.cloud.videointelligence.v1.EntityOrBuilder getEntityOrBuilder();
+
+ /**
+ *
+ *
+ * <pre>
+ * Object category's labeling confidence of this track.
+ * </pre>
+ *
+ * <code>float confidence = 4;</code>
+ */
+ float getConfidence();
+
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ java.util.List+ * Information corresponding to all frames where this object track appears. + * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame + * messages in frames. + * Streaming mode: it can only be one ObjectTrackingFrame message in frames. + *+ * + *
repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;
+ */
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame getFrames(int index);
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ int getFramesCount();
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ java.util.List extends com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder>
+ getFramesOrBuilderList();
+ /**
+ *
+ *
+ * <pre>
+ * Information corresponding to all frames where this object track appears.
+ * Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ * messages in frames.
+ * Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.ObjectTrackingFrame frames = 2;</code>
+ */
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrameOrBuilder getFramesOrBuilder(int index);
+
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.TrackInfoCase
+ getTrackInfoCase();
+}
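The interface above completes the ObjectTrackingAnnotation surface added by this PR. The sketch below (not part of the diff) shows how client code might read one of these annotations once the artifact ships. The ObjectTrackingAnnotation and ObjectTrackingFrame accessors come from the generated code above; obtaining the annotation from a response message and Entity.getDescription() are assumptions based on the pre-existing v1 API and are not touched by this diff.

```java
// Illustrative sketch only; assumes the annotation was obtained elsewhere
// (e.g. from a VideoAnnotationResults message, which this diff does not change).
import com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation;
import com.google.cloud.videointelligence.v1.ObjectTrackingFrame;

final class ObjectTrackPrinter {
  static void print(ObjectTrackingAnnotation track) {
    // Entity.getDescription() is from the pre-existing v1 Entity message.
    System.out.printf("object=%s confidence=%.2f%n",
        track.getEntity().getDescription(), track.getConfidence());

    // track_info is a oneof: batch results carry a VideoSegment,
    // streaming results carry a track_id instead.
    if (track.hasSegment()) {
      System.out.println("segment: " + track.getSegment());
    } else {
      System.out.println("streaming track_id: " + track.getTrackId());
    }

    // Per-frame bounding boxes with their time offsets.
    for (ObjectTrackingFrame frame : track.getFramesList()) {
      System.out.printf("  t=%d.%09ds box=%s%n",
          frame.getTimeOffset().getSeconds(),
          frame.getTimeOffset().getNanos(),
          frame.getNormalizedBoundingBox());
    }
  }
}
```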
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingFrame.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingFrame.java
new file mode 100644
index 000000000000..90c38769d66a
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/ObjectTrackingFrame.java
@@ -0,0 +1,989 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Video frame level annotations for object detection and tracking. This field + * stores per frame location, time offset, and confidence. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.ObjectTrackingFrame} + */ +public final class ObjectTrackingFrame extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.ObjectTrackingFrame) + ObjectTrackingFrameOrBuilder { + private static final long serialVersionUID = 0L; + // Use ObjectTrackingFrame.newBuilder() to construct. + private ObjectTrackingFrame(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private ObjectTrackingFrame() {} + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ObjectTrackingFrame( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.videointelligence.v1.NormalizedBoundingBox.Builder subBuilder = null; + if (normalizedBoundingBox_ != null) { + subBuilder = normalizedBoundingBox_.toBuilder(); + } + normalizedBoundingBox_ = + input.readMessage( + com.google.cloud.videointelligence.v1.NormalizedBoundingBox.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(normalizedBoundingBox_); + normalizedBoundingBox_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.protobuf.Duration.Builder subBuilder = null; + if (timeOffset_ != null) { + subBuilder = timeOffset_.toBuilder(); + } + timeOffset_ = + input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(timeOffset_); + timeOffset_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.ObjectTrackingFrame.class, + com.google.cloud.videointelligence.v1.ObjectTrackingFrame.Builder.class); + } + + public static final int NORMALIZED_BOUNDING_BOX_FIELD_NUMBER = 
1; + private com.google.cloud.videointelligence.v1.NormalizedBoundingBox normalizedBoundingBox_; + /** + * + * + *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public boolean hasNormalizedBoundingBox() {
+ return normalizedBoundingBox_ != null;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingBox getNormalizedBoundingBox() {
+ return normalizedBoundingBox_ == null
+ ? com.google.cloud.videointelligence.v1.NormalizedBoundingBox.getDefaultInstance()
+ : normalizedBoundingBox_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingBoxOrBuilder
+ getNormalizedBoundingBoxOrBuilder() {
+ return getNormalizedBoundingBox();
+ }
+
+ public static final int TIME_OFFSET_FIELD_NUMBER = 2;
+ private com.google.protobuf.Duration timeOffset_;
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public boolean hasTimeOffset() {
+ return timeOffset_ != null;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public com.google.protobuf.Duration getTimeOffset() {
+ return timeOffset_ == null ? com.google.protobuf.Duration.getDefaultInstance() : timeOffset_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public com.google.protobuf.DurationOrBuilder getTimeOffsetOrBuilder() {
+ return getTimeOffset();
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (normalizedBoundingBox_ != null) {
+ output.writeMessage(1, getNormalizedBoundingBox());
+ }
+ if (timeOffset_ != null) {
+ output.writeMessage(2, getTimeOffset());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (normalizedBoundingBox_ != null) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(1, getNormalizedBoundingBox());
+ }
+ if (timeOffset_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTimeOffset());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.ObjectTrackingFrame)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame other =
+ (com.google.cloud.videointelligence.v1.ObjectTrackingFrame) obj;
+
+ boolean result = true;
+ result = result && (hasNormalizedBoundingBox() == other.hasNormalizedBoundingBox());
+ if (hasNormalizedBoundingBox()) {
+ result = result && getNormalizedBoundingBox().equals(other.getNormalizedBoundingBox());
+ }
+ result = result && (hasTimeOffset() == other.hasTimeOffset());
+ if (hasTimeOffset()) {
+ result = result && getTimeOffset().equals(other.getTimeOffset());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasNormalizedBoundingBox()) {
+ hash = (37 * hash) + NORMALIZED_BOUNDING_BOX_FIELD_NUMBER;
+ hash = (53 * hash) + getNormalizedBoundingBox().hashCode();
+ }
+ if (hasTimeOffset()) {
+ hash = (37 * hash) + TIME_OFFSET_FIELD_NUMBER;
+ hash = (53 * hash) + getTimeOffset().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.videointelligence.v1.ObjectTrackingFrame prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Video frame level annotations for object detection and tracking. This field + * stores per frame location, time offset, and confidence. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.ObjectTrackingFrame} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public boolean hasNormalizedBoundingBox() {
+ return normalizedBoundingBoxBuilder_ != null || normalizedBoundingBox_ != null;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingBox getNormalizedBoundingBox() {
+ if (normalizedBoundingBoxBuilder_ == null) {
+ return normalizedBoundingBox_ == null
+ ? com.google.cloud.videointelligence.v1.NormalizedBoundingBox.getDefaultInstance()
+ : normalizedBoundingBox_;
+ } else {
+ return normalizedBoundingBoxBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public Builder setNormalizedBoundingBox(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox value) {
+ if (normalizedBoundingBoxBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ normalizedBoundingBox_ = value;
+ onChanged();
+ } else {
+ normalizedBoundingBoxBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public Builder setNormalizedBoundingBox(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox.Builder builderForValue) {
+ if (normalizedBoundingBoxBuilder_ == null) {
+ normalizedBoundingBox_ = builderForValue.build();
+ onChanged();
+ } else {
+ normalizedBoundingBoxBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public Builder mergeNormalizedBoundingBox(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox value) {
+ if (normalizedBoundingBoxBuilder_ == null) {
+ if (normalizedBoundingBox_ != null) {
+ normalizedBoundingBox_ =
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox.newBuilder(
+ normalizedBoundingBox_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ normalizedBoundingBox_ = value;
+ }
+ onChanged();
+ } else {
+ normalizedBoundingBoxBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public Builder clearNormalizedBoundingBox() {
+ if (normalizedBoundingBoxBuilder_ == null) {
+ normalizedBoundingBox_ = null;
+ onChanged();
+ } else {
+ normalizedBoundingBox_ = null;
+ normalizedBoundingBoxBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingBox.Builder
+ getNormalizedBoundingBoxBuilder() {
+
+ onChanged();
+ return getNormalizedBoundingBoxFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingBoxOrBuilder
+ getNormalizedBoundingBoxOrBuilder() {
+ if (normalizedBoundingBoxBuilder_ != null) {
+ return normalizedBoundingBoxBuilder_.getMessageOrBuilder();
+ } else {
+ return normalizedBoundingBox_ == null
+ ? com.google.cloud.videointelligence.v1.NormalizedBoundingBox.getDefaultInstance()
+ : normalizedBoundingBox_;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox.Builder,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBoxOrBuilder>
+ getNormalizedBoundingBoxFieldBuilder() {
+ if (normalizedBoundingBoxBuilder_ == null) {
+ normalizedBoundingBoxBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox.Builder,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBoxOrBuilder>(
+ getNormalizedBoundingBox(), getParentForChildren(), isClean());
+ normalizedBoundingBox_ = null;
+ }
+ return normalizedBoundingBoxBuilder_;
+ }
+
+ private com.google.protobuf.Duration timeOffset_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Duration,
+ com.google.protobuf.Duration.Builder,
+ com.google.protobuf.DurationOrBuilder>
+ timeOffsetBuilder_;
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public boolean hasTimeOffset() {
+ return timeOffsetBuilder_ != null || timeOffset_ != null;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public com.google.protobuf.Duration getTimeOffset() {
+ if (timeOffsetBuilder_ == null) {
+ return timeOffset_ == null
+ ? com.google.protobuf.Duration.getDefaultInstance()
+ : timeOffset_;
+ } else {
+ return timeOffsetBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public Builder setTimeOffset(com.google.protobuf.Duration value) {
+ if (timeOffsetBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ timeOffset_ = value;
+ onChanged();
+ } else {
+ timeOffsetBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public Builder setTimeOffset(com.google.protobuf.Duration.Builder builderForValue) {
+ if (timeOffsetBuilder_ == null) {
+ timeOffset_ = builderForValue.build();
+ onChanged();
+ } else {
+ timeOffsetBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public Builder mergeTimeOffset(com.google.protobuf.Duration value) {
+ if (timeOffsetBuilder_ == null) {
+ if (timeOffset_ != null) {
+ timeOffset_ =
+ com.google.protobuf.Duration.newBuilder(timeOffset_).mergeFrom(value).buildPartial();
+ } else {
+ timeOffset_ = value;
+ }
+ onChanged();
+ } else {
+ timeOffsetBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public Builder clearTimeOffset() {
+ if (timeOffsetBuilder_ == null) {
+ timeOffset_ = null;
+ onChanged();
+ } else {
+ timeOffset_ = null;
+ timeOffsetBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public com.google.protobuf.Duration.Builder getTimeOffsetBuilder() {
+
+ onChanged();
+ return getTimeOffsetFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ public com.google.protobuf.DurationOrBuilder getTimeOffsetOrBuilder() {
+ if (timeOffsetBuilder_ != null) {
+ return timeOffsetBuilder_.getMessageOrBuilder();
+ } else {
+ return timeOffset_ == null
+ ? com.google.protobuf.Duration.getDefaultInstance()
+ : timeOffset_;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Duration,
+ com.google.protobuf.Duration.Builder,
+ com.google.protobuf.DurationOrBuilder>
+ getTimeOffsetFieldBuilder() {
+ if (timeOffsetBuilder_ == null) {
+ timeOffsetBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Duration,
+ com.google.protobuf.Duration.Builder,
+ com.google.protobuf.DurationOrBuilder>(
+ getTimeOffset(), getParentForChildren(), isClean());
+ timeOffset_ = null;
+ }
+ return timeOffsetBuilder_;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.ObjectTrackingFrame)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.ObjectTrackingFrame)
+ private static final com.google.cloud.videointelligence.v1.ObjectTrackingFrame DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.ObjectTrackingFrame();
+ }
+
+ public static com.google.cloud.videointelligence.v1.ObjectTrackingFrame getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * The normalized bounding box location of this object track for the frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ *
+ */
+ boolean hasNormalizedBoundingBox();
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBox getNormalizedBoundingBox();
+ /**
+ *
+ *
+ * <pre>
+ * The normalized bounding box location of this object track for the frame.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.NormalizedBoundingBox normalized_bounding_box = 1;
+ * </code>
+ */
+ com.google.cloud.videointelligence.v1.NormalizedBoundingBoxOrBuilder
+ getNormalizedBoundingBoxOrBuilder();
+
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ boolean hasTimeOffset();
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ com.google.protobuf.Duration getTimeOffset();
+ /**
+ *
+ *
+ * <pre>
+ * The timestamp of the frame in microseconds.
+ * </pre>
+ *
+ * <code>.google.protobuf.Duration time_offset = 2;</code>
+ */
+ com.google.protobuf.DurationOrBuilder getTimeOffsetOrBuilder();
+}
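Before the TextAnnotation file, here is a minimal sketch of the ObjectTrackingFrame builders above in use, e.g. for constructing fixtures in tests. Only setNormalizedBoundingBox and setTimeOffset are taken from the generated code in this diff; NormalizedBoundingBox and its left/top/right/bottom float fields are assumed from the pre-existing v1 proto.

```java
import com.google.cloud.videointelligence.v1.NormalizedBoundingBox;
import com.google.cloud.videointelligence.v1.ObjectTrackingFrame;
import com.google.protobuf.Duration;

final class ObjectTrackingFrameFixtures {
  // Builds a frame at t=2.5s covering roughly the centre of the image.
  // NormalizedBoundingBox is an existing v1 message (assumption); the
  // ObjectTrackingFrame setters are the ones added in this diff.
  static ObjectTrackingFrame sampleFrame() {
    return ObjectTrackingFrame.newBuilder()
        .setNormalizedBoundingBox(
            NormalizedBoundingBox.newBuilder()
                .setLeft(0.25f)
                .setTop(0.25f)
                .setRight(0.75f)
                .setBottom(0.75f))
        .setTimeOffset(Duration.newBuilder().setSeconds(2).setNanos(500_000_000))
        .build();
  }
}
```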
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java
new file mode 100644
index 000000000000..aa34458d6689
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextAnnotation.java
@@ -0,0 +1,1102 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Annotations related to one detected OCR text snippet. This will contain the + * corresponding text, confidence value, and frame level information for each + * detection. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextAnnotation} + */ +public final class TextAnnotation extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.TextAnnotation) + TextAnnotationOrBuilder { + private static final long serialVersionUID = 0L; + // Use TextAnnotation.newBuilder() to construct. + private TextAnnotation(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private TextAnnotation() { + text_ = ""; + segments_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TextAnnotation( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + text_ = s; + break; + } + case 18: + { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + segments_ = + new java.util.ArrayList
+ * <pre>
+ * The detected text.
+ * </pre>
+ *
+ * <code>string text = 1;</code>
+ */
+ public java.lang.String getText() {
+ java.lang.Object ref = text_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ text_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The detected text.
+ * </pre>
+ *
+ * <code>string text = 1;</code>
+ */
+ public com.google.protobuf.ByteString getTextBytes() {
+ java.lang.Object ref = text_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ text_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int SEGMENTS_FIELD_NUMBER = 2;
+ private java.util.List+ * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public java.util.List+ * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public java.util.List extends com.google.cloud.videointelligence.v1.TextSegmentOrBuilder>
+ getSegmentsOrBuilderList() {
+ return segments_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * All video segments where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;</code>
+ */
+ public int getSegmentsCount() {
+ return segments_.size();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * All video segments where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.TextSegment getSegments(int index) {
+ return segments_.get(index);
+ }
+ /**
+ *
+ *
+ * <pre>
+ * All video segments where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;</code>
+ */
+ public com.google.cloud.videointelligence.v1.TextSegmentOrBuilder getSegmentsOrBuilder(
+ int index) {
+ return segments_.get(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (!getTextBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, text_);
+ }
+ for (int i = 0; i < segments_.size(); i++) {
+ output.writeMessage(2, segments_.get(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getTextBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, text_);
+ }
+ for (int i = 0; i < segments_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, segments_.get(i));
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.TextAnnotation)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.TextAnnotation other =
+ (com.google.cloud.videointelligence.v1.TextAnnotation) obj;
+
+ boolean result = true;
+ result = result && getText().equals(other.getText());
+ result = result && getSegmentsList().equals(other.getSegmentsList());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + TEXT_FIELD_NUMBER;
+ hash = (53 * hash) + getText().hashCode();
+ if (getSegmentsCount() > 0) {
+ hash = (37 * hash) + SEGMENTS_FIELD_NUMBER;
+ hash = (53 * hash) + getSegmentsList().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextAnnotation parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.videointelligence.v1.TextAnnotation prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Annotations related to one detected OCR text snippet. This will contain the + * corresponding text, confidence value, and frame level information for each + * detection. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextAnnotation} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * The detected text. + *+ * + *
string text = 1;
+ */
+ public java.lang.String getText() {
+ java.lang.Object ref = text_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ text_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ * + * The detected text. + *+ * + *
string text = 1;
+ */
+ public com.google.protobuf.ByteString getTextBytes() {
+ java.lang.Object ref = text_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ text_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ * + * The detected text. + *+ * + *
string text = 1;
+ */
+ public Builder setText(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ text_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * The detected text. + *+ * + *
string text = 1;
+ */
+ public Builder clearText() {
+
+ text_ = getDefaultInstance().getText();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * The detected text. + *+ * + *
string text = 1;
+ */
+ public Builder setTextBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ text_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List+ * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public java.util.List+ * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public int getSegmentsCount() {
+ if (segmentsBuilder_ == null) {
+ return segments_.size();
+ } else {
+ return segmentsBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public com.google.cloud.videointelligence.v1.TextSegment getSegments(int index) {
+ if (segmentsBuilder_ == null) {
+ return segments_.get(index);
+ } else {
+ return segmentsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder setSegments(int index, com.google.cloud.videointelligence.v1.TextSegment value) {
+ if (segmentsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSegmentsIsMutable();
+ segments_.set(index, value);
+ onChanged();
+ } else {
+ segmentsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder setSegments(
+ int index, com.google.cloud.videointelligence.v1.TextSegment.Builder builderForValue) {
+ if (segmentsBuilder_ == null) {
+ ensureSegmentsIsMutable();
+ segments_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ segmentsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder addSegments(com.google.cloud.videointelligence.v1.TextSegment value) {
+ if (segmentsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSegmentsIsMutable();
+ segments_.add(value);
+ onChanged();
+ } else {
+ segmentsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder addSegments(int index, com.google.cloud.videointelligence.v1.TextSegment value) {
+ if (segmentsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSegmentsIsMutable();
+ segments_.add(index, value);
+ onChanged();
+ } else {
+ segmentsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder addSegments(
+ com.google.cloud.videointelligence.v1.TextSegment.Builder builderForValue) {
+ if (segmentsBuilder_ == null) {
+ ensureSegmentsIsMutable();
+ segments_.add(builderForValue.build());
+ onChanged();
+ } else {
+ segmentsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder addSegments(
+ int index, com.google.cloud.videointelligence.v1.TextSegment.Builder builderForValue) {
+ if (segmentsBuilder_ == null) {
+ ensureSegmentsIsMutable();
+ segments_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ segmentsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder addAllSegments(
+ java.lang.Iterable extends com.google.cloud.videointelligence.v1.TextSegment> values) {
+ if (segmentsBuilder_ == null) {
+ ensureSegmentsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, segments_);
+ onChanged();
+ } else {
+ segmentsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder clearSegments() {
+ if (segmentsBuilder_ == null) {
+ segments_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ segmentsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public Builder removeSegments(int index) {
+ if (segmentsBuilder_ == null) {
+ ensureSegmentsIsMutable();
+ segments_.remove(index);
+ onChanged();
+ } else {
+ segmentsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public com.google.cloud.videointelligence.v1.TextSegment.Builder getSegmentsBuilder(int index) {
+ return getSegmentsFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public com.google.cloud.videointelligence.v1.TextSegmentOrBuilder getSegmentsOrBuilder(
+ int index) {
+ if (segmentsBuilder_ == null) {
+ return segments_.get(index);
+ } else {
+ return segmentsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public java.util.List extends com.google.cloud.videointelligence.v1.TextSegmentOrBuilder>
+ getSegmentsOrBuilderList() {
+ if (segmentsBuilder_ != null) {
+ return segmentsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(segments_);
+ }
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public com.google.cloud.videointelligence.v1.TextSegment.Builder addSegmentsBuilder() {
+ return getSegmentsFieldBuilder()
+ .addBuilder(com.google.cloud.videointelligence.v1.TextSegment.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public com.google.cloud.videointelligence.v1.TextSegment.Builder addSegmentsBuilder(int index) {
+ return getSegmentsFieldBuilder()
+ .addBuilder(
+ index, com.google.cloud.videointelligence.v1.TextSegment.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ public java.util.List+ * The detected text. + *+ * + *
string text = 1;
+ */
+ java.lang.String getText();
+ /**
+ *
+ *
+ * + * The detected text. + *+ * + *
string text = 1;
+ */
+ com.google.protobuf.ByteString getTextBytes();
+
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ java.util.List+ * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ com.google.cloud.videointelligence.v1.TextSegment getSegments(int index);
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ int getSegmentsCount();
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ java.util.List extends com.google.cloud.videointelligence.v1.TextSegmentOrBuilder>
+ getSegmentsOrBuilderList();
+ /**
+ *
+ *
+ * + * All video segments where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextSegment segments = 2;
+ */
+ com.google.cloud.videointelligence.v1.TextSegmentOrBuilder getSegmentsOrBuilder(int index);
+}
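
Note: the TextAnnotation message above pairs the recognized string with its per-segment detections. A minimal, hypothetical sketch of consuming one is shown below; it uses only accessors introduced in this change (getText, getSegmentsList, getConfidence, getFramesCount), and the TextAnnotationPrinter class name is illustrative, not part of this PR.

import com.google.cloud.videointelligence.v1.TextAnnotation;
import com.google.cloud.videointelligence.v1.TextSegment;

final class TextAnnotationPrinter {
  // Prints one detected OCR snippet together with its per-segment confidence.
  static void print(TextAnnotation annotation) {
    System.out.println("Detected text: " + annotation.getText());
    for (TextSegment segment : annotation.getSegmentsList()) {
      System.out.printf(
          "  confidence=%.2f across %d frame(s)%n",
          segment.getConfidence(), segment.getFramesCount());
    }
  }
}
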
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextDetectionConfig.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextDetectionConfig.java
new file mode 100644
index 000000000000..3ad7b4a3c3aa
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextDetectionConfig.java
@@ -0,0 +1,722 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Config for TEXT_DETECTION. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextDetectionConfig} + */ +public final class TextDetectionConfig extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.TextDetectionConfig) + TextDetectionConfigOrBuilder { + private static final long serialVersionUID = 0L; + // Use TextDetectionConfig.newBuilder() to construct. + private TextDetectionConfig(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private TextDetectionConfig() { + languageHints_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TextDetectionConfig( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + languageHints_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + languageHints_.add(s); + break; + } + default: + { + if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + languageHints_ = languageHints_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.TextDetectionConfig.class, + com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder.class); + } + + public static final int LANGUAGE_HINTS_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList languageHints_; + /** + * + * + *
+ * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public com.google.protobuf.ProtocolStringList getLanguageHintsList() {
+ return languageHints_;
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public int getLanguageHintsCount() {
+ return languageHints_.size();
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public java.lang.String getLanguageHints(int index) {
+ return languageHints_.get(index);
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public com.google.protobuf.ByteString getLanguageHintsBytes(int index) {
+ return languageHints_.getByteString(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ for (int i = 0; i < languageHints_.size(); i++) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageHints_.getRaw(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (int i = 0; i < languageHints_.size(); i++) {
+ dataSize += computeStringSizeNoTag(languageHints_.getRaw(i));
+ }
+ size += dataSize;
+ size += 1 * getLanguageHintsList().size();
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.TextDetectionConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.TextDetectionConfig other =
+ (com.google.cloud.videointelligence.v1.TextDetectionConfig) obj;
+
+ boolean result = true;
+ result = result && getLanguageHintsList().equals(other.getLanguageHintsList());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (getLanguageHintsCount() > 0) {
+ hash = (37 * hash) + LANGUAGE_HINTS_FIELD_NUMBER;
+ hash = (53 * hash) + getLanguageHintsList().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.videointelligence.v1.TextDetectionConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Config for TEXT_DETECTION. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextDetectionConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public com.google.protobuf.ProtocolStringList getLanguageHintsList() {
+ return languageHints_.getUnmodifiableView();
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public int getLanguageHintsCount() {
+ return languageHints_.size();
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public java.lang.String getLanguageHints(int index) {
+ return languageHints_.get(index);
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public com.google.protobuf.ByteString getLanguageHintsBytes(int index) {
+ return languageHints_.getByteString(index);
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public Builder setLanguageHints(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLanguageHintsIsMutable();
+ languageHints_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public Builder addLanguageHints(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureLanguageHintsIsMutable();
+ languageHints_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public Builder addAllLanguageHints(java.lang.Iterable+ * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public Builder clearLanguageHints() {
+ languageHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ public Builder addLanguageHintsBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+ ensureLanguageHintsIsMutable();
+ languageHints_.add(value);
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.TextDetectionConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextDetectionConfig)
+ private static final com.google.cloud.videointelligence.v1.TextDetectionConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.TextDetectionConfig();
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextDetectionConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ java.util.List+ * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ int getLanguageHintsCount();
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ java.lang.String getLanguageHints(int index);
+ /**
+ *
+ *
+ * + * Language hint can be specified if the language to be detected is known a + * priori. It can increase the accuracy of the detection. Language hint must + * be language code in BCP-47 format. + * Automatic language detection is performed if no hint is provided. + *+ * + *
repeated string language_hints = 1;
+ */
+ com.google.protobuf.ByteString getLanguageHintsBytes(int index);
+}
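
Note: a minimal sketch of building the new TextDetectionConfig with BCP-47 language hints, using only the builder methods added in this file. How the config is attached to a request (for example through a VideoContext field) is outside this hunk and not assumed here.

import com.google.cloud.videointelligence.v1.TextDetectionConfig;

final class TextDetectionConfigExample {
  static TextDetectionConfig build() {
    // Hints are BCP-47 language codes; omitting them falls back to automatic detection.
    return TextDetectionConfig.newBuilder()
        .addLanguageHints("en-US")
        .addLanguageHints("fr-FR")
        .build();
  }
}
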
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextFrame.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextFrame.java
new file mode 100644
index 000000000000..73dbf86f637e
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextFrame.java
@@ -0,0 +1,988 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Video frame level annotation results for text annotation (OCR). + * Contains information regarding timestamp and bounding box locations for the + * frames containing detected OCR text snippets. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextFrame} + */ +public final class TextFrame extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.TextFrame) + TextFrameOrBuilder { + private static final long serialVersionUID = 0L; + // Use TextFrame.newBuilder() to construct. + private TextFrame(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private TextFrame() {} + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TextFrame( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.Builder subBuilder = + null; + if (rotatedBoundingBox_ != null) { + subBuilder = rotatedBoundingBox_.toBuilder(); + } + rotatedBoundingBox_ = + input.readMessage( + com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rotatedBoundingBox_); + rotatedBoundingBox_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.protobuf.Duration.Builder subBuilder = null; + if (timeOffset_ != null) { + subBuilder = timeOffset_.toBuilder(); + } + timeOffset_ = + input.readMessage(com.google.protobuf.Duration.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(timeOffset_); + timeOffset_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_TextFrame_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.videointelligence.v1.VideoIntelligenceServiceProto + .internal_static_google_cloud_videointelligence_v1_TextFrame_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.videointelligence.v1.TextFrame.class, + com.google.cloud.videointelligence.v1.TextFrame.Builder.class); + } + + public static final int ROTATED_BOUNDING_BOX_FIELD_NUMBER = 1; + private com.google.cloud.videointelligence.v1.NormalizedBoundingPoly 
rotatedBoundingBox_; + /** + * + * + *
+ * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public boolean hasRotatedBoundingBox() {
+ return rotatedBoundingBox_ != null;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingPoly getRotatedBoundingBox() {
+ return rotatedBoundingBox_ == null
+ ? com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.getDefaultInstance()
+ : rotatedBoundingBox_;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingPolyOrBuilder
+ getRotatedBoundingBoxOrBuilder() {
+ return getRotatedBoundingBox();
+ }
+
+ public static final int TIME_OFFSET_FIELD_NUMBER = 2;
+ private com.google.protobuf.Duration timeOffset_;
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public boolean hasTimeOffset() {
+ return timeOffset_ != null;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public com.google.protobuf.Duration getTimeOffset() {
+ return timeOffset_ == null ? com.google.protobuf.Duration.getDefaultInstance() : timeOffset_;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public com.google.protobuf.DurationOrBuilder getTimeOffsetOrBuilder() {
+ return getTimeOffset();
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (rotatedBoundingBox_ != null) {
+ output.writeMessage(1, getRotatedBoundingBox());
+ }
+ if (timeOffset_ != null) {
+ output.writeMessage(2, getTimeOffset());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (rotatedBoundingBox_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getRotatedBoundingBox());
+ }
+ if (timeOffset_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getTimeOffset());
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.TextFrame)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.TextFrame other =
+ (com.google.cloud.videointelligence.v1.TextFrame) obj;
+
+ boolean result = true;
+ result = result && (hasRotatedBoundingBox() == other.hasRotatedBoundingBox());
+ if (hasRotatedBoundingBox()) {
+ result = result && getRotatedBoundingBox().equals(other.getRotatedBoundingBox());
+ }
+ result = result && (hasTimeOffset() == other.hasTimeOffset());
+ if (hasTimeOffset()) {
+ result = result && getTimeOffset().equals(other.getTimeOffset());
+ }
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasRotatedBoundingBox()) {
+ hash = (37 * hash) + ROTATED_BOUNDING_BOX_FIELD_NUMBER;
+ hash = (53 * hash) + getRotatedBoundingBox().hashCode();
+ }
+ if (hasTimeOffset()) {
+ hash = (37 * hash) + TIME_OFFSET_FIELD_NUMBER;
+ hash = (53 * hash) + getTimeOffset().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.videointelligence.v1.TextFrame prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Video frame level annotation results for text annotation (OCR). + * Contains information regarding timestamp and bounding box locations for the + * frames containing detected OCR text snippets. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextFrame} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public boolean hasRotatedBoundingBox() {
+ return rotatedBoundingBoxBuilder_ != null || rotatedBoundingBox_ != null;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingPoly getRotatedBoundingBox() {
+ if (rotatedBoundingBoxBuilder_ == null) {
+ return rotatedBoundingBox_ == null
+ ? com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.getDefaultInstance()
+ : rotatedBoundingBox_;
+ } else {
+ return rotatedBoundingBoxBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public Builder setRotatedBoundingBox(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly value) {
+ if (rotatedBoundingBoxBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ rotatedBoundingBox_ = value;
+ onChanged();
+ } else {
+ rotatedBoundingBoxBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public Builder setRotatedBoundingBox(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.Builder builderForValue) {
+ if (rotatedBoundingBoxBuilder_ == null) {
+ rotatedBoundingBox_ = builderForValue.build();
+ onChanged();
+ } else {
+ rotatedBoundingBoxBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public Builder mergeRotatedBoundingBox(
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly value) {
+ if (rotatedBoundingBoxBuilder_ == null) {
+ if (rotatedBoundingBox_ != null) {
+ rotatedBoundingBox_ =
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.newBuilder(
+ rotatedBoundingBox_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ rotatedBoundingBox_ = value;
+ }
+ onChanged();
+ } else {
+ rotatedBoundingBoxBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public Builder clearRotatedBoundingBox() {
+ if (rotatedBoundingBoxBuilder_ == null) {
+ rotatedBoundingBox_ = null;
+ onChanged();
+ } else {
+ rotatedBoundingBox_ = null;
+ rotatedBoundingBoxBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.Builder
+ getRotatedBoundingBoxBuilder() {
+
+ onChanged();
+ return getRotatedBoundingBoxFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.NormalizedBoundingPolyOrBuilder
+ getRotatedBoundingBoxOrBuilder() {
+ if (rotatedBoundingBoxBuilder_ != null) {
+ return rotatedBoundingBoxBuilder_.getMessageOrBuilder();
+ } else {
+ return rotatedBoundingBox_ == null
+ ? com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.getDefaultInstance()
+ : rotatedBoundingBox_;
+ }
+ }
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.Builder,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPolyOrBuilder>
+ getRotatedBoundingBoxFieldBuilder() {
+ if (rotatedBoundingBoxBuilder_ == null) {
+ rotatedBoundingBoxBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly.Builder,
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPolyOrBuilder>(
+ getRotatedBoundingBox(), getParentForChildren(), isClean());
+ rotatedBoundingBox_ = null;
+ }
+ return rotatedBoundingBoxBuilder_;
+ }
+
+ private com.google.protobuf.Duration timeOffset_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Duration,
+ com.google.protobuf.Duration.Builder,
+ com.google.protobuf.DurationOrBuilder>
+ timeOffsetBuilder_;
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public boolean hasTimeOffset() {
+ return timeOffsetBuilder_ != null || timeOffset_ != null;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public com.google.protobuf.Duration getTimeOffset() {
+ if (timeOffsetBuilder_ == null) {
+ return timeOffset_ == null
+ ? com.google.protobuf.Duration.getDefaultInstance()
+ : timeOffset_;
+ } else {
+ return timeOffsetBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public Builder setTimeOffset(com.google.protobuf.Duration value) {
+ if (timeOffsetBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ timeOffset_ = value;
+ onChanged();
+ } else {
+ timeOffsetBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public Builder setTimeOffset(com.google.protobuf.Duration.Builder builderForValue) {
+ if (timeOffsetBuilder_ == null) {
+ timeOffset_ = builderForValue.build();
+ onChanged();
+ } else {
+ timeOffsetBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public Builder mergeTimeOffset(com.google.protobuf.Duration value) {
+ if (timeOffsetBuilder_ == null) {
+ if (timeOffset_ != null) {
+ timeOffset_ =
+ com.google.protobuf.Duration.newBuilder(timeOffset_).mergeFrom(value).buildPartial();
+ } else {
+ timeOffset_ = value;
+ }
+ onChanged();
+ } else {
+ timeOffsetBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public Builder clearTimeOffset() {
+ if (timeOffsetBuilder_ == null) {
+ timeOffset_ = null;
+ onChanged();
+ } else {
+ timeOffset_ = null;
+ timeOffsetBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public com.google.protobuf.Duration.Builder getTimeOffsetBuilder() {
+
+ onChanged();
+ return getTimeOffsetFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ public com.google.protobuf.DurationOrBuilder getTimeOffsetOrBuilder() {
+ if (timeOffsetBuilder_ != null) {
+ return timeOffsetBuilder_.getMessageOrBuilder();
+ } else {
+ return timeOffset_ == null
+ ? com.google.protobuf.Duration.getDefaultInstance()
+ : timeOffset_;
+ }
+ }
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Duration,
+ com.google.protobuf.Duration.Builder,
+ com.google.protobuf.DurationOrBuilder>
+ getTimeOffsetFieldBuilder() {
+ if (timeOffsetBuilder_ == null) {
+ timeOffsetBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Duration,
+ com.google.protobuf.Duration.Builder,
+ com.google.protobuf.DurationOrBuilder>(
+ getTimeOffset(), getParentForChildren(), isClean());
+ timeOffset_ = null;
+ }
+ return timeOffsetBuilder_;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.videointelligence.v1.TextFrame)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.videointelligence.v1.TextFrame)
+ private static final com.google.cloud.videointelligence.v1.TextFrame DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.videointelligence.v1.TextFrame();
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextFrame getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ boolean hasRotatedBoundingBox();
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPoly getRotatedBoundingBox();
+ /**
+ *
+ *
+ * + * Bounding polygon of the detected text for this frame. + *+ * + *
.google.cloud.videointelligence.v1.NormalizedBoundingPoly rotated_bounding_box = 1;
+ *
+ */
+ com.google.cloud.videointelligence.v1.NormalizedBoundingPolyOrBuilder
+ getRotatedBoundingBoxOrBuilder();
+
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ boolean hasTimeOffset();
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ com.google.protobuf.Duration getTimeOffset();
+ /**
+ *
+ *
+ * + * Timestamp of this frame. + *+ * + *
.google.protobuf.Duration time_offset = 2;
+ */
+ com.google.protobuf.DurationOrBuilder getTimeOffsetOrBuilder();
+}
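
Note: a hypothetical sketch of populating a TextFrame by hand, mainly to illustrate the normalized clockwise vertex convention and the Duration-based timestamp. It assumes the companion NormalizedVertex message exposes float setX/setY builder setters, which is not shown in this hunk.

import com.google.cloud.videointelligence.v1.NormalizedBoundingPoly;
import com.google.cloud.videointelligence.v1.NormalizedVertex;
import com.google.cloud.videointelligence.v1.TextFrame;
import com.google.protobuf.Duration;

final class TextFrameExample {
  static TextFrame build() {
    // Vertices are normalized to [0, 1] and listed clockwise from the top-left corner.
    NormalizedBoundingPoly box =
        NormalizedBoundingPoly.newBuilder()
            .addVertices(NormalizedVertex.newBuilder().setX(0.1f).setY(0.1f).build())
            .addVertices(NormalizedVertex.newBuilder().setX(0.9f).setY(0.1f).build())
            .addVertices(NormalizedVertex.newBuilder().setX(0.9f).setY(0.2f).build())
            .addVertices(NormalizedVertex.newBuilder().setX(0.1f).setY(0.2f).build())
            .build();
    // time_offset is a protobuf Duration measured from the start of the video.
    return TextFrame.newBuilder()
        .setRotatedBoundingBox(box)
        .setTimeOffset(Duration.newBuilder().setSeconds(12).setNanos(500_000_000).build())
        .build();
  }
}
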
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextSegment.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextSegment.java
new file mode 100644
index 000000000000..916e356f77b5
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/TextSegment.java
@@ -0,0 +1,1287 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/videointelligence/v1/video_intelligence.proto
+
+package com.google.cloud.videointelligence.v1;
+
+/**
+ *
+ *
+ * + * Video segment level annotation results for text detection. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextSegment} + */ +public final class TextSegment extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.videointelligence.v1.TextSegment) + TextSegmentOrBuilder { + private static final long serialVersionUID = 0L; + // Use TextSegment.newBuilder() to construct. + private TextSegment(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private TextSegment() { + confidence_ = 0F; + frames_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TextSegment( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.videointelligence.v1.VideoSegment.Builder subBuilder = null; + if (segment_ != null) { + subBuilder = segment_.toBuilder(); + } + segment_ = + input.readMessage( + com.google.cloud.videointelligence.v1.VideoSegment.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(segment_); + segment_ = subBuilder.buildPartial(); + } + + break; + } + case 21: + { + confidence_ = input.readFloat(); + break; + } + case 26: + { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + frames_ = + new java.util.ArrayList
+ * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public boolean hasSegment() {
+ return segment_ != null;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Video segment where a text snippet was detected.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegment getSegment() {
+ return segment_ == null
+ ? com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()
+ : segment_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Video segment where a text snippet was detected.
+ * </pre>
+ *
+ * <code>.google.cloud.videointelligence.v1.VideoSegment segment = 1;</code>
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrBuilder() {
+ return getSegment();
+ }
+
+ public static final int CONFIDENCE_FIELD_NUMBER = 2;
+ private float confidence_;
+ /**
+ *
+ *
+ * <pre>
+ * Confidence for the track of detected text. It is calculated as the highest
+ * over all frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>float confidence = 2;</code>
+ */
+ public float getConfidence() {
+ return confidence_;
+ }
+
+ public static final int FRAMES_FIELD_NUMBER = 3;
+ private java.util.List<com.google.cloud.videointelligence.v1.TextFrame> frames_;
+ /**
+ *
+ *
+ * <pre>
+ * Information related to the frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;</code>
+ */
+ public java.util.List<com.google.cloud.videointelligence.v1.TextFrame> getFramesList() {
+ return frames_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information related to the frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;</code>
+ */
+ public java.util.List<? extends com.google.cloud.videointelligence.v1.TextFrameOrBuilder>
+ getFramesOrBuilderList() {
+ return frames_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information related to the frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;</code>
+ */
+ public int getFramesCount() {
+ return frames_.size();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information related to the frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.TextFrame getFrames(int index) {
+ return frames_.get(index);
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Information related to the frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;</code>
+ */
+ public com.google.cloud.videointelligence.v1.TextFrameOrBuilder getFramesOrBuilder(int index) {
+ return frames_.get(index);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (segment_ != null) {
+ output.writeMessage(1, getSegment());
+ }
+ if (confidence_ != 0F) {
+ output.writeFloat(2, confidence_);
+ }
+ for (int i = 0; i < frames_.size(); i++) {
+ output.writeMessage(3, frames_.get(i));
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (segment_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSegment());
+ }
+ if (confidence_ != 0F) {
+ size += com.google.protobuf.CodedOutputStream.computeFloatSize(2, confidence_);
+ }
+ for (int i = 0; i < frames_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, frames_.get(i));
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.videointelligence.v1.TextSegment)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.videointelligence.v1.TextSegment other =
+ (com.google.cloud.videointelligence.v1.TextSegment) obj;
+
+ boolean result = true;
+ result = result && (hasSegment() == other.hasSegment());
+ if (hasSegment()) {
+ result = result && getSegment().equals(other.getSegment());
+ }
+ result =
+ result
+ && (java.lang.Float.floatToIntBits(getConfidence())
+ == java.lang.Float.floatToIntBits(other.getConfidence()));
+ result = result && getFramesList().equals(other.getFramesList());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasSegment()) {
+ hash = (37 * hash) + SEGMENT_FIELD_NUMBER;
+ hash = (53 * hash) + getSegment().hashCode();
+ }
+ hash = (37 * hash) + CONFIDENCE_FIELD_NUMBER;
+ hash = (53 * hash) + java.lang.Float.floatToIntBits(getConfidence());
+ if (getFramesCount() > 0) {
+ hash = (37 * hash) + FRAMES_FIELD_NUMBER;
+ hash = (53 * hash) + getFramesList().hashCode();
+ }
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.videointelligence.v1.TextSegment parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.videointelligence.v1.TextSegment prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Video segment level annotation results for text detection. + *+ * + * Protobuf type {@code google.cloud.videointelligence.v1.TextSegment} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public boolean hasSegment() {
+ return segmentBuilder_ != null || segment_ != null;
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegment getSegment() {
+ if (segmentBuilder_ == null) {
+ return segment_ == null
+ ? com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()
+ : segment_;
+ } else {
+ return segmentBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public Builder setSegment(com.google.cloud.videointelligence.v1.VideoSegment value) {
+ if (segmentBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ segment_ = value;
+ onChanged();
+ } else {
+ segmentBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public Builder setSegment(
+ com.google.cloud.videointelligence.v1.VideoSegment.Builder builderForValue) {
+ if (segmentBuilder_ == null) {
+ segment_ = builderForValue.build();
+ onChanged();
+ } else {
+ segmentBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public Builder mergeSegment(com.google.cloud.videointelligence.v1.VideoSegment value) {
+ if (segmentBuilder_ == null) {
+ if (segment_ != null) {
+ segment_ =
+ com.google.cloud.videointelligence.v1.VideoSegment.newBuilder(segment_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ segment_ = value;
+ }
+ onChanged();
+ } else {
+ segmentBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public Builder clearSegment() {
+ if (segmentBuilder_ == null) {
+ segment_ = null;
+ onChanged();
+ } else {
+ segment_ = null;
+ segmentBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegment.Builder getSegmentBuilder() {
+
+ onChanged();
+ return getSegmentFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ public com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrBuilder() {
+ if (segmentBuilder_ != null) {
+ return segmentBuilder_.getMessageOrBuilder();
+ } else {
+ return segment_ == null
+ ? com.google.cloud.videointelligence.v1.VideoSegment.getDefaultInstance()
+ : segment_;
+ }
+ }
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.VideoSegment,
+ com.google.cloud.videointelligence.v1.VideoSegment.Builder,
+ com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder>
+ getSegmentFieldBuilder() {
+ if (segmentBuilder_ == null) {
+ segmentBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.VideoSegment,
+ com.google.cloud.videointelligence.v1.VideoSegment.Builder,
+ com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder>(
+ getSegment(), getParentForChildren(), isClean());
+ segment_ = null;
+ }
+ return segmentBuilder_;
+ }
+
+ private float confidence_;
+ /**
+ *
+ *
+ * + * Confidence for the track of detected text. It is calculated as the highest + * over all frames where OCR detected text appears. + *+ * + *
float confidence = 2;
+ */
+ public float getConfidence() {
+ return confidence_;
+ }
+ /**
+ *
+ *
+ * + * Confidence for the track of detected text. It is calculated as the highest + * over all frames where OCR detected text appears. + *+ * + *
float confidence = 2;
+ */
+ public Builder setConfidence(float value) {
+
+ confidence_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Confidence for the track of detected text. It is calculated as the highest + * over all frames where OCR detected text appears. + *+ * + *
float confidence = 2;
+ */
+ public Builder clearConfidence() {
+
+ confidence_ = 0F;
+ onChanged();
+ return this;
+ }
+
+ private java.util.List+ * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public java.util.List+ * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public int getFramesCount() {
+ if (framesBuilder_ == null) {
+ return frames_.size();
+ } else {
+ return framesBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public com.google.cloud.videointelligence.v1.TextFrame getFrames(int index) {
+ if (framesBuilder_ == null) {
+ return frames_.get(index);
+ } else {
+ return framesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder setFrames(int index, com.google.cloud.videointelligence.v1.TextFrame value) {
+ if (framesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFramesIsMutable();
+ frames_.set(index, value);
+ onChanged();
+ } else {
+ framesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder setFrames(
+ int index, com.google.cloud.videointelligence.v1.TextFrame.Builder builderForValue) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ framesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder addFrames(com.google.cloud.videointelligence.v1.TextFrame value) {
+ if (framesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFramesIsMutable();
+ frames_.add(value);
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder addFrames(int index, com.google.cloud.videointelligence.v1.TextFrame value) {
+ if (framesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureFramesIsMutable();
+ frames_.add(index, value);
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder addFrames(
+ com.google.cloud.videointelligence.v1.TextFrame.Builder builderForValue) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.add(builderForValue.build());
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder addFrames(
+ int index, com.google.cloud.videointelligence.v1.TextFrame.Builder builderForValue) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ framesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder addAllFrames(
+ java.lang.Iterable<? extends com.google.cloud.videointelligence.v1.TextFrame> values) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, frames_);
+ onChanged();
+ } else {
+ framesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder clearFrames() {
+ if (framesBuilder_ == null) {
+ frames_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ framesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public Builder removeFrames(int index) {
+ if (framesBuilder_ == null) {
+ ensureFramesIsMutable();
+ frames_.remove(index);
+ onChanged();
+ } else {
+ framesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public com.google.cloud.videointelligence.v1.TextFrame.Builder getFramesBuilder(int index) {
+ return getFramesFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public com.google.cloud.videointelligence.v1.TextFrameOrBuilder getFramesOrBuilder(int index) {
+ if (framesBuilder_ == null) {
+ return frames_.get(index);
+ } else {
+ return framesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public java.util.List<? extends com.google.cloud.videointelligence.v1.TextFrameOrBuilder>
+ getFramesOrBuilderList() {
+ if (framesBuilder_ != null) {
+ return framesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(frames_);
+ }
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public com.google.cloud.videointelligence.v1.TextFrame.Builder addFramesBuilder() {
+ return getFramesFieldBuilder()
+ .addBuilder(com.google.cloud.videointelligence.v1.TextFrame.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public com.google.cloud.videointelligence.v1.TextFrame.Builder addFramesBuilder(int index) {
+ return getFramesFieldBuilder()
+ .addBuilder(index, com.google.cloud.videointelligence.v1.TextFrame.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ public java.util.List+ * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ boolean hasSegment();
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ com.google.cloud.videointelligence.v1.VideoSegment getSegment();
+ /**
+ *
+ *
+ * + * Video segment where a text snippet was detected. + *+ * + *
.google.cloud.videointelligence.v1.VideoSegment segment = 1;
+ */
+ com.google.cloud.videointelligence.v1.VideoSegmentOrBuilder getSegmentOrBuilder();
+
+ /**
+ *
+ *
+ * + * Confidence for the track of detected text. It is calculated as the highest + * over all frames where OCR detected text appears. + *+ * + *
float confidence = 2;
+ */
+ float getConfidence();
+
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ java.util.List<com.google.cloud.videointelligence.v1.TextFrame> getFramesList();
+ /**
+ *
+ *
+ * <pre>
+ * Information related to the frames where OCR detected text appears.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;</code>
+ */
+ com.google.cloud.videointelligence.v1.TextFrame getFrames(int index);
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ int getFramesCount();
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ java.util.List<? extends com.google.cloud.videointelligence.v1.TextFrameOrBuilder>
+ getFramesOrBuilderList();
+ /**
+ *
+ *
+ * + * Information related to the frames where OCR detected text appears. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextFrame frames = 3;
+ */
+ com.google.cloud.videointelligence.v1.TextFrameOrBuilder getFramesOrBuilder(int index);
+}
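
Note: a minimal, hypothetical sketch of assembling the new TextSegment message with the builder API added in this file. setSegment, setConfidence and addFrames appear in the diff above; the TextFrame and VideoSegment builder setters are assumed to follow the same generated pattern.

import com.google.cloud.videointelligence.v1.TextFrame;
import com.google.cloud.videointelligence.v1.TextSegment;
import com.google.cloud.videointelligence.v1.VideoSegment;
import com.google.protobuf.Duration;

public class TextSegmentExample {
  static TextSegment buildSegment() {
    // One OCR frame, twelve seconds into the video (setTimeOffset assumed on TextFrame.Builder).
    TextFrame frame =
        TextFrame.newBuilder()
            .setTimeOffset(Duration.newBuilder().setSeconds(12).build())
            .build();
    return TextSegment.newBuilder()
        .setSegment(VideoSegment.getDefaultInstance())
        .setConfidence(0.87f) // highest per-frame OCR confidence for this track
        .addFrames(frame)
        .build();
  }
}
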
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java
index a67d7facb16b..83062c1520a2 100644
--- a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoAnnotationResults.java
@@ -30,6 +30,8 @@ private VideoAnnotationResults() {
faceAnnotations_ = java.util.Collections.emptyList();
shotAnnotations_ = java.util.Collections.emptyList();
speechTranscriptions_ = java.util.Collections.emptyList();
+ textAnnotations_ = java.util.Collections.emptyList();
+ objectAnnotations_ = java.util.Collections.emptyList();
}
@java.lang.Override
@@ -177,6 +179,33 @@ private VideoAnnotationResults(
extensionRegistry));
break;
}
+ case 98:
+ {
+ if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) {
+ textAnnotations_ =
+ new java.util.ArrayList+ * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ */
+ public java.util.List<com.google.cloud.videointelligence.v1.TextAnnotation>
+ getTextAnnotationsList() {
+ return textAnnotations_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * OCR text detection and tracking.
+ * Annotations for list of detected text snippets. Each will have list of
+ * frame information associated with it.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;</code>
+ */
+ public java.util.List<? extends com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder>
+ getTextAnnotationsOrBuilderList() {
+ return textAnnotations_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * OCR text detection and tracking.
+ * Annotations for list of detected text snippets. Each will have list of
+ * frame information associated with it.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;</code>
+ */
+ public int getTextAnnotationsCount() {
+ return textAnnotations_.size();
+ }
+ /**
+ *
+ *
+ * <pre>
+ * OCR text detection and tracking.
+ * Annotations for list of detected text snippets. Each will have list of
+ * frame information associated with it.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;</code>
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotation getTextAnnotations(int index) {
+ return textAnnotations_.get(index);
+ }
+ /**
+ *
+ *
+ * <pre>
+ * OCR text detection and tracking.
+ * Annotations for list of detected text snippets. Each will have list of
+ * frame information associated with it.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;</code>
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder getTextAnnotationsOrBuilder(
+ int index) {
+ return textAnnotations_.get(index);
+ }
+
+ public static final int OBJECT_ANNOTATIONS_FIELD_NUMBER = 14;
+ private java.util.List<com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation>
+ objectAnnotations_;
+ /**
+ *
+ *
+ * <pre>
+ * Annotations for list of objects detected and tracked in video.
+ * </pre>
+ *
+ * <code>
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ * </code>
+ */
+ public java.util.List<com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation>
+ getObjectAnnotationsList() {
+ return objectAnnotations_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Annotations for list of objects detected and tracked in video.
+ * </pre>
+ *
+ * <code>
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ * </code>
+ */
+ public java.util.List<
+ ? extends com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder>
+ getObjectAnnotationsOrBuilderList() {
+ return objectAnnotations_;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public int getObjectAnnotationsCount() {
+ return objectAnnotations_.size();
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation getObjectAnnotations(
+ int index) {
+ return objectAnnotations_.get(index);
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder
+ getObjectAnnotationsOrBuilder(int index) {
+ return objectAnnotations_.get(index);
+ }
+
public static final int ERROR_FIELD_NUMBER = 9;
private com.google.rpc.Status error_;
/**
@@ -851,6 +1041,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < speechTranscriptions_.size(); i++) {
output.writeMessage(11, speechTranscriptions_.get(i));
}
+ for (int i = 0; i < textAnnotations_.size(); i++) {
+ output.writeMessage(12, textAnnotations_.get(i));
+ }
+ for (int i = 0; i < objectAnnotations_.size(); i++) {
+ output.writeMessage(14, objectAnnotations_.get(i));
+ }
unknownFields.writeTo(output);
}
@@ -894,6 +1090,13 @@ public int getSerializedSize() {
com.google.protobuf.CodedOutputStream.computeMessageSize(
11, speechTranscriptions_.get(i));
}
+ for (int i = 0; i < textAnnotations_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(12, textAnnotations_.get(i));
+ }
+ for (int i = 0; i < objectAnnotations_.size(); i++) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(14, objectAnnotations_.get(i));
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -923,6 +1126,8 @@ public boolean equals(final java.lang.Object obj) {
result = result && getExplicitAnnotation().equals(other.getExplicitAnnotation());
}
result = result && getSpeechTranscriptionsList().equals(other.getSpeechTranscriptionsList());
+ result = result && getTextAnnotationsList().equals(other.getTextAnnotationsList());
+ result = result && getObjectAnnotationsList().equals(other.getObjectAnnotationsList());
result = result && (hasError() == other.hasError());
if (hasError()) {
result = result && getError().equals(other.getError());
@@ -968,6 +1173,14 @@ public int hashCode() {
hash = (37 * hash) + SPEECH_TRANSCRIPTIONS_FIELD_NUMBER;
hash = (53 * hash) + getSpeechTranscriptionsList().hashCode();
}
+ if (getTextAnnotationsCount() > 0) {
+ hash = (37 * hash) + TEXT_ANNOTATIONS_FIELD_NUMBER;
+ hash = (53 * hash) + getTextAnnotationsList().hashCode();
+ }
+ if (getObjectAnnotationsCount() > 0) {
+ hash = (37 * hash) + OBJECT_ANNOTATIONS_FIELD_NUMBER;
+ hash = (53 * hash) + getObjectAnnotationsList().hashCode();
+ }
if (hasError()) {
hash = (37 * hash) + ERROR_FIELD_NUMBER;
hash = (53 * hash) + getError().hashCode();
@@ -1119,6 +1332,8 @@ private void maybeForceBuilderInitialization() {
getFaceAnnotationsFieldBuilder();
getShotAnnotationsFieldBuilder();
getSpeechTranscriptionsFieldBuilder();
+ getTextAnnotationsFieldBuilder();
+ getObjectAnnotationsFieldBuilder();
}
}
@@ -1169,6 +1384,18 @@ public Builder clear() {
} else {
speechTranscriptionsBuilder_.clear();
}
+ if (textAnnotationsBuilder_ == null) {
+ textAnnotations_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000100);
+ } else {
+ textAnnotationsBuilder_.clear();
+ }
+ if (objectAnnotationsBuilder_ == null) {
+ objectAnnotations_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000200);
+ } else {
+ objectAnnotationsBuilder_.clear();
+ }
if (errorBuilder_ == null) {
error_ = null;
} else {
@@ -1266,6 +1493,24 @@ public com.google.cloud.videointelligence.v1.VideoAnnotationResults buildPartial
} else {
result.speechTranscriptions_ = speechTranscriptionsBuilder_.build();
}
+ if (textAnnotationsBuilder_ == null) {
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ textAnnotations_ = java.util.Collections.unmodifiableList(textAnnotations_);
+ bitField0_ = (bitField0_ & ~0x00000100);
+ }
+ result.textAnnotations_ = textAnnotations_;
+ } else {
+ result.textAnnotations_ = textAnnotationsBuilder_.build();
+ }
+ if (objectAnnotationsBuilder_ == null) {
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ objectAnnotations_ = java.util.Collections.unmodifiableList(objectAnnotations_);
+ bitField0_ = (bitField0_ & ~0x00000200);
+ }
+ result.objectAnnotations_ = objectAnnotations_;
+ } else {
+ result.objectAnnotations_ = objectAnnotationsBuilder_.build();
+ }
if (errorBuilder_ == null) {
result.error_ = error_;
} else {
@@ -1492,6 +1737,60 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoAnnotationRe
}
}
}
+ if (textAnnotationsBuilder_ == null) {
+ if (!other.textAnnotations_.isEmpty()) {
+ if (textAnnotations_.isEmpty()) {
+ textAnnotations_ = other.textAnnotations_;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ } else {
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.addAll(other.textAnnotations_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.textAnnotations_.isEmpty()) {
+ if (textAnnotationsBuilder_.isEmpty()) {
+ textAnnotationsBuilder_.dispose();
+ textAnnotationsBuilder_ = null;
+ textAnnotations_ = other.textAnnotations_;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ textAnnotationsBuilder_ =
+ com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+ ? getTextAnnotationsFieldBuilder()
+ : null;
+ } else {
+ textAnnotationsBuilder_.addAllMessages(other.textAnnotations_);
+ }
+ }
+ }
+ if (objectAnnotationsBuilder_ == null) {
+ if (!other.objectAnnotations_.isEmpty()) {
+ if (objectAnnotations_.isEmpty()) {
+ objectAnnotations_ = other.objectAnnotations_;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ } else {
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.addAll(other.objectAnnotations_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.objectAnnotations_.isEmpty()) {
+ if (objectAnnotationsBuilder_.isEmpty()) {
+ objectAnnotationsBuilder_.dispose();
+ objectAnnotationsBuilder_ = null;
+ objectAnnotations_ = other.objectAnnotations_;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ objectAnnotationsBuilder_ =
+ com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+ ? getObjectAnnotationsFieldBuilder()
+ : null;
+ } else {
+ objectAnnotationsBuilder_.addAllMessages(other.objectAnnotations_);
+ }
+ }
+ }
if (other.hasError()) {
mergeError(other.getError());
}
@@ -4197,6 +4496,830 @@ public Builder removeSpeechTranscriptions(int index) {
return speechTranscriptionsBuilder_;
}
+ private java.util.List+ * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public java.util.List+ * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public int getTextAnnotationsCount() {
+ if (textAnnotationsBuilder_ == null) {
+ return textAnnotations_.size();
+ } else {
+ return textAnnotationsBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotation getTextAnnotations(int index) {
+ if (textAnnotationsBuilder_ == null) {
+ return textAnnotations_.get(index);
+ } else {
+ return textAnnotationsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder setTextAnnotations(
+ int index, com.google.cloud.videointelligence.v1.TextAnnotation value) {
+ if (textAnnotationsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.set(index, value);
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder setTextAnnotations(
+ int index, com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) {
+ if (textAnnotationsBuilder_ == null) {
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder addTextAnnotations(com.google.cloud.videointelligence.v1.TextAnnotation value) {
+ if (textAnnotationsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.add(value);
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder addTextAnnotations(
+ int index, com.google.cloud.videointelligence.v1.TextAnnotation value) {
+ if (textAnnotationsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.add(index, value);
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder addTextAnnotations(
+ com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) {
+ if (textAnnotationsBuilder_ == null) {
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.add(builderForValue.build());
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder addTextAnnotations(
+ int index, com.google.cloud.videointelligence.v1.TextAnnotation.Builder builderForValue) {
+ if (textAnnotationsBuilder_ == null) {
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder addAllTextAnnotations(
+ java.lang.Iterable<? extends com.google.cloud.videointelligence.v1.TextAnnotation> values) {
+ if (textAnnotationsBuilder_ == null) {
+ ensureTextAnnotationsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, textAnnotations_);
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder clearTextAnnotations() {
+ if (textAnnotationsBuilder_ == null) {
+ textAnnotations_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000100);
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public Builder removeTextAnnotations(int index) {
+ if (textAnnotationsBuilder_ == null) {
+ ensureTextAnnotationsIsMutable();
+ textAnnotations_.remove(index);
+ onChanged();
+ } else {
+ textAnnotationsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotation.Builder getTextAnnotationsBuilder(
+ int index) {
+ return getTextAnnotationsFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder
+ getTextAnnotationsOrBuilder(int index) {
+ if (textAnnotationsBuilder_ == null) {
+ return textAnnotations_.get(index);
+ } else {
+ return textAnnotationsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public java.util.List<? extends com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder>
+ getTextAnnotationsOrBuilderList() {
+ if (textAnnotationsBuilder_ != null) {
+ return textAnnotationsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(textAnnotations_);
+ }
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotation.Builder
+ addTextAnnotationsBuilder() {
+ return getTextAnnotationsFieldBuilder()
+ .addBuilder(com.google.cloud.videointelligence.v1.TextAnnotation.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextAnnotation.Builder addTextAnnotationsBuilder(
+ int index) {
+ return getTextAnnotationsFieldBuilder()
+ .addBuilder(
+ index, com.google.cloud.videointelligence.v1.TextAnnotation.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ *
+ */
+ public java.util.List+ * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public java.util.List+ * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public int getObjectAnnotationsCount() {
+ if (objectAnnotationsBuilder_ == null) {
+ return objectAnnotations_.size();
+ } else {
+ return objectAnnotationsBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation getObjectAnnotations(
+ int index) {
+ if (objectAnnotationsBuilder_ == null) {
+ return objectAnnotations_.get(index);
+ } else {
+ return objectAnnotationsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder setObjectAnnotations(
+ int index, com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) {
+ if (objectAnnotationsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.set(index, value);
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder setObjectAnnotations(
+ int index,
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) {
+ if (objectAnnotationsBuilder_ == null) {
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder addObjectAnnotations(
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) {
+ if (objectAnnotationsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.add(value);
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder addObjectAnnotations(
+ int index, com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation value) {
+ if (objectAnnotationsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.add(index, value);
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder addObjectAnnotations(
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) {
+ if (objectAnnotationsBuilder_ == null) {
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.add(builderForValue.build());
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder addObjectAnnotations(
+ int index,
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder builderForValue) {
+ if (objectAnnotationsBuilder_ == null) {
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder addAllObjectAnnotations(
+ java.lang.Iterable<? extends com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation>
+ values) {
+ if (objectAnnotationsBuilder_ == null) {
+ ensureObjectAnnotationsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, objectAnnotations_);
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder clearObjectAnnotations() {
+ if (objectAnnotationsBuilder_ == null) {
+ objectAnnotations_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000200);
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public Builder removeObjectAnnotations(int index) {
+ if (objectAnnotationsBuilder_ == null) {
+ ensureObjectAnnotationsIsMutable();
+ objectAnnotations_.remove(index);
+ onChanged();
+ } else {
+ objectAnnotationsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder
+ getObjectAnnotationsBuilder(int index) {
+ return getObjectAnnotationsFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder
+ getObjectAnnotationsOrBuilder(int index) {
+ if (objectAnnotationsBuilder_ == null) {
+ return objectAnnotations_.get(index);
+ } else {
+ return objectAnnotationsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public java.util.List<
+ ? extends com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder>
+ getObjectAnnotationsOrBuilderList() {
+ if (objectAnnotationsBuilder_ != null) {
+ return objectAnnotationsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(objectAnnotations_);
+ }
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder
+ addObjectAnnotationsBuilder() {
+ return getObjectAnnotationsFieldBuilder()
+ .addBuilder(
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.Builder
+ addObjectAnnotationsBuilder(int index) {
+ return getObjectAnnotationsFieldBuilder()
+ .addBuilder(
+ index,
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ public java.util.List+ * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ */
+ java.util.List<com.google.cloud.videointelligence.v1.TextAnnotation> getTextAnnotationsList();
+ /**
+ *
+ *
+ * <pre>
+ * OCR text detection and tracking.
+ * Annotations for list of detected text snippets. Each will have list of
+ * frame information associated with it.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;</code>
+ */
+ com.google.cloud.videointelligence.v1.TextAnnotation getTextAnnotations(int index);
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ */
+ int getTextAnnotationsCount();
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ */
+ java.util.List<? extends com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder>
+ getTextAnnotationsOrBuilderList();
+ /**
+ *
+ *
+ * + * OCR text detection and tracking. + * Annotations for list of detected text snippets. Each will have list of + * frame information associated with it. + *+ * + *
repeated .google.cloud.videointelligence.v1.TextAnnotation text_annotations = 12;
+ */
+ com.google.cloud.videointelligence.v1.TextAnnotationOrBuilder getTextAnnotationsOrBuilder(
+ int index);
+
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ java.util.List<com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation>
+ getObjectAnnotationsList();
+ /**
+ *
+ *
+ * <pre>
+ * Annotations for list of objects detected and tracked in video.
+ * </pre>
+ *
+ * <code>
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ * </code>
+ */
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation getObjectAnnotations(int index);
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ int getObjectAnnotationsCount();
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ java.util.List<? extends com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder>
+ getObjectAnnotationsOrBuilderList();
+ /**
+ *
+ *
+ * + * Annotations for list of objects detected and tracked in video. + *+ * + *
+ * repeated .google.cloud.videointelligence.v1.ObjectTrackingAnnotation object_annotations = 14;
+ *
+ */
+ com.google.cloud.videointelligence.v1.ObjectTrackingAnnotationOrBuilder
+ getObjectAnnotationsOrBuilder(int index);
+
/**
*
*
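
Note: an illustrative sketch (not generated code) of consuming the two repeated fields added to VideoAnnotationResults above. getTextAnnotationsList() and getObjectAnnotationsList() come from this diff; TextAnnotation.getText() and ObjectTrackingAnnotation.getConfidence() are assumed accessors on the new message types, which are not shown in this hunk.

import com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation;
import com.google.cloud.videointelligence.v1.TextAnnotation;
import com.google.cloud.videointelligence.v1.VideoAnnotationResults;

public class ResultsExample {
  // Walks the OCR and object-tracking annotations returned for one video.
  static void printAnnotations(VideoAnnotationResults results) {
    for (TextAnnotation text : results.getTextAnnotationsList()) {
      System.out.println("OCR text: " + text.getText());
    }
    for (ObjectTrackingAnnotation object : results.getObjectAnnotationsList()) {
      System.out.printf("tracked object, confidence %.2f%n", object.getConfidence());
    }
  }
}
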
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java
index 55eaae6e6b4e..811656b4f2fc 100644
--- a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContext.java
@@ -149,6 +149,23 @@ private VideoContext(
speechTranscriptionConfig_ = subBuilder.buildPartial();
}
+ break;
+ }
+ case 66:
+ {
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder subBuilder = null;
+ if (textDetectionConfig_ != null) {
+ subBuilder = textDetectionConfig_.toBuilder();
+ }
+ textDetectionConfig_ =
+ input.readMessage(
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.parser(),
+ extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(textDetectionConfig_);
+ textDetectionConfig_ = subBuilder.buildPartial();
+ }
+
break;
}
default:
@@ -501,6 +518,48 @@ public boolean hasSpeechTranscriptionConfig() {
return getSpeechTranscriptionConfig();
}
+ public static final int TEXT_DETECTION_CONFIG_FIELD_NUMBER = 8;
+ private com.google.cloud.videointelligence.v1.TextDetectionConfig textDetectionConfig_;
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ */
+ public boolean hasTextDetectionConfig() {
+ return textDetectionConfig_ != null;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ */
+ public com.google.cloud.videointelligence.v1.TextDetectionConfig getTextDetectionConfig() {
+ return textDetectionConfig_ == null
+ ? com.google.cloud.videointelligence.v1.TextDetectionConfig.getDefaultInstance()
+ : textDetectionConfig_;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ */
+ public com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder
+ getTextDetectionConfigOrBuilder() {
+ return getTextDetectionConfig();
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -533,6 +592,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (speechTranscriptionConfig_ != null) {
output.writeMessage(6, getSpeechTranscriptionConfig());
}
+ if (textDetectionConfig_ != null) {
+ output.writeMessage(8, getTextDetectionConfig());
+ }
unknownFields.writeTo(output);
}
@@ -567,6 +629,9 @@ public int getSerializedSize() {
com.google.protobuf.CodedOutputStream.computeMessageSize(
6, getSpeechTranscriptionConfig());
}
+ if (textDetectionConfig_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getTextDetectionConfig());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -612,6 +677,10 @@ && getExplicitContentDetectionConfig()
result =
result && getSpeechTranscriptionConfig().equals(other.getSpeechTranscriptionConfig());
}
+ result = result && (hasTextDetectionConfig() == other.hasTextDetectionConfig());
+ if (hasTextDetectionConfig()) {
+ result = result && getTextDetectionConfig().equals(other.getTextDetectionConfig());
+ }
result = result && unknownFields.equals(other.unknownFields);
return result;
}
@@ -647,6 +716,10 @@ public int hashCode() {
hash = (37 * hash) + SPEECH_TRANSCRIPTION_CONFIG_FIELD_NUMBER;
hash = (53 * hash) + getSpeechTranscriptionConfig().hashCode();
}
+ if (hasTextDetectionConfig()) {
+ hash = (37 * hash) + TEXT_DETECTION_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getTextDetectionConfig().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -830,6 +903,12 @@ public Builder clear() {
speechTranscriptionConfig_ = null;
speechTranscriptionConfigBuilder_ = null;
}
+ if (textDetectionConfigBuilder_ == null) {
+ textDetectionConfig_ = null;
+ } else {
+ textDetectionConfig_ = null;
+ textDetectionConfigBuilder_ = null;
+ }
return this;
}
@@ -893,6 +972,11 @@ public com.google.cloud.videointelligence.v1.VideoContext buildPartial() {
} else {
result.speechTranscriptionConfig_ = speechTranscriptionConfigBuilder_.build();
}
+ if (textDetectionConfigBuilder_ == null) {
+ result.textDetectionConfig_ = textDetectionConfig_;
+ } else {
+ result.textDetectionConfig_ = textDetectionConfigBuilder_.build();
+ }
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -986,6 +1070,9 @@ public Builder mergeFrom(com.google.cloud.videointelligence.v1.VideoContext othe
if (other.hasSpeechTranscriptionConfig()) {
mergeSpeechTranscriptionConfig(other.getSpeechTranscriptionConfig());
}
+ if (other.hasTextDetectionConfig()) {
+ mergeTextDetectionConfig(other.getTextDetectionConfig());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -2431,6 +2518,202 @@ public Builder clearSpeechTranscriptionConfig() {
return speechTranscriptionConfigBuilder_;
}
+ private com.google.cloud.videointelligence.v1.TextDetectionConfig textDetectionConfig_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.TextDetectionConfig,
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder,
+ com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder>
+ textDetectionConfigBuilder_;
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public boolean hasTextDetectionConfig() {
+ return textDetectionConfigBuilder_ != null || textDetectionConfig_ != null;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextDetectionConfig getTextDetectionConfig() {
+ if (textDetectionConfigBuilder_ == null) {
+ return textDetectionConfig_ == null
+ ? com.google.cloud.videointelligence.v1.TextDetectionConfig.getDefaultInstance()
+ : textDetectionConfig_;
+ } else {
+ return textDetectionConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public Builder setTextDetectionConfig(
+ com.google.cloud.videointelligence.v1.TextDetectionConfig value) {
+ if (textDetectionConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ textDetectionConfig_ = value;
+ onChanged();
+ } else {
+ textDetectionConfigBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public Builder setTextDetectionConfig(
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder builderForValue) {
+ if (textDetectionConfigBuilder_ == null) {
+ textDetectionConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ textDetectionConfigBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public Builder mergeTextDetectionConfig(
+ com.google.cloud.videointelligence.v1.TextDetectionConfig value) {
+ if (textDetectionConfigBuilder_ == null) {
+ if (textDetectionConfig_ != null) {
+ textDetectionConfig_ =
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.newBuilder(
+ textDetectionConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ textDetectionConfig_ = value;
+ }
+ onChanged();
+ } else {
+ textDetectionConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public Builder clearTextDetectionConfig() {
+ if (textDetectionConfigBuilder_ == null) {
+ textDetectionConfig_ = null;
+ onChanged();
+ } else {
+ textDetectionConfig_ = null;
+ textDetectionConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder
+ getTextDetectionConfigBuilder() {
+
+ onChanged();
+ return getTextDetectionConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ public com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder
+ getTextDetectionConfigOrBuilder() {
+ if (textDetectionConfigBuilder_ != null) {
+ return textDetectionConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return textDetectionConfig_ == null
+ ? com.google.cloud.videointelligence.v1.TextDetectionConfig.getDefaultInstance()
+ : textDetectionConfig_;
+ }
+ }
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.TextDetectionConfig,
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder,
+ com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder>
+ getTextDetectionConfigFieldBuilder() {
+ if (textDetectionConfigBuilder_ == null) {
+ textDetectionConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.videointelligence.v1.TextDetectionConfig,
+ com.google.cloud.videointelligence.v1.TextDetectionConfig.Builder,
+ com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder>(
+ getTextDetectionConfig(), getParentForChildren(), isClean());
+ textDetectionConfig_ = null;
+ }
+ return textDetectionConfigBuilder_;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFieldsProto3(unknownFields);
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java
index f425fbbce8a8..2dc62b868a65 100644
--- a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoContextOrBuilder.java
@@ -251,4 +251,36 @@ public interface VideoContextOrBuilder
*/
com.google.cloud.videointelligence.v1.SpeechTranscriptionConfigOrBuilder
getSpeechTranscriptionConfigOrBuilder();
+
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ */
+ boolean hasTextDetectionConfig();
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ */
+ com.google.cloud.videointelligence.v1.TextDetectionConfig getTextDetectionConfig();
+ /**
+ *
+ *
+ * Config for TEXT_DETECTION.
+ *
+ * .google.cloud.videointelligence.v1.TextDetectionConfig text_detection_config = 8;
+ */
+ com.google.cloud.videointelligence.v1.TextDetectionConfigOrBuilder
+ getTextDetectionConfigOrBuilder();
}
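The new hasTextDetectionConfig/getTextDetectionConfig accessors mirror the other per-feature configs on VideoContext. A small sketch of wiring a TextDetectionConfig into a VideoContext with the regenerated builders above; the language hint value is illustrative only.

```java
import com.google.cloud.videointelligence.v1.TextDetectionConfig;
import com.google.cloud.videointelligence.v1.VideoContext;

public class TextDetectionContextExample {
  static VideoContext buildContext() {
    // language_hints is optional; automatic detection applies when it is empty.
    TextDetectionConfig textConfig =
        TextDetectionConfig.newBuilder()
            .addLanguageHints("en-US") // BCP-47 code, illustrative value
            .build();
    return VideoContext.newBuilder().setTextDetectionConfig(textConfig).build();
  }
}
```
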
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java
index 530be1b5a186..cc6bac53eec5 100644
--- a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/java/com/google/cloud/videointelligence/v1/VideoIntelligenceServiceProto.java
@@ -36,6 +36,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_cloud_videointelligence_v1_FaceDetectionConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_videointelligence_v1_FaceDetectionConfig_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_videointelligence_v1_VideoSegment_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -116,6 +120,34 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_cloud_videointelligence_v1_WordInfo_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_videointelligence_v1_WordInfo_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_NormalizedVertex_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_NormalizedVertex_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_TextSegment_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_TextSegment_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_TextFrame_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_TextFrame_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_TextAnnotation_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_TextAnnotation_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
return descriptor;
@@ -137,7 +169,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "telligence.v1.Feature\022F\n\rvideo_context\030\003"
+ " \001(\0132/.google.cloud.videointelligence.v1"
+ ".VideoContext\022\022\n\noutput_uri\030\004 \001(\t\022\023\n\013loc"
- + "ation_id\030\005 \001(\t\"\266\004\n\014VideoContext\022A\n\010segme"
+ + "ation_id\030\005 \001(\t\"\215\005\n\014VideoContext\022A\n\010segme"
+ "nts\030\001 \003(\0132/.google.cloud.videointelligen"
+ "ce.v1.VideoSegment\022W\n\026label_detection_co"
+ "nfig\030\002 \001(\01327.google.cloud.videointellige"
@@ -151,117 +183,150 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "oud.videointelligence.v1.FaceDetectionCo"
+ "nfig\022a\n\033speech_transcription_config\030\006 \001("
+ "\0132<.google.cloud.videointelligence.v1.Sp"
- + "eechTranscriptionConfig\"\225\001\n\024LabelDetecti"
- + "onConfig\022S\n\024label_detection_mode\030\001 \001(\01625"
- + ".google.cloud.videointelligence.v1.Label"
- + "DetectionMode\022\031\n\021stationary_camera\030\002 \001(\010"
- + "\022\r\n\005model\030\003 \001(\t\"*\n\031ShotChangeDetectionCo"
- + "nfig\022\r\n\005model\030\001 \001(\t\"/\n\036ExplicitContentDe"
- + "tectionConfig\022\r\n\005model\030\001 \001(\t\"D\n\023FaceDete"
- + "ctionConfig\022\r\n\005model\030\001 \001(\t\022\036\n\026include_bo"
- + "unding_boxes\030\002 \001(\010\"x\n\014VideoSegment\0224\n\021st"
- + "art_time_offset\030\001 \001(\0132\031.google.protobuf."
- + "Duration\0222\n\017end_time_offset\030\002 \001(\0132\031.goog"
- + "le.protobuf.Duration\"d\n\014LabelSegment\022@\n\007"
- + "segment\030\001 \001(\0132/.google.cloud.videointell"
- + "igence.v1.VideoSegment\022\022\n\nconfidence\030\002 \001"
- + "(\002\"P\n\nLabelFrame\022.\n\013time_offset\030\001 \001(\0132\031."
- + "google.protobuf.Duration\022\022\n\nconfidence\030\002"
- + " \001(\002\"G\n\006Entity\022\021\n\tentity_id\030\001 \001(\t\022\023\n\013des"
- + "cription\030\002 \001(\t\022\025\n\rlanguage_code\030\003 \001(\t\"\224\002"
- + "\n\017LabelAnnotation\0229\n\006entity\030\001 \001(\0132).goog"
- + "le.cloud.videointelligence.v1.Entity\022D\n\021"
- + "category_entities\030\002 \003(\0132).google.cloud.v"
- + "ideointelligence.v1.Entity\022A\n\010segments\030\003"
- + " \003(\0132/.google.cloud.videointelligence.v1"
- + ".LabelSegment\022=\n\006frames\030\004 \003(\0132-.google.c"
- + "loud.videointelligence.v1.LabelFrame\"\225\001\n"
- + "\024ExplicitContentFrame\022.\n\013time_offset\030\001 \001"
- + "(\0132\031.google.protobuf.Duration\022M\n\026pornogr"
- + "aphy_likelihood\030\002 \001(\0162-.google.cloud.vid"
- + "eointelligence.v1.Likelihood\"d\n\031Explicit"
- + "ContentAnnotation\022G\n\006frames\030\001 \003(\01327.goog"
- + "le.cloud.videointelligence.v1.ExplicitCo"
- + "ntentFrame\"Q\n\025NormalizedBoundingBox\022\014\n\004l"
- + "eft\030\001 \001(\002\022\013\n\003top\030\002 \001(\002\022\r\n\005right\030\003 \001(\002\022\016\n"
- + "\006bottom\030\004 \001(\002\"O\n\013FaceSegment\022@\n\007segment\030"
- + "\001 \001(\0132/.google.cloud.videointelligence.v"
- + "1.VideoSegment\"\230\001\n\tFaceFrame\022[\n\031normaliz"
- + "ed_bounding_boxes\030\001 \003(\01328.google.cloud.v"
- + "ideointelligence.v1.NormalizedBoundingBo"
- + "x\022.\n\013time_offset\030\002 \001(\0132\031.google.protobuf"
- + ".Duration\"\243\001\n\016FaceAnnotation\022\021\n\tthumbnai"
- + "l\030\001 \001(\014\022@\n\010segments\030\002 \003(\0132..google.cloud"
- + ".videointelligence.v1.FaceSegment\022<\n\006fra"
- + "mes\030\003 \003(\0132,.google.cloud.videointelligen"
- + "ce.v1.FaceFrame\"\230\005\n\026VideoAnnotationResul"
- + "ts\022\021\n\tinput_uri\030\001 \001(\t\022U\n\031segment_label_a"
- + "nnotations\030\002 \003(\01322.google.cloud.videoint"
- + "elligence.v1.LabelAnnotation\022R\n\026shot_lab"
- + "el_annotations\030\003 \003(\01322.google.cloud.vide"
- + "ointelligence.v1.LabelAnnotation\022S\n\027fram"
- + "e_label_annotations\030\004 \003(\01322.google.cloud"
- + ".videointelligence.v1.LabelAnnotation\022K\n"
- + "\020face_annotations\030\005 \003(\01321.google.cloud.v"
- + "ideointelligence.v1.FaceAnnotation\022I\n\020sh"
- + "ot_annotations\030\006 \003(\0132/.google.cloud.vide"
- + "ointelligence.v1.VideoSegment\022Y\n\023explici"
- + "t_annotation\030\007 \001(\0132<.google.cloud.videoi"
- + "ntelligence.v1.ExplicitContentAnnotation"
- + "\022U\n\025speech_transcriptions\030\013 \003(\01326.google"
- + ".cloud.videointelligence.v1.SpeechTransc"
- + "ription\022!\n\005error\030\t \001(\0132\022.google.rpc.Stat"
- + "us\"n\n\025AnnotateVideoResponse\022U\n\022annotatio"
- + "n_results\030\001 \003(\01329.google.cloud.videointe"
- + "lligence.v1.VideoAnnotationResults\"\247\001\n\027V"
- + "ideoAnnotationProgress\022\021\n\tinput_uri\030\001 \001("
- + "\t\022\030\n\020progress_percent\030\002 \001(\005\022.\n\nstart_tim"
- + "e\030\003 \001(\0132\032.google.protobuf.Timestamp\022/\n\013u"
- + "pdate_time\030\004 \001(\0132\032.google.protobuf.Times"
- + "tamp\"p\n\025AnnotateVideoProgress\022W\n\023annotat"
- + "ion_progress\030\001 \003(\0132:.google.cloud.videoi"
- + "ntelligence.v1.VideoAnnotationProgress\"\324"
- + "\002\n\031SpeechTranscriptionConfig\022\025\n\rlanguage"
- + "_code\030\001 \001(\t\022\030\n\020max_alternatives\030\002 \001(\005\022\030\n"
- + "\020filter_profanity\030\003 \001(\010\022I\n\017speech_contex"
- + "ts\030\004 \003(\01320.google.cloud.videointelligenc"
- + "e.v1.SpeechContext\022$\n\034enable_automatic_p"
- + "unctuation\030\005 \001(\010\022\024\n\014audio_tracks\030\006 \003(\005\022\""
- + "\n\032enable_speaker_diarization\030\007 \001(\010\022!\n\031di"
- + "arization_speaker_count\030\010 \001(\005\022\036\n\026enable_"
- + "word_confidence\030\t \001(\010\" \n\rSpeechContext\022\017"
- + "\n\007phrases\030\001 \003(\t\"\203\001\n\023SpeechTranscription\022"
- + "U\n\014alternatives\030\001 \003(\0132?.google.cloud.vid"
- + "eointelligence.v1.SpeechRecognitionAlter"
- + "native\022\025\n\rlanguage_code\030\002 \001(\t\"\202\001\n\034Speech"
- + "RecognitionAlternative\022\022\n\ntranscript\030\001 \001"
- + "(\t\022\022\n\nconfidence\030\002 \001(\002\022:\n\005words\030\003 \003(\0132+."
- + "google.cloud.videointelligence.v1.WordIn"
- + "fo\"\235\001\n\010WordInfo\022-\n\nstart_time\030\001 \001(\0132\031.go"
- + "ogle.protobuf.Duration\022+\n\010end_time\030\002 \001(\013"
- + "2\031.google.protobuf.Duration\022\014\n\004word\030\003 \001("
- + "\t\022\022\n\nconfidence\030\004 \001(\002\022\023\n\013speaker_tag\030\005 \001"
- + "(\005*\240\001\n\007Feature\022\027\n\023FEATURE_UNSPECIFIED\020\000\022"
- + "\023\n\017LABEL_DETECTION\020\001\022\031\n\025SHOT_CHANGE_DETE"
- + "CTION\020\002\022\036\n\032EXPLICIT_CONTENT_DETECTION\020\003\022"
- + "\022\n\016FACE_DETECTION\020\004\022\030\n\024SPEECH_TRANSCRIPT"
- + "ION\020\006*r\n\022LabelDetectionMode\022$\n LABEL_DET"
- + "ECTION_MODE_UNSPECIFIED\020\000\022\r\n\tSHOT_MODE\020\001"
- + "\022\016\n\nFRAME_MODE\020\002\022\027\n\023SHOT_AND_FRAME_MODE\020"
- + "\003*t\n\nLikelihood\022\032\n\026LIKELIHOOD_UNSPECIFIE"
- + "D\020\000\022\021\n\rVERY_UNLIKELY\020\001\022\014\n\010UNLIKELY\020\002\022\014\n\010"
- + "POSSIBLE\020\003\022\n\n\006LIKELY\020\004\022\017\n\013VERY_LIKELY\020\0052"
- + "\244\001\n\030VideoIntelligenceService\022\207\001\n\rAnnotat"
- + "eVideo\0227.google.cloud.videointelligence."
- + "v1.AnnotateVideoRequest\032\035.google.longrun"
- + "ning.Operation\"\036\202\323\344\223\002\030\"\023/v1/videos:annot"
- + "ate:\001*B\344\001\n%com.google.cloud.videointelli"
- + "gence.v1B\035VideoIntelligenceServiceProtoP"
- + "\001ZRgoogle.golang.org/genproto/googleapis"
- + "/cloud/videointelligence/v1;videointelli"
- + "gence\252\002!Google.Cloud.VideoIntelligence.V"
- + "1\312\002!Google\\Cloud\\VideoIntelligence\\V1b\006p"
- + "roto3"
+ + "eechTranscriptionConfig\022U\n\025text_detectio"
+ + "n_config\030\010 \001(\01326.google.cloud.videointel"
+ + "ligence.v1.TextDetectionConfig\"\225\001\n\024Label"
+ + "DetectionConfig\022S\n\024label_detection_mode\030"
+ + "\001 \001(\01625.google.cloud.videointelligence.v"
+ + "1.LabelDetectionMode\022\031\n\021stationary_camer"
+ + "a\030\002 \001(\010\022\r\n\005model\030\003 \001(\t\"*\n\031ShotChangeDete"
+ + "ctionConfig\022\r\n\005model\030\001 \001(\t\"/\n\036ExplicitCo"
+ + "ntentDetectionConfig\022\r\n\005model\030\001 \001(\t\"D\n\023F"
+ + "aceDetectionConfig\022\r\n\005model\030\001 \001(\t\022\036\n\026inc"
+ + "lude_bounding_boxes\030\002 \001(\010\"-\n\023TextDetecti"
+ + "onConfig\022\026\n\016language_hints\030\001 \003(\t\"x\n\014Vide"
+ + "oSegment\0224\n\021start_time_offset\030\001 \001(\0132\031.go"
+ + "ogle.protobuf.Duration\0222\n\017end_time_offse"
+ + "t\030\002 \001(\0132\031.google.protobuf.Duration\"d\n\014La"
+ + "belSegment\022@\n\007segment\030\001 \001(\0132/.google.clo"
+ + "ud.videointelligence.v1.VideoSegment\022\022\n\n"
+ + "confidence\030\002 \001(\002\"P\n\nLabelFrame\022.\n\013time_o"
+ + "ffset\030\001 \001(\0132\031.google.protobuf.Duration\022\022"
+ + "\n\nconfidence\030\002 \001(\002\"G\n\006Entity\022\021\n\tentity_i"
+ + "d\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022\025\n\rlanguage"
+ + "_code\030\003 \001(\t\"\224\002\n\017LabelAnnotation\0229\n\006entit"
+ + "y\030\001 \001(\0132).google.cloud.videointelligence"
+ + ".v1.Entity\022D\n\021category_entities\030\002 \003(\0132)."
+ + "google.cloud.videointelligence.v1.Entity"
+ + "\022A\n\010segments\030\003 \003(\0132/.google.cloud.videoi"
+ + "ntelligence.v1.LabelSegment\022=\n\006frames\030\004 "
+ + "\003(\0132-.google.cloud.videointelligence.v1."
+ + "LabelFrame\"\225\001\n\024ExplicitContentFrame\022.\n\013t"
+ + "ime_offset\030\001 \001(\0132\031.google.protobuf.Durat"
+ + "ion\022M\n\026pornography_likelihood\030\002 \001(\0162-.go"
+ + "ogle.cloud.videointelligence.v1.Likeliho"
+ + "od\"d\n\031ExplicitContentAnnotation\022G\n\006frame"
+ + "s\030\001 \003(\01327.google.cloud.videointelligence"
+ + ".v1.ExplicitContentFrame\"Q\n\025NormalizedBo"
+ + "undingBox\022\014\n\004left\030\001 \001(\002\022\013\n\003top\030\002 \001(\002\022\r\n\005"
+ + "right\030\003 \001(\002\022\016\n\006bottom\030\004 \001(\002\"O\n\013FaceSegme"
+ + "nt\022@\n\007segment\030\001 \001(\0132/.google.cloud.video"
+ + "intelligence.v1.VideoSegment\"\230\001\n\tFaceFra"
+ + "me\022[\n\031normalized_bounding_boxes\030\001 \003(\01328."
+ + "google.cloud.videointelligence.v1.Normal"
+ + "izedBoundingBox\022.\n\013time_offset\030\002 \001(\0132\031.g"
+ + "oogle.protobuf.Duration\"\243\001\n\016FaceAnnotati"
+ + "on\022\021\n\tthumbnail\030\001 \001(\014\022@\n\010segments\030\002 \003(\0132"
+ + "..google.cloud.videointelligence.v1.Face"
+ + "Segment\022<\n\006frames\030\003 \003(\0132,.google.cloud.v"
+ + "ideointelligence.v1.FaceFrame\"\276\006\n\026VideoA"
+ + "nnotationResults\022\021\n\tinput_uri\030\001 \001(\t\022U\n\031s"
+ + "egment_label_annotations\030\002 \003(\01322.google."
+ + "cloud.videointelligence.v1.LabelAnnotati"
+ + "on\022R\n\026shot_label_annotations\030\003 \003(\01322.goo"
+ + "gle.cloud.videointelligence.v1.LabelAnno"
+ + "tation\022S\n\027frame_label_annotations\030\004 \003(\0132"
+ + "2.google.cloud.videointelligence.v1.Labe"
+ + "lAnnotation\022K\n\020face_annotations\030\005 \003(\01321."
+ + "google.cloud.videointelligence.v1.FaceAn"
+ + "notation\022I\n\020shot_annotations\030\006 \003(\0132/.goo"
+ + "gle.cloud.videointelligence.v1.VideoSegm"
+ + "ent\022Y\n\023explicit_annotation\030\007 \001(\0132<.googl"
+ + "e.cloud.videointelligence.v1.ExplicitCon"
+ + "tentAnnotation\022U\n\025speech_transcriptions\030"
+ + "\013 \003(\01326.google.cloud.videointelligence.v"
+ + "1.SpeechTranscription\022K\n\020text_annotation"
+ + "s\030\014 \003(\01321.google.cloud.videointelligence"
+ + ".v1.TextAnnotation\022W\n\022object_annotations"
+ + "\030\016 \003(\0132;.google.cloud.videointelligence."
+ + "v1.ObjectTrackingAnnotation\022!\n\005error\030\t \001"
+ + "(\0132\022.google.rpc.Status\"n\n\025AnnotateVideoR"
+ + "esponse\022U\n\022annotation_results\030\001 \003(\01329.go"
+ + "ogle.cloud.videointelligence.v1.VideoAnn"
+ + "otationResults\"\247\001\n\027VideoAnnotationProgre"
+ + "ss\022\021\n\tinput_uri\030\001 \001(\t\022\030\n\020progress_percen"
+ + "t\030\002 \001(\005\022.\n\nstart_time\030\003 \001(\0132\032.google.pro"
+ + "tobuf.Timestamp\022/\n\013update_time\030\004 \001(\0132\032.g"
+ + "oogle.protobuf.Timestamp\"p\n\025AnnotateVide"
+ + "oProgress\022W\n\023annotation_progress\030\001 \003(\0132:"
+ + ".google.cloud.videointelligence.v1.Video"
+ + "AnnotationProgress\"\324\002\n\031SpeechTranscripti"
+ + "onConfig\022\025\n\rlanguage_code\030\001 \001(\t\022\030\n\020max_a"
+ + "lternatives\030\002 \001(\005\022\030\n\020filter_profanity\030\003 "
+ + "\001(\010\022I\n\017speech_contexts\030\004 \003(\01320.google.cl"
+ + "oud.videointelligence.v1.SpeechContext\022$"
+ + "\n\034enable_automatic_punctuation\030\005 \001(\010\022\024\n\014"
+ + "audio_tracks\030\006 \003(\005\022\"\n\032enable_speaker_dia"
+ + "rization\030\007 \001(\010\022!\n\031diarization_speaker_co"
+ + "unt\030\010 \001(\005\022\036\n\026enable_word_confidence\030\t \001("
+ + "\010\" \n\rSpeechContext\022\017\n\007phrases\030\001 \003(\t\"\203\001\n\023"
+ + "SpeechTranscription\022U\n\014alternatives\030\001 \003("
+ + "\0132?.google.cloud.videointelligence.v1.Sp"
+ + "eechRecognitionAlternative\022\025\n\rlanguage_c"
+ + "ode\030\002 \001(\t\"\202\001\n\034SpeechRecognitionAlternati"
+ + "ve\022\022\n\ntranscript\030\001 \001(\t\022\022\n\nconfidence\030\002 \001"
+ + "(\002\022:\n\005words\030\003 \003(\0132+.google.cloud.videoin"
+ + "telligence.v1.WordInfo\"\235\001\n\010WordInfo\022-\n\ns"
+ + "tart_time\030\001 \001(\0132\031.google.protobuf.Durati"
+ + "on\022+\n\010end_time\030\002 \001(\0132\031.google.protobuf.D"
+ + "uration\022\014\n\004word\030\003 \001(\t\022\022\n\nconfidence\030\004 \001("
+ + "\002\022\023\n\013speaker_tag\030\005 \001(\005\"(\n\020NormalizedVert"
+ + "ex\022\t\n\001x\030\001 \001(\002\022\t\n\001y\030\002 \001(\002\"_\n\026NormalizedBo"
+ + "undingPoly\022E\n\010vertices\030\001 \003(\01323.google.cl"
+ + "oud.videointelligence.v1.NormalizedVerte"
+ + "x\"\241\001\n\013TextSegment\022@\n\007segment\030\001 \001(\0132/.goo"
+ + "gle.cloud.videointelligence.v1.VideoSegm"
+ + "ent\022\022\n\nconfidence\030\002 \001(\002\022<\n\006frames\030\003 \003(\0132"
+ + ",.google.cloud.videointelligence.v1.Text"
+ + "Frame\"\224\001\n\tTextFrame\022W\n\024rotated_bounding_"
+ + "box\030\001 \001(\01329.google.cloud.videointelligen"
+ + "ce.v1.NormalizedBoundingPoly\022.\n\013time_off"
+ + "set\030\002 \001(\0132\031.google.protobuf.Duration\"`\n\016"
+ + "TextAnnotation\022\014\n\004text\030\001 \001(\t\022@\n\010segments"
+ + "\030\002 \003(\0132..google.cloud.videointelligence."
+ + "v1.TextSegment\"\240\001\n\023ObjectTrackingFrame\022Y"
+ + "\n\027normalized_bounding_box\030\001 \001(\01328.google"
+ + ".cloud.videointelligence.v1.NormalizedBo"
+ + "undingBox\022.\n\013time_offset\030\002 \001(\0132\031.google."
+ + "protobuf.Duration\"\227\002\n\030ObjectTrackingAnno"
+ + "tation\022B\n\007segment\030\003 \001(\0132/.google.cloud.v"
+ + "ideointelligence.v1.VideoSegmentH\000\022\022\n\010tr"
+ + "ack_id\030\005 \001(\003H\000\0229\n\006entity\030\001 \001(\0132).google."
+ + "cloud.videointelligence.v1.Entity\022\022\n\ncon"
+ + "fidence\030\004 \001(\002\022F\n\006frames\030\002 \003(\01326.google.c"
+ + "loud.videointelligence.v1.ObjectTracking"
+ + "FrameB\014\n\ntrack_info*\311\001\n\007Feature\022\027\n\023FEATU"
+ + "RE_UNSPECIFIED\020\000\022\023\n\017LABEL_DETECTION\020\001\022\031\n"
+ + "\025SHOT_CHANGE_DETECTION\020\002\022\036\n\032EXPLICIT_CON"
+ + "TENT_DETECTION\020\003\022\022\n\016FACE_DETECTION\020\004\022\030\n\024"
+ + "SPEECH_TRANSCRIPTION\020\006\022\022\n\016TEXT_DETECTION"
+ + "\020\007\022\023\n\017OBJECT_TRACKING\020\t*r\n\022LabelDetectio"
+ + "nMode\022$\n LABEL_DETECTION_MODE_UNSPECIFIE"
+ + "D\020\000\022\r\n\tSHOT_MODE\020\001\022\016\n\nFRAME_MODE\020\002\022\027\n\023SH"
+ + "OT_AND_FRAME_MODE\020\003*t\n\nLikelihood\022\032\n\026LIK"
+ + "ELIHOOD_UNSPECIFIED\020\000\022\021\n\rVERY_UNLIKELY\020\001"
+ + "\022\014\n\010UNLIKELY\020\002\022\014\n\010POSSIBLE\020\003\022\n\n\006LIKELY\020\004"
+ + "\022\017\n\013VERY_LIKELY\020\0052\244\001\n\030VideoIntelligenceS"
+ + "ervice\022\207\001\n\rAnnotateVideo\0227.google.cloud."
+ + "videointelligence.v1.AnnotateVideoReques"
+ + "t\032\035.google.longrunning.Operation\"\036\202\323\344\223\002\030"
+ + "\"\023/v1/videos:annotate:\001*B\344\001\n%com.google."
+ + "cloud.videointelligence.v1B\035VideoIntelli"
+ + "genceServiceProtoP\001ZRgoogle.golang.org/g"
+ + "enproto/googleapis/cloud/videointelligen"
+ + "ce/v1;videointelligence\252\002!Google.Cloud.V"
+ + "ideoIntelligence.V1\312\002!Google\\Cloud\\Video"
+ + "Intelligence\\V1b\006proto3"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -301,6 +366,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"ExplicitContentDetectionConfig",
"FaceDetectionConfig",
"SpeechTranscriptionConfig",
+ "TextDetectionConfig",
});
internal_static_google_cloud_videointelligence_v1_LabelDetectionConfig_descriptor =
getDescriptor().getMessageTypes().get(2);
@@ -334,8 +400,16 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
new java.lang.String[] {
"Model", "IncludeBoundingBoxes",
});
- internal_static_google_cloud_videointelligence_v1_VideoSegment_descriptor =
+ internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_descriptor =
getDescriptor().getMessageTypes().get(6);
+ internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_TextDetectionConfig_descriptor,
+ new java.lang.String[] {
+ "LanguageHints",
+ });
+ internal_static_google_cloud_videointelligence_v1_VideoSegment_descriptor =
+ getDescriptor().getMessageTypes().get(7);
internal_static_google_cloud_videointelligence_v1_VideoSegment_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_VideoSegment_descriptor,
@@ -343,7 +417,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"StartTimeOffset", "EndTimeOffset",
});
internal_static_google_cloud_videointelligence_v1_LabelSegment_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_google_cloud_videointelligence_v1_LabelSegment_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_LabelSegment_descriptor,
@@ -351,7 +425,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Segment", "Confidence",
});
internal_static_google_cloud_videointelligence_v1_LabelFrame_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_google_cloud_videointelligence_v1_LabelFrame_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_LabelFrame_descriptor,
@@ -359,7 +433,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"TimeOffset", "Confidence",
});
internal_static_google_cloud_videointelligence_v1_Entity_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_google_cloud_videointelligence_v1_Entity_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_Entity_descriptor,
@@ -367,7 +441,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"EntityId", "Description", "LanguageCode",
});
internal_static_google_cloud_videointelligence_v1_LabelAnnotation_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_google_cloud_videointelligence_v1_LabelAnnotation_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_LabelAnnotation_descriptor,
@@ -375,7 +449,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Entity", "CategoryEntities", "Segments", "Frames",
});
internal_static_google_cloud_videointelligence_v1_ExplicitContentFrame_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_google_cloud_videointelligence_v1_ExplicitContentFrame_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_ExplicitContentFrame_descriptor,
@@ -383,7 +457,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"TimeOffset", "PornographyLikelihood",
});
internal_static_google_cloud_videointelligence_v1_ExplicitContentAnnotation_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_google_cloud_videointelligence_v1_ExplicitContentAnnotation_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_ExplicitContentAnnotation_descriptor,
@@ -391,7 +465,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Frames",
});
internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_NormalizedBoundingBox_descriptor,
@@ -399,7 +473,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Left", "Top", "Right", "Bottom",
});
internal_static_google_cloud_videointelligence_v1_FaceSegment_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_google_cloud_videointelligence_v1_FaceSegment_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_FaceSegment_descriptor,
@@ -407,7 +481,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Segment",
});
internal_static_google_cloud_videointelligence_v1_FaceFrame_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_google_cloud_videointelligence_v1_FaceFrame_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_FaceFrame_descriptor,
@@ -415,7 +489,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"NormalizedBoundingBoxes", "TimeOffset",
});
internal_static_google_cloud_videointelligence_v1_FaceAnnotation_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(17);
internal_static_google_cloud_videointelligence_v1_FaceAnnotation_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_FaceAnnotation_descriptor,
@@ -423,7 +497,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Thumbnail", "Segments", "Frames",
});
internal_static_google_cloud_videointelligence_v1_VideoAnnotationResults_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(18);
internal_static_google_cloud_videointelligence_v1_VideoAnnotationResults_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_VideoAnnotationResults_descriptor,
@@ -436,10 +510,12 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"ShotAnnotations",
"ExplicitAnnotation",
"SpeechTranscriptions",
+ "TextAnnotations",
+ "ObjectAnnotations",
"Error",
});
internal_static_google_cloud_videointelligence_v1_AnnotateVideoResponse_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(19);
internal_static_google_cloud_videointelligence_v1_AnnotateVideoResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_AnnotateVideoResponse_descriptor,
@@ -447,7 +523,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"AnnotationResults",
});
internal_static_google_cloud_videointelligence_v1_VideoAnnotationProgress_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(20);
internal_static_google_cloud_videointelligence_v1_VideoAnnotationProgress_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_VideoAnnotationProgress_descriptor,
@@ -455,7 +531,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"InputUri", "ProgressPercent", "StartTime", "UpdateTime",
});
internal_static_google_cloud_videointelligence_v1_AnnotateVideoProgress_descriptor =
- getDescriptor().getMessageTypes().get(20);
+ getDescriptor().getMessageTypes().get(21);
internal_static_google_cloud_videointelligence_v1_AnnotateVideoProgress_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_AnnotateVideoProgress_descriptor,
@@ -463,7 +539,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"AnnotationProgress",
});
internal_static_google_cloud_videointelligence_v1_SpeechTranscriptionConfig_descriptor =
- getDescriptor().getMessageTypes().get(21);
+ getDescriptor().getMessageTypes().get(22);
internal_static_google_cloud_videointelligence_v1_SpeechTranscriptionConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_SpeechTranscriptionConfig_descriptor,
@@ -479,7 +555,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"EnableWordConfidence",
});
internal_static_google_cloud_videointelligence_v1_SpeechContext_descriptor =
- getDescriptor().getMessageTypes().get(22);
+ getDescriptor().getMessageTypes().get(23);
internal_static_google_cloud_videointelligence_v1_SpeechContext_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_SpeechContext_descriptor,
@@ -487,7 +563,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Phrases",
});
internal_static_google_cloud_videointelligence_v1_SpeechTranscription_descriptor =
- getDescriptor().getMessageTypes().get(23);
+ getDescriptor().getMessageTypes().get(24);
internal_static_google_cloud_videointelligence_v1_SpeechTranscription_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_SpeechTranscription_descriptor,
@@ -495,7 +571,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Alternatives", "LanguageCode",
});
internal_static_google_cloud_videointelligence_v1_SpeechRecognitionAlternative_descriptor =
- getDescriptor().getMessageTypes().get(24);
+ getDescriptor().getMessageTypes().get(25);
internal_static_google_cloud_videointelligence_v1_SpeechRecognitionAlternative_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_SpeechRecognitionAlternative_descriptor,
@@ -503,13 +579,69 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Transcript", "Confidence", "Words",
});
internal_static_google_cloud_videointelligence_v1_WordInfo_descriptor =
- getDescriptor().getMessageTypes().get(25);
+ getDescriptor().getMessageTypes().get(26);
internal_static_google_cloud_videointelligence_v1_WordInfo_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_videointelligence_v1_WordInfo_descriptor,
new java.lang.String[] {
"StartTime", "EndTime", "Word", "Confidence", "SpeakerTag",
});
+ internal_static_google_cloud_videointelligence_v1_NormalizedVertex_descriptor =
+ getDescriptor().getMessageTypes().get(27);
+ internal_static_google_cloud_videointelligence_v1_NormalizedVertex_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_NormalizedVertex_descriptor,
+ new java.lang.String[] {
+ "X", "Y",
+ });
+ internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_descriptor =
+ getDescriptor().getMessageTypes().get(28);
+ internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_NormalizedBoundingPoly_descriptor,
+ new java.lang.String[] {
+ "Vertices",
+ });
+ internal_static_google_cloud_videointelligence_v1_TextSegment_descriptor =
+ getDescriptor().getMessageTypes().get(29);
+ internal_static_google_cloud_videointelligence_v1_TextSegment_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_TextSegment_descriptor,
+ new java.lang.String[] {
+ "Segment", "Confidence", "Frames",
+ });
+ internal_static_google_cloud_videointelligence_v1_TextFrame_descriptor =
+ getDescriptor().getMessageTypes().get(30);
+ internal_static_google_cloud_videointelligence_v1_TextFrame_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_TextFrame_descriptor,
+ new java.lang.String[] {
+ "RotatedBoundingBox", "TimeOffset",
+ });
+ internal_static_google_cloud_videointelligence_v1_TextAnnotation_descriptor =
+ getDescriptor().getMessageTypes().get(31);
+ internal_static_google_cloud_videointelligence_v1_TextAnnotation_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_TextAnnotation_descriptor,
+ new java.lang.String[] {
+ "Text", "Segments",
+ });
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_descriptor =
+ getDescriptor().getMessageTypes().get(32);
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingFrame_descriptor,
+ new java.lang.String[] {
+ "NormalizedBoundingBox", "TimeOffset",
+ });
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_descriptor =
+ getDescriptor().getMessageTypes().get(33);
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_videointelligence_v1_ObjectTrackingAnnotation_descriptor,
+ new java.lang.String[] {
+ "Segment", "TrackId", "Entity", "Confidence", "Frames", "TrackInfo",
+ });
com.google.protobuf.ExtensionRegistry registry =
com.google.protobuf.ExtensionRegistry.newInstance();
registry.add(com.google.api.AnnotationsProto.http);
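Note that OBJECT_TRACKING is assigned tag 9 while tag 8 remains unused in the Feature enum, so code that maps raw wire values back to the enum should go through the generated lookup rather than ordinal positions. A small sketch, assuming the regenerated classes above are on the classpath:

```java
import com.google.cloud.videointelligence.v1.Feature;

public class FeatureValuesExample {
  public static void main(String[] args) {
    // The enum values carry the proto tag numbers, not ordinal positions.
    System.out.println(Feature.TEXT_DETECTION.getNumber());  // 7
    System.out.println(Feature.OBJECT_TRACKING.getNumber()); // 9
    // forNumber returns null for unassigned tags such as 8.
    System.out.println(Feature.forNumber(8));                // null
  }
}
```
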
diff --git a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto
index 223e866e53a9..048750865109 100644
--- a/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto
+++ b/google-api-grpc/proto-google-cloud-video-intelligence-v1/src/main/proto/google/cloud/videointelligence/v1/video_intelligence.proto
@@ -1,4 +1,4 @@
-// Copyright 2017 Google Inc.
+// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
syntax = "proto3";
@@ -101,6 +102,9 @@ message VideoContext {
// Config for SPEECH_TRANSCRIPTION.
SpeechTranscriptionConfig speech_transcription_config = 6;
+
+ // Config for TEXT_DETECTION.
+ TextDetectionConfig text_detection_config = 8;
}
// Config for LABEL_DETECTION.
@@ -148,6 +152,16 @@ message FaceDetectionConfig {
bool include_bounding_boxes = 2;
}
+// Config for TEXT_DETECTION.
+message TextDetectionConfig {
+ // Language hint can be specified if the language to be detected is known a
+ // priori. It can increase the accuracy of the detection. Language hint must
+ // be language code in BCP-47 format.
+ //
+ // Automatic language detection is performed if no hint is provided.
+ repeated string language_hints = 1;
+}
+
// Video segment.
message VideoSegment {
// Time-offset, relative to the beginning of the video,
@@ -305,6 +319,14 @@ message VideoAnnotationResults {
// Speech transcription.
repeated SpeechTranscription speech_transcriptions = 11;
+ // OCR text detection and tracking.
+ // Annotations for list of detected text snippets. Each will have list of
+ // frame information associated with it.
+ repeated TextAnnotation text_annotations = 12;
+
+ // Annotations for list of objects detected and tracked in video.
+ repeated ObjectTrackingAnnotation object_annotations = 14;
+
// If set, indicates an error. Note that for a single `AnnotateVideoRequest`
// some videos may succeed and some may fail.
google.rpc.Status error = 9;
@@ -479,6 +501,115 @@ message WordInfo {
int32 speaker_tag = 5;
}
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+ // X coordinate.
+ float x = 1;
+
+ // Y coordinate.
+ float y = 2;
+}
+
+// Normalized bounding polygon for text (that might not be aligned with axis).
+// Contains list of the corner points in clockwise order starting from
+// top-left corner. For example, for a rectangular bounding box:
+// When the text is horizontal it might look like:
+// 0----1
+// | |
+// 3----2
+//
+// When it's clockwise rotated 180 degrees around the top-left corner it
+// becomes:
+// 2----3
+// | |
+// 1----0
+//
+// and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+// than 0, or greater than 1 due to trigonometric calculations for location of
+// the box.
+message NormalizedBoundingPoly {
+ // Normalized vertices of the bounding polygon.
+ repeated NormalizedVertex vertices = 1;
+}
+
+// Video segment level annotation results for text detection.
+message TextSegment {
+ // Video segment where a text snippet was detected.
+ VideoSegment segment = 1;
+
+ // Confidence for the track of detected text. It is calculated as the highest
+ // over all frames where OCR detected text appears.
+ float confidence = 2;
+
+ // Information related to the frames where OCR detected text appears.
+ repeated TextFrame frames = 3;
+}
+
+// Video frame level annotation results for text annotation (OCR).
+// Contains information regarding timestamp and bounding box locations for the
+// frames containing detected OCR text snippets.
+message TextFrame {
+ // Bounding polygon of the detected text for this frame.
+ NormalizedBoundingPoly rotated_bounding_box = 1;
+
+ // Timestamp of this frame.
+ google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations related to one detected OCR text snippet. This will contain the
+// corresponding text, confidence value, and frame level information for each
+// detection.
+message TextAnnotation {
+ // The detected text.
+ string text = 1;
+
+ // All video segments where OCR detected text appears.
+ repeated TextSegment segments = 2;
+}
+
+// Video frame level annotations for object detection and tracking. This field
+// stores per frame location, time offset, and confidence.
+message ObjectTrackingFrame {
+ // The normalized bounding box location of this object track for the frame.
+ NormalizedBoundingBox normalized_bounding_box = 1;
+
+ // The timestamp of the frame in microseconds.
+ google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations corresponding to one tracked object.
+message ObjectTrackingAnnotation {
+ // Different representation of tracking info in non-streaming batch
+ // and streaming modes.
+ oneof track_info {
+ // Non-streaming batch mode ONLY.
+ // Each object track corresponds to one video segment where it appears.
+ VideoSegment segment = 3;
+
+ // Streaming mode ONLY.
+ // In streaming mode, we do not know the end time of a tracked object
+ // before it is completed. Hence, there is no VideoSegment info returned.
+ // Instead, we provide a unique identifiable integer track_id so that
+ // the customers can correlate the results of the ongoing
+ // ObjectTrackAnnotation of the same track_id over time.
+ int64 track_id = 5;
+ }
+
+ // Entity to specify the object category that this track is labeled as.
+ Entity entity = 1;
+
+ // Object category's labeling confidence of this track.
+ float confidence = 4;
+
+ // Information corresponding to all frames where this object track appears.
+ // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
+ // messages in frames.
+ // Streaming mode: it can only be one ObjectTrackingFrame message in frames.
+ repeated ObjectTrackingFrame frames = 2;
+}
+
// Video annotation feature.
enum Feature {
// Unspecified.
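Because ObjectTrackingAnnotation puts segment and track_id in a track_info oneof, consumers should branch on which case is set: batch results carry a segment, streaming results carry a track_id. The following is a sketch using the oneof case accessor that standard protobuf codegen produces for the ObjectTrackingAnnotation message above; the helper itself is hypothetical.

```java
import com.google.cloud.videointelligence.v1.ObjectTrackingAnnotation;

public class ObjectTrackExample {
  static void describe(ObjectTrackingAnnotation track) {
    System.out.println(
        "Label: " + track.getEntity().getDescription()
            + " (confidence " + track.getConfidence() + ")");
    switch (track.getTrackInfoCase()) {
      case SEGMENT: // non-streaming batch mode
        System.out.println("Segment: " + track.getSegment());
        break;
      case TRACK_ID: // streaming mode
        System.out.println("Track id: " + track.getTrackId());
        break;
      default: // TRACKINFO_NOT_SET
        System.out.println("No track info set.");
    }
    System.out.println("Frames observed: " + track.getFramesCount());
  }
}
```
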
@@ -498,6 +629,12 @@ enum Feature {
// Speech transcription.
SPEECH_TRANSCRIPTION = 6;
+
+ // OCR text detection and tracking.
+ TEXT_DETECTION = 7;
+
+ // Object detection and tracking.
+ OBJECT_TRACKING = 9;
}
// Label detection mode.
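With the Feature enum extended, requesting the new analyses is a matter of adding the values to an AnnotateVideoRequest; submitting the request through the existing VideoIntelligenceServiceClient is not affected by this diff. A minimal request sketch, where the gs:// URI is a placeholder:

```java
import com.google.cloud.videointelligence.v1.AnnotateVideoRequest;
import com.google.cloud.videointelligence.v1.Feature;

public class AnnotateRequestExample {
  static AnnotateVideoRequest buildRequest() {
    return AnnotateVideoRequest.newBuilder()
        .setInputUri("gs://my-bucket/my-video.mp4") // placeholder input location
        .addFeatures(Feature.TEXT_DETECTION)
        .addFeatures(Feature.OBJECT_TRACKING)
        .build();
  }
}
```
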
diff --git a/google-cloud-clients/google-cloud-video-intelligence/synth.metadata b/google-cloud-clients/google-cloud-video-intelligence/synth.metadata
index f89f53c27529..c774c0cb15cf 100644
--- a/google-cloud-clients/google-cloud-video-intelligence/synth.metadata
+++ b/google-cloud-clients/google-cloud-video-intelligence/synth.metadata
@@ -1,19 +1,19 @@
{
- "updateTime": "2019-02-02T08:50:14.214660Z",
+ "updateTime": "2019-02-21T08:55:19.182601Z",
"sources": [
{
"generator": {
"name": "artman",
- "version": "0.16.8",
- "dockerImage": "googleapis/artman@sha256:75bc07ef34a1de9895c18af54dc503ed3b3f3b52e85062e3360a979d2a0741e7"
+ "version": "0.16.14",
+ "dockerImage": "googleapis/artman@sha256:f3d61ae45abaeefb6be5f228cda22732c2f1b00fb687c79c4bd4f2c42bb1e1a7"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "bce093dab3e65c40eb9a37efbdc960f34df6037a",
- "internalRef": "231974277"
+ "sha": "9cf63704bd272a40b79dde5a2b33f61104ee4f7f",
+ "internalRef": "234935970"
}
}
],