diff --git a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java
index 92966cd6eab6..4ada66a5d44e 100644
--- a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java
+++ b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfig.java
@@ -168,6 +168,23 @@ private RecognitionConfig(
mutable_bitField0_ |= 0x00000020;
}
alternativeLanguageCodes_.add(s);
+ break;
+ }
+ case 154:
+ {
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder subBuilder = null;
+ if (diarizationConfig_ != null) {
+ subBuilder = diarizationConfig_.toBuilder();
+ }
+ diarizationConfig_ =
+ input.readMessage(
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.parser(),
+ extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(diarizationConfig_);
+ diarizationConfig_ = subBuilder.buildPartial();
+ }
+
break;
}
default:
@@ -981,16 +998,12 @@ public boolean getEnableAutomaticPunctuation() {
* *Optional* If 'true', enables speaker detection for each recognized word in
* the top alternative of the recognition result using a speaker_tag provided
* in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
+ * Note: Use diarization_config instead.
*
*
- * bool enable_speaker_diarization = 16;
+ * bool enable_speaker_diarization = 16 [deprecated = true];
*/
+ @java.lang.Deprecated
public boolean getEnableSpeakerDiarization() {
return enableSpeakerDiarization_;
}
@@ -1003,16 +1016,80 @@ public boolean getEnableSpeakerDiarization() {
*
* *Optional*
* If set, specifies the estimated number of speakers in the conversation.
- * If not set, defaults to '2'.
- * Ignored unless enable_speaker_diarization is set to true."
+ * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
+ * Note: Use diarization_config instead.
*
*
- * int32 diarization_speaker_count = 17;
+ * int32 diarization_speaker_count = 17 [deprecated = true];
*/
+ @java.lang.Deprecated
public int getDiarizationSpeakerCount() {
return diarizationSpeakerCount_;
}
+ public static final int DIARIZATION_CONFIG_FIELD_NUMBER = 19;
+ private com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarizationConfig_;
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* Config to enable speaker diarization and set additional
+   * parameters to make diarization better suited for your application.
+   * Note: When this is enabled, we send all the words from the beginning of the
+   * audio for the top alternative in every consecutive STREAMING responses.
+   * This is done in order to improve our speaker tags as our models learn to
+   * identify the speakers in the conversation over time.
+   * For non-streaming requests, the diarization results will be provided only
+   * in the top alternative of the FINAL SpeechRecognitionResult.
+   * </pre>
+   *
+   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public boolean hasDiarizationConfig() {
+ return diarizationConfig_ != null;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* Config to enable speaker diarization and set additional
+   * parameters to make diarization better suited for your application.
+   * Note: When this is enabled, we send all the words from the beginning of the
+   * audio for the top alternative in every consecutive STREAMING responses.
+   * This is done in order to improve our speaker tags as our models learn to
+   * identify the speakers in the conversation over time.
+   * For non-streaming requests, the diarization results will be provided only
+   * in the top alternative of the FINAL SpeechRecognitionResult.
+   * </pre>
+   *
+   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarizationConfig() {
+ return diarizationConfig_ == null
+ ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
+ : diarizationConfig_;
+ }
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* Config to enable speaker diarization and set additional
+   * parameters to make diarization better suited for your application.
+   * Note: When this is enabled, we send all the words from the beginning of the
+   * audio for the top alternative in every consecutive STREAMING responses.
+   * This is done in order to improve our speaker tags as our models learn to
+   * identify the speakers in the conversation over time.
+   * For non-streaming requests, the diarization results will be provided only
+   * in the top alternative of the FINAL SpeechRecognitionResult.
+   * </pre>
+   *
+   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder
+ getDiarizationConfigOrBuilder() {
+ return getDiarizationConfig();
+ }
+
public static final int METADATA_FIELD_NUMBER = 9;
private com.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata_;
/**
@@ -1248,6 +1325,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
com.google.protobuf.GeneratedMessageV3.writeString(
output, 18, alternativeLanguageCodes_.getRaw(i));
}
+ if (diarizationConfig_ != null) {
+ output.writeMessage(19, getDiarizationConfig());
+ }
unknownFields.writeTo(output);
}
@@ -1318,6 +1398,9 @@ public int getSerializedSize() {
size += dataSize;
size += 2 * getAlternativeLanguageCodesList().size();
}
+ if (diarizationConfig_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(19, getDiarizationConfig());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -1350,6 +1433,10 @@ public boolean equals(final java.lang.Object obj) {
if (getEnableAutomaticPunctuation() != other.getEnableAutomaticPunctuation()) return false;
if (getEnableSpeakerDiarization() != other.getEnableSpeakerDiarization()) return false;
if (getDiarizationSpeakerCount() != other.getDiarizationSpeakerCount()) return false;
+ if (hasDiarizationConfig() != other.hasDiarizationConfig()) return false;
+ if (hasDiarizationConfig()) {
+ if (!getDiarizationConfig().equals(other.getDiarizationConfig())) return false;
+ }
if (hasMetadata() != other.hasMetadata()) return false;
if (hasMetadata()) {
if (!getMetadata().equals(other.getMetadata())) return false;
@@ -1401,6 +1488,10 @@ public int hashCode() {
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpeakerDiarization());
hash = (37 * hash) + DIARIZATION_SPEAKER_COUNT_FIELD_NUMBER;
hash = (53 * hash) + getDiarizationSpeakerCount();
+ if (hasDiarizationConfig()) {
+ hash = (37 * hash) + DIARIZATION_CONFIG_FIELD_NUMBER;
+ hash = (53 * hash) + getDiarizationConfig().hashCode();
+ }
if (hasMetadata()) {
hash = (37 * hash) + METADATA_FIELD_NUMBER;
hash = (53 * hash) + getMetadata().hashCode();
@@ -1589,6 +1680,12 @@ public Builder clear() {
diarizationSpeakerCount_ = 0;
+ if (diarizationConfigBuilder_ == null) {
+ diarizationConfig_ = null;
+ } else {
+ diarizationConfig_ = null;
+ diarizationConfigBuilder_ = null;
+ }
if (metadataBuilder_ == null) {
metadata_ = null;
} else {
@@ -1654,6 +1751,11 @@ public com.google.cloud.speech.v1p1beta1.RecognitionConfig buildPartial() {
result.enableAutomaticPunctuation_ = enableAutomaticPunctuation_;
result.enableSpeakerDiarization_ = enableSpeakerDiarization_;
result.diarizationSpeakerCount_ = diarizationSpeakerCount_;
+ if (diarizationConfigBuilder_ == null) {
+ result.diarizationConfig_ = diarizationConfig_;
+ } else {
+ result.diarizationConfig_ = diarizationConfigBuilder_.build();
+ }
if (metadataBuilder_ == null) {
result.metadata_ = metadata_;
} else {
@@ -1786,6 +1888,9 @@ public Builder mergeFrom(com.google.cloud.speech.v1p1beta1.RecognitionConfig oth
if (other.getDiarizationSpeakerCount() != 0) {
setDiarizationSpeakerCount(other.getDiarizationSpeakerCount());
}
+ if (other.hasDiarizationConfig()) {
+ mergeDiarizationConfig(other.getDiarizationConfig());
+ }
if (other.hasMetadata()) {
mergeMetadata(other.getMetadata());
}
@@ -3170,16 +3275,12 @@ public Builder clearEnableAutomaticPunctuation() {
* *Optional* If 'true', enables speaker detection for each recognized word in
* the top alternative of the recognition result using a speaker_tag provided
* in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
+ * Note: Use diarization_config instead.
*
*
- * bool enable_speaker_diarization = 16;
+ * bool enable_speaker_diarization = 16 [deprecated = true];
*/
+ @java.lang.Deprecated
public boolean getEnableSpeakerDiarization() {
return enableSpeakerDiarization_;
}
@@ -3190,16 +3291,12 @@ public boolean getEnableSpeakerDiarization() {
* *Optional* If 'true', enables speaker detection for each recognized word in
* the top alternative of the recognition result using a speaker_tag provided
* in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
+ * Note: Use diarization_config instead.
*
*
- * bool enable_speaker_diarization = 16;
+ * bool enable_speaker_diarization = 16 [deprecated = true];
*/
+ @java.lang.Deprecated
public Builder setEnableSpeakerDiarization(boolean value) {
enableSpeakerDiarization_ = value;
@@ -3213,16 +3310,12 @@ public Builder setEnableSpeakerDiarization(boolean value) {
* *Optional* If 'true', enables speaker detection for each recognized word in
* the top alternative of the recognition result using a speaker_tag provided
* in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
+ * Note: Use diarization_config instead.
*
*
- * bool enable_speaker_diarization = 16;
+ * bool enable_speaker_diarization = 16 [deprecated = true];
*/
+ @java.lang.Deprecated
public Builder clearEnableSpeakerDiarization() {
enableSpeakerDiarization_ = false;
@@ -3237,12 +3330,13 @@ public Builder clearEnableSpeakerDiarization() {
*
* *Optional*
* If set, specifies the estimated number of speakers in the conversation.
- * If not set, defaults to '2'.
- * Ignored unless enable_speaker_diarization is set to true."
+ * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
+ * Note: Use diarization_config instead.
*
*
- * int32 diarization_speaker_count = 17;
+ * int32 diarization_speaker_count = 17 [deprecated = true];
*/
+ @java.lang.Deprecated
public int getDiarizationSpeakerCount() {
return diarizationSpeakerCount_;
}
@@ -3252,12 +3346,13 @@ public int getDiarizationSpeakerCount() {
*
* *Optional*
* If set, specifies the estimated number of speakers in the conversation.
- * If not set, defaults to '2'.
- * Ignored unless enable_speaker_diarization is set to true."
+ * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
+ * Note: Use diarization_config instead.
*
*
- * int32 diarization_speaker_count = 17;
+ * int32 diarization_speaker_count = 17 [deprecated = true];
*/
+ @java.lang.Deprecated
public Builder setDiarizationSpeakerCount(int value) {
diarizationSpeakerCount_ = value;
@@ -3270,12 +3365,13 @@ public Builder setDiarizationSpeakerCount(int value) {
*
* *Optional*
* If set, specifies the estimated number of speakers in the conversation.
- * If not set, defaults to '2'.
- * Ignored unless enable_speaker_diarization is set to true."
+ * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
+ * Note: Use diarization_config instead.
*
*
- * int32 diarization_speaker_count = 17;
+ * int32 diarization_speaker_count = 17 [deprecated = true];
*/
+ @java.lang.Deprecated
public Builder clearDiarizationSpeakerCount() {
diarizationSpeakerCount_ = 0;
@@ -3283,6 +3379,256 @@ public Builder clearDiarizationSpeakerCount() {
return this;
}
+ private com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarizationConfig_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder>
+ diarizationConfigBuilder_;
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public boolean hasDiarizationConfig() {
+ return diarizationConfigBuilder_ != null || diarizationConfig_ != null;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarizationConfig() {
+ if (diarizationConfigBuilder_ == null) {
+ return diarizationConfig_ == null
+ ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
+ : diarizationConfig_;
+ } else {
+ return diarizationConfigBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public Builder setDiarizationConfig(
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig value) {
+ if (diarizationConfigBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ diarizationConfig_ = value;
+ onChanged();
+ } else {
+ diarizationConfigBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public Builder setDiarizationConfig(
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder builderForValue) {
+ if (diarizationConfigBuilder_ == null) {
+ diarizationConfig_ = builderForValue.build();
+ onChanged();
+ } else {
+ diarizationConfigBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public Builder mergeDiarizationConfig(
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig value) {
+ if (diarizationConfigBuilder_ == null) {
+ if (diarizationConfig_ != null) {
+ diarizationConfig_ =
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.newBuilder(
+ diarizationConfig_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ diarizationConfig_ = value;
+ }
+ onChanged();
+ } else {
+ diarizationConfigBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public Builder clearDiarizationConfig() {
+ if (diarizationConfigBuilder_ == null) {
+ diarizationConfig_ = null;
+ onChanged();
+ } else {
+ diarizationConfig_ = null;
+ diarizationConfigBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder
+ getDiarizationConfigBuilder() {
+
+ onChanged();
+ return getDiarizationConfigFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ public com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder
+ getDiarizationConfigOrBuilder() {
+ if (diarizationConfigBuilder_ != null) {
+ return diarizationConfigBuilder_.getMessageOrBuilder();
+ } else {
+ return diarizationConfig_ == null
+ ? com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.getDefaultInstance()
+ : diarizationConfig_;
+ }
+ }
+ /**
+ *
+ *
+     * <pre>
+     * *Optional* Config to enable speaker diarization and set additional
+     * parameters to make diarization better suited for your application.
+     * Note: When this is enabled, we send all the words from the beginning of the
+     * audio for the top alternative in every consecutive STREAMING responses.
+     * This is done in order to improve our speaker tags as our models learn to
+     * identify the speakers in the conversation over time.
+     * For non-streaming requests, the diarization results will be provided only
+     * in the top alternative of the FINAL SpeechRecognitionResult.
+     * </pre>
+     *
+     * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder>
+ getDiarizationConfigFieldBuilder() {
+ if (diarizationConfigBuilder_ == null) {
+ diarizationConfigBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder>(
+ getDiarizationConfig(), getParentForChildren(), isClean());
+ diarizationConfig_ = null;
+ }
+ return diarizationConfigBuilder_;
+ }
+
private com.google.cloud.speech.v1p1beta1.RecognitionMetadata metadata_;
private com.google.protobuf.SingleFieldBuilderV3<
com.google.cloud.speech.v1p1beta1.RecognitionMetadata,
diff --git a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java
index 1b71d1c6a738..d529b0a938aa 100644
--- a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/RecognitionConfigOrBuilder.java
@@ -350,16 +350,12 @@ public interface RecognitionConfigOrBuilder
* *Optional* If 'true', enables speaker detection for each recognized word in
* the top alternative of the recognition result using a speaker_tag provided
* in the WordInfo.
- * Note: When this is true, we send all the words from the beginning of the
- * audio for the top alternative in every consecutive STREAMING responses.
- * This is done in order to improve our speaker tags as our models learn to
- * identify the speakers in the conversation over time.
- * For non-streaming requests, the diarization results will be provided only
- * in the top alternative of the FINAL SpeechRecognitionResult.
+ * Note: Use diarization_config instead.
*
*
- * bool enable_speaker_diarization = 16;
+ * bool enable_speaker_diarization = 16 [deprecated = true];
*/
+ @java.lang.Deprecated
boolean getEnableSpeakerDiarization();
/**
@@ -368,14 +364,68 @@ public interface RecognitionConfigOrBuilder
*
* *Optional*
* If set, specifies the estimated number of speakers in the conversation.
- * If not set, defaults to '2'.
- * Ignored unless enable_speaker_diarization is set to true."
+ * Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
+ * Note: Use diarization_config instead.
*
*
- * int32 diarization_speaker_count = 17;
+ * int32 diarization_speaker_count = 17 [deprecated = true];
*/
+ @java.lang.Deprecated
int getDiarizationSpeakerCount();
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* Config to enable speaker diarization and set additional
+   * parameters to make diarization better suited for your application.
+   * Note: When this is enabled, we send all the words from the beginning of the
+   * audio for the top alternative in every consecutive STREAMING responses.
+   * This is done in order to improve our speaker tags as our models learn to
+   * identify the speakers in the conversation over time.
+   * For non-streaming requests, the diarization results will be provided only
+   * in the top alternative of the FINAL SpeechRecognitionResult.
+   * </pre>
+   *
+   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ boolean hasDiarizationConfig();
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* Config to enable speaker diarization and set additional
+   * parameters to make diarization better suited for your application.
+   * Note: When this is enabled, we send all the words from the beginning of the
+   * audio for the top alternative in every consecutive STREAMING responses.
+   * This is done in order to improve our speaker tags as our models learn to
+   * identify the speakers in the conversation over time.
+   * For non-streaming requests, the diarization results will be provided only
+   * in the top alternative of the FINAL SpeechRecognitionResult.
+   * </pre>
+   *
+   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDiarizationConfig();
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* Config to enable speaker diarization and set additional
+   * parameters to make diarization better suited for your application.
+   * Note: When this is enabled, we send all the words from the beginning of the
+   * audio for the top alternative in every consecutive STREAMING responses.
+   * This is done in order to improve our speaker tags as our models learn to
+   * identify the speakers in the conversation over time.
+   * For non-streaming requests, the diarization results will be provided only
+   * in the top alternative of the FINAL SpeechRecognitionResult.
+   * </pre>
+   *
+   * <code>.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig diarization_config = 19;</code>
+ */
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfigOrBuilder
+ getDiarizationConfigOrBuilder();
+
/**
*
*
diff --git a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java
new file mode 100644
index 000000000000..deba8f7d2ce5
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeakerDiarizationConfig.java
@@ -0,0 +1,690 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/speech/v1p1beta1/cloud_speech.proto
+
+package com.google.cloud.speech.v1p1beta1;
+
+/** Protobuf type {@code google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig} */
+public final class SpeakerDiarizationConfig extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig)
+ SpeakerDiarizationConfigOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use SpeakerDiarizationConfig.newBuilder() to construct.
+  private SpeakerDiarizationConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+
+ private SpeakerDiarizationConfig() {}
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private SpeakerDiarizationConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 8:
+ {
+ enableSpeakerDiarization_ = input.readBool();
+ break;
+ }
+ case 16:
+ {
+ minSpeakerCount_ = input.readInt32();
+ break;
+ }
+ case 24:
+ {
+ maxSpeakerCount_ = input.readInt32();
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.speech.v1p1beta1.SpeechProto
+ .internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.speech.v1p1beta1.SpeechProto
+ .internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.class,
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig.Builder.class);
+ }
+
+ public static final int ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER = 1;
+ private boolean enableSpeakerDiarization_;
+ /**
+ *
+ *
+   * <pre>
+   * *Optional* If 'true', enables speaker detection for each recognized word in
+   * the top alternative of the recognition result using a speaker_tag provided
+   * in the WordInfo.
+   * </pre>
+   *
+   * <code>bool enable_speaker_diarization = 1;</code>
+ */
+ public boolean getEnableSpeakerDiarization() {
+ return enableSpeakerDiarization_;
+ }
+
+ public static final int MIN_SPEAKER_COUNT_FIELD_NUMBER = 2;
+ private int minSpeakerCount_;
+ /**
+ *
+ *
+ * + * *Optional* + * Minimum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 2. + *+ * + *
int32 min_speaker_count = 2;
+ */
+ public int getMinSpeakerCount() {
+ return minSpeakerCount_;
+ }
+
+ public static final int MAX_SPEAKER_COUNT_FIELD_NUMBER = 3;
+ private int maxSpeakerCount_;
+ /**
+ *
+ *
+ * + * *Optional* + * Maximum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 6. + *+ * + *
int32 max_speaker_count = 3;
+ */
+ public int getMaxSpeakerCount() {
+ return maxSpeakerCount_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (enableSpeakerDiarization_ != false) {
+ output.writeBool(1, enableSpeakerDiarization_);
+ }
+ if (minSpeakerCount_ != 0) {
+ output.writeInt32(2, minSpeakerCount_);
+ }
+ if (maxSpeakerCount_ != 0) {
+ output.writeInt32(3, maxSpeakerCount_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (enableSpeakerDiarization_ != false) {
+ size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enableSpeakerDiarization_);
+ }
+ if (minSpeakerCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream.computeInt32Size(2, minSpeakerCount_);
+ }
+ if (maxSpeakerCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, maxSpeakerCount_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig other =
+ (com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig) obj;
+
+ if (getEnableSpeakerDiarization() != other.getEnableSpeakerDiarization()) return false;
+ if (getMinSpeakerCount() != other.getMinSpeakerCount()) return false;
+ if (getMaxSpeakerCount() != other.getMaxSpeakerCount()) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + ENABLE_SPEAKER_DIARIZATION_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableSpeakerDiarization());
+ hash = (37 * hash) + MIN_SPEAKER_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getMinSpeakerCount();
+ hash = (37 * hash) + MAX_SPEAKER_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getMaxSpeakerCount();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /** Protobuf type {@code google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig} */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder+ * *Optional* If 'true', enables speaker detection for each recognized word in + * the top alternative of the recognition result using a speaker_tag provided + * in the WordInfo. + *+ * + *
bool enable_speaker_diarization = 1;
+ */
+ public boolean getEnableSpeakerDiarization() {
+ return enableSpeakerDiarization_;
+ }
+ /**
+ *
+ *
+ * + * *Optional* If 'true', enables speaker detection for each recognized word in + * the top alternative of the recognition result using a speaker_tag provided + * in the WordInfo. + *+ * + *
bool enable_speaker_diarization = 1;
+ */
+ public Builder setEnableSpeakerDiarization(boolean value) {
+
+ enableSpeakerDiarization_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * *Optional* If 'true', enables speaker detection for each recognized word in + * the top alternative of the recognition result using a speaker_tag provided + * in the WordInfo. + *+ * + *
bool enable_speaker_diarization = 1;
+ */
+ public Builder clearEnableSpeakerDiarization() {
+
+ enableSpeakerDiarization_ = false;
+ onChanged();
+ return this;
+ }
+
+ private int minSpeakerCount_;
+ /**
+ *
+ *
+ * + * *Optional* + * Minimum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 2. + *+ * + *
int32 min_speaker_count = 2;
+ */
+ public int getMinSpeakerCount() {
+ return minSpeakerCount_;
+ }
+ /**
+ *
+ *
+ * + * *Optional* + * Minimum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 2. + *+ * + *
int32 min_speaker_count = 2;
+ */
+ public Builder setMinSpeakerCount(int value) {
+
+ minSpeakerCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * *Optional* + * Minimum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 2. + *+ * + *
int32 min_speaker_count = 2;
+ */
+ public Builder clearMinSpeakerCount() {
+
+ minSpeakerCount_ = 0;
+ onChanged();
+ return this;
+ }
+
+ private int maxSpeakerCount_;
+ /**
+ *
+ *
+ * + * *Optional* + * Maximum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 6. + *+ * + *
int32 max_speaker_count = 3;
+ */
+ public int getMaxSpeakerCount() {
+ return maxSpeakerCount_;
+ }
+ /**
+ *
+ *
+ * + * *Optional* + * Maximum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 6. + *+ * + *
int32 max_speaker_count = 3;
+ */
+ public Builder setMaxSpeakerCount(int value) {
+
+ maxSpeakerCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * *Optional* + * Maximum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 6. + *+ * + *
int32 max_speaker_count = 3;
+ */
+ public Builder clearMaxSpeakerCount() {
+
+ maxSpeakerCount_ = 0;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig)
+ private static final com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig();
+ }
+
+ public static com.google.cloud.speech.v1p1beta1.SpeakerDiarizationConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * *Optional* If 'true', enables speaker detection for each recognized word in + * the top alternative of the recognition result using a speaker_tag provided + * in the WordInfo. + *+ * + *
bool enable_speaker_diarization = 1;
+ */
+ boolean getEnableSpeakerDiarization();
+
+ /**
+ *
+ *
+ * + * *Optional* + * Minimum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 2. + *+ * + *
int32 min_speaker_count = 2;
+ */
+ int getMinSpeakerCount();
+
+ /**
+ *
+ *
+ * + * *Optional* + * Maximum number of speakers in the conversation. This range gives you more + * flexibility by allowing the system to automatically determine the correct + * number of speakers. If not set, the default value is 6. + *+ * + *
int32 max_speaker_count = 3;
+ */
+ int getMaxSpeakerCount();
+}
diff --git a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java
index 4819f4227159..e61f6c9809da 100644
--- a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java
+++ b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechProto.java
@@ -32,6 +32,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_speech_v1p1beta1_RecognitionConfig_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_speech_v1p1beta1_RecognitionMetadata_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -107,8 +111,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "ng_request\"\221\001\n\032StreamingRecognitionConfi"
+ "g\022@\n\006config\030\001 \001(\01320.google.cloud.speech."
+ "v1p1beta1.RecognitionConfig\022\030\n\020single_ut"
- + "terance\030\002 \001(\010\022\027\n\017interim_results\030\003 \001(\010\"\265"
- + "\006\n\021RecognitionConfig\022P\n\010encoding\030\001 \001(\0162>"
+ + "terance\030\002 \001(\010\022\027\n\017interim_results\030\003 \001(\010\"\222"
+ + "\007\n\021RecognitionConfig\022P\n\010encoding\030\001 \001(\0162>"
+ ".google.cloud.speech.v1p1beta1.Recogniti"
+ "onConfig.AudioEncoding\022\031\n\021sample_rate_he"
+ "rtz\030\002 \001(\005\022\033\n\023audio_channel_count\030\007 \001(\005\022/"
@@ -120,95 +124,101 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "peech.v1p1beta1.SpeechContext\022 \n\030enable_"
+ "word_time_offsets\030\010 \001(\010\022\036\n\026enable_word_c"
+ "onfidence\030\017 \001(\010\022$\n\034enable_automatic_punc"
- + "tuation\030\013 \001(\010\022\"\n\032enable_speaker_diarizat"
- + "ion\030\020 \001(\010\022!\n\031diarization_speaker_count\030\021"
- + " \001(\005\022D\n\010metadata\030\t \001(\01322.google.cloud.sp"
- + "eech.v1p1beta1.RecognitionMetadata\022\r\n\005mo"
- + "del\030\r \001(\t\022\024\n\014use_enhanced\030\016 \001(\010\"\224\001\n\rAudi"
- + "oEncoding\022\030\n\024ENCODING_UNSPECIFIED\020\000\022\014\n\010L"
- + "INEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULAW\020\003\022\007\n\003AMR\020\004\022"
- + "\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032\n\026SPEEX_WITH_"
- + "HEADER_BYTE\020\007\022\007\n\003MP3\020\010\"\323\010\n\023RecognitionMe"
- + "tadata\022\\\n\020interaction_type\030\001 \001(\0162B.googl"
- + "e.cloud.speech.v1p1beta1.RecognitionMeta"
- + "data.InteractionType\022$\n\034industry_naics_c"
- + "ode_of_audio\030\003 \001(\r\022b\n\023microphone_distanc"
- + "e\030\004 \001(\0162E.google.cloud.speech.v1p1beta1."
- + "RecognitionMetadata.MicrophoneDistance\022a"
- + "\n\023original_media_type\030\005 \001(\0162D.google.clo"
- + "ud.speech.v1p1beta1.RecognitionMetadata."
- + "OriginalMediaType\022e\n\025recording_device_ty"
- + "pe\030\006 \001(\0162F.google.cloud.speech.v1p1beta1"
- + ".RecognitionMetadata.RecordingDeviceType"
- + "\022\035\n\025recording_device_name\030\007 \001(\t\022\032\n\022origi"
- + "nal_mime_type\030\010 \001(\t\022\025\n\robfuscated_id\030\t \001"
- + "(\003\022\023\n\013audio_topic\030\n \001(\t\"\305\001\n\017InteractionT"
- + "ype\022 \n\034INTERACTION_TYPE_UNSPECIFIED\020\000\022\016\n"
- + "\nDISCUSSION\020\001\022\020\n\014PRESENTATION\020\002\022\016\n\nPHONE"
- + "_CALL\020\003\022\r\n\tVOICEMAIL\020\004\022\033\n\027PROFESSIONALLY"
- + "_PRODUCED\020\005\022\020\n\014VOICE_SEARCH\020\006\022\021\n\rVOICE_C"
- + "OMMAND\020\007\022\r\n\tDICTATION\020\010\"d\n\022MicrophoneDis"
- + "tance\022#\n\037MICROPHONE_DISTANCE_UNSPECIFIED"
- + "\020\000\022\r\n\tNEARFIELD\020\001\022\014\n\010MIDFIELD\020\002\022\014\n\010FARFI"
- + "ELD\020\003\"N\n\021OriginalMediaType\022#\n\037ORIGINAL_M"
- + "EDIA_TYPE_UNSPECIFIED\020\000\022\t\n\005AUDIO\020\001\022\t\n\005VI"
- + "DEO\020\002\"\244\001\n\023RecordingDeviceType\022%\n!RECORDI"
- + "NG_DEVICE_TYPE_UNSPECIFIED\020\000\022\016\n\nSMARTPHO"
- + "NE\020\001\022\006\n\002PC\020\002\022\016\n\nPHONE_LINE\020\003\022\013\n\007VEHICLE\020"
- + "\004\022\030\n\024OTHER_OUTDOOR_DEVICE\020\005\022\027\n\023OTHER_IND"
- + "OOR_DEVICE\020\006\"/\n\rSpeechContext\022\017\n\007phrases"
- + "\030\001 \003(\t\022\r\n\005boost\030\004 \001(\002\"D\n\020RecognitionAudi"
- + "o\022\021\n\007content\030\001 \001(\014H\000\022\r\n\003uri\030\002 \001(\tH\000B\016\n\014a"
- + "udio_source\"\\\n\021RecognizeResponse\022G\n\007resu"
- + "lts\030\002 \003(\01326.google.cloud.speech.v1p1beta"
- + "1.SpeechRecognitionResult\"g\n\034LongRunning"
- + "RecognizeResponse\022G\n\007results\030\002 \003(\01326.goo"
- + "gle.cloud.speech.v1p1beta1.SpeechRecogni"
- + "tionResult\"\236\001\n\034LongRunningRecognizeMetad"
- + "ata\022\030\n\020progress_percent\030\001 \001(\005\022.\n\nstart_t"
- + "ime\030\002 \001(\0132\032.google.protobuf.Timestamp\0224\n"
- + "\020last_update_time\030\003 \001(\0132\032.google.protobu"
- + "f.Timestamp\"\277\002\n\032StreamingRecognizeRespon"
- + "se\022!\n\005error\030\001 \001(\0132\022.google.rpc.Status\022J\n"
- + "\007results\030\002 \003(\01329.google.cloud.speech.v1p"
- + "1beta1.StreamingRecognitionResult\022d\n\021spe"
- + "ech_event_type\030\004 \001(\0162I.google.cloud.spee"
- + "ch.v1p1beta1.StreamingRecognizeResponse."
- + "SpeechEventType\"L\n\017SpeechEventType\022\034\n\030SP"
- + "EECH_EVENT_UNSPECIFIED\020\000\022\033\n\027END_OF_SINGL"
- + "E_UTTERANCE\020\001\"\364\001\n\032StreamingRecognitionRe"
- + "sult\022Q\n\014alternatives\030\001 \003(\0132;.google.clou"
- + "d.speech.v1p1beta1.SpeechRecognitionAlte"
- + "rnative\022\020\n\010is_final\030\002 \001(\010\022\021\n\tstability\030\003"
- + " \001(\002\0222\n\017result_end_time\030\004 \001(\0132\031.google.p"
- + "rotobuf.Duration\022\023\n\013channel_tag\030\005 \001(\005\022\025\n"
- + "\rlanguage_code\030\006 \001(\t\"\230\001\n\027SpeechRecogniti"
- + "onResult\022Q\n\014alternatives\030\001 \003(\0132;.google."
- + "cloud.speech.v1p1beta1.SpeechRecognition"
- + "Alternative\022\023\n\013channel_tag\030\002 \001(\005\022\025\n\rlang"
- + "uage_code\030\005 \001(\t\"~\n\034SpeechRecognitionAlte"
- + "rnative\022\022\n\ntranscript\030\001 \001(\t\022\022\n\nconfidenc"
- + "e\030\002 \001(\002\0226\n\005words\030\003 \003(\0132\'.google.cloud.sp"
- + "eech.v1p1beta1.WordInfo\"\235\001\n\010WordInfo\022-\n\n"
- + "start_time\030\001 \001(\0132\031.google.protobuf.Durat"
- + "ion\022+\n\010end_time\030\002 \001(\0132\031.google.protobuf."
- + "Duration\022\014\n\004word\030\003 \001(\t\022\022\n\nconfidence\030\004 \001"
- + "(\002\022\023\n\013speaker_tag\030\005 \001(\0052\332\003\n\006Speech\022\226\001\n\tR"
- + "ecognize\022/.google.cloud.speech.v1p1beta1"
- + ".RecognizeRequest\0320.google.cloud.speech."
- + "v1p1beta1.RecognizeResponse\"&\202\323\344\223\002 \"\033/v1"
- + "p1beta1/speech:recognize:\001*\022\244\001\n\024LongRunn"
- + "ingRecognize\022:.google.cloud.speech.v1p1b"
- + "eta1.LongRunningRecognizeRequest\032\035.googl"
- + "e.longrunning.Operation\"1\202\323\344\223\002+\"&/v1p1be"
- + "ta1/speech:longrunningrecognize:\001*\022\217\001\n\022S"
- + "treamingRecognize\0228.google.cloud.speech."
- + "v1p1beta1.StreamingRecognizeRequest\0329.go"
- + "ogle.cloud.speech.v1p1beta1.StreamingRec"
- + "ognizeResponse\"\000(\0010\001Bz\n!com.google.cloud"
- + ".speech.v1p1beta1B\013SpeechProtoP\001ZCgoogle"
- + ".golang.org/genproto/googleapis/cloud/sp"
- + "eech/v1p1beta1;speech\370\001\001b\006proto3"
+ + "tuation\030\013 \001(\010\022&\n\032enable_speaker_diarizat"
+ + "ion\030\020 \001(\010B\002\030\001\022%\n\031diarization_speaker_cou"
+ + "nt\030\021 \001(\005B\002\030\001\022S\n\022diarization_config\030\023 \001(\013"
+ + "27.google.cloud.speech.v1p1beta1.Speaker"
+ + "DiarizationConfig\022D\n\010metadata\030\t \001(\01322.go"
+ + "ogle.cloud.speech.v1p1beta1.RecognitionM"
+ + "etadata\022\r\n\005model\030\r \001(\t\022\024\n\014use_enhanced\030\016"
+ + " \001(\010\"\224\001\n\rAudioEncoding\022\030\n\024ENCODING_UNSPE"
+ + "CIFIED\020\000\022\014\n\010LINEAR16\020\001\022\010\n\004FLAC\020\002\022\t\n\005MULA"
+ + "W\020\003\022\007\n\003AMR\020\004\022\n\n\006AMR_WB\020\005\022\014\n\010OGG_OPUS\020\006\022\032"
+ + "\n\026SPEEX_WITH_HEADER_BYTE\020\007\022\007\n\003MP3\020\010\"t\n\030S"
+ + "peakerDiarizationConfig\022\"\n\032enable_speake"
+ + "r_diarization\030\001 \001(\010\022\031\n\021min_speaker_count"
+ + "\030\002 \001(\005\022\031\n\021max_speaker_count\030\003 \001(\005\"\323\010\n\023Re"
+ + "cognitionMetadata\022\\\n\020interaction_type\030\001 "
+ + "\001(\0162B.google.cloud.speech.v1p1beta1.Reco"
+ + "gnitionMetadata.InteractionType\022$\n\034indus"
+ + "try_naics_code_of_audio\030\003 \001(\r\022b\n\023microph"
+ + "one_distance\030\004 \001(\0162E.google.cloud.speech"
+ + ".v1p1beta1.RecognitionMetadata.Microphon"
+ + "eDistance\022a\n\023original_media_type\030\005 \001(\0162D"
+ + ".google.cloud.speech.v1p1beta1.Recogniti"
+ + "onMetadata.OriginalMediaType\022e\n\025recordin"
+ + "g_device_type\030\006 \001(\0162F.google.cloud.speec"
+ + "h.v1p1beta1.RecognitionMetadata.Recordin"
+ + "gDeviceType\022\035\n\025recording_device_name\030\007 \001"
+ + "(\t\022\032\n\022original_mime_type\030\010 \001(\t\022\025\n\robfusc"
+ + "ated_id\030\t \001(\003\022\023\n\013audio_topic\030\n \001(\t\"\305\001\n\017I"
+ + "nteractionType\022 \n\034INTERACTION_TYPE_UNSPE"
+ + "CIFIED\020\000\022\016\n\nDISCUSSION\020\001\022\020\n\014PRESENTATION"
+ + "\020\002\022\016\n\nPHONE_CALL\020\003\022\r\n\tVOICEMAIL\020\004\022\033\n\027PRO"
+ + "FESSIONALLY_PRODUCED\020\005\022\020\n\014VOICE_SEARCH\020\006"
+ + "\022\021\n\rVOICE_COMMAND\020\007\022\r\n\tDICTATION\020\010\"d\n\022Mi"
+ + "crophoneDistance\022#\n\037MICROPHONE_DISTANCE_"
+ + "UNSPECIFIED\020\000\022\r\n\tNEARFIELD\020\001\022\014\n\010MIDFIELD"
+ + "\020\002\022\014\n\010FARFIELD\020\003\"N\n\021OriginalMediaType\022#\n"
+ + "\037ORIGINAL_MEDIA_TYPE_UNSPECIFIED\020\000\022\t\n\005AU"
+ + "DIO\020\001\022\t\n\005VIDEO\020\002\"\244\001\n\023RecordingDeviceType"
+ + "\022%\n!RECORDING_DEVICE_TYPE_UNSPECIFIED\020\000\022"
+ + "\016\n\nSMARTPHONE\020\001\022\006\n\002PC\020\002\022\016\n\nPHONE_LINE\020\003\022"
+ + "\013\n\007VEHICLE\020\004\022\030\n\024OTHER_OUTDOOR_DEVICE\020\005\022\027"
+ + "\n\023OTHER_INDOOR_DEVICE\020\006\"/\n\rSpeechContext"
+ + "\022\017\n\007phrases\030\001 \003(\t\022\r\n\005boost\030\004 \001(\002\"D\n\020Reco"
+ + "gnitionAudio\022\021\n\007content\030\001 \001(\014H\000\022\r\n\003uri\030\002"
+ + " \001(\tH\000B\016\n\014audio_source\"\\\n\021RecognizeRespo"
+ + "nse\022G\n\007results\030\002 \003(\01326.google.cloud.spee"
+ + "ch.v1p1beta1.SpeechRecognitionResult\"g\n\034"
+ + "LongRunningRecognizeResponse\022G\n\007results\030"
+ + "\002 \003(\01326.google.cloud.speech.v1p1beta1.Sp"
+ + "eechRecognitionResult\"\236\001\n\034LongRunningRec"
+ + "ognizeMetadata\022\030\n\020progress_percent\030\001 \001(\005"
+ + "\022.\n\nstart_time\030\002 \001(\0132\032.google.protobuf.T"
+ + "imestamp\0224\n\020last_update_time\030\003 \001(\0132\032.goo"
+ + "gle.protobuf.Timestamp\"\277\002\n\032StreamingReco"
+ + "gnizeResponse\022!\n\005error\030\001 \001(\0132\022.google.rp"
+ + "c.Status\022J\n\007results\030\002 \003(\01329.google.cloud"
+ + ".speech.v1p1beta1.StreamingRecognitionRe"
+ + "sult\022d\n\021speech_event_type\030\004 \001(\0162I.google"
+ + ".cloud.speech.v1p1beta1.StreamingRecogni"
+ + "zeResponse.SpeechEventType\"L\n\017SpeechEven"
+ + "tType\022\034\n\030SPEECH_EVENT_UNSPECIFIED\020\000\022\033\n\027E"
+ + "ND_OF_SINGLE_UTTERANCE\020\001\"\364\001\n\032StreamingRe"
+ + "cognitionResult\022Q\n\014alternatives\030\001 \003(\0132;."
+ + "google.cloud.speech.v1p1beta1.SpeechReco"
+ + "gnitionAlternative\022\020\n\010is_final\030\002 \001(\010\022\021\n\t"
+ + "stability\030\003 \001(\002\0222\n\017result_end_time\030\004 \001(\013"
+ + "2\031.google.protobuf.Duration\022\023\n\013channel_t"
+ + "ag\030\005 \001(\005\022\025\n\rlanguage_code\030\006 \001(\t\"\230\001\n\027Spee"
+ + "chRecognitionResult\022Q\n\014alternatives\030\001 \003("
+ + "\0132;.google.cloud.speech.v1p1beta1.Speech"
+ + "RecognitionAlternative\022\023\n\013channel_tag\030\002 "
+ + "\001(\005\022\025\n\rlanguage_code\030\005 \001(\t\"~\n\034SpeechReco"
+ + "gnitionAlternative\022\022\n\ntranscript\030\001 \001(\t\022\022"
+ + "\n\nconfidence\030\002 \001(\002\0226\n\005words\030\003 \003(\0132\'.goog"
+ + "le.cloud.speech.v1p1beta1.WordInfo\"\235\001\n\010W"
+ + "ordInfo\022-\n\nstart_time\030\001 \001(\0132\031.google.pro"
+ + "tobuf.Duration\022+\n\010end_time\030\002 \001(\0132\031.googl"
+ + "e.protobuf.Duration\022\014\n\004word\030\003 \001(\t\022\022\n\ncon"
+ + "fidence\030\004 \001(\002\022\023\n\013speaker_tag\030\005 \001(\0052\332\003\n\006S"
+ + "peech\022\226\001\n\tRecognize\022/.google.cloud.speec"
+ + "h.v1p1beta1.RecognizeRequest\0320.google.cl"
+ + "oud.speech.v1p1beta1.RecognizeResponse\"&"
+ + "\202\323\344\223\002 \"\033/v1p1beta1/speech:recognize:\001*\022\244"
+ + "\001\n\024LongRunningRecognize\022:.google.cloud.s"
+ + "peech.v1p1beta1.LongRunningRecognizeRequ"
+ + "est\032\035.google.longrunning.Operation\"1\202\323\344\223"
+ + "\002+\"&/v1p1beta1/speech:longrunningrecogni"
+ + "ze:\001*\022\217\001\n\022StreamingRecognize\0228.google.cl"
+ + "oud.speech.v1p1beta1.StreamingRecognizeR"
+ + "equest\0329.google.cloud.speech.v1p1beta1.S"
+ + "treamingRecognizeResponse\"\000(\0010\001Bz\n!com.g"
+ + "oogle.cloud.speech.v1p1beta1B\013SpeechProt"
+ + "oP\001ZCgoogle.golang.org/genproto/googleap"
+ + "is/cloud/speech/v1p1beta1;speech\370\001\001b\006pro"
+ + "to3"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -282,12 +292,21 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"EnableAutomaticPunctuation",
"EnableSpeakerDiarization",
"DiarizationSpeakerCount",
+ "DiarizationConfig",
"Metadata",
"Model",
"UseEnhanced",
});
- internal_static_google_cloud_speech_v1p1beta1_RecognitionMetadata_descriptor =
+ internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_descriptor =
getDescriptor().getMessageTypes().get(5);
+ internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_speech_v1p1beta1_SpeakerDiarizationConfig_descriptor,
+ new java.lang.String[] {
+ "EnableSpeakerDiarization", "MinSpeakerCount", "MaxSpeakerCount",
+ });
+ internal_static_google_cloud_speech_v1p1beta1_RecognitionMetadata_descriptor =
+ getDescriptor().getMessageTypes().get(6);
internal_static_google_cloud_speech_v1p1beta1_RecognitionMetadata_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_RecognitionMetadata_descriptor,
@@ -303,7 +322,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"AudioTopic",
});
internal_static_google_cloud_speech_v1p1beta1_SpeechContext_descriptor =
- getDescriptor().getMessageTypes().get(6);
+ getDescriptor().getMessageTypes().get(7);
internal_static_google_cloud_speech_v1p1beta1_SpeechContext_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_SpeechContext_descriptor,
@@ -311,7 +330,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Phrases", "Boost",
});
internal_static_google_cloud_speech_v1p1beta1_RecognitionAudio_descriptor =
- getDescriptor().getMessageTypes().get(7);
+ getDescriptor().getMessageTypes().get(8);
internal_static_google_cloud_speech_v1p1beta1_RecognitionAudio_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_RecognitionAudio_descriptor,
@@ -319,7 +338,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Content", "Uri", "AudioSource",
});
internal_static_google_cloud_speech_v1p1beta1_RecognizeResponse_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_google_cloud_speech_v1p1beta1_RecognizeResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_RecognizeResponse_descriptor,
@@ -327,7 +346,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Results",
});
internal_static_google_cloud_speech_v1p1beta1_LongRunningRecognizeResponse_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(10);
internal_static_google_cloud_speech_v1p1beta1_LongRunningRecognizeResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_LongRunningRecognizeResponse_descriptor,
@@ -335,7 +354,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Results",
});
internal_static_google_cloud_speech_v1p1beta1_LongRunningRecognizeMetadata_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(11);
internal_static_google_cloud_speech_v1p1beta1_LongRunningRecognizeMetadata_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_LongRunningRecognizeMetadata_descriptor,
@@ -343,7 +362,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"ProgressPercent", "StartTime", "LastUpdateTime",
});
internal_static_google_cloud_speech_v1p1beta1_StreamingRecognizeResponse_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(12);
internal_static_google_cloud_speech_v1p1beta1_StreamingRecognizeResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_StreamingRecognizeResponse_descriptor,
@@ -351,7 +370,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Error", "Results", "SpeechEventType",
});
internal_static_google_cloud_speech_v1p1beta1_StreamingRecognitionResult_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(13);
internal_static_google_cloud_speech_v1p1beta1_StreamingRecognitionResult_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_StreamingRecognitionResult_descriptor,
@@ -359,7 +378,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Alternatives", "IsFinal", "Stability", "ResultEndTime", "ChannelTag", "LanguageCode",
});
internal_static_google_cloud_speech_v1p1beta1_SpeechRecognitionResult_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(14);
internal_static_google_cloud_speech_v1p1beta1_SpeechRecognitionResult_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_SpeechRecognitionResult_descriptor,
@@ -367,7 +386,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Alternatives", "ChannelTag", "LanguageCode",
});
internal_static_google_cloud_speech_v1p1beta1_SpeechRecognitionAlternative_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(15);
internal_static_google_cloud_speech_v1p1beta1_SpeechRecognitionAlternative_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_SpeechRecognitionAlternative_descriptor,
@@ -375,7 +394,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Transcript", "Confidence", "Words",
});
internal_static_google_cloud_speech_v1p1beta1_WordInfo_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(16);
internal_static_google_cloud_speech_v1p1beta1_WordInfo_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_speech_v1p1beta1_WordInfo_descriptor,
diff --git a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
index dd557c5fd5bf..53ea05957a25 100644
--- a/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
+++ b/google-api-grpc/proto-google-cloud-speech-v1p1beta1/src/main/proto/google/cloud/speech/v1p1beta1/cloud_speech.proto
@@ -306,19 +306,24 @@ message RecognitionConfig {
// *Optional* If 'true', enables speaker detection for each recognized word in
// the top alternative of the recognition result using a speaker_tag provided
// in the WordInfo.
- // Note: When this is true, we send all the words from the beginning of the
+ // Note: Use diarization_config instead.
+ bool enable_speaker_diarization = 16 [deprecated = true];
+
+ // *Optional*
+ // If set, specifies the estimated number of speakers in the conversation.
+ // Defaults to '2'. Ignored unless enable_speaker_diarization is set to true.
+ // Note: Use diarization_config instead.
+ int32 diarization_speaker_count = 17 [deprecated = true];
+
+ // *Optional* Config to enable speaker diarization and set additional
+ // parameters to make diarization better suited for your application.
+ // Note: When this is enabled, we send all the words from the beginning of the
// audio for the top alternative in every consecutive STREAMING responses.
// This is done in order to improve our speaker tags as our models learn to
// identify the speakers in the conversation over time.
// For non-streaming requests, the diarization results will be provided only
// in the top alternative of the FINAL SpeechRecognitionResult.
- bool enable_speaker_diarization = 16;
-
- // *Optional*
- // If set, specifies the estimated number of speakers in the conversation.
- // If not set, defaults to '2'.
- // Ignored unless enable_speaker_diarization is set to true."
- int32 diarization_speaker_count = 17;
+ SpeakerDiarizationConfig diarization_config = 19;
// *Optional* Metadata regarding this request.
RecognitionMetadata metadata = 9;
@@ -368,6 +373,28 @@ message RecognitionConfig {
bool use_enhanced = 14;
}
+message SpeakerDiarizationConfig {
+ // *Optional* If 'true', enables speaker detection for each recognized word in
+ // the top alternative of the recognition result using a speaker_tag provided
+ // in the WordInfo.
+ bool enable_speaker_diarization = 1;
+
+ // Note: Set min_speaker_count = max_speaker_count to fix the number of
+ // speakers to be detected in the audio.
+
+ // *Optional*
+ // Minimum number of speakers in the conversation. This range gives you more
+ // flexibility by allowing the system to automatically determine the correct
+ // number of speakers. If not set, the default value is 2.
+ int32 min_speaker_count = 2;
+
+ // *Optional*
+ // Maximum number of speakers in the conversation. This range gives you more
+ // flexibility by allowing the system to automatically determine the correct
+ // number of speakers. If not set, the default value is 6.
+ int32 max_speaker_count = 3;
+}
+
// Description of audio data to be recognized.
message RecognitionMetadata {
// Use case categories that the audio recognition request can be described
diff --git a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechSettings.java b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechSettings.java
index ad051fc9685f..c25b358258a4 100644
--- a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechSettings.java
+++ b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/SpeechSettings.java
@@ -46,8 +46,9 @@
*
*
 * The builder of this class is recursive, so contained classes are themselves builders. When
- * build() is called, the tree of builders is called to create the complete settings object. For
- * example, to set the total timeout of recognize to 30 seconds:
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the total timeout of recognize to 30 seconds:
 *
 *
*
diff --git a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java
index 2b5ece8de3da..2ee8683d8658 100644
--- a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java
+++ b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/package-info.java
@@ -45,4 +45,7 @@
*
*
*/
+@Generated("by gapic-generator")
package com.google.cloud.speech.v1;
+
+import javax.annotation.Generated;
diff --git a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java
index 05cd6ef80cfc..7065f86f300a 100644
--- a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java
+++ b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1/stub/SpeechStubSettings.java
@@ -65,8 +65,9 @@
*
*
 * The builder of this class is recursive, so contained classes are themselves builders. When
- * build() is called, the tree of builders is called to create the complete settings object. For
- * example, to set the total timeout of recognize to 30 seconds:
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the total timeout of recognize to 30 seconds:
 *
 *
*
diff --git a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechSettings.java b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechSettings.java
index bd43283a1276..b624cd1ff6de 100644
--- a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechSettings.java
+++ b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/SpeechSettings.java
@@ -46,8 +46,9 @@
*
*
* The builder of this class is recursive, so contained classes are themselves builders. When
- * build() is called, the tree of builders is called to create the complete settings object. For
- * example, to set the total timeout of recognize to 30 seconds:
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the total timeout of recognize to 30 seconds:
*
*
*
diff --git a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java
index fc270604948c..b16debe8e84d 100644
--- a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java
+++ b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/package-info.java
@@ -45,4 +45,7 @@
*
*
*/
+@Generated("by gapic-generator")
package com.google.cloud.speech.v1p1beta1;
+
+import javax.annotation.Generated;
diff --git a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java
index cf4e44e2a336..7161dfde7b76 100644
--- a/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java
+++ b/google-cloud-clients/google-cloud-speech/src/main/java/com/google/cloud/speech/v1p1beta1/stub/SpeechStubSettings.java
@@ -65,8 +65,9 @@
*
*
* The builder of this class is recursive, so contained classes are themselves builders. When
- * build() is called, the tree of builders is called to create the complete settings object. For
- * example, to set the total timeout of recognize to 30 seconds:
+ * build() is called, the tree of builders is called to create the complete settings object.
+ *
+ * <p>For example, to set the total timeout of recognize to 30 seconds:
*
*
*
diff --git a/google-cloud-clients/google-cloud-speech/synth.metadata b/google-cloud-clients/google-cloud-speech/synth.metadata
index eda60b61fb76..f0e91fd82320 100644
--- a/google-cloud-clients/google-cloud-speech/synth.metadata
+++ b/google-cloud-clients/google-cloud-speech/synth.metadata
@@ -1,19 +1,19 @@
{
- "updateTime": "2019-07-19T07:58:08.107049Z",
+ "updateTime": "2019-07-25T07:57:41.134771Z",
"sources": [
{
"generator": {
"name": "artman",
- "version": "0.30.1",
- "dockerImage": "googleapis/artman@sha256:f1a2e851e5e012c59e1da4125480bb19878f86a4e7fac4f375f2e819956b5aa3"
+ "version": "0.31.0",
+ "dockerImage": "googleapis/artman@sha256:9aed6bbde54e26d2fcde7aa86d9f64c0278f741e58808c46573e488cbf6098f0"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "f78612e8d008b9678252da84c035da12e92c0093",
- "internalRef": "258869625"
+ "sha": "4b12afe72950f36bef6f196a05f4430e4421a873",
+ "internalRef": "259790363"
}
}
],