The interfaces provided are listed below, along with usage samples.

======================= TextToSpeechClient =======================
diff --git a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java
index 8c504f5d..4f40b335 100644
--- a/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java
+++ b/google-cloud-texttospeech/src/main/java/com/google/cloud/texttospeech/v1/stub/TextToSpeechStubSettings.java
@@ -208,10 +208,10 @@ public static class Builder extends StubSettings.Builder<TextToSpeechStubSettings, Builder> {
diff --git a/grpc-google-cloud-texttospeech-v1/pom.xml b/grpc-google-cloud-texttospeech-v1/pom.xml
index 36506c8e..4cd40634 100644
--- a/grpc-google-cloud-texttospeech-v1/pom.xml
+++ b/grpc-google-cloud-texttospeech-v1/pom.xml
@@ -4,13 +4,13 @@
OGG_OPUS = 3;
*/
OGG_OPUS(3),
+ /**
+ *
+ *
+ *
+ * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ * Audio content returned as MULAW also contains a WAV header.
+ *
+ *
+ * MULAW = 5;
+ */
+ MULAW(5),
+ /**
+ *
+ *
+ *
+ * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law.
+ * Audio content returned as ALAW also contains a WAV header.
+ *
+ *
+ * ALAW = 6;
+ */
+ ALAW(6),
UNRECOGNIZED(-1),
;
@@ -120,6 +142,28 @@ public enum AudioEncoding implements com.google.protobuf.ProtocolMessageEnum {
* OGG_OPUS = 3;
*/
public static final int OGG_OPUS_VALUE = 3;
+ /**
+ *
+ *
+ *
+ * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ * Audio content returned as MULAW also contains a WAV header.
+ *
+ *
+ * MULAW = 5;
+ */
+ public static final int MULAW_VALUE = 5;
+ /**
+ *
+ *
+ *
+ * 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law.
+ * Audio content returned as ALAW also contains a WAV header.
+ *
+ *
+ * ALAW = 6;
+ */
+ public static final int ALAW_VALUE = 6;
public final int getNumber() {
if (this == UNRECOGNIZED) {
@@ -153,6 +197,10 @@ public static AudioEncoding forNumber(int value) {
return MP3;
case 3:
return OGG_OPUS;
+ case 5:
+ return MULAW;
+ case 6:
+ return ALAW;
default:
return null;
}
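The two new constants map straight onto the public client surface. As a rough, hedged sketch (not part of this diff): a caller could request mu-law output through the existing `synthesizeSpeech(input, voice, audioConfig)` overload. The voice settings, 8000 Hz sample rate, and output file name below are illustrative placeholders.

```java
import com.google.cloud.texttospeech.v1.AudioConfig;
import com.google.cloud.texttospeech.v1.AudioEncoding;
import com.google.cloud.texttospeech.v1.SsmlVoiceGender;
import com.google.cloud.texttospeech.v1.SynthesisInput;
import com.google.cloud.texttospeech.v1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1.TextToSpeechClient;
import com.google.cloud.texttospeech.v1.VoiceSelectionParams;
import java.nio.file.Files;
import java.nio.file.Paths;

public class SynthesizeMulawSketch {
  public static void main(String[] args) throws Exception {
    try (TextToSpeechClient client = TextToSpeechClient.create()) {
      SynthesisInput input = SynthesisInput.newBuilder().setText("Hello, mu-law world").build();
      VoiceSelectionParams voice =
          VoiceSelectionParams.newBuilder()
              .setLanguageCode("en-US")
              .setSsmlGender(SsmlVoiceGender.FEMALE)
              .build();
      // MULAW and ALAW are the encodings added in this change; both responses
      // carry a WAV header, so the bytes can be written out as a .wav file.
      AudioConfig audioConfig =
          AudioConfig.newBuilder()
              .setAudioEncoding(AudioEncoding.MULAW)
              .setSampleRateHertz(8000) // illustrative telephony-style rate
              .build();
      SynthesizeSpeechResponse response = client.synthesizeSpeech(input, voice, audioConfig);
      Files.write(Paths.get("output.wav"), response.getAudioContent().toByteArray());
    }
  }
}
```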
diff --git a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java
index 488c7510..19967c29 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java
+++ b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequest.java
@@ -120,11 +120,11 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -150,11 +150,11 @@ public java.lang.String getLanguageCode() {
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -188,7 +188,7 @@ public final boolean isInitialized() {
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_);
}
unknownFields.writeTo(output);
@@ -200,7 +200,7 @@ public int getSerializedSize() {
if (size != -1) return size;
size = 0;
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_);
}
size += unknownFields.getSerializedSize();
@@ -500,11 +500,11 @@ public Builder mergeFrom(
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -529,11 +529,11 @@ public java.lang.String getLanguageCode() {
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -558,11 +558,11 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() {
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -586,11 +586,11 @@ public Builder setLanguageCode(java.lang.String value) {
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -610,11 +610,11 @@ public Builder clearLanguageCode() {
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
diff --git a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java
index d71890d9..ce0b67b6 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java
+++ b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/ListVoicesRequestOrBuilder.java
@@ -30,11 +30,11 @@ public interface ListVoicesRequestOrBuilder
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -49,11 +49,11 @@ public interface ListVoicesRequestOrBuilder
* Optional. Recommended.
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
* specified, the ListVoices call will only return voices that can be used to
- * synthesize this language_code. E.g. when specifying "en-NZ", you will get
- * supported "en-\*" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-\*" voices.
+ * synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ * supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ * `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ * `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ * also get supported `"yue-\*"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
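The matching rules documented for `language_code` are easiest to see in a small `ListVoices` call. A minimal sketch, again not part of this diff; the `"no"` tag simply mirrors the example in the field documentation above and should return both `no-*` and `nb-*` voices.

```java
import com.google.cloud.texttospeech.v1.ListVoicesRequest;
import com.google.cloud.texttospeech.v1.ListVoicesResponse;
import com.google.cloud.texttospeech.v1.TextToSpeechClient;
import com.google.cloud.texttospeech.v1.Voice;

public class ListVoicesByLanguageSketch {
  public static void main(String[] args) throws Exception {
    try (TextToSpeechClient client = TextToSpeechClient.create()) {
      // Per the javadoc above, "no" matches both "no-*" (Norwegian) and
      // "nb-*" (Norwegian Bokmal) voices.
      ListVoicesRequest request =
          ListVoicesRequest.newBuilder().setLanguageCode("no").build();
      ListVoicesResponse response = client.listVoices(request);
      for (Voice voice : response.getVoicesList()) {
        System.out.printf(
            "%s %s %d Hz%n",
            voice.getName(), voice.getLanguageCodesList(), voice.getNaturalSampleRateHertz());
      }
    }
  }
}
```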
diff --git a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java
index 1ba3b87a..a0a37bdc 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java
+++ b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/SsmlVoiceGender.java
@@ -67,7 +67,7 @@ public enum SsmlVoiceGender implements com.google.protobuf.ProtocolMessageEnum {
*
*
*
- * A gender-neutral voice.
+ * A gender-neutral voice. This voice is not yet supported.
*
*
* NEUTRAL = 3;
@@ -114,7 +114,7 @@ public enum SsmlVoiceGender implements com.google.protobuf.ProtocolMessageEnum {
*
*
*
- * A gender-neutral voice.
+ * A gender-neutral voice. This voice is not yet supported.
*
*
* NEUTRAL = 3;
diff --git a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java
index 2d8ecdd1..a5507e30 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java
+++ b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/TextToSpeechProto.java
@@ -72,53 +72,56 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ ".proto\022\034google.cloud.texttospeech.v1\032\034go"
+ "ogle/api/annotations.proto\032\027google/api/c"
+ "lient.proto\032\037google/api/field_behavior.p"
- + "roto\"/\n\021ListVoicesRequest\022\032\n\rlanguage_co"
- + "de\030\001 \001(\tB\003\340A\001\"I\n\022ListVoicesResponse\0223\n\006v"
- + "oices\030\001 \003(\0132#.google.cloud.texttospeech."
- + "v1.Voice\"\224\001\n\005Voice\022\026\n\016language_codes\030\001 \003"
- + "(\t\022\014\n\004name\030\002 \001(\t\022B\n\013ssml_gender\030\003 \001(\0162-."
- + "google.cloud.texttospeech.v1.SsmlVoiceGe"
- + "nder\022!\n\031natural_sample_rate_hertz\030\004 \001(\005\""
- + "\351\001\n\027SynthesizeSpeechRequest\022@\n\005input\030\001 \001"
- + "(\0132,.google.cloud.texttospeech.v1.Synthe"
- + "sisInputB\003\340A\002\022F\n\005voice\030\002 \001(\01322.google.cl"
- + "oud.texttospeech.v1.VoiceSelectionParams"
- + "B\003\340A\002\022D\n\014audio_config\030\003 \001(\0132).google.clo"
- + "ud.texttospeech.v1.AudioConfigB\003\340A\002\"@\n\016S"
- + "ynthesisInput\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml\030\002 "
- + "\001(\tH\000B\016\n\014input_source\"\204\001\n\024VoiceSelection"
- + "Params\022\032\n\rlanguage_code\030\001 \001(\tB\003\340A\002\022\014\n\004na"
- + "me\030\002 \001(\t\022B\n\013ssml_gender\030\003 \001(\0162-.google.c"
- + "loud.texttospeech.v1.SsmlVoiceGender\"\361\001\n"
- + "\013AudioConfig\022H\n\016audio_encoding\030\001 \001(\0162+.g"
- + "oogle.cloud.texttospeech.v1.AudioEncodin"
- + "gB\003\340A\002\022\035\n\rspeaking_rate\030\002 \001(\001B\006\340A\004\340A\001\022\025\n"
- + "\005pitch\030\003 \001(\001B\006\340A\004\340A\001\022\036\n\016volume_gain_db\030\004"
- + " \001(\001B\006\340A\004\340A\001\022\036\n\021sample_rate_hertz\030\005 \001(\005B"
- + "\003\340A\001\022\"\n\022effects_profile_id\030\006 \003(\tB\006\340A\004\340A\001"
- + "\"1\n\030SynthesizeSpeechResponse\022\025\n\raudio_co"
- + "ntent\030\001 \001(\014*W\n\017SsmlVoiceGender\022!\n\035SSML_V"
- + "OICE_GENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020\001\022\n\n\006F"
- + "EMALE\020\002\022\013\n\007NEUTRAL\020\003*T\n\rAudioEncoding\022\036\n"
- + "\032AUDIO_ENCODING_UNSPECIFIED\020\000\022\014\n\010LINEAR1"
- + "6\020\001\022\007\n\003MP3\020\002\022\014\n\010OGG_OPUS\020\0032\264\003\n\014TextToSpe"
- + "ech\022\223\001\n\nListVoices\022/.google.cloud.textto"
- + "speech.v1.ListVoicesRequest\0320.google.clo"
- + "ud.texttospeech.v1.ListVoicesResponse\"\"\202"
- + "\323\344\223\002\014\022\n/v1/voices\332A\rlanguage_code\022\274\001\n\020Sy"
- + "nthesizeSpeech\0225.google.cloud.texttospee"
- + "ch.v1.SynthesizeSpeechRequest\0326.google.c"
- + "loud.texttospeech.v1.SynthesizeSpeechRes"
- + "ponse\"9\202\323\344\223\002\030\"\023/v1/text:synthesize:\001*\332A\030"
- + "input,voice,audio_config\032O\312A\033texttospeec"
- + "h.googleapis.com\322A.https://www.googleapi"
- + "s.com/auth/cloud-platformB\344\001\n com.google"
- + ".cloud.texttospeech.v1B\021TextToSpeechProt"
- + "oP\001ZHgoogle.golang.org/genproto/googleap"
- + "is/cloud/texttospeech/v1;texttospeech\370\001\001"
- + "\252\002\034Google.Cloud.TextToSpeech.V1\312\002\034Google"
- + "\\Cloud\\TextToSpeech\\V1\352\002\037Google::Cloud::"
- + "TextToSpeech::V1b\006proto3"
+ + "roto\032\031google/api/resource.proto\"/\n\021ListV"
+ + "oicesRequest\022\032\n\rlanguage_code\030\001 \001(\tB\003\340A\001"
+ + "\"I\n\022ListVoicesResponse\0223\n\006voices\030\001 \003(\0132#"
+ + ".google.cloud.texttospeech.v1.Voice\"\224\001\n\005"
+ + "Voice\022\026\n\016language_codes\030\001 \003(\t\022\014\n\004name\030\002 "
+ + "\001(\t\022B\n\013ssml_gender\030\003 \001(\0162-.google.cloud."
+ + "texttospeech.v1.SsmlVoiceGender\022!\n\031natur"
+ + "al_sample_rate_hertz\030\004 \001(\005\"\351\001\n\027Synthesiz"
+ + "eSpeechRequest\022@\n\005input\030\001 \001(\0132,.google.c"
+ + "loud.texttospeech.v1.SynthesisInputB\003\340A\002"
+ + "\022F\n\005voice\030\002 \001(\01322.google.cloud.texttospe"
+ + "ech.v1.VoiceSelectionParamsB\003\340A\002\022D\n\014audi"
+ + "o_config\030\003 \001(\0132).google.cloud.texttospee"
+ + "ch.v1.AudioConfigB\003\340A\002\"@\n\016SynthesisInput"
+ + "\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml\030\002 \001(\tH\000B\016\n\014inpu"
+ + "t_source\"\204\001\n\024VoiceSelectionParams\022\032\n\rlan"
+ + "guage_code\030\001 \001(\tB\003\340A\002\022\014\n\004name\030\002 \001(\t\022B\n\013s"
+ + "sml_gender\030\003 \001(\0162-.google.cloud.texttosp"
+ + "eech.v1.SsmlVoiceGender\"\361\001\n\013AudioConfig\022"
+ + "H\n\016audio_encoding\030\001 \001(\0162+.google.cloud.t"
+ + "exttospeech.v1.AudioEncodingB\003\340A\002\022\035\n\rspe"
+ + "aking_rate\030\002 \001(\001B\006\340A\004\340A\001\022\025\n\005pitch\030\003 \001(\001B"
+ + "\006\340A\004\340A\001\022\036\n\016volume_gain_db\030\004 \001(\001B\006\340A\004\340A\001\022"
+ + "\036\n\021sample_rate_hertz\030\005 \001(\005B\003\340A\001\022\"\n\022effec"
+ + "ts_profile_id\030\006 \003(\tB\006\340A\004\340A\001\"1\n\030Synthesiz"
+ + "eSpeechResponse\022\025\n\raudio_content\030\001 \001(\014*W"
+ + "\n\017SsmlVoiceGender\022!\n\035SSML_VOICE_GENDER_U"
+ + "NSPECIFIED\020\000\022\010\n\004MALE\020\001\022\n\n\006FEMALE\020\002\022\013\n\007NE"
+ + "UTRAL\020\003*i\n\rAudioEncoding\022\036\n\032AUDIO_ENCODI"
+ + "NG_UNSPECIFIED\020\000\022\014\n\010LINEAR16\020\001\022\007\n\003MP3\020\002\022"
+ + "\014\n\010OGG_OPUS\020\003\022\t\n\005MULAW\020\005\022\010\n\004ALAW\020\0062\264\003\n\014T"
+ + "extToSpeech\022\223\001\n\nListVoices\022/.google.clou"
+ + "d.texttospeech.v1.ListVoicesRequest\0320.go"
+ + "ogle.cloud.texttospeech.v1.ListVoicesRes"
+ + "ponse\"\"\202\323\344\223\002\014\022\n/v1/voices\332A\rlanguage_cod"
+ + "e\022\274\001\n\020SynthesizeSpeech\0225.google.cloud.te"
+ + "xttospeech.v1.SynthesizeSpeechRequest\0326."
+ + "google.cloud.texttospeech.v1.SynthesizeS"
+ + "peechResponse\"9\202\323\344\223\002\030\"\023/v1/text:synthesi"
+ + "ze:\001*\332A\030input,voice,audio_config\032O\312A\033tex"
+ + "ttospeech.googleapis.com\322A.https://www.g"
+ + "oogleapis.com/auth/cloud-platformB\274\002\n co"
+ + "m.google.cloud.texttospeech.v1B\021TextToSp"
+ + "eechProtoP\001ZHgoogle.golang.org/genproto/"
+ + "googleapis/cloud/texttospeech/v1;texttos"
+ + "peech\370\001\001\252\002\034Google.Cloud.TextToSpeech.V1\312"
+ + "\002\034Google\\Cloud\\TextToSpeech\\V1\352\002\037Google:"
+ + ":Cloud::TextToSpeech::V1\352AU\n\033automl.goog"
+ + "leapis.com/Model\0226projects/{project}/loc"
+ + "ations/{location}/models/{model}b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -127,6 +130,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
com.google.api.AnnotationsProto.getDescriptor(),
com.google.api.ClientProto.getDescriptor(),
com.google.api.FieldBehaviorProto.getDescriptor(),
+ com.google.api.ResourceProto.getDescriptor(),
});
internal_static_google_cloud_texttospeech_v1_ListVoicesRequest_descriptor =
getDescriptor().getMessageTypes().get(0);
@@ -204,11 +208,13 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
registry.add(com.google.api.AnnotationsProto.http);
registry.add(com.google.api.ClientProto.methodSignature);
registry.add(com.google.api.ClientProto.oauthScopes);
+ registry.add(com.google.api.ResourceProto.resourceDefinition);
com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor(
descriptor, registry);
com.google.api.AnnotationsProto.getDescriptor();
com.google.api.ClientProto.getDescriptor();
com.google.api.FieldBehaviorProto.getDescriptor();
+ com.google.api.ResourceProto.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
diff --git a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java
index 675b411e..32596552 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java
+++ b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/Voice.java
@@ -328,7 +328,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < languageCodes_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCodes_.getRaw(i));
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
}
if (ssmlGender_
@@ -356,7 +356,7 @@ public int getSerializedSize() {
size += dataSize;
size += 1 * getLanguageCodesList().size();
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
}
if (ssmlGender_
diff --git a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java
index 05c580be..06ad0e56 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java
+++ b/proto-google-cloud-texttospeech-v1/src/main/java/com/google/cloud/texttospeech/v1/VoiceSelectionParams.java
@@ -305,10 +305,10 @@ public final boolean isInitialized() {
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_);
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
}
if (ssmlGender_
@@ -325,10 +325,10 @@ public int getSerializedSize() {
if (size != -1) return size;
size = 0;
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_);
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
}
if (ssmlGender_
diff --git a/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto b/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto
index ebcf44d8..a10bfe60 100644
--- a/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto
+++ b/proto-google-cloud-texttospeech-v1/src/main/proto/google/cloud/texttospeech/v1/cloud_tts.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -20,6 +19,7 @@ package google.cloud.texttospeech.v1;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.TextToSpeech.V1";
@@ -29,6 +29,10 @@ option java_outer_classname = "TextToSpeechProto";
option java_package = "com.google.cloud.texttospeech.v1";
option php_namespace = "Google\\Cloud\\TextToSpeech\\V1";
option ruby_package = "Google::Cloud::TextToSpeech::V1";
+option (google.api.resource_definition) = {
+ type: "automl.googleapis.com/Model"
+ pattern: "projects/{project}/locations/{location}/models/{model}"
+};
// Service that implements Google Cloud Text-to-Speech API.
service TextToSpeech {
@@ -59,11 +63,11 @@ message ListVoicesRequest {
// Optional. Recommended.
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. If
// specified, the ListVoices call will only return voices that can be used to
- // synthesize this language_code. E.g. when specifying "en-NZ", you will get
- // supported "en-\*" voices; when specifying "no", you will get supported
- // "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- // will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- // supported "yue-\*" voices.
+ // synthesize this language_code. E.g. when specifying `"en-NZ"`, you will get
+ // supported `"en-\*"` voices; when specifying `"no"`, you will get supported
+ // `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices; specifying
+ // `"zh"` will also get supported `"cmn-\*"` voices; specifying `"zh-hk"` will
+ // also get supported `"yue-\*"` voices.
string language_code = 1 [(google.api.field_behavior) = OPTIONAL];
}
@@ -83,7 +87,7 @@ enum SsmlVoiceGender {
// A female voice.
FEMALE = 2;
- // A gender-neutral voice.
+ // A gender-neutral voice. This voice is not yet supported.
NEUTRAL = 3;
}
@@ -105,6 +109,14 @@ enum AudioEncoding {
// Chrome and Firefox). The quality of the encoding is considerably higher
// than MP3 while using approximately the same bitrate.
OGG_OPUS = 3;
+
+ // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
+ // Audio content returned as MULAW also contains a WAV header.
+ MULAW = 5;
+
+ // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/A-law.
+ // Audio content returned as ALAW also contains a WAV header.
+ ALAW = 6;
}
// The message returned to the client by the `ListVoices` method.
diff --git a/proto-google-cloud-texttospeech-v1beta1/pom.xml b/proto-google-cloud-texttospeech-v1beta1/pom.xml
index 93091255..b163cb25 100644
--- a/proto-google-cloud-texttospeech-v1beta1/pom.xml
+++ b/proto-google-cloud-texttospeech-v1beta1/pom.xml
@@ -4,13 +4,13 @@
+ * Description of the custom voice to be synthesized.
+ *
+ *
+ * Protobuf type {@code google.cloud.texttospeech.v1beta1.CustomVoiceParams}
+ */
+public final class CustomVoiceParams extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.cloud.texttospeech.v1beta1.CustomVoiceParams)
+ CustomVoiceParamsOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use CustomVoiceParams.newBuilder() to construct.
+ private CustomVoiceParams(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+
+ private CustomVoiceParams() {
+ model_ = "";
+ reportedUsage_ = 0;
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new CustomVoiceParams();
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
+ return this.unknownFields;
+ }
+
+ private CustomVoiceParams(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ model_ = s;
+ break;
+ }
+ case 24:
+ {
+ int rawValue = input.readEnum();
+
+ reportedUsage_ = rawValue;
+ break;
+ }
+ default:
+ {
+ if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
+ .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.texttospeech.v1beta1.TextToSpeechProto
+ .internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.class,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder.class);
+ }
+
+ /**
+ *
+ *
+ *
+ * The usage of the synthesized audio. You must report your honest and
+ * correct usage of the service as it's regulated by contract and will cause
+ * significant difference in billing.
+ *
+ *
+ * Protobuf enum {@code google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage}
+ */
+ public enum ReportedUsage implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ *
+ *
+ *
+ * Request with reported usage unspecified will be rejected.
+ *
+ *
+ * REPORTED_USAGE_UNSPECIFIED = 0;
+ */
+ REPORTED_USAGE_UNSPECIFIED(0),
+ /**
+ *
+ *
+ *
+ * For scenarios where the synthesized audio is not downloadable and can
+ * only be used once. For example, real-time request in IVR system.
+ *
+ *
+ * REALTIME = 1;
+ */
+ REALTIME(1),
+ /**
+ *
+ *
+ *
+ * For scenarios where the synthesized audio is downloadable and can be
+ * reused. For example, the synthesized audio is downloaded, stored in
+ * customer service system and played repeatedly.
+ *
+ *
+ * OFFLINE = 2;
+ */
+ OFFLINE(2),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+ *
+ *
+ *
+ * Request with reported usage unspecified will be rejected.
+ *
+ *
+ * REPORTED_USAGE_UNSPECIFIED = 0;
+ */
+ public static final int REPORTED_USAGE_UNSPECIFIED_VALUE = 0;
+ /**
+ *
+ *
+ *
+ * For scenarios where the synthesized audio is not downloadable and can
+ * only be used once. For example, real-time request in IVR system.
+ *
+ *
+ * REALTIME = 1;
+ */
+ public static final int REALTIME_VALUE = 1;
+ /**
+ *
+ *
+ *
+ * For scenarios where the synthesized audio is downloadable and can be
+ * reused. For example, the synthesized audio is downloaded, stored in
+ * customer service system and played repeatedly.
+ *
+ *
+ * OFFLINE = 2;
+ */
+ public static final int OFFLINE_VALUE = 2;
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static ReportedUsage valueOf(int value) {
+ return forNumber(value);
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ */
+ public static ReportedUsage forNumber(int value) {
+ switch (value) {
+ case 0:
+ return REPORTED_USAGE_UNSPECIFIED;
+ case 1:
+ return REALTIME;
+ case 2:
+ return OFFLINE;
+ default:
+ return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<ReportedUsage>
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The model.
+ */
+ @java.lang.Override
+ public java.lang.String getModel() {
+ java.lang.Object ref = model_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ model_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The bytes for model.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getModelBytes() {
+ java.lang.Object ref = model_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ model_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int REPORTED_USAGE_FIELD_NUMBER = 3;
+ private int reportedUsage_;
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The enum numeric value on the wire for reportedUsage.
+ */
+ @java.lang.Override
+ public int getReportedUsageValue() {
+ return reportedUsage_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The reportedUsage.
+ */
+ @java.lang.Override
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage getReportedUsage() {
+ @SuppressWarnings("deprecation")
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage result =
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.valueOf(
+ reportedUsage_);
+ return result == null
+ ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.UNRECOGNIZED
+ : result;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, model_);
+ }
+ if (reportedUsage_
+ != com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage
+ .REPORTED_USAGE_UNSPECIFIED
+ .getNumber()) {
+ output.writeEnum(3, reportedUsage_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(model_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, model_);
+ }
+ if (reportedUsage_
+ != com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage
+ .REPORTED_USAGE_UNSPECIFIED
+ .getNumber()) {
+ size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, reportedUsage_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.texttospeech.v1beta1.CustomVoiceParams)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams other =
+ (com.google.cloud.texttospeech.v1beta1.CustomVoiceParams) obj;
+
+ if (!getModel().equals(other.getModel())) return false;
+ if (reportedUsage_ != other.reportedUsage_) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + MODEL_FIELD_NUMBER;
+ hash = (53 * hash) + getModel().hashCode();
+ hash = (37 * hash) + REPORTED_USAGE_FIELD_NUMBER;
+ hash = (53 * hash) + reportedUsage_;
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Description of the custom voice to be synthesized.
+ *
+ *
+ * Protobuf type {@code google.cloud.texttospeech.v1beta1.CustomVoiceParams}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The model.
+ */
+ public java.lang.String getModel() {
+ java.lang.Object ref = model_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ model_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The bytes for model.
+ */
+ public com.google.protobuf.ByteString getModelBytes() {
+ java.lang.Object ref = model_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ model_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @param value The model to set.
+ * @return This builder for chaining.
+ */
+ public Builder setModel(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ model_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearModel() {
+
+ model_ = getDefaultInstance().getModel();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @param value The bytes for model to set.
+ * @return This builder for chaining.
+ */
+ public Builder setModelBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ model_ = value;
+ onChanged();
+ return this;
+ }
+
+ private int reportedUsage_ = 0;
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The enum numeric value on the wire for reportedUsage.
+ */
+ @java.lang.Override
+ public int getReportedUsageValue() {
+ return reportedUsage_;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @param value The enum numeric value on the wire for reportedUsage to set.
+ * @return This builder for chaining.
+ */
+ public Builder setReportedUsageValue(int value) {
+
+ reportedUsage_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The reportedUsage.
+ */
+ @java.lang.Override
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage
+ getReportedUsage() {
+ @SuppressWarnings("deprecation")
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage result =
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.valueOf(
+ reportedUsage_);
+ return result == null
+ ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage.UNRECOGNIZED
+ : result;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @param value The reportedUsage to set.
+ * @return This builder for chaining.
+ */
+ public Builder setReportedUsage(
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ reportedUsage_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearReportedUsage() {
+
+ reportedUsage_ = 0;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.texttospeech.v1beta1.CustomVoiceParams)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.texttospeech.v1beta1.CustomVoiceParams)
+ private static final com.google.cloud.texttospeech.v1beta1.CustomVoiceParams DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.texttospeech.v1beta1.CustomVoiceParams();
+ }
+
+ public static com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser<CustomVoiceParams>
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The model.
+ */
+ java.lang.String getModel();
+ /**
+ *
+ *
+ *
+ * Required. The name of the AutoML model that synthesizes the custom voice.
+ *
+ *
+ *
+ * string model = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }
+ *
+ *
+ * @return The bytes for model.
+ */
+ com.google.protobuf.ByteString getModelBytes();
+
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The enum numeric value on the wire for reportedUsage.
+ */
+ int getReportedUsageValue();
+ /**
+ *
+ *
+ *
+ * Optional. The usage of the synthesized audio to be reported.
+ *
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage reported_usage = 3 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The reportedUsage.
+ */
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage getReportedUsage();
+}
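To make the new v1beta1 message concrete, here is a hedged sketch of how it might be populated. `newBuilder()`, `setModel(...)`, and `setReportedUsage(...)` are the accessors introduced above; the project, location, and model names are placeholders following the `projects/{project}/locations/{location}/models/{model}` pattern declared elsewhere in this change, and attaching the message to a synthesis request is outside this excerpt.

```java
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams;
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage;

public class CustomVoiceParamsSketch {
  public static void main(String[] args) {
    // Placeholder AutoML model resource name; substitute a real
    // projects/{project}/locations/{location}/models/{model} path.
    CustomVoiceParams params =
        CustomVoiceParams.newBuilder()
            .setModel("projects/my-project/locations/us-central1/models/my-custom-voice")
            .setReportedUsage(ReportedUsage.REALTIME) // audio used once, in real time
            .build();
    System.out.println(params);
  }
}
```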
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java
index 090a0c54..e429442c 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequest.java
@@ -121,11 +121,11 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -152,11 +152,11 @@ public java.lang.String getLanguageCode() {
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -190,7 +190,7 @@ public final boolean isInitialized() {
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_);
}
unknownFields.writeTo(output);
@@ -202,7 +202,7 @@ public int getSerializedSize() {
if (size != -1) return size;
size = 0;
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_);
}
size += unknownFields.getSerializedSize();
@@ -504,11 +504,11 @@ public Builder mergeFrom(
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -534,11 +534,11 @@ public java.lang.String getLanguageCode() {
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -564,11 +564,11 @@ public com.google.protobuf.ByteString getLanguageCodeBytes() {
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -593,11 +593,11 @@ public Builder setLanguageCode(java.lang.String value) {
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -618,11 +618,11 @@ public Builder clearLanguageCode() {
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
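
To make the documented language_code filtering concrete, here is a minimal usage sketch against the v1 client. The language tag is only a placeholder, and client creation assumes default application credentials; per the field docs above, only voices able to synthesize the given tag are returned.

```java
import com.google.cloud.texttospeech.v1.ListVoicesRequest;
import com.google.cloud.texttospeech.v1.ListVoicesResponse;
import com.google.cloud.texttospeech.v1.TextToSpeechClient;
import com.google.cloud.texttospeech.v1.Voice;

public class ListVoicesByLanguage {
  public static void main(String[] args) throws Exception {
    try (TextToSpeechClient client = TextToSpeechClient.create()) {
      // Placeholder BCP-47 tag; voices that can synthesize it are returned.
      ListVoicesRequest request =
          ListVoicesRequest.newBuilder().setLanguageCode("en-NZ").build();
      ListVoicesResponse response = client.listVoices(request);
      for (Voice voice : response.getVoicesList()) {
        System.out.println(voice.getName() + " -> " + voice.getLanguageCodesList());
      }
    }
  }
}
```
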
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java
index 1bc59e44..66177312 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/ListVoicesRequestOrBuilder.java
@@ -31,11 +31,11 @@ public interface ListVoicesRequestOrBuilder
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
@@ -51,11 +51,11 @@ public interface ListVoicesRequestOrBuilder
* [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
* If not specified, the API will return all supported voices.
* If specified, the ListVoices call will only return voices that can be used
- * to synthesize this language_code. E.g. when specifying "en-NZ", you will
- * get supported "en-NZ" voices; when specifying "no", you will get supported
- * "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- * will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- * supported "yue-hk" voices.
+ * to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ * get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ * supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ * specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ * `"zh-hk"` will also get supported `"yue-hk"` voices.
*
*
* string language_code = 1 [(.google.api.field_behavior) = OPTIONAL];
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java
index b399a583..9acd6a6a 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/TextToSpeechProto.java
@@ -55,6 +55,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -76,66 +80,76 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "d_tts.proto\022!google.cloud.texttospeech.v"
+ "1beta1\032\034google/api/annotations.proto\032\027go"
+ "ogle/api/client.proto\032\037google/api/field_"
- + "behavior.proto\"/\n\021ListVoicesRequest\022\032\n\rl"
- + "anguage_code\030\001 \001(\tB\003\340A\001\"N\n\022ListVoicesRes"
- + "ponse\0228\n\006voices\030\001 \003(\0132(.google.cloud.tex"
- + "ttospeech.v1beta1.Voice\"\231\001\n\005Voice\022\026\n\016lan"
- + "guage_codes\030\001 \003(\t\022\014\n\004name\030\002 \001(\t\022G\n\013ssml_"
- + "gender\030\003 \001(\01622.google.cloud.texttospeech"
- + ".v1beta1.SsmlVoiceGender\022!\n\031natural_samp"
- + "le_rate_hertz\030\004 \001(\005\"\240\003\n\027SynthesizeSpeech"
- + "Request\022E\n\005input\030\001 \001(\01321.google.cloud.te"
- + "xttospeech.v1beta1.SynthesisInputB\003\340A\002\022K"
- + "\n\005voice\030\002 \001(\01327.google.cloud.texttospeec"
- + "h.v1beta1.VoiceSelectionParamsB\003\340A\002\022I\n\014a"
- + "udio_config\030\003 \001(\0132..google.cloud.texttos"
- + "peech.v1beta1.AudioConfigB\003\340A\002\022f\n\024enable"
- + "_time_pointing\030\004 \003(\0162H.google.cloud.text"
- + "tospeech.v1beta1.SynthesizeSpeechRequest"
- + ".TimepointType\">\n\rTimepointType\022\036\n\032TIMEP"
- + "OINT_TYPE_UNSPECIFIED\020\000\022\r\n\tSSML_MARK\020\001\"@"
- + "\n\016SynthesisInput\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml"
- + "\030\002 \001(\tH\000B\016\n\014input_source\"\211\001\n\024VoiceSelect"
- + "ionParams\022\032\n\rlanguage_code\030\001 \001(\tB\003\340A\002\022\014\n"
- + "\004name\030\002 \001(\t\022G\n\013ssml_gender\030\003 \001(\01622.googl"
- + "e.cloud.texttospeech.v1beta1.SsmlVoiceGe"
- + "nder\"\366\001\n\013AudioConfig\022M\n\016audio_encoding\030\001"
- + " \001(\01620.google.cloud.texttospeech.v1beta1"
- + ".AudioEncodingB\003\340A\002\022\035\n\rspeaking_rate\030\002 \001"
- + "(\001B\006\340A\004\340A\001\022\025\n\005pitch\030\003 \001(\001B\006\340A\004\340A\001\022\036\n\016vol"
- + "ume_gain_db\030\004 \001(\001B\006\340A\004\340A\001\022\036\n\021sample_rate"
- + "_hertz\030\005 \001(\005B\003\340A\001\022\"\n\022effects_profile_id\030"
- + "\006 \003(\tB\006\340A\004\340A\001\"\271\001\n\030SynthesizeSpeechRespon"
- + "se\022\025\n\raudio_content\030\001 \001(\014\022@\n\ntimepoints\030"
- + "\002 \003(\0132,.google.cloud.texttospeech.v1beta"
- + "1.Timepoint\022D\n\014audio_config\030\004 \001(\0132..goog"
- + "le.cloud.texttospeech.v1beta1.AudioConfi"
- + "g\"4\n\tTimepoint\022\021\n\tmark_name\030\004 \001(\t\022\024\n\014tim"
- + "e_seconds\030\003 \001(\001*W\n\017SsmlVoiceGender\022!\n\035SS"
- + "ML_VOICE_GENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020\001\022"
- + "\n\n\006FEMALE\020\002\022\013\n\007NEUTRAL\020\003*z\n\rAudioEncodin"
- + "g\022\036\n\032AUDIO_ENCODING_UNSPECIFIED\020\000\022\014\n\010LIN"
- + "EAR16\020\001\022\007\n\003MP3\020\002\022\017\n\013MP3_64_KBPS\020\004\022\014\n\010OGG"
- + "_OPUS\020\003\022\t\n\005MULAW\020\005\022\010\n\004ALAW\020\0062\322\003\n\014TextToS"
- + "peech\022\242\001\n\nListVoices\0224.google.cloud.text"
- + "tospeech.v1beta1.ListVoicesRequest\0325.goo"
- + "gle.cloud.texttospeech.v1beta1.ListVoice"
- + "sResponse\"\'\202\323\344\223\002\021\022\017/v1beta1/voices\332A\rlan"
- + "guage_code\022\313\001\n\020SynthesizeSpeech\022:.google"
- + ".cloud.texttospeech.v1beta1.SynthesizeSp"
- + "eechRequest\032;.google.cloud.texttospeech."
- + "v1beta1.SynthesizeSpeechResponse\">\202\323\344\223\002\035"
- + "\"\030/v1beta1/text:synthesize:\001*\332A\030input,vo"
- + "ice,audio_config\032O\312A\033texttospeech.google"
- + "apis.com\322A.https://www.googleapis.com/au"
- + "th/cloud-platformB\375\001\n%com.google.cloud.t"
- + "exttospeech.v1beta1B\021TextToSpeechProtoP\001"
- + "ZMgoogle.golang.org/genproto/googleapis/"
- + "cloud/texttospeech/v1beta1;texttospeech\370"
- + "\001\001\252\002!Google.Cloud.TextToSpeech.V1Beta1\312\002"
- + "!Google\\Cloud\\TextToSpeech\\V1beta1\352\002$Goo"
- + "gle::Cloud::TextToSpeech::V1beta1b\006proto"
- + "3"
+ + "behavior.proto\032\031google/api/resource.prot"
+ + "o\"/\n\021ListVoicesRequest\022\032\n\rlanguage_code\030"
+ + "\001 \001(\tB\003\340A\001\"N\n\022ListVoicesResponse\0228\n\006voic"
+ + "es\030\001 \003(\0132(.google.cloud.texttospeech.v1b"
+ + "eta1.Voice\"\231\001\n\005Voice\022\026\n\016language_codes\030\001"
+ + " \003(\t\022\014\n\004name\030\002 \001(\t\022G\n\013ssml_gender\030\003 \001(\0162"
+ + "2.google.cloud.texttospeech.v1beta1.Ssml"
+ + "VoiceGender\022!\n\031natural_sample_rate_hertz"
+ + "\030\004 \001(\005\"\240\003\n\027SynthesizeSpeechRequest\022E\n\005in"
+ + "put\030\001 \001(\01321.google.cloud.texttospeech.v1"
+ + "beta1.SynthesisInputB\003\340A\002\022K\n\005voice\030\002 \001(\013"
+ + "27.google.cloud.texttospeech.v1beta1.Voi"
+ + "ceSelectionParamsB\003\340A\002\022I\n\014audio_config\030\003"
+ + " \001(\0132..google.cloud.texttospeech.v1beta1"
+ + ".AudioConfigB\003\340A\002\022f\n\024enable_time_pointin"
+ + "g\030\004 \003(\0162H.google.cloud.texttospeech.v1be"
+ + "ta1.SynthesizeSpeechRequest.TimepointTyp"
+ + "e\">\n\rTimepointType\022\036\n\032TIMEPOINT_TYPE_UNS"
+ + "PECIFIED\020\000\022\r\n\tSSML_MARK\020\001\"@\n\016SynthesisIn"
+ + "put\022\016\n\004text\030\001 \001(\tH\000\022\016\n\004ssml\030\002 \001(\tH\000B\016\n\014i"
+ + "nput_source\"\325\001\n\024VoiceSelectionParams\022\032\n\r"
+ + "language_code\030\001 \001(\tB\003\340A\002\022\014\n\004name\030\002 \001(\t\022G"
+ + "\n\013ssml_gender\030\003 \001(\01622.google.cloud.textt"
+ + "ospeech.v1beta1.SsmlVoiceGender\022J\n\014custo"
+ + "m_voice\030\004 \001(\01324.google.cloud.texttospeec"
+ + "h.v1beta1.CustomVoiceParams\"\366\001\n\013AudioCon"
+ + "fig\022M\n\016audio_encoding\030\001 \001(\01620.google.clo"
+ + "ud.texttospeech.v1beta1.AudioEncodingB\003\340"
+ + "A\002\022\035\n\rspeaking_rate\030\002 \001(\001B\006\340A\004\340A\001\022\025\n\005pit"
+ + "ch\030\003 \001(\001B\006\340A\004\340A\001\022\036\n\016volume_gain_db\030\004 \001(\001"
+ + "B\006\340A\004\340A\001\022\036\n\021sample_rate_hertz\030\005 \001(\005B\003\340A\001"
+ + "\022\"\n\022effects_profile_id\030\006 \003(\tB\006\340A\004\340A\001\"\364\001\n"
+ + "\021CustomVoiceParams\0222\n\005model\030\001 \001(\tB#\340A\002\372A"
+ + "\035\n\033automl.googleapis.com/Model\022_\n\016report"
+ + "ed_usage\030\003 \001(\0162B.google.cloud.texttospee"
+ + "ch.v1beta1.CustomVoiceParams.ReportedUsa"
+ + "geB\003\340A\001\"J\n\rReportedUsage\022\036\n\032REPORTED_USA"
+ + "GE_UNSPECIFIED\020\000\022\014\n\010REALTIME\020\001\022\013\n\007OFFLIN"
+ + "E\020\002\"\271\001\n\030SynthesizeSpeechResponse\022\025\n\raudi"
+ + "o_content\030\001 \001(\014\022@\n\ntimepoints\030\002 \003(\0132,.go"
+ + "ogle.cloud.texttospeech.v1beta1.Timepoin"
+ + "t\022D\n\014audio_config\030\004 \001(\0132..google.cloud.t"
+ + "exttospeech.v1beta1.AudioConfig\"4\n\tTimep"
+ + "oint\022\021\n\tmark_name\030\004 \001(\t\022\024\n\014time_seconds\030"
+ + "\003 \001(\001*W\n\017SsmlVoiceGender\022!\n\035SSML_VOICE_G"
+ + "ENDER_UNSPECIFIED\020\000\022\010\n\004MALE\020\001\022\n\n\006FEMALE\020"
+ + "\002\022\013\n\007NEUTRAL\020\003*z\n\rAudioEncoding\022\036\n\032AUDIO"
+ + "_ENCODING_UNSPECIFIED\020\000\022\014\n\010LINEAR16\020\001\022\007\n"
+ + "\003MP3\020\002\022\017\n\013MP3_64_KBPS\020\004\022\014\n\010OGG_OPUS\020\003\022\t\n"
+ + "\005MULAW\020\005\022\010\n\004ALAW\020\0062\322\003\n\014TextToSpeech\022\242\001\n\n"
+ + "ListVoices\0224.google.cloud.texttospeech.v"
+ + "1beta1.ListVoicesRequest\0325.google.cloud."
+ + "texttospeech.v1beta1.ListVoicesResponse\""
+ + "\'\202\323\344\223\002\021\022\017/v1beta1/voices\332A\rlanguage_code"
+ + "\022\313\001\n\020SynthesizeSpeech\022:.google.cloud.tex"
+ + "ttospeech.v1beta1.SynthesizeSpeechReques"
+ + "t\032;.google.cloud.texttospeech.v1beta1.Sy"
+ + "nthesizeSpeechResponse\">\202\323\344\223\002\035\"\030/v1beta1"
+ + "/text:synthesize:\001*\332A\030input,voice,audio_"
+ + "config\032O\312A\033texttospeech.googleapis.com\322A"
+ + ".https://www.googleapis.com/auth/cloud-p"
+ + "latformB\325\002\n%com.google.cloud.texttospeec"
+ + "h.v1beta1B\021TextToSpeechProtoP\001ZMgoogle.g"
+ + "olang.org/genproto/googleapis/cloud/text"
+ + "tospeech/v1beta1;texttospeech\370\001\001\252\002!Googl"
+ + "e.Cloud.TextToSpeech.V1Beta1\312\002!Google\\Cl"
+ + "oud\\TextToSpeech\\V1beta1\352\002$Google::Cloud"
+ + "::TextToSpeech::V1beta1\352AU\n\033automl.googl"
+ + "eapis.com/Model\0226projects/{project}/loca"
+ + "tions/{location}/models/{model}b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -144,6 +158,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
com.google.api.AnnotationsProto.getDescriptor(),
com.google.api.ClientProto.getDescriptor(),
com.google.api.FieldBehaviorProto.getDescriptor(),
+ com.google.api.ResourceProto.getDescriptor(),
});
internal_static_google_cloud_texttospeech_v1beta1_ListVoicesRequest_descriptor =
getDescriptor().getMessageTypes().get(0);
@@ -191,7 +206,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_texttospeech_v1beta1_VoiceSelectionParams_descriptor,
new java.lang.String[] {
- "LanguageCode", "Name", "SsmlGender",
+ "LanguageCode", "Name", "SsmlGender", "CustomVoice",
});
internal_static_google_cloud_texttospeech_v1beta1_AudioConfig_descriptor =
getDescriptor().getMessageTypes().get(6);
@@ -206,8 +221,16 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"SampleRateHertz",
"EffectsProfileId",
});
- internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor =
+ internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor =
getDescriptor().getMessageTypes().get(7);
+ internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_texttospeech_v1beta1_CustomVoiceParams_descriptor,
+ new java.lang.String[] {
+ "Model", "ReportedUsage",
+ });
+ internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor =
+ getDescriptor().getMessageTypes().get(8);
internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_texttospeech_v1beta1_SynthesizeSpeechResponse_descriptor,
@@ -215,7 +238,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"AudioContent", "Timepoints", "AudioConfig",
});
internal_static_google_cloud_texttospeech_v1beta1_Timepoint_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(9);
internal_static_google_cloud_texttospeech_v1beta1_Timepoint_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_texttospeech_v1beta1_Timepoint_descriptor,
@@ -229,11 +252,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
registry.add(com.google.api.AnnotationsProto.http);
registry.add(com.google.api.ClientProto.methodSignature);
registry.add(com.google.api.ClientProto.oauthScopes);
+ registry.add(com.google.api.ResourceProto.resourceDefinition);
+ registry.add(com.google.api.ResourceProto.resourceReference);
com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor(
descriptor, registry);
com.google.api.AnnotationsProto.getDescriptor();
com.google.api.ClientProto.getDescriptor();
com.google.api.FieldBehaviorProto.getDescriptor();
+ com.google.api.ResourceProto.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Timepoint.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Timepoint.java
index 54d5c937..1868275e 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Timepoint.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Timepoint.java
@@ -201,7 +201,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (timeSeconds_ != 0D) {
output.writeDouble(3, timeSeconds_);
}
- if (!getMarkNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(markName_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 4, markName_);
}
unknownFields.writeTo(output);
@@ -216,7 +216,7 @@ public int getSerializedSize() {
if (timeSeconds_ != 0D) {
size += com.google.protobuf.CodedOutputStream.computeDoubleSize(3, timeSeconds_);
}
- if (!getMarkNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(markName_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, markName_);
}
size += unknownFields.getSerializedSize();
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Voice.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Voice.java
index da9db336..4f56f5fa 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Voice.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/Voice.java
@@ -330,7 +330,7 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < languageCodes_.size(); i++) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCodes_.getRaw(i));
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
}
if (ssmlGender_
@@ -358,7 +358,7 @@ public int getSerializedSize() {
size += dataSize;
size += 1 * getLanguageCodesList().size();
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
}
if (ssmlGender_
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java
index e584a40b..fd3483d0 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParams.java
@@ -91,6 +91,23 @@ private VoiceSelectionParams(
int rawValue = input.readEnum();
ssmlGender_ = rawValue;
+ break;
+ }
+ case 34:
+ {
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder subBuilder = null;
+ if (customVoice_ != null) {
+ subBuilder = customVoice_.toBuilder();
+ }
+ customVoice_ =
+ input.readMessage(
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.parser(),
+ extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(customVoice_);
+ customVoice_ = subBuilder.buildPartial();
+ }
+
break;
}
default:
@@ -293,6 +310,61 @@ public com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender getSsmlGender() {
: result;
}
+ public static final int CUSTOM_VOICE_FIELD_NUMBER = 4;
+ private com.google.cloud.texttospeech.v1beta1.CustomVoiceParams customVoice_;
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ *
+ * @return Whether the customVoice field is set.
+ */
+ @java.lang.Override
+ public boolean hasCustomVoice() {
+ return customVoice_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ *
+ * @return The customVoice.
+ */
+ @java.lang.Override
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getCustomVoice() {
+ return customVoice_ == null
+ ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance()
+ : customVoice_;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ @java.lang.Override
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder
+ getCustomVoiceOrBuilder() {
+ return getCustomVoice();
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -307,10 +379,10 @@ public final boolean isInitialized() {
@java.lang.Override
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 1, languageCode_);
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, name_);
}
if (ssmlGender_
@@ -318,6 +390,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
.getNumber()) {
output.writeEnum(3, ssmlGender_);
}
+ if (customVoice_ != null) {
+ output.writeMessage(4, getCustomVoice());
+ }
unknownFields.writeTo(output);
}
@@ -327,10 +402,10 @@ public int getSerializedSize() {
if (size != -1) return size;
size = 0;
- if (!getLanguageCodeBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(languageCode_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, languageCode_);
}
- if (!getNameBytes().isEmpty()) {
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, name_);
}
if (ssmlGender_
@@ -338,6 +413,9 @@ public int getSerializedSize() {
.getNumber()) {
size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, ssmlGender_);
}
+ if (customVoice_ != null) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCustomVoice());
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -357,6 +435,10 @@ public boolean equals(final java.lang.Object obj) {
if (!getLanguageCode().equals(other.getLanguageCode())) return false;
if (!getName().equals(other.getName())) return false;
if (ssmlGender_ != other.ssmlGender_) return false;
+ if (hasCustomVoice() != other.hasCustomVoice()) return false;
+ if (hasCustomVoice()) {
+ if (!getCustomVoice().equals(other.getCustomVoice())) return false;
+ }
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@@ -374,6 +456,10 @@ public int hashCode() {
hash = (53 * hash) + getName().hashCode();
hash = (37 * hash) + SSML_GENDER_FIELD_NUMBER;
hash = (53 * hash) + ssmlGender_;
+ if (hasCustomVoice()) {
+ hash = (37 * hash) + CUSTOM_VOICE_FIELD_NUMBER;
+ hash = (53 * hash) + getCustomVoice().hashCode();
+ }
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -526,6 +612,12 @@ public Builder clear() {
ssmlGender_ = 0;
+ if (customVoiceBuilder_ == null) {
+ customVoice_ = null;
+ } else {
+ customVoice_ = null;
+ customVoiceBuilder_ = null;
+ }
return this;
}
@@ -556,6 +648,11 @@ public com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams buildPartial()
result.languageCode_ = languageCode_;
result.name_ = name_;
result.ssmlGender_ = ssmlGender_;
+ if (customVoiceBuilder_ == null) {
+ result.customVoice_ = customVoice_;
+ } else {
+ result.customVoice_ = customVoiceBuilder_.build();
+ }
onBuilt();
return result;
}
@@ -617,6 +714,9 @@ public Builder mergeFrom(com.google.cloud.texttospeech.v1beta1.VoiceSelectionPar
if (other.ssmlGender_ != 0) {
setSsmlGenderValue(other.getSsmlGenderValue());
}
+ if (other.hasCustomVoice()) {
+ mergeCustomVoice(other.getCustomVoice());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -1027,6 +1127,211 @@ public Builder clearSsmlGender() {
return this;
}
+ private com.google.cloud.texttospeech.v1beta1.CustomVoiceParams customVoice_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder>
+ customVoiceBuilder_;
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ *
+ * @return Whether the customVoice field is set.
+ */
+ public boolean hasCustomVoice() {
+ return customVoiceBuilder_ != null || customVoice_ != null;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ *
+ * @return The customVoice.
+ */
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getCustomVoice() {
+ if (customVoiceBuilder_ == null) {
+ return customVoice_ == null
+ ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance()
+ : customVoice_;
+ } else {
+ return customVoiceBuilder_.getMessage();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ public Builder setCustomVoice(com.google.cloud.texttospeech.v1beta1.CustomVoiceParams value) {
+ if (customVoiceBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ customVoice_ = value;
+ onChanged();
+ } else {
+ customVoiceBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ public Builder setCustomVoice(
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder builderForValue) {
+ if (customVoiceBuilder_ == null) {
+ customVoice_ = builderForValue.build();
+ onChanged();
+ } else {
+ customVoiceBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ public Builder mergeCustomVoice(com.google.cloud.texttospeech.v1beta1.CustomVoiceParams value) {
+ if (customVoiceBuilder_ == null) {
+ if (customVoice_ != null) {
+ customVoice_ =
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.newBuilder(customVoice_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ customVoice_ = value;
+ }
+ onChanged();
+ } else {
+ customVoiceBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ public Builder clearCustomVoice() {
+ if (customVoiceBuilder_ == null) {
+ customVoice_ = null;
+ onChanged();
+ } else {
+ customVoice_ = null;
+ customVoiceBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder getCustomVoiceBuilder() {
+
+ onChanged();
+ return getCustomVoiceFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ public com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder
+ getCustomVoiceOrBuilder() {
+ if (customVoiceBuilder_ != null) {
+ return customVoiceBuilder_.getMessageOrBuilder();
+ } else {
+ return customVoice_ == null
+ ? com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.getDefaultInstance()
+ : customVoice_;
+ }
+ }
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder>
+ getCustomVoiceFieldBuilder() {
+ if (customVoiceBuilder_ == null) {
+ customVoiceBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.Builder,
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder>(
+ getCustomVoice(), getParentForChildren(), isClean());
+ customVoice_ = null;
+ }
+ return customVoiceBuilder_;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
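
A minimal sketch of how the new custom_voice field could be populated through the generated builders. The AutoML model resource name is a placeholder, and the setters shown are the standard protobuf-generated ones for the fields introduced in this diff.

```java
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams;
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams.ReportedUsage;
import com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender;
import com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams;

public class CustomVoiceSelection {
  public static void main(String[] args) {
    // Placeholder model resource name; substitute a real AutoML model.
    CustomVoiceParams customVoice =
        CustomVoiceParams.newBuilder()
            .setModel("projects/{project}/locations/{location}/models/{model}")
            .setReportedUsage(ReportedUsage.REALTIME)
            .build();

    VoiceSelectionParams voice =
        VoiceSelectionParams.newBuilder()
            .setLanguageCode("en-US")
            .setSsmlGender(SsmlVoiceGender.FEMALE)
            .setCustomVoice(customVoice)
            .build();

    // hasCustomVoice() reports whether the message field was set, as added above.
    System.out.println(voice.hasCustomVoice());
  }
}
```
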
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java
index f222fe0b..d1c6eb1d 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/java/com/google/cloud/texttospeech/v1beta1/VoiceSelectionParamsOrBuilder.java
@@ -127,4 +127,45 @@ public interface VoiceSelectionParamsOrBuilder
* @return The ssmlGender.
*/
com.google.cloud.texttospeech.v1beta1.SsmlVoiceGender getSsmlGender();
+
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ *
+ * @return Whether the customVoice field is set.
+ */
+ boolean hasCustomVoice();
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ *
+ * @return The customVoice.
+ */
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParams getCustomVoice();
+ /**
+ *
+ *
+ *
+ * The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ * the service will choose the custom voice matching the specified
+ * configuration.
+ *
+ *
+ * .google.cloud.texttospeech.v1beta1.CustomVoiceParams custom_voice = 4;
+ */
+ com.google.cloud.texttospeech.v1beta1.CustomVoiceParamsOrBuilder getCustomVoiceOrBuilder();
}
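
On the consuming side, the new accessors can be read through the OrBuilder interface regardless of whether the value comes from a built message or a live builder. A short hedged sketch follows; the helper class and method names are invented for illustration.

```java
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams;
import com.google.cloud.texttospeech.v1beta1.VoiceSelectionParamsOrBuilder;

public final class CustomVoiceInspector {
  private CustomVoiceInspector() {}

  /** Returns the custom voice model name, or an empty string if no custom voice was set. */
  public static String customVoiceModel(VoiceSelectionParamsOrBuilder params) {
    if (!params.hasCustomVoice()) {
      return "";
    }
    CustomVoiceParams custom = params.getCustomVoice();
    return custom.getModel();
  }
}
```
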
diff --git a/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto b/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto
index a70773d7..55a62bf8 100644
--- a/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto
+++ b/proto-google-cloud-texttospeech-v1beta1/src/main/proto/google/cloud/texttospeech/v1beta1/cloud_tts.proto
@@ -19,6 +19,7 @@ package google.cloud.texttospeech.v1beta1;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.TextToSpeech.V1Beta1";
@@ -28,6 +29,10 @@ option java_outer_classname = "TextToSpeechProto";
option java_package = "com.google.cloud.texttospeech.v1beta1";
option php_namespace = "Google\\Cloud\\TextToSpeech\\V1beta1";
option ruby_package = "Google::Cloud::TextToSpeech::V1beta1";
+option (google.api.resource_definition) = {
+ type: "automl.googleapis.com/Model"
+ pattern: "projects/{project}/locations/{location}/models/{model}"
+};
// Service that implements Google Cloud Text-to-Speech API.
service TextToSpeech {
@@ -59,11 +64,11 @@ message ListVoicesRequest {
// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
// If not specified, the API will return all supported voices.
// If specified, the ListVoices call will only return voices that can be used
- // to synthesize this language_code. E.g. when specifying "en-NZ", you will
- // get supported "en-NZ" voices; when specifying "no", you will get supported
- // "no-\*" (Norwegian) and "nb-\*" (Norwegian Bokmal) voices; specifying "zh"
- // will also get supported "cmn-\*" voices; specifying "zh-hk" will also get
- // supported "yue-hk" voices.
+ // to synthesize this language_code. E.g. when specifying `"en-NZ"`, you will
+ // get supported `"en-NZ"` voices; when specifying `"no"`, you will get
+ // supported `"no-\*"` (Norwegian) and `"nb-\*"` (Norwegian Bokmal) voices;
+ // specifying `"zh"` will also get supported `"cmn-\*"` voices; specifying
+ // `"zh-hk"` will also get supported `"yue-hk"` voices.
string language_code = 1 [(google.api.field_behavior) = OPTIONAL];
}
@@ -208,6 +213,11 @@ message VoiceSelectionParams {
// voice of the appropriate gender is not available, the synthesizer should
// substitute a voice with a different gender rather than failing the request.
SsmlVoiceGender ssml_gender = 3;
+
+ // The configuration for a custom voice. If [CustomVoiceParams.model] is set,
+ // the service will choose the custom voice matching the specified
+ // configuration.
+ CustomVoiceParams custom_voice = 4;
}
// Description of audio data to be synthesized.
@@ -266,6 +276,37 @@ message AudioConfig {
];
}
+// Description of the custom voice to be synthesized.
+message CustomVoiceParams {
+ // The usage of the synthesized audio. You must report your honest and
+ // correct usage of the service as it's regulated by contract and will cause
+ // significant difference in billing.
+ enum ReportedUsage {
+ // Request with reported usage unspecified will be rejected.
+ REPORTED_USAGE_UNSPECIFIED = 0;
+
+ // For scenarios where the synthesized audio is not downloadable and can
+ // only be used once. For example, real-time request in IVR system.
+ REALTIME = 1;
+
+ // For scenarios where the synthesized audio is downloadable and can be
+ // reused. For example, the synthesized audio is downloaded, stored in
+ // customer service system and played repeatedly.
+ OFFLINE = 2;
+ }
+
+ // Required. The name of the AutoML model that synthesizes the custom voice.
+ string model = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "automl.googleapis.com/Model"
+ }
+ ];
+
+ // Optional. The usage of the synthesized audio to be reported.
+ ReportedUsage reported_usage = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
// The message returned to the client by the `SynthesizeSpeech` method.
message SynthesizeSpeechResponse {
// The audio data bytes encoded as specified in the request, including the
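
Putting the proto changes together, a hedged end-to-end sketch of a v1beta1 synthesis request that carries the new CustomVoiceParams. Everything outside the fields introduced in this diff (client creation, the MP3 output encoding, the sample text, the output path) is an assumption for illustration, and the model resource name is a placeholder.

```java
import com.google.cloud.texttospeech.v1beta1.AudioConfig;
import com.google.cloud.texttospeech.v1beta1.AudioEncoding;
import com.google.cloud.texttospeech.v1beta1.CustomVoiceParams;
import com.google.cloud.texttospeech.v1beta1.SynthesisInput;
import com.google.cloud.texttospeech.v1beta1.SynthesizeSpeechResponse;
import com.google.cloud.texttospeech.v1beta1.TextToSpeechClient;
import com.google.cloud.texttospeech.v1beta1.VoiceSelectionParams;
import java.nio.file.Files;
import java.nio.file.Paths;

public class SynthesizeWithCustomVoice {
  public static void main(String[] args) throws Exception {
    try (TextToSpeechClient client = TextToSpeechClient.create()) {
      SynthesisInput input =
          SynthesisInput.newBuilder().setText("Hello from a custom voice").build();

      VoiceSelectionParams voice =
          VoiceSelectionParams.newBuilder()
              .setLanguageCode("en-US")
              .setCustomVoice(
                  CustomVoiceParams.newBuilder()
                      // Placeholder AutoML model resource name.
                      .setModel("projects/{project}/locations/{location}/models/{model}")
                      .setReportedUsage(CustomVoiceParams.ReportedUsage.OFFLINE)
                      .build())
              .build();

      AudioConfig audioConfig =
          AudioConfig.newBuilder().setAudioEncoding(AudioEncoding.MP3).build();

      SynthesizeSpeechResponse response = client.synthesizeSpeech(input, voice, audioConfig);
      Files.write(Paths.get("output.mp3"), response.getAudioContent().toByteArray());
    }
  }
}
```
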
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index e2b358e8..0e255389 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -29,7 +29,7 @@