This repository was archived by the owner on Dec 31, 2023. It is now read-only.

Commit 0fd78dc

fix: Add async context manager return types (#490)
* fix: Add async context manager return types
  chore: Mock return_value should not populate oneof message fields
  chore: Support snippet generation for services that only support REST transport
  chore: Update gapic-generator-python to v1.11.0

  PiperOrigin-RevId: 545430278
  Source-Link: googleapis/googleapis@601b532
  Source-Link: https://github.com/googleapis/googleapis-gen/commit/b3f18d0f6560a855022fd058865e7620479d7af9
  Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjNmMThkMGY2NTYwYTg1NTAyMmZkMDU4ODY1ZTc2MjA0NzlkN2FmOSJ9

* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent: e46ffd2 · commit: 0fd78dc
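The core change annotates each generated async client's __aenter__ method with the concrete client type (see the per-file diffs below). A minimal, self-contained sketch of why that annotation matters for static type checking; the Client class and its methods here are illustrative stand-ins, not code from this repository:

# Illustrative sketch only; `Client` stands in for the generated async clients
# touched by this commit (AutoMlAsyncClient, PredictionServiceAsyncClient).
import asyncio


class Client:
    async def __aenter__(self) -> "Client":
        # Annotating the return type lets type checkers infer that the name
        # bound by `async with ... as c` is a Client rather than Any.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Release underlying resources here (e.g. close the transport).
        pass

    async def ping(self) -> str:
        return "ok"


async def main() -> None:
    async with Client() as c:
        # With the annotation, `c` is known to be a Client, so attribute
        # access such as c.ping() is type-checked; without it, mypy/pyright
        # fall back to Any for `c`.
        print(await c.ping())


asyncio.run(main())

Without the annotation, __aenter__ is implicitly typed as returning Any, so every attribute access on the bound variable escapes type checking.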

File tree

8 files changed: +38 −82 lines

‎google/cloud/automl_v1/services/auto_ml/async_client.py

+1 −1 (1 addition, 1 deletion)

@@ -2561,7 +2561,7 @@ async def sample_list_model_evaluations():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "AutoMlAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):

‎google/cloud/automl_v1/services/prediction_service/async_client.py

+1 −1 (1 addition, 1 deletion)

@@ -673,7 +673,7 @@ async def sample_batch_predict():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "PredictionServiceAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):

‎google/cloud/automl_v1beta1/services/auto_ml/async_client.py

+1 −1 (1 addition, 1 deletion)

@@ -3241,7 +3241,7 @@ async def sample_list_model_evaluations():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "AutoMlAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):

‎google/cloud/automl_v1beta1/services/prediction_service/async_client.py

+1 −1 (1 addition, 1 deletion)

@@ -638,7 +638,7 @@ async def sample_batch_predict():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "PredictionServiceAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):
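For the clients changed above, the annotation pays off when they are used as async context managers. A hedged usage sketch, assuming AutoMlAsyncClient is re-exported from the google.cloud.automl_v1 package and that application default credentials are available at runtime; neither assumption is shown in this diff:

# Usage sketch under the assumptions stated above; not part of this commit.
import asyncio

from google.cloud import automl_v1


async def show_datasets(parent: str) -> None:
    # __aenter__ now returns "AutoMlAsyncClient", so type checkers infer the
    # type of `client` inside the async-with block, and __aexit__ still closes
    # the underlying transport when the block ends.
    async with automl_v1.AutoMlAsyncClient() as client:
        pager = await client.list_datasets(parent=parent)
        async for dataset in pager:
            print(dataset.display_name)


# Example invocation (project path is hypothetical):
# asyncio.run(show_datasets("projects/my-project/locations/us-central1"))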

‎samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json

+1 −1 (1 addition, 1 deletion)

@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-automl",
-    "version": "2.11.1"
+    "version": "0.1.0"
   },
   "snippets": [
     {

‎samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json

+1 −1 (1 addition, 1 deletion)

@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-automl",
-    "version": "2.11.1"
+    "version": "0.1.0"
   },
   "snippets": [
     {

‎tests/unit/gapic/automl_v1/test_auto_ml.py

+12 −36 (12 additions, 36 deletions)

@@ -969,9 +969,6 @@ def test_get_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.get_dataset(request)

@@ -1602,9 +1599,11 @@ async def test_list_datasets_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_datasets(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -1636,9 +1635,6 @@ def test_update_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.update_dataset(request)

@@ -3165,9 +3161,6 @@ def test_get_model(request_type, transport: str = "grpc"):
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )
         response = client.get_model(request)

@@ -3798,9 +3791,11 @@ async def test_list_models_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_models(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -4058,9 +4053,6 @@ def test_update_model(request_type, transport: str = "grpc"):
             dataset_id="dataset_id_value",
             deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )
         response = client.update_model(request)

@@ -5052,9 +5044,6 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )
         response = client.get_model_evaluation(request)

@@ -5730,9 +5719,11 @@ async def test_list_model_evaluations_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_model_evaluations(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -6075,9 +6066,6 @@ def test_get_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -6698,9 +6686,6 @@ def test_update_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -8432,9 +8417,6 @@ def test_get_model_rest(request_type):
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -9328,9 +9310,6 @@ def test_update_model_rest(request_type):
             dataset_id="dataset_id_value",
             deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -10447,9 +10426,6 @@ def test_get_model_evaluation_rest(request_type):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )

         # Wrap the value into a proper Response obj
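The test hunks above also drop the translation_* and classification_* sub-messages from mocked return values: those fields belong to proto oneof groups, and the mocks no longer arbitrarily pick one oneof branch to populate (per the "Mock return_value should not populate oneof message fields" note in the commit message). A rough, self-contained sketch of the surviving pattern, using plain unittest.mock with hypothetical stand-in types rather than the real google-cloud-automl messages:

# Self-contained sketch of the pattern; GetDatasetClient and FakeDataset are
# hypothetical stand-ins, not types from google-cloud-automl.
from dataclasses import dataclass
from unittest import mock


@dataclass
class FakeDataset:
    # Only plain scalar fields are set on the mocked response; oneof-style
    # sub-messages (e.g. translation_dataset_metadata) are left unset so the
    # mock does not silently commit to one oneof branch.
    name: str = ""
    example_count: int = 0
    etag: str = ""


class GetDatasetClient:
    def get_dataset(self, request):
        raise NotImplementedError  # replaced by the mock in the test


def test_get_dataset_returns_scalar_fields():
    client = GetDatasetClient()
    with mock.patch.object(client, "get_dataset") as call:
        call.return_value = FakeDataset(name="name_value", example_count=1396, etag="etag_value")
        response = client.get_dataset({})
    assert response.example_count == 1396
    assert response.etag == "etag_value"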

‎tests/unit/gapic/automl_v1beta1/test_auto_ml.py

+20 −40 (20 additions, 40 deletions)

@@ -718,9 +718,6 @@ def test_create_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.create_dataset(request)

@@ -999,9 +996,6 @@ def test_get_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.get_dataset(request)

@@ -1632,9 +1626,11 @@ async def test_list_datasets_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_datasets(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -1666,9 +1662,6 @@ def test_update_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.update_dataset(request)

@@ -3563,9 +3556,11 @@ async def test_list_table_specs_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_table_specs(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -4511,9 +4506,11 @@ async def test_list_column_specs_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_column_specs(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -5058,9 +5055,6 @@ def test_get_model(request_type, transport: str = "grpc"):
             display_name="display_name_value",
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )
         response = client.get_model(request)

@@ -5688,9 +5682,11 @@ async def test_list_models_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_models(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -6931,9 +6927,6 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )
         response = client.get_model_evaluation(request)

@@ -7599,9 +7592,11 @@ async def test_list_model_evaluations_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
             await client.list_model_evaluations(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
         for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
             assert page_.raw_page.next_page_token == token

@@ -7660,9 +7655,6 @@ def test_create_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -7977,9 +7969,6 @@ def test_get_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -8611,9 +8600,6 @@ def test_update_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -12373,9 +12359,6 @@ def test_get_model_rest(request_type):
             display_name="display_name_value",
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )

         # Wrap the value into a proper Response obj

@@ -14307,9 +14290,6 @@ def test_get_model_evaluation_rest(request_type):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )

         # Wrap the value into a proper Response obj
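The other recurring test change moves `# pragma: no branch` from the `).pages:` line onto the line that opens the `async for` statement, following the Python 3.9 coverage workaround linked in the added comment. A minimal runnable sketch of that placement; the async generator below is a stand-in for the mocked pagers, and the placement rationale is taken from the diff's own comment rather than verified against coverage.py here:

# Stand-in for iterating pages of an async pager; illustrative only.
import asyncio


async def fake_pages():
    # Yields page tokens the way the mocked pagers do in the tests above.
    for token in ["abc", "def", "ghi", ""]:
        yield token


async def collect_pages():
    pages = []
    # The exclusion pragma sits on the line that opens the `async for`
    # statement rather than on the line that closes the iterable expression,
    # matching the generated tests after this commit.
    async for page in fake_pages():  # pragma: no branch
        pages.append(page)
    return pages


assert asyncio.run(collect_pages()) == ["abc", "def", "ghi", ""]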
