Commit 74bcfbd

feat(generative-ai): Add code samples for Gemini models (GoogleCloudPlatform#11016)
* feat(generative_ai): Add code samples for Gemini models
* feat(generative_ai): Add Gemini sample for step by step user Guide
* feat(generative_ai): Add Gemini sample for step by step user Guide
* feat(generative_ai): Add Gemini sample for step by step user Guide
* feat(generative_ai): Add Gemini sample for Multi-image example
* feat(generative_ai): Add Gemini sample for single turn video example
* feat(generative_ai): Add Gemini sample for multiturn chat
* feat(generative_ai): Add Gemini sample for safety settings
* feat(generative_ai): Add Gemini sample for count token
* feat(generative_ai): Add Gemini sample's test cases
* fix(generative_ai): lint suggestions
* fix(generative_ai): fix test cases
1 parent a78dae5 commit 74bcfbd

8 files changed: +418 -1 lines changed

‎generative_ai/gemini_chat_example.py

+73 lines changed: 73 additions & 0 deletions
@@ -0,0 +1,73 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def chat_text_example(project_id: str, location: str) -> str:
    # [START aiplatform_gemini_multiturn_chat]
    import vertexai
    from vertexai.preview.generative_models import GenerativeModel, ChatSession

    # TODO(developer): Update and un-comment below lines
    # project_id = "PROJECT_ID"
    # location = "us-central1"
    vertexai.init(project=project_id, location=location)

    model = GenerativeModel("gemini-pro")
    chat = model.start_chat()

    def get_chat_response(chat: ChatSession, prompt: str) -> str:
        response = chat.send_message(prompt)
        return response.text

    prompt = "Hello."
    print(get_chat_response(chat, prompt))

    prompt = "What are all the colors in a rainbow?"
    print(get_chat_response(chat, prompt))

    prompt = "Why does it appear when it rains?"
    print(get_chat_response(chat, prompt))
    # [END aiplatform_gemini_multiturn_chat]
    return get_chat_response(chat, "Hello")


def chat_stream_example(project_id: str, location: str) -> str:
    # [START aiplatform_gemini_multiturn_chat_stream]
    import vertexai
    from vertexai.preview.generative_models import GenerativeModel, ChatSession

    # TODO(developer): Update and un-comment below lines
    # project_id = "PROJECT_ID"
    # location = "us-central1"
    vertexai.init(project=project_id, location=location)
    model = GenerativeModel("gemini-pro")
    chat = model.start_chat()

    def get_chat_response(chat: ChatSession, prompt: str) -> str:
        text_response = []
        responses = chat.send_message(prompt, stream=True)
        for chunk in responses:
            text_response.append(chunk.text)
        return "".join(text_response)

    prompt = "Hello."
    print(get_chat_response(chat, prompt))

    prompt = "What are all the colors in a rainbow?"
    print(get_chat_response(chat, prompt))

    prompt = "Why does it appear when it rains?"
    print(get_chat_response(chat, prompt))
    # [END aiplatform_gemini_multiturn_chat_stream]
    return get_chat_response(chat, "Hello")
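
The two chat samples above can be exercised locally with a small driver like the following. This is a minimal sketch that is not part of the commit; the project ID and location are placeholders, and it assumes the file is importable as gemini_chat_example and that Application Default Credentials are already configured.

import gemini_chat_example

PROJECT_ID = "your-project-id"  # placeholder, replace with your own project
LOCATION = "us-central1"

# Non-streaming multi-turn chat
print(gemini_chat_example.chat_text_example(PROJECT_ID, LOCATION))

# Streaming multi-turn chat
print(gemini_chat_example.chat_stream_example(PROJECT_ID, LOCATION))
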
+39 lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# [START aiplatform_gemini_token_count]
import vertexai
from vertexai.preview.generative_models import GenerativeModel


def generate_text(project_id: str, location: str) -> str:
    # Initialize Vertex AI
    vertexai.init(project=project_id, location=location)

    # Load the model
    model = GenerativeModel("gemini-pro")

    # Count the tokens in the prompt
    print(model.count_tokens("why is sky blue?"))

    # Send the prompt to the model
    response = model.generate_content("why is sky blue?")

    # Token usage reported for the response
    print(response._raw_response.usage_metadata)
    return response.text


# [END aiplatform_gemini_token_count]
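
The sample prints two different counts: model.count_tokens() reports the size of the prompt before it is sent, while the usage_metadata on the response reports what was actually consumed. The sketch below is not part of the commit and shows how the individual fields might be read; the attribute names follow the vertexai SDK at the pinned google-cloud-aiplatform version and could change in later releases.

import vertexai
from vertexai.preview.generative_models import GenerativeModel

vertexai.init(project="your-project-id", location="us-central1")  # placeholders
model = GenerativeModel("gemini-pro")

# Token count of the prompt, computed before sending it
prompt_count = model.count_tokens("why is sky blue?")
print(prompt_count.total_tokens)
print(prompt_count.total_billable_characters)

# Token usage reported by the service after generation
response = model.generate_content("why is sky blue?")
usage = response._raw_response.usage_metadata
print(usage.prompt_token_count)      # tokens in the prompt
print(usage.candidates_token_count)  # tokens in the generated answer
print(usage.total_token_count)       # prompt + answer
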

‎generative_ai/gemini_guide_example.py

+57 lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START aiplatform_gemini_get_started]
# [START aiplatform_gemini_guide_step1]
# TODO(developer): Vertex AI SDK - uncomment below & run
# pip3 install --upgrade --user google-cloud-aiplatform
# gcloud auth application-default login
# [END aiplatform_gemini_guide_step1]


def generate_text(project_id: str, location: str) -> str:
    # [START aiplatform_gemini_guide_step2]
    # Initialize Vertex AI
    import vertexai

    # TODO(developer): Update and un-comment below lines
    # project_id = "PROJECT_ID"
    # location = "us-central1"

    vertexai.init(project=project_id, location=location)
    # [END aiplatform_gemini_guide_step2]

    # [START aiplatform_gemini_guide_step3]
    from vertexai.preview.generative_models import GenerativeModel, Part

    # [END aiplatform_gemini_guide_step3]

    # [START aiplatform_gemini_guide_step4]
    multimodal_model = GenerativeModel("gemini-pro-vision")
    # [END aiplatform_gemini_guide_step4]

    # [START aiplatform_gemini_guide_step5]
    response = multimodal_model.generate_content(
        [
            "what is shown in this image?",
            Part.from_uri(
                "gs://generativeai-downloads/images/scones.jpg", mime_type="image/jpeg"
            ),
        ]
    )
    print(response)
    # [END aiplatform_gemini_guide_step5]
    return response.text


# [END aiplatform_gemini_get_started]

+49 lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import vertexai


def generate_text_multimodal(project_id: str, location: str) -> str:
    # Initialize Vertex AI
    vertexai.init(project=project_id, location=location)

    # [START aiplatform_gemini_single_turn_multi_image]
    import http.client
    import typing
    import urllib.request
    from vertexai.preview.generative_models import GenerativeModel, Image

    # Helper function to load an image from a URL into an Image object
    def load_image_from_url(image_url: str) -> Image:
        with urllib.request.urlopen(image_url) as response:
            response = typing.cast(http.client.HTTPResponse, response)
            image_bytes = response.read()
        return Image.from_bytes(image_bytes)

    # Load images from public Cloud Storage URLs
    landmark1 = load_image_from_url(
        "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark1.png"
    )
    landmark2 = load_image_from_url(
        "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark2.png"
    )
    landmark3 = load_image_from_url(
        "https://storage.googleapis.com/cloud-samples-data/vertex-ai/llm/prompts/landmark3.png"
    )

    # Pass a multimodal prompt: two image/caption pairs, then a third image
    # for the model to describe in the same style
    model = GenerativeModel("gemini-pro-vision")
    response = model.generate_content(
        [
            landmark1,
            "city: Rome, Landmark: the Colosseum",
            landmark2,
            "city: Beijing, Landmark: Forbidden City",
            landmark3,
        ]
    )
    print(response)
    # [END aiplatform_gemini_single_turn_multi_image]
    return response.text

+58 lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import vertexai

# [START aiplatform_gemini_safety_settings]
from vertexai.preview import generative_models


def generate_text(project_id: str, location: str, image: str) -> str:
    # Initialize Vertex AI
    vertexai.init(project=project_id, location=location)

    # Load the model
    model = generative_models.GenerativeModel("gemini-pro-vision")

    # Generation config
    config = {"max_output_tokens": 2048, "temperature": 0.4, "top_p": 1, "top_k": 32}

    # Safety config
    safety_config = {
        generative_models.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
        generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    }

    # Generate content with the generation and safety settings applied
    responses = model.generate_content(
        [image, "Add your prompt here"],
        generation_config=config,
        stream=True,
        safety_settings=safety_config,
    )

    text_responses = []
    for response in responses:
        print(response.text)
        text_responses.append(response.text)
    return "".join(text_responses)


# [END aiplatform_gemini_safety_settings]


# if __name__ == "__main__":
#     import base64
#     base64_image_data = base64.b64encode(open("scones.jpg", "rb").read()).decode("utf-8")
#     image = generative_models.Part.from_data(
#         data=base64.b64decode(base64_image_data), mime_type="image/jpeg"
#     )
#     generate_text(project_id="PROJECT_ID", location="us-central1", image=image)
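
When safety settings are applied, the response also carries per-category safety ratings and a finish reason that indicates whether generation was cut short for safety. The sketch below is not part of the commit; it uses the text model and a single harassment threshold for brevity, the project values are placeholders, and the candidate attribute names follow the vertexai SDK at the pinned version.

import vertexai
from vertexai.preview import generative_models

vertexai.init(project="your-project-id", location="us-central1")  # placeholders
model = generative_models.GenerativeModel("gemini-pro")

response = model.generate_content(
    "Write a short, friendly greeting.",
    safety_settings={
        generative_models.HarmCategory.HARM_CATEGORY_HARASSMENT: generative_models.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    },
)

candidate = response.candidates[0]
print(candidate.finish_reason)       # e.g. STOP, or SAFETY if the reply was blocked
for rating in candidate.safety_ratings:
    print(rating)                    # per-category rating for the reply
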
+40 lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# [START aiplatform_gemini_single_turn_video]
import vertexai

from vertexai.preview.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> str:
    # Initialize Vertex AI
    vertexai.init(project=project_id, location=location)
    # Load the model
    vision_model = GenerativeModel("gemini-pro-vision")
    # Generate text
    response = vision_model.generate_content(
        [
            "What is in the video?",
            Part.from_uri(
                "gs://cloud-samples-data/video/animals.mp4", mime_type="video/mp4"
            ),
        ]
    )
    print(response)
    return response.text


# [END aiplatform_gemini_single_turn_video]

‎generative_ai/requirements.txt

+1 -1 lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
 pandas==1.3.5; python_version == '3.7'
 pandas==2.0.1; python_version > '3.7'
-google-cloud-aiplatform[pipelines]==1.31.0
+google-cloud-aiplatform[pipelines]==1.38.0
 google-auth==2.17.3
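
The google-cloud-aiplatform bump is what makes the new samples work: the vertexai.preview.generative_models module imported above ships with the newer SDK, while the previously pinned 1.31.0 predates it. A quick way to confirm a local environment is new enough, shown as a sketch that is not part of the commit:

from importlib.metadata import version

# Print the installed SDK version (requires Python 3.8+ for importlib.metadata)
print("google-cloud-aiplatform", version("google-cloud-aiplatform"))

# Importing the Gemini classes fails on SDK versions that predate them.
from vertexai.preview.generative_models import GenerativeModel, Part  # noqa: F401
print("Gemini classes are available")
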
