Commit f329eef

Examples and tests for previous_response_id (openai#512)
Examples + tests
1 parent b978b43 commit f329eef
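In short, the runner now threads a previous_response_id keyword through to the model, so a follow-up run can reference the server-side stored response instead of replaying the whole transcript. A condensed sketch of the pattern the new example exercises (agent setup omitted; see the full example file below):

    result = await Runner.run(agent, "What is the largest country in South America?")
    result = await Runner.run(
        agent,
        "What is the capital of that country?",
        previous_response_id=result.last_response_id,  # continue from the stored response
    )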

File tree: 3 files changed, +158 −0 lines changed
New example file · +66 lines changed: 66 additions & 0 deletions

import asyncio

from agents import Agent, Runner

"""This demonstrates usage of the `previous_response_id` parameter to continue a conversation.
The second run passes the previous response ID to the model, which allows it to continue the
conversation without re-sending the previous messages.

Notes:
1. This only applies to the OpenAI Responses API. Other models will ignore this parameter.
2. Responses are only stored for 30 days as of this writing, so in production you should
store the response ID along with an expiration date; if the response is no longer valid,
you'll need to re-send the previous conversation history.
"""


async def main():
    agent = Agent(
        name="Assistant",
        instructions="You are a helpful assistant. be VERY concise.",
    )

    result = await Runner.run(agent, "What is the largest country in South America?")
    print(result.final_output)
    # Brazil

    result = await Runner.run(
        agent,
        "What is the capital of that country?",
        previous_response_id=result.last_response_id,
    )
    print(result.final_output)
    # Brasilia


async def main_stream():
    agent = Agent(
        name="Assistant",
        instructions="You are a helpful assistant. be VERY concise.",
    )

    result = Runner.run_streamed(agent, "What is the largest country in South America?")

    async for event in result.stream_events():
        if event.type == "raw_response_event" and event.data.type == "response.output_text.delta":
            print(event.data.delta, end="", flush=True)

    print()

    result = Runner.run_streamed(
        agent,
        "What is the capital of that country?",
        previous_response_id=result.last_response_id,
    )

    async for event in result.stream_events():
        if event.type == "raw_response_event" and event.data.type == "response.output_text.delta":
            print(event.data.delta, end="", flush=True)


if __name__ == "__main__":
    is_stream = input("Run in stream mode? (y/n): ")
    if is_stream == "y":
        asyncio.run(main_stream())
    else:
        asyncio.run(main())
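Note 2 above is worth making concrete: because stored responses expire, production code needs a fallback path that re-sends the conversation history. A minimal sketch of that bookkeeping, assuming the SDK's result.to_input_list() helper; the saved record shape, the run_with_fallback name, and the EXPIRY_SECONDS constant are hypothetical, for illustration only:

    import time

    from agents import Agent, Runner

    EXPIRY_SECONDS = 30 * 24 * 60 * 60  # hypothetical: responses retained ~30 days


    async def run_with_fallback(agent: Agent, user_message: str, saved: dict | None):
        """Continue via previous_response_id while fresh; otherwise replay history."""
        if saved is not None and time.time() - saved["stored_at"] < EXPIRY_SECONDS:
            # Stored response should still exist server-side: send only the new message.
            result = await Runner.run(
                agent, user_message, previous_response_id=saved["response_id"]
            )
        else:
            # Expired (or first turn): fall back to re-sending the accumulated history.
            history = (saved["history"] if saved else []) + [
                {"role": "user", "content": user_message}
            ]
            result = await Runner.run(agent, history)
        return result, {
            "response_id": result.last_response_id,
            "stored_at": time.time(),
            "history": result.to_input_list(),  # full transcript for the fallback path
        }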

tests/fake_model.py · +9 lines changed: 9 additions & 0 deletions
@@ -63,6 +63,7 @@ async def get_response(
             "model_settings": model_settings,
             "tools": tools,
             "output_schema": output_schema,
+            "previous_response_id": previous_response_id,
         }

         with generation_span(disabled=not self.tracing_enabled) as span:
@@ -98,6 +99,14 @@ async def stream_response(
         *,
         previous_response_id: str | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
+        self.last_turn_args = {
+            "system_instructions": system_instructions,
+            "input": input,
+            "model_settings": model_settings,
+            "tools": tools,
+            "output_schema": output_schema,
+            "previous_response_id": previous_response_id,
+        }
         with generation_span(disabled=not self.tracing_enabled) as span:
             output = self.get_next_output()
             if isinstance(output, Exception):
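With stream_response now recording its arguments exactly as get_response does, a test can drive a streamed run to completion and then inspect what the runner actually handed to the model. A condensed sketch of that pattern, inside an async test (the full versions follow in the test diff below):

    model = FakeModel()
    model.set_next_output([get_text_message("done")])
    agent = Agent(name="test", model=model)

    result = Runner.run_streamed(agent, input="hi", previous_response_id="resp-123")
    async for _ in result.stream_events():  # drain the stream so the turn completes
        pass
    assert model.last_turn_args["previous_response_id"] == "resp-123"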

tests/test_agent_runner.py · +83 lines changed: 83 additions & 0 deletions
@@ -662,3 +662,86 @@ async def test_model_settings_override():
     # temperature is overridden by Runner.run, but max_tokens is not
     assert model.last_turn_args["model_settings"].temperature == 0.5
     assert model.last_turn_args["model_settings"].max_tokens == 1000
+
+
+@pytest.mark.asyncio
+async def test_previous_response_id_passed_between_runs():
+    """Test that previous_response_id is passed to the model on subsequent runs."""
+    model = FakeModel()
+    model.set_next_output([get_text_message("done")])
+    agent = Agent(name="test", model=model)
+
+    assert model.last_turn_args.get("previous_response_id") is None
+    await Runner.run(agent, input="test", previous_response_id="resp-non-streamed-test")
+    assert model.last_turn_args.get("previous_response_id") == "resp-non-streamed-test"
+
+
+@pytest.mark.asyncio
+async def test_multi_turn_previous_response_id_passed_between_runs():
+    """Test that previous_response_id is passed to the model on subsequent runs."""
+
+    model = FakeModel()
+    agent = Agent(
+        name="test",
+        model=model,
+        tools=[get_function_tool("foo", "tool_result")],
+    )
+
+    model.add_multiple_turn_outputs(
+        [
+            # First turn: a message and tool call
+            [get_text_message("a_message"), get_function_tool_call("foo", json.dumps({"a": "b"}))],
+            # Second turn: text message
+            [get_text_message("done")],
+        ]
+    )
+
+    assert model.last_turn_args.get("previous_response_id") is None
+    await Runner.run(agent, input="test", previous_response_id="resp-test-123")
+    assert model.last_turn_args.get("previous_response_id") == "resp-test-123"
+
+
+@pytest.mark.asyncio
+async def test_previous_response_id_passed_between_runs_streamed():
+    """Test that previous_response_id is passed to the model on subsequent streamed runs."""
+    model = FakeModel()
+    model.set_next_output([get_text_message("done")])
+    agent = Agent(
+        name="test",
+        model=model,
+    )
+
+    assert model.last_turn_args.get("previous_response_id") is None
+    result = Runner.run_streamed(agent, input="test", previous_response_id="resp-stream-test")
+    async for _ in result.stream_events():
+        pass
+
+    assert model.last_turn_args.get("previous_response_id") == "resp-stream-test"
+
+
+@pytest.mark.asyncio
+async def test_previous_response_id_passed_between_runs_streamed_multi_turn():
+    """Test that previous_response_id is passed to the model on subsequent streamed runs."""
+
+    model = FakeModel()
+    agent = Agent(
+        name="test",
+        model=model,
+        tools=[get_function_tool("foo", "tool_result")],
+    )
+
+    model.add_multiple_turn_outputs(
+        [
+            # First turn: a message and tool call
+            [get_text_message("a_message"), get_function_tool_call("foo", json.dumps({"a": "b"}))],
+            # Second turn: text message
+            [get_text_message("done")],
+        ]
+    )
+
+    assert model.last_turn_args.get("previous_response_id") is None
+    result = Runner.run_streamed(agent, input="test", previous_response_id="resp-stream-test")
+    async for _ in result.stream_events():
+        pass
+
+    assert model.last_turn_args.get("previous_response_id") == "resp-stream-test"