diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 36925cfe97..73077f4afb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.79.0" + ".": "1.80.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index afa33d93bd..4b4f19c91f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-262e171d0a8150ea1192474d16ba3afdf9a054b399f1a49a9c9b697a3073c136.yml -openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b -config_hash: d8d5fda350f6db77c784f35429741a2e +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a5651cb97f86d1e2531af6aef8c5230f1ea350560fbae790ca2e481b30a6c217.yml +openapi_spec_hash: 66a5104fd3bb43383cf919225df7a6fd +config_hash: bb657c3fed232a56930035de3aaed936 diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ec3e61533..6517b7d1b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.80.0 (2025-05-21) + +Full Changelog: [v1.79.0...v1.80.0](https://github.com/openai/openai-python/compare/v1.79.0...v1.80.0) + +### Features + +* **api:** new API tools ([d36ae52](https://github.com/openai/openai-python/commit/d36ae528d55fe87067c4b8c6b2c947cbad5e5002)) + + +### Chores + +* **docs:** grammar improvements ([e746145](https://github.com/openai/openai-python/commit/e746145a12b5335d8841aff95c91bbbde8bae8e3)) + ## 1.79.0 (2025-05-16) Full Changelog: [v1.78.1...v1.79.0](https://github.com/openai/openai-python/compare/v1.78.1...v1.79.0) diff --git a/SECURITY.md b/SECURITY.md index 3b3bd8a662..4adb0c54f1 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,13 +16,13 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by OpenAI please follow the respective company's security reporting guidelines. +or products provided by OpenAI, please follow the respective company's security reporting guidelines. ### OpenAI Terms and Policies Our Security Policy can be found at [Security Policy URL](https://openai.com/policies/coordinated-vulnerability-disclosure-policy). -Please contact disclosure@openai.com for any questions or concerns regarding security of our services. +Please contact disclosure@openai.com for any questions or concerns regarding the security of our services. 
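The headline feature in this release is the new `background` option on the Responses API (the `background` parameter and `queued`/`cancelled` statuses appear in the hunks below), alongside event and item types for the new built-in tools. A minimal sketch of background mode under stated assumptions: the model name is illustrative, and the poll-with-`responses.retrieve` loop is one possible consumption pattern, not something this diff prescribes.

```python
import time

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Kick off a long-running response without holding the connection open.
# `background=True` is the new parameter added in this release.
response = client.responses.create(
    model="o3",  # illustrative model name
    input="Write a detailed report on the history of solar energy.",
    background=True,
)

# A background response starts out "queued"; `Response.status` now also
# documents "queued" and "cancelled" as possible values. Poll until the
# response reaches a terminal state.
while response.status in ("queued", "in_progress"):
    time.sleep(2)
    response = client.responses.retrieve(response.id)

print(response.status)
print(response.output_text)
```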
--- diff --git a/api.md b/api.md index 496e5548b3..4eb3c09c66 100644 --- a/api.md +++ b/api.md @@ -717,6 +717,10 @@ from openai.types.responses import ( ResponseFunctionToolCallItem, ResponseFunctionToolCallOutputItem, ResponseFunctionWebSearch, + ResponseImageGenCallCompletedEvent, + ResponseImageGenCallGeneratingEvent, + ResponseImageGenCallInProgressEvent, + ResponseImageGenCallPartialImageEvent, ResponseInProgressEvent, ResponseIncludable, ResponseIncompleteEvent, @@ -730,6 +734,14 @@ from openai.types.responses import ( ResponseInputMessageItem, ResponseInputText, ResponseItem, + ResponseMcpCallArgumentsDeltaEvent, + ResponseMcpCallArgumentsDoneEvent, + ResponseMcpCallCompletedEvent, + ResponseMcpCallFailedEvent, + ResponseMcpCallInProgressEvent, + ResponseMcpListToolsCompletedEvent, + ResponseMcpListToolsFailedEvent, + ResponseMcpListToolsInProgressEvent, ResponseOutputAudio, ResponseOutputItem, ResponseOutputItemAddedEvent, @@ -737,7 +749,13 @@ from openai.types.responses import ( ResponseOutputMessage, ResponseOutputRefusal, ResponseOutputText, + ResponseOutputTextAnnotationAddedEvent, + ResponseQueuedEvent, + ResponseReasoningDeltaEvent, + ResponseReasoningDoneEvent, ResponseReasoningItem, + ResponseReasoningSummaryDeltaEvent, + ResponseReasoningSummaryDoneEvent, ResponseReasoningSummaryPartAddedEvent, ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, diff --git a/pyproject.toml b/pyproject.toml index 5affe3c483..3c3d246a18 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.79.0" +version = "1.80.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index 641c3a7a72..f5621f92a7 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -59,7 +59,7 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or sse.event.startswith("response.") or sse.event.startswith('transcript.'): + if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."): data = sse.json() if is_mapping(data) and data.get("error"): message = None @@ -161,7 +161,7 @@ async def __stream__(self) -> AsyncIterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or sse.event.startswith("response.") or sse.event.startswith('transcript.'): + if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."): data = sse.json() if is_mapping(data) and data.get("error"): message = None diff --git a/src/openai/_version.py b/src/openai/_version.py index 77c73cdfd9..7bf2bbc038 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.79.0" # x-release-please-version +__version__ = "1.80.0" # x-release-please-version diff --git a/src/openai/helpers/local_audio_player.py b/src/openai/helpers/local_audio_player.py index eed68aa21d..8f12c27a56 100644 --- a/src/openai/helpers/local_audio_player.py +++ b/src/openai/helpers/local_audio_player.py @@ -65,7 +65,7 @@ async def play( if input.dtype == np.int16 and self.dtype == np.float32: audio_content = (input.astype(np.float32) / 32767.0).reshape(-1, self.channels) elif input.dtype == np.float32: - audio_content = cast('npt.NDArray[np.float32]', input) + audio_content = cast("npt.NDArray[np.float32]", input) else: raise ValueError(f"Unsupported dtype: {input.dtype}") else: diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py index a189dcf937..235f912405 100644 --- a/src/openai/lib/_parsing/_responses.py +++ b/src/openai/lib/_parsing/_responses.py @@ -103,6 +103,13 @@ def parse_response( or output.type == "file_search_call" or output.type == "web_search_call" or output.type == "reasoning" + or output.type == "mcp_call" + or output.type == "mcp_approval_request" + or output.type == "image_generation_call" + or output.type == "code_interpreter_call" + or output.type == "local_shell_call" + or output.type == "mcp_list_tools" + or output.type == 'exec' ): output_list.append(output) elif TYPE_CHECKING: # type: ignore diff --git a/src/openai/lib/streaming/responses/_events.py b/src/openai/lib/streaming/responses/_events.py index 0cdc5992ee..09b84488b5 100644 --- a/src/openai/lib/streaming/responses/_events.py +++ b/src/openai/lib/streaming/responses/_events.py @@ -9,6 +9,7 @@ ParsedResponse, ResponseErrorEvent, ResponseFailedEvent, + ResponseQueuedEvent, ResponseCreatedEvent, ResponseTextDoneEvent as RawResponseTextDoneEvent, ResponseAudioDoneEvent, @@ -19,22 +20,39 @@ ResponseInProgressEvent, ResponseRefusalDoneEvent, ResponseRefusalDeltaEvent, + ResponseMcpCallFailedEvent, + ResponseReasoningDoneEvent, ResponseOutputItemDoneEvent, + ResponseReasoningDeltaEvent, ResponseContentPartDoneEvent, ResponseOutputItemAddedEvent, ResponseContentPartAddedEvent, + ResponseMcpCallCompletedEvent, + ResponseMcpCallInProgressEvent, + ResponseMcpListToolsFailedEvent, ResponseAudioTranscriptDoneEvent, ResponseTextAnnotationDeltaEvent, ResponseAudioTranscriptDeltaEvent, + ResponseMcpCallArgumentsDoneEvent, + ResponseReasoningSummaryDoneEvent, + ResponseImageGenCallCompletedEvent, + ResponseMcpCallArgumentsDeltaEvent, + ResponseMcpListToolsCompletedEvent, + ResponseReasoningSummaryDeltaEvent, + ResponseImageGenCallGeneratingEvent, + ResponseImageGenCallInProgressEvent, + ResponseMcpListToolsInProgressEvent, ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallSearchingEvent, ResponseFileSearchCallCompletedEvent, ResponseFileSearchCallSearchingEvent, ResponseWebSearchCallInProgressEvent, ResponseFileSearchCallInProgressEvent, + ResponseImageGenCallPartialImageEvent, ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDoneEvent, ResponseFunctionCallArgumentsDoneEvent, + ResponseOutputTextAnnotationAddedEvent, ResponseReasoningSummaryPartAddedEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent, @@ -109,6 +127,24 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseReasoningSummaryTextDoneEvent, + 
ResponseImageGenCallCompletedEvent, + ResponseImageGenCallInProgressEvent, + ResponseImageGenCallGeneratingEvent, + ResponseImageGenCallPartialImageEvent, + ResponseMcpCallCompletedEvent, + ResponseMcpCallArgumentsDeltaEvent, + ResponseMcpCallArgumentsDoneEvent, + ResponseMcpCallFailedEvent, + ResponseMcpCallInProgressEvent, + ResponseMcpListToolsCompletedEvent, + ResponseMcpListToolsFailedEvent, + ResponseMcpListToolsInProgressEvent, + ResponseOutputTextAnnotationAddedEvent, + ResponseQueuedEvent, + ResponseReasoningDeltaEvent, + ResponseReasoningSummaryDeltaEvent, + ResponseReasoningSummaryDoneEvent, + ResponseReasoningDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index bca8210a83..208f6e8b05 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -449,7 +449,7 @@ async def create( extra_headers: Send extra headers extra_query: Add additional query parameters to the request - """ + """ @overload async def create( diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index a905bc34b1..ad9576983f 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -77,6 +77,7 @@ def create( *, input: Union[str, ResponseInputParam], model: ResponsesModel, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -132,6 +133,9 @@ def create( [model guide](https://platform.openai.com/docs/models) to browse and compare available models. + background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + include: Specify additional output data to include in the model response. Currently supported values are: @@ -267,6 +271,7 @@ def create( input: Union[str, ResponseInputParam], model: ResponsesModel, stream: Literal[True], + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -328,6 +333,9 @@ def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + include: Specify additional output data to include in the model response. Currently supported values are: @@ -456,6 +464,7 @@ def create( input: Union[str, ResponseInputParam], model: ResponsesModel, stream: bool, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -517,6 +526,9 @@ def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + include: Specify additional output data to include in the model response. 
Currently supported values are: @@ -644,6 +656,7 @@ def create( *, input: Union[str, ResponseInputParam], model: ResponsesModel, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -674,6 +687,7 @@ def create( { "input": input, "model": model, + "background": background, "include": include, "instructions": instructions, "max_output_tokens": max_output_tokens, @@ -965,6 +979,7 @@ async def create( *, input: Union[str, ResponseInputParam], model: ResponsesModel, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -1020,6 +1035,9 @@ async def create( [model guide](https://platform.openai.com/docs/models) to browse and compare available models. + background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + include: Specify additional output data to include in the model response. Currently supported values are: @@ -1155,6 +1173,7 @@ async def create( input: Union[str, ResponseInputParam], model: ResponsesModel, stream: Literal[True], + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -1216,6 +1235,9 @@ async def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + include: Specify additional output data to include in the model response. Currently supported values are: @@ -1344,6 +1366,7 @@ async def create( input: Union[str, ResponseInputParam], model: ResponsesModel, stream: bool, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -1405,6 +1428,9 @@ async def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + background: Whether to run the model response in the background. + [Learn more](https://platform.openai.com/docs/guides/background). + include: Specify additional output data to include in the model response. 
Currently supported values are: @@ -1532,6 +1558,7 @@ async def create( *, input: Union[str, ResponseInputParam], model: ResponsesModel, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, @@ -1562,6 +1589,7 @@ async def create( { "input": input, "model": model, + "background": background, "include": include, "instructions": instructions, "max_output_tokens": max_output_tokens, diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 22fd2a0802..5cb00904f7 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -38,6 +38,7 @@ from .response_text_config import ResponseTextConfig as ResponseTextConfig from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent +from .response_queued_event import ResponseQueuedEvent as ResponseQueuedEvent from .response_stream_event import ResponseStreamEvent as ResponseStreamEvent from .web_search_tool_param import WebSearchToolParam as WebSearchToolParam from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam @@ -75,8 +76,11 @@ from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam +from .response_reasoning_done_event import ResponseReasoningDoneEvent as ResponseReasoningDoneEvent from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall +from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent +from .response_reasoning_delta_event import ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem @@ -85,15 +89,27 @@ from .response_content_part_added_event import ResponseContentPartAddedEvent as ResponseContentPartAddedEvent from .response_format_text_config_param import ResponseFormatTextConfigParam as ResponseFormatTextConfigParam from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam +from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList +from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent from 
.response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam +from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent as ResponseTextAnnotationDeltaEvent from .response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) +from .response_reasoning_summary_done_event import ( + ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent, +) +from .response_mcp_call_arguments_done_event import ( + ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent, +) +from .response_reasoning_summary_delta_event import ( + ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent, +) from .response_computer_tool_call_output_item import ( ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, ) @@ -103,21 +119,42 @@ from .response_function_tool_call_output_item import ( ResponseFunctionToolCallOutputItem as ResponseFunctionToolCallOutputItem, ) +from .response_image_gen_call_completed_event import ( + ResponseImageGenCallCompletedEvent as ResponseImageGenCallCompletedEvent, +) +from .response_mcp_call_arguments_delta_event import ( + ResponseMcpCallArgumentsDeltaEvent as ResponseMcpCallArgumentsDeltaEvent, +) +from .response_mcp_list_tools_completed_event import ( + ResponseMcpListToolsCompletedEvent as ResponseMcpListToolsCompletedEvent, +) +from .response_image_gen_call_generating_event import ( + ResponseImageGenCallGeneratingEvent as ResponseImageGenCallGeneratingEvent, +) from .response_web_search_call_completed_event import ( ResponseWebSearchCallCompletedEvent as ResponseWebSearchCallCompletedEvent, ) from .response_web_search_call_searching_event import ( ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent, ) +from .response_code_interpreter_tool_call_param import ( + ResponseCodeInterpreterToolCallParam as ResponseCodeInterpreterToolCallParam, +) from .response_file_search_call_completed_event import ( ResponseFileSearchCallCompletedEvent as ResponseFileSearchCallCompletedEvent, ) from .response_file_search_call_searching_event import ( ResponseFileSearchCallSearchingEvent as ResponseFileSearchCallSearchingEvent, ) +from .response_image_gen_call_in_progress_event import ( + ResponseImageGenCallInProgressEvent as ResponseImageGenCallInProgressEvent, +) from .response_input_message_content_list_param import ( ResponseInputMessageContentListParam as ResponseInputMessageContentListParam, ) +from .response_mcp_list_tools_in_progress_event import ( + ResponseMcpListToolsInProgressEvent as ResponseMcpListToolsInProgressEvent, +) from .response_reasoning_summary_part_done_event import ( ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, ) @@ -133,6 +170,12 @@ from .response_function_call_arguments_done_event import ( ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, ) +from .response_image_gen_call_partial_image_event import ( + ResponseImageGenCallPartialImageEvent as ResponseImageGenCallPartialImageEvent, +) +from .response_output_text_annotation_added_event import ( + ResponseOutputTextAnnotationAddedEvent as ResponseOutputTextAnnotationAddedEvent, +) from .response_reasoning_summary_part_added_event import ( ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent, ) diff --git 
a/src/openai/types/responses/parsed_response.py b/src/openai/types/responses/parsed_response.py index 1263dfd648..923e9debba 100644 --- a/src/openai/types/responses/parsed_response.py +++ b/src/openai/types/responses/parsed_response.py @@ -7,6 +7,14 @@ from .response import Response from ..._models import GenericModel from ..._utils._transform import PropertyInfo +from .response_output_item import ( + McpCall, + McpListTools, + LocalShellCall, + McpApprovalRequest, + ImageGenerationCall, + LocalShellCallAction, +) from .response_output_text import ResponseOutputText from .response_output_message import ResponseOutputMessage from .response_output_refusal import ResponseOutputRefusal @@ -15,6 +23,7 @@ from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall __all__ = ["ParsedResponse", "ParsedResponseOutputMessage", "ParsedResponseOutputText"] @@ -55,6 +64,13 @@ class ParsedResponseFunctionToolCall(ResponseFunctionToolCall): ResponseFunctionWebSearch, ResponseComputerToolCall, ResponseReasoningItem, + McpCall, + McpApprovalRequest, + ImageGenerationCall, + LocalShellCall, + LocalShellCallAction, + McpListTools, + ResponseCodeInterpreterToolCall, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 254f7e204b..14656f5aec 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -128,6 +128,12 @@ class Response(BaseModel): We generally recommend altering this or `temperature` but not both. """ + background: Optional[bool] = None + """Whether to run the model response in the background. + + [Learn more](https://platform.openai.com/docs/guides/background). + """ + max_output_tokens: Optional[int] = None """ An upper bound for the number of tokens that can be generated for a response, @@ -173,7 +179,8 @@ class Response(BaseModel): status: Optional[ResponseStatus] = None """The status of the response generation. - One of `completed`, `failed`, `in_progress`, or `incomplete`. + One of `completed`, `failed`, `in_progress`, `cancelled`, `queued`, or + `incomplete`. """ text: Optional[ResponseTextConfig] = None diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py index d5a5057074..762542f398 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo @@ -50,3 +50,6 @@ class ResponseCodeInterpreterToolCall(BaseModel): type: Literal["code_interpreter_call"] """The type of the code interpreter tool call. 
Always `code_interpreter_call`.""" + + container_id: Optional[str] = None + """The ID of the container used to run the code.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py new file mode 100644 index 0000000000..be0f909a6a --- /dev/null +++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ResponseCodeInterpreterToolCallParam", "Result", "ResultLogs", "ResultFiles", "ResultFilesFile"] + + +class ResultLogs(TypedDict, total=False): + logs: Required[str] + """The logs of the code interpreter tool call.""" + + type: Required[Literal["logs"]] + """The type of the code interpreter text output. Always `logs`.""" + + +class ResultFilesFile(TypedDict, total=False): + file_id: Required[str] + """The ID of the file.""" + + mime_type: Required[str] + """The MIME type of the file.""" + + +class ResultFiles(TypedDict, total=False): + files: Required[Iterable[ResultFilesFile]] + + type: Required[Literal["files"]] + """The type of the code interpreter file output. Always `files`.""" + + +Result: TypeAlias = Union[ResultLogs, ResultFiles] + + +class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the code interpreter tool call.""" + + code: Required[str] + """The code to run.""" + + results: Required[Iterable[Result]] + """The results of the code interpreter tool call.""" + + status: Required[Literal["in_progress", "interpreting", "completed"]] + """The status of the code interpreter tool call.""" + + type: Required[Literal["code_interpreter_call"]] + """The type of the code interpreter tool call. Always `code_interpreter_call`.""" + + container_id: str + """The ID of the container used to run the code.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 972d413926..d7bb5817c2 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -46,6 +46,12 @@ class ResponseCreateParamsBase(TypedDict, total=False): available models. """ + background: Optional[bool] + """Whether to run the model response in the background. + + [Learn more](https://platform.openai.com/docs/guides/background). + """ + include: Optional[List[ResponseIncludable]] """Specify additional output data to include in the model response. diff --git a/src/openai/types/responses/response_image_gen_call_completed_event.py b/src/openai/types/responses/response_image_gen_call_completed_event.py new file mode 100644 index 0000000000..fd499f909e --- /dev/null +++ b/src/openai/types/responses/response_image_gen_call_completed_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseImageGenCallCompletedEvent"] + + +class ResponseImageGenCallCompletedEvent(BaseModel): + item_id: str + """The unique identifier of the image generation item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.image_generation_call.completed"] + """The type of the event. Always 'response.image_generation_call.completed'.""" diff --git a/src/openai/types/responses/response_image_gen_call_generating_event.py b/src/openai/types/responses/response_image_gen_call_generating_event.py new file mode 100644 index 0000000000..6e7e3efe5c --- /dev/null +++ b/src/openai/types/responses/response_image_gen_call_generating_event.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseImageGenCallGeneratingEvent"] + + +class ResponseImageGenCallGeneratingEvent(BaseModel): + item_id: str + """The unique identifier of the image generation item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.image_generation_call.generating"] + """The type of the event. Always 'response.image_generation_call.generating'.""" + + sequence_number: Optional[int] = None + """The sequence number of the image generation item being processed.""" diff --git a/src/openai/types/responses/response_image_gen_call_in_progress_event.py b/src/openai/types/responses/response_image_gen_call_in_progress_event.py new file mode 100644 index 0000000000..b36ff5fa47 --- /dev/null +++ b/src/openai/types/responses/response_image_gen_call_in_progress_event.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseImageGenCallInProgressEvent"] + + +class ResponseImageGenCallInProgressEvent(BaseModel): + item_id: str + """The unique identifier of the image generation item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + sequence_number: int + """The sequence number of the image generation item being processed.""" + + type: Literal["response.image_generation_call.in_progress"] + """The type of the event. Always 'response.image_generation_call.in_progress'.""" diff --git a/src/openai/types/responses/response_image_gen_call_partial_image_event.py b/src/openai/types/responses/response_image_gen_call_partial_image_event.py new file mode 100644 index 0000000000..e69c95fb33 --- /dev/null +++ b/src/openai/types/responses/response_image_gen_call_partial_image_event.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseImageGenCallPartialImageEvent"] + + +class ResponseImageGenCallPartialImageEvent(BaseModel): + item_id: str + """The unique identifier of the image generation item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + partial_image_b64: str + """Base64-encoded partial image data, suitable for rendering as an image.""" + + partial_image_index: int + """ + 0-based index for the partial image (backend is 1-based, but this is 0-based for + the user). + """ + + sequence_number: int + """The sequence number of the image generation item being processed.""" + + type: Literal["response.image_generation_call.partial_image"] + """The type of the event. Always 'response.image_generation_call.partial_image'.""" diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index 290953a0ef..70cd9116a9 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Union, Iterable, Optional +from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from .easy_input_message_param import EasyInputMessageParam @@ -12,6 +12,7 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam +from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam @@ -21,6 +22,15 @@ "ComputerCallOutput", "ComputerCallOutputAcknowledgedSafetyCheck", "FunctionCallOutput", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", "ItemReference", ] @@ -108,6 +118,159 @@ class FunctionCallOutput(TypedDict, total=False): """ +class ImageGenerationCall(TypedDict, total=False): + id: Required[str] + """The unique ID of the image generation call.""" + + result: Required[Optional[str]] + """The generated image encoded in base64.""" + + status: Required[Literal["in_progress", "completed", "generating", "failed"]] + """The status of the image generation call.""" + + type: Required[Literal["image_generation_call"]] + """The type of the image generation call. Always `image_generation_call`.""" + + +class LocalShellCallAction(TypedDict, total=False): + command: Required[List[str]] + """The command to run.""" + + env: Required[Dict[str, str]] + """Environment variables to set for the command.""" + + type: Required[Literal["exec"]] + """The type of the local shell action. 
Always `exec`.""" + + timeout_ms: Optional[int] + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] + """Optional user to run the command as.""" + + working_directory: Optional[str] + """Optional working directory to run the command in.""" + + +class LocalShellCall(TypedDict, total=False): + id: Required[str] + """The unique ID of the local shell call.""" + + action: Required[LocalShellCallAction] + """Execute a shell command on the server.""" + + call_id: Required[str] + """The unique ID of the local shell tool call generated by the model.""" + + status: Required[Literal["in_progress", "completed", "incomplete"]] + """The status of the local shell call.""" + + type: Required[Literal["local_shell_call"]] + """The type of the local shell call. Always `local_shell_call`.""" + + +class LocalShellCallOutput(TypedDict, total=False): + id: Required[str] + """The unique ID of the local shell tool call generated by the model.""" + + output: Required[str] + """A JSON string of the output of the local shell tool call.""" + + type: Required[Literal["local_shell_call_output"]] + """The type of the local shell tool call output. Always `local_shell_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + """The status of the item. One of `in_progress`, `completed`, or `incomplete`.""" + + +class McpListToolsTool(TypedDict, total=False): + input_schema: Required[object] + """The JSON schema describing the tool's input.""" + + name: Required[str] + """The name of the tool.""" + + annotations: Optional[object] + """Additional annotations about the tool.""" + + description: Optional[str] + """The description of the tool.""" + + +class McpListTools(TypedDict, total=False): + id: Required[str] + """The unique ID of the list.""" + + server_label: Required[str] + """The label of the MCP server.""" + + tools: Required[Iterable[McpListToolsTool]] + """The tools available on the server.""" + + type: Required[Literal["mcp_list_tools"]] + """The type of the item. Always `mcp_list_tools`.""" + + error: Optional[str] + """Error message if the server could not list tools.""" + + +class McpApprovalRequest(TypedDict, total=False): + id: Required[str] + """The unique ID of the approval request.""" + + arguments: Required[str] + """A JSON string of arguments for the tool.""" + + name: Required[str] + """The name of the tool to run.""" + + server_label: Required[str] + """The label of the MCP server making the request.""" + + type: Required[Literal["mcp_approval_request"]] + """The type of the item. Always `mcp_approval_request`.""" + + +class McpApprovalResponse(TypedDict, total=False): + approval_request_id: Required[str] + """The ID of the approval request being answered.""" + + approve: Required[bool] + """Whether the request was approved.""" + + type: Required[Literal["mcp_approval_response"]] + """The type of the item. Always `mcp_approval_response`.""" + + id: Optional[str] + """The unique ID of the approval response""" + + reason: Optional[str] + """Optional reason for the decision.""" + + +class McpCall(TypedDict, total=False): + id: Required[str] + """The unique ID of the tool call.""" + + arguments: Required[str] + """A JSON string of the arguments passed to the tool.""" + + name: Required[str] + """The name of the tool that was run.""" + + server_label: Required[str] + """The label of the MCP server running the tool.""" + + type: Required[Literal["mcp_call"]] + """The type of the item. 
Always `mcp_call`.""" + + error: Optional[str] + """The error from the tool call, if any.""" + + output: Optional[str] + """The output from the tool call.""" + + class ItemReference(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" @@ -127,5 +290,13 @@ class ItemReference(TypedDict, total=False): ResponseFunctionToolCallParam, FunctionCallOutput, ResponseReasoningItemParam, + ImageGenerationCall, + ResponseCodeInterpreterToolCallParam, + LocalShellCall, + LocalShellCallOutput, + McpListTools, + McpApprovalRequest, + McpApprovalResponse, + McpCall, ItemReference, ] diff --git a/src/openai/types/responses/response_input_param.py b/src/openai/types/responses/response_input_param.py index b24182697a..024998671f 100644 --- a/src/openai/types/responses/response_input_param.py +++ b/src/openai/types/responses/response_input_param.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import List, Union, Iterable, Optional +from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from .easy_input_message_param import EasyInputMessageParam @@ -12,6 +12,7 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam +from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam @@ -22,6 +23,15 @@ "ComputerCallOutput", "ComputerCallOutputAcknowledgedSafetyCheck", "FunctionCallOutput", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", "ItemReference", ] @@ -109,6 +119,159 @@ class FunctionCallOutput(TypedDict, total=False): """ +class ImageGenerationCall(TypedDict, total=False): + id: Required[str] + """The unique ID of the image generation call.""" + + result: Required[Optional[str]] + """The generated image encoded in base64.""" + + status: Required[Literal["in_progress", "completed", "generating", "failed"]] + """The status of the image generation call.""" + + type: Required[Literal["image_generation_call"]] + """The type of the image generation call. Always `image_generation_call`.""" + + +class LocalShellCallAction(TypedDict, total=False): + command: Required[List[str]] + """The command to run.""" + + env: Required[Dict[str, str]] + """Environment variables to set for the command.""" + + type: Required[Literal["exec"]] + """The type of the local shell action. 
Always `exec`.""" + + timeout_ms: Optional[int] + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] + """Optional user to run the command as.""" + + working_directory: Optional[str] + """Optional working directory to run the command in.""" + + +class LocalShellCall(TypedDict, total=False): + id: Required[str] + """The unique ID of the local shell call.""" + + action: Required[LocalShellCallAction] + """Execute a shell command on the server.""" + + call_id: Required[str] + """The unique ID of the local shell tool call generated by the model.""" + + status: Required[Literal["in_progress", "completed", "incomplete"]] + """The status of the local shell call.""" + + type: Required[Literal["local_shell_call"]] + """The type of the local shell call. Always `local_shell_call`.""" + + +class LocalShellCallOutput(TypedDict, total=False): + id: Required[str] + """The unique ID of the local shell tool call generated by the model.""" + + output: Required[str] + """A JSON string of the output of the local shell tool call.""" + + type: Required[Literal["local_shell_call_output"]] + """The type of the local shell tool call output. Always `local_shell_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] + """The status of the item. One of `in_progress`, `completed`, or `incomplete`.""" + + +class McpListToolsTool(TypedDict, total=False): + input_schema: Required[object] + """The JSON schema describing the tool's input.""" + + name: Required[str] + """The name of the tool.""" + + annotations: Optional[object] + """Additional annotations about the tool.""" + + description: Optional[str] + """The description of the tool.""" + + +class McpListTools(TypedDict, total=False): + id: Required[str] + """The unique ID of the list.""" + + server_label: Required[str] + """The label of the MCP server.""" + + tools: Required[Iterable[McpListToolsTool]] + """The tools available on the server.""" + + type: Required[Literal["mcp_list_tools"]] + """The type of the item. Always `mcp_list_tools`.""" + + error: Optional[str] + """Error message if the server could not list tools.""" + + +class McpApprovalRequest(TypedDict, total=False): + id: Required[str] + """The unique ID of the approval request.""" + + arguments: Required[str] + """A JSON string of arguments for the tool.""" + + name: Required[str] + """The name of the tool to run.""" + + server_label: Required[str] + """The label of the MCP server making the request.""" + + type: Required[Literal["mcp_approval_request"]] + """The type of the item. Always `mcp_approval_request`.""" + + +class McpApprovalResponse(TypedDict, total=False): + approval_request_id: Required[str] + """The ID of the approval request being answered.""" + + approve: Required[bool] + """Whether the request was approved.""" + + type: Required[Literal["mcp_approval_response"]] + """The type of the item. Always `mcp_approval_response`.""" + + id: Optional[str] + """The unique ID of the approval response""" + + reason: Optional[str] + """Optional reason for the decision.""" + + +class McpCall(TypedDict, total=False): + id: Required[str] + """The unique ID of the tool call.""" + + arguments: Required[str] + """A JSON string of the arguments passed to the tool.""" + + name: Required[str] + """The name of the tool that was run.""" + + server_label: Required[str] + """The label of the MCP server running the tool.""" + + type: Required[Literal["mcp_call"]] + """The type of the item. 
Always `mcp_call`.""" + + error: Optional[str] + """The error from the tool call, if any.""" + + output: Optional[str] + """The output from the tool call.""" + + class ItemReference(TypedDict, total=False): id: Required[str] """The ID of the item to reference.""" @@ -128,6 +291,14 @@ class ItemReference(TypedDict, total=False): ResponseFunctionToolCallParam, FunctionCallOutput, ResponseReasoningItemParam, + ImageGenerationCall, + ResponseCodeInterpreterToolCallParam, + LocalShellCall, + LocalShellCallOutput, + McpListTools, + McpApprovalRequest, + McpApprovalResponse, + McpCall, ItemReference, ] diff --git a/src/openai/types/responses/response_item.py b/src/openai/types/responses/response_item.py index dc8d67d0f2..cba89390ed 100644 --- a/src/openai/types/responses/response_item.py +++ b/src/openai/types/responses/response_item.py @@ -1,19 +1,186 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Union -from typing_extensions import Annotated, TypeAlias +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo +from ..._models import BaseModel from .response_output_message import ResponseOutputMessage from .response_computer_tool_call import ResponseComputerToolCall from .response_input_message_item import ResponseInputMessageItem from .response_function_web_search import ResponseFunctionWebSearch from .response_file_search_tool_call import ResponseFileSearchToolCall from .response_function_tool_call_item import ResponseFunctionToolCallItem +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall from .response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem from .response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem -__all__ = ["ResponseItem"] +__all__ = [ + "ResponseItem", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", +] + + +class ImageGenerationCall(BaseModel): + id: str + """The unique ID of the image generation call.""" + + result: Optional[str] = None + """The generated image encoded in base64.""" + + status: Literal["in_progress", "completed", "generating", "failed"] + """The status of the image generation call.""" + + type: Literal["image_generation_call"] + """The type of the image generation call. Always `image_generation_call`.""" + + +class LocalShellCallAction(BaseModel): + command: List[str] + """The command to run.""" + + env: Dict[str, str] + """Environment variables to set for the command.""" + + type: Literal["exec"] + """The type of the local shell action. Always `exec`.""" + + timeout_ms: Optional[int] = None + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] = None + """Optional user to run the command as.""" + + working_directory: Optional[str] = None + """Optional working directory to run the command in.""" + + +class LocalShellCall(BaseModel): + id: str + """The unique ID of the local shell call.""" + + action: LocalShellCallAction + """Execute a shell command on the server.""" + + call_id: str + """The unique ID of the local shell tool call generated by the model.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the local shell call.""" + + type: Literal["local_shell_call"] + """The type of the local shell call. 
Always `local_shell_call`.""" + + +class LocalShellCallOutput(BaseModel): + id: str + """The unique ID of the local shell tool call generated by the model.""" + + output: str + """A JSON string of the output of the local shell tool call.""" + + type: Literal["local_shell_call_output"] + """The type of the local shell tool call output. Always `local_shell_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. One of `in_progress`, `completed`, or `incomplete`.""" + + +class McpListToolsTool(BaseModel): + input_schema: object + """The JSON schema describing the tool's input.""" + + name: str + """The name of the tool.""" + + annotations: Optional[object] = None + """Additional annotations about the tool.""" + + description: Optional[str] = None + """The description of the tool.""" + + +class McpListTools(BaseModel): + id: str + """The unique ID of the list.""" + + server_label: str + """The label of the MCP server.""" + + tools: List[McpListToolsTool] + """The tools available on the server.""" + + type: Literal["mcp_list_tools"] + """The type of the item. Always `mcp_list_tools`.""" + + error: Optional[str] = None + """Error message if the server could not list tools.""" + + +class McpApprovalRequest(BaseModel): + id: str + """The unique ID of the approval request.""" + + arguments: str + """A JSON string of arguments for the tool.""" + + name: str + """The name of the tool to run.""" + + server_label: str + """The label of the MCP server making the request.""" + + type: Literal["mcp_approval_request"] + """The type of the item. Always `mcp_approval_request`.""" + + +class McpApprovalResponse(BaseModel): + id: str + """The unique ID of the approval response""" + + approval_request_id: str + """The ID of the approval request being answered.""" + + approve: bool + """Whether the request was approved.""" + + type: Literal["mcp_approval_response"] + """The type of the item. Always `mcp_approval_response`.""" + + reason: Optional[str] = None + """Optional reason for the decision.""" + + +class McpCall(BaseModel): + id: str + """The unique ID of the tool call.""" + + arguments: str + """A JSON string of the arguments passed to the tool.""" + + name: str + """The name of the tool that was run.""" + + server_label: str + """The label of the MCP server running the tool.""" + + type: Literal["mcp_call"] + """The type of the item. Always `mcp_call`.""" + + error: Optional[str] = None + """The error from the tool call, if any.""" + + output: Optional[str] = None + """The output from the tool call.""" + ResponseItem: TypeAlias = Annotated[ Union[ @@ -25,6 +192,14 @@ ResponseFunctionWebSearch, ResponseFunctionToolCallItem, ResponseFunctionToolCallOutputItem, + ImageGenerationCall, + ResponseCodeInterpreterToolCall, + LocalShellCall, + LocalShellCallOutput, + McpListTools, + McpApprovalRequest, + McpApprovalResponse, + McpCall, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py new file mode 100644 index 0000000000..ad6738a3b8 --- /dev/null +++ b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallArgumentsDeltaEvent"] + + +class ResponseMcpCallArgumentsDeltaEvent(BaseModel): + delta: object + """The partial update to the arguments for the MCP tool call.""" + + item_id: str + """The unique identifier of the MCP tool call item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.mcp_call.arguments_delta"] + """The type of the event. Always 'response.mcp_call.arguments_delta'.""" diff --git a/src/openai/types/responses/response_mcp_call_arguments_done_event.py b/src/openai/types/responses/response_mcp_call_arguments_done_event.py new file mode 100644 index 0000000000..4095cedb0f --- /dev/null +++ b/src/openai/types/responses/response_mcp_call_arguments_done_event.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallArgumentsDoneEvent"] + + +class ResponseMcpCallArgumentsDoneEvent(BaseModel): + arguments: object + """The finalized arguments for the MCP tool call.""" + + item_id: str + """The unique identifier of the MCP tool call item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.mcp_call.arguments_done"] + """The type of the event. Always 'response.mcp_call.arguments_done'.""" diff --git a/src/openai/types/responses/response_mcp_call_completed_event.py b/src/openai/types/responses/response_mcp_call_completed_event.py new file mode 100644 index 0000000000..63b1b65b31 --- /dev/null +++ b/src/openai/types/responses/response_mcp_call_completed_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallCompletedEvent"] + + +class ResponseMcpCallCompletedEvent(BaseModel): + type: Literal["response.mcp_call.completed"] + """The type of the event. Always 'response.mcp_call.completed'.""" diff --git a/src/openai/types/responses/response_mcp_call_failed_event.py b/src/openai/types/responses/response_mcp_call_failed_event.py new file mode 100644 index 0000000000..1f94f4d17e --- /dev/null +++ b/src/openai/types/responses/response_mcp_call_failed_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallFailedEvent"] + + +class ResponseMcpCallFailedEvent(BaseModel): + type: Literal["response.mcp_call.failed"] + """The type of the event. Always 'response.mcp_call.failed'.""" diff --git a/src/openai/types/responses/response_mcp_call_in_progress_event.py b/src/openai/types/responses/response_mcp_call_in_progress_event.py new file mode 100644 index 0000000000..a90508a13c --- /dev/null +++ b/src/openai/types/responses/response_mcp_call_in_progress_event.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpCallInProgressEvent"] + + +class ResponseMcpCallInProgressEvent(BaseModel): + item_id: str + """The unique identifier of the MCP tool call item being processed.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.mcp_call.in_progress"] + """The type of the event. Always 'response.mcp_call.in_progress'.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_completed_event.py b/src/openai/types/responses/response_mcp_list_tools_completed_event.py new file mode 100644 index 0000000000..c6a921b5bc --- /dev/null +++ b/src/openai/types/responses/response_mcp_list_tools_completed_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpListToolsCompletedEvent"] + + +class ResponseMcpListToolsCompletedEvent(BaseModel): + type: Literal["response.mcp_list_tools.completed"] + """The type of the event. Always 'response.mcp_list_tools.completed'.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_failed_event.py b/src/openai/types/responses/response_mcp_list_tools_failed_event.py new file mode 100644 index 0000000000..639a2356db --- /dev/null +++ b/src/openai/types/responses/response_mcp_list_tools_failed_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpListToolsFailedEvent"] + + +class ResponseMcpListToolsFailedEvent(BaseModel): + type: Literal["response.mcp_list_tools.failed"] + """The type of the event. Always 'response.mcp_list_tools.failed'.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py new file mode 100644 index 0000000000..41c2334fee --- /dev/null +++ b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseMcpListToolsInProgressEvent"] + + +class ResponseMcpListToolsInProgressEvent(BaseModel): + type: Literal["response.mcp_list_tools.in_progress"] + """The type of the event. Always 'response.mcp_list_tools.in_progress'.""" diff --git a/src/openai/types/responses/response_output_item.py b/src/openai/types/responses/response_output_item.py index f1e9693195..62f8f6fb3f 100644 --- a/src/openai/types/responses/response_output_item.py +++ b/src/openai/types/responses/response_output_item.py @@ -1,17 +1,151 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Union -from typing_extensions import Annotated, TypeAlias +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo +from ..._models import BaseModel from .response_output_message import ResponseOutputMessage from .response_reasoning_item import ResponseReasoningItem from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall + +__all__ = [ + "ResponseOutputItem", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "McpCall", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", +] + + +class ImageGenerationCall(BaseModel): + id: str + """The unique ID of the image generation call.""" + + result: Optional[str] = None + """The generated image encoded in base64.""" + + status: Literal["in_progress", "completed", "generating", "failed"] + """The status of the image generation call.""" + + type: Literal["image_generation_call"] + """The type of the image generation call. Always `image_generation_call`.""" + + +class LocalShellCallAction(BaseModel): + command: List[str] + """The command to run.""" + + env: Dict[str, str] + """Environment variables to set for the command.""" + + type: Literal["exec"] + """The type of the local shell action. Always `exec`.""" + + timeout_ms: Optional[int] = None + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] = None + """Optional user to run the command as.""" + + working_directory: Optional[str] = None + """Optional working directory to run the command in.""" + + +class LocalShellCall(BaseModel): + id: str + """The unique ID of the local shell call.""" + + action: LocalShellCallAction + """Execute a shell command on the server.""" + + call_id: str + """The unique ID of the local shell tool call generated by the model.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the local shell call.""" + + type: Literal["local_shell_call"] + """The type of the local shell call. Always `local_shell_call`.""" + + +class McpCall(BaseModel): + id: str + """The unique ID of the tool call.""" + + arguments: str + """A JSON string of the arguments passed to the tool.""" + + name: str + """The name of the tool that was run.""" + + server_label: str + """The label of the MCP server running the tool.""" + + type: Literal["mcp_call"] + """The type of the item. Always `mcp_call`.""" + + error: Optional[str] = None + """The error from the tool call, if any.""" + + output: Optional[str] = None + """The output from the tool call.""" + + +class McpListToolsTool(BaseModel): + input_schema: object + """The JSON schema describing the tool's input.""" + + name: str + """The name of the tool.""" + + annotations: Optional[object] = None + """Additional annotations about the tool.""" + + description: Optional[str] = None + """The description of the tool.""" + + +class McpListTools(BaseModel): + id: str + """The unique ID of the list.""" + + server_label: str + """The label of the MCP server.""" + + tools: List[McpListToolsTool] + """The tools available on the server.""" + + type: Literal["mcp_list_tools"] + """The type of the item. 
Always `mcp_list_tools`.""" + + error: Optional[str] = None + """Error message if the server could not list tools.""" + + +class McpApprovalRequest(BaseModel): + id: str + """The unique ID of the approval request.""" + + arguments: str + """A JSON string of arguments for the tool.""" + + name: str + """The name of the tool to run.""" + + server_label: str + """The label of the MCP server making the request.""" + + type: Literal["mcp_approval_request"] + """The type of the item. Always `mcp_approval_request`.""" -__all__ = ["ResponseOutputItem"] ResponseOutputItem: TypeAlias = Annotated[ Union[ @@ -21,6 +155,12 @@ ResponseFunctionWebSearch, ResponseComputerToolCall, ResponseReasoningItem, + ImageGenerationCall, + ResponseCodeInterpreterToolCall, + LocalShellCall, + McpCall, + McpListTools, + McpApprovalRequest, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/response_output_text_annotation_added_event.py b/src/openai/types/responses/response_output_text_annotation_added_event.py new file mode 100644 index 0000000000..8e9e340b6b --- /dev/null +++ b/src/openai/types/responses/response_output_text_annotation_added_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseOutputTextAnnotationAddedEvent"] + + +class ResponseOutputTextAnnotationAddedEvent(BaseModel): + annotation: object + """The annotation object being added. (See annotation schema for details.)""" + + annotation_index: int + """The index of the annotation within the content part.""" + + content_index: int + """The index of the content part within the output item.""" + + item_id: str + """The unique identifier of the item to which the annotation is being added.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.output_text_annotation.added"] + """The type of the event. Always 'response.output_text_annotation.added'.""" diff --git a/src/openai/types/responses/response_queued_event.py b/src/openai/types/responses/response_queued_event.py new file mode 100644 index 0000000000..90981d60d6 --- /dev/null +++ b/src/openai/types/responses/response_queued_event.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .response import Response +from ..._models import BaseModel + +__all__ = ["ResponseQueuedEvent"] + + +class ResponseQueuedEvent(BaseModel): + response: Response + """The full response object that is queued.""" + + type: Literal["response.queued"] + """The type of the event. Always 'response.queued'.""" diff --git a/src/openai/types/responses/response_reasoning_delta_event.py b/src/openai/types/responses/response_reasoning_delta_event.py new file mode 100644 index 0000000000..5520c45c73 --- /dev/null +++ b/src/openai/types/responses/response_reasoning_delta_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
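# Editor's note: a hypothetical walk over `response.output` showing how the enlarged
# ResponseOutputItem union narrows on its `type` discriminator; the model, prompt,
# and tool configuration are placeholders.
from openai import OpenAI

client = OpenAI()
response = client.responses.create(model="gpt-4o", input="Draw a sunset.")

for item in response.output:
    if item.type == "mcp_approval_request":
        print(f"tool {item.name} on {item.server_label} is awaiting approval")
    elif item.type == "image_generation_call" and item.result is not None:
        print(f"received {len(item.result)} characters of base64 image data")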
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningDeltaEvent"] + + +class ResponseReasoningDeltaEvent(BaseModel): + content_index: int + """The index of the reasoning content part within the output item.""" + + delta: object + """The partial update to the reasoning content.""" + + item_id: str + """The unique identifier of the item for which reasoning is being updated.""" + + output_index: int + """The index of the output item in the response's output array.""" + + type: Literal["response.reasoning.delta"] + """The type of the event. Always 'response.reasoning.delta'.""" diff --git a/src/openai/types/responses/response_reasoning_done_event.py b/src/openai/types/responses/response_reasoning_done_event.py new file mode 100644 index 0000000000..8b059f469f --- /dev/null +++ b/src/openai/types/responses/response_reasoning_done_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningDoneEvent"] + + +class ResponseReasoningDoneEvent(BaseModel): + content_index: int + """The index of the reasoning content part within the output item.""" + + item_id: str + """The unique identifier of the item for which reasoning is finalized.""" + + output_index: int + """The index of the output item in the response's output array.""" + + text: str + """The finalized reasoning text.""" + + type: Literal["response.reasoning.done"] + """The type of the event. Always 'response.reasoning.done'.""" diff --git a/src/openai/types/responses/response_reasoning_summary_delta_event.py b/src/openai/types/responses/response_reasoning_summary_delta_event.py new file mode 100644 index 0000000000..1f52d042af --- /dev/null +++ b/src/openai/types/responses/response_reasoning_summary_delta_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningSummaryDeltaEvent"] + + +class ResponseReasoningSummaryDeltaEvent(BaseModel): + delta: object + """The partial update to the reasoning summary content.""" + + item_id: str + """ + The unique identifier of the item for which the reasoning summary is being + updated. + """ + + output_index: int + """The index of the output item in the response's output array.""" + + summary_index: int + """The index of the summary part within the output item.""" + + type: Literal["response.reasoning_summary.delta"] + """The type of the event. Always 'response.reasoning_summary.delta'.""" diff --git a/src/openai/types/responses/response_reasoning_summary_done_event.py b/src/openai/types/responses/response_reasoning_summary_done_event.py new file mode 100644 index 0000000000..f3f9f5428c --- /dev/null +++ b/src/openai/types/responses/response_reasoning_summary_done_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
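# Editor's note: a sketch (not SDK code) pairing the reasoning delta/done events
# above; treating the `object`-typed deltas as text fragments is an assumption.
from typing import List

reasoning_parts: List[str] = []


def handle_reasoning_event(event) -> None:
    if event.type == "response.reasoning.delta":
        reasoning_parts.append(str(event.delta))
    elif event.type == "response.reasoning.done":
        # The done event carries the finalized text, superseding accumulated deltas.
        print("reasoning:", event.text)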
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningSummaryDoneEvent"] + + +class ResponseReasoningSummaryDoneEvent(BaseModel): + item_id: str + """The unique identifier of the item for which the reasoning summary is finalized.""" + + output_index: int + """The index of the output item in the response's output array.""" + + summary_index: int + """The index of the summary part within the output item.""" + + text: str + """The finalized reasoning summary text.""" + + type: Literal["response.reasoning_summary.done"] + """The type of the event. Always 'response.reasoning_summary.done'.""" diff --git a/src/openai/types/responses/response_status.py b/src/openai/types/responses/response_status.py index 934d17cda3..a7887b92d2 100644 --- a/src/openai/types/responses/response_status.py +++ b/src/openai/types/responses/response_status.py @@ -4,4 +4,4 @@ __all__ = ["ResponseStatus"] -ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "incomplete"] +ResponseStatus: TypeAlias = Literal["completed", "failed", "in_progress", "cancelled", "queued", "incomplete"] diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py index 07c18bd217..e6e59a760a 100644 --- a/src/openai/types/responses/response_stream_event.py +++ b/src/openai/types/responses/response_stream_event.py @@ -6,6 +6,7 @@ from ..._utils import PropertyInfo from .response_error_event import ResponseErrorEvent from .response_failed_event import ResponseFailedEvent +from .response_queued_event import ResponseQueuedEvent from .response_created_event import ResponseCreatedEvent from .response_completed_event import ResponseCompletedEvent from .response_text_done_event import ResponseTextDoneEvent @@ -16,22 +17,39 @@ from .response_in_progress_event import ResponseInProgressEvent from .response_refusal_done_event import ResponseRefusalDoneEvent from .response_refusal_delta_event import ResponseRefusalDeltaEvent +from .response_reasoning_done_event import ResponseReasoningDoneEvent +from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent +from .response_reasoning_delta_event import ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent from .response_output_item_added_event import ResponseOutputItemAddedEvent from .response_content_part_added_event import ResponseContentPartAddedEvent +from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent +from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent +from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent from .response_text_annotation_delta_event import ResponseTextAnnotationDeltaEvent from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent +from .response_reasoning_summary_done_event import ResponseReasoningSummaryDoneEvent +from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent +from .response_reasoning_summary_delta_event import ResponseReasoningSummaryDeltaEvent +from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent +from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent +from .response_mcp_list_tools_completed_event import 
ResponseMcpListToolsCompletedEvent +from .response_image_gen_call_generating_event import ResponseImageGenCallGeneratingEvent from .response_web_search_call_completed_event import ResponseWebSearchCallCompletedEvent from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent +from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent +from .response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent +from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent +from .response_output_text_annotation_added_event import ResponseOutputTextAnnotationAddedEvent from .response_reasoning_summary_part_added_event import ResponseReasoningSummaryPartAddedEvent from .response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent @@ -81,6 +99,24 @@ ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallInProgressEvent, ResponseWebSearchCallSearchingEvent, + ResponseImageGenCallCompletedEvent, + ResponseImageGenCallGeneratingEvent, + ResponseImageGenCallInProgressEvent, + ResponseImageGenCallPartialImageEvent, + ResponseMcpCallArgumentsDeltaEvent, + ResponseMcpCallArgumentsDoneEvent, + ResponseMcpCallCompletedEvent, + ResponseMcpCallFailedEvent, + ResponseMcpCallInProgressEvent, + ResponseMcpListToolsCompletedEvent, + ResponseMcpListToolsFailedEvent, + ResponseMcpListToolsInProgressEvent, + ResponseOutputTextAnnotationAddedEvent, + ResponseQueuedEvent, + ResponseReasoningDeltaEvent, + ResponseReasoningDoneEvent, + ResponseReasoningSummaryDeltaEvent, + ResponseReasoningSummaryDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index d96abdbe5a..0d80cdc89d 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -1,16 +1,175 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
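# Editor's note: with `queued` and `cancelled` added to ResponseStatus, a background
# response can be polled until it settles; the loop below is an illustrative pattern,
# not an SDK helper.
import time

from openai import OpenAI

client = OpenAI()


def wait_until_settled(response_id: str, poll_interval: float = 1.0):
    while True:
        response = client.responses.retrieve(response_id)
        if response.status not in ("queued", "in_progress"):
            return response  # completed, failed, cancelled, or incomplete
        time.sleep(poll_interval)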
-from typing import Union -from typing_extensions import Annotated, TypeAlias +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo +from ..._models import BaseModel from .computer_tool import ComputerTool from .function_tool import FunctionTool from .web_search_tool import WebSearchTool from .file_search_tool import FileSearchTool -__all__ = ["Tool"] +__all__ = [ + "Tool", + "Mcp", + "McpAllowedTools", + "McpAllowedToolsMcpAllowedToolsFilter", + "McpRequireApproval", + "McpRequireApprovalMcpToolApprovalFilter", + "McpRequireApprovalMcpToolApprovalFilterAlways", + "McpRequireApprovalMcpToolApprovalFilterNever", + "CodeInterpreter", + "CodeInterpreterContainer", + "CodeInterpreterContainerCodeInterpreterToolAuto", + "ImageGeneration", + "ImageGenerationInputImageMask", + "LocalShell", +] + + +class McpAllowedToolsMcpAllowedToolsFilter(BaseModel): + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter, None] + + +class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + tool_names: Optional[List[str]] = None + """List of tools that require approval.""" + + +class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + tool_names: Optional[List[str]] = None + """List of tools that do not require approval.""" + + +class McpRequireApprovalMcpToolApprovalFilter(BaseModel): + always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None + """A list of tools that always require approval.""" + + never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None + """A list of tools that never require approval.""" + + tool_names: Optional[List[str]] = None + """List of allowed tool names.""" + + +McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None] + + +class Mcp(BaseModel): + server_label: str + """A label for this MCP server, used to identify it in tool calls.""" + + server_url: str + """The URL for the MCP server.""" + + type: Literal["mcp"] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[McpAllowedTools] = None + """List of allowed tool names or a filter object.""" + + headers: Optional[Dict[str, str]] = None + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[McpRequireApproval] = None + """Specify which of the MCP server's tools require approval.""" + + +class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): + type: Literal["auto"] + """Always `auto`.""" + + file_ids: Optional[List[str]] = None + """An optional list of uploaded files to make available to your code.""" + + +CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto] + + +class CodeInterpreter(BaseModel): + container: CodeInterpreterContainer + """The code interpreter container. + + Can be a container ID or an object that specifies uploaded file IDs to make + available to your code. + """ + + type: Literal["code_interpreter"] + """The type of the code interpreter tool. 
Always `code_interpreter`."""
+
+
+class ImageGenerationInputImageMask(BaseModel):
+    file_id: Optional[str] = None
+    """File ID for the mask image."""
+
+    image_url: Optional[str] = None
+    """Base64-encoded mask image."""
+
+
+class ImageGeneration(BaseModel):
+    type: Literal["image_generation"]
+    """The type of the image generation tool. Always `image_generation`."""
+
+    background: Optional[Literal["transparent", "opaque", "auto"]] = None
+    """Background type for the generated image.
+
+    One of `transparent`, `opaque`, or `auto`. Default: `auto`.
+    """
+
+    input_image_mask: Optional[ImageGenerationInputImageMask] = None
+    """Optional mask for inpainting.
+
+    Contains `image_url` (string, optional) and `file_id` (string, optional).
+    """
+
+    model: Optional[Literal["gpt-image-1"]] = None
+    """The image generation model to use. Default: `gpt-image-1`."""
+
+    moderation: Optional[Literal["auto", "low"]] = None
+    """Moderation level for the generated image. Default: `auto`."""
+
+    output_compression: Optional[int] = None
+    """Compression level for the output image. Default: 100."""
+
+    output_format: Optional[Literal["png", "webp", "jpeg"]] = None
+    """The output format of the generated image.
+
+    One of `png`, `webp`, or `jpeg`. Default: `png`.
+    """
+
+    partial_images: Optional[int] = None
+    """
+    Number of partial images to generate in streaming mode, from 0 (default value)
+    to 3.
+    """
+
+    quality: Optional[Literal["low", "medium", "high", "auto"]] = None
+    """The quality of the generated image.
+
+    One of `low`, `medium`, `high`, or `auto`. Default: `auto`.
+    """
+
+    size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None
+    """The size of the generated image.
+
+    One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`.
+    """
+
+
+class LocalShell(BaseModel):
+    type: Literal["local_shell"]
+    """The type of the local shell tool. Always `local_shell`."""
+
 
 Tool: TypeAlias = Annotated[
-    Union[FileSearchTool, FunctionTool, WebSearchTool, ComputerTool], PropertyInfo(discriminator="type")
+    Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell],
+    PropertyInfo(discriminator="type"),
 ]
diff --git a/src/openai/types/responses/tool_choice_types.py b/src/openai/types/responses/tool_choice_types.py
index 4942808f14..b968324383 100644
--- a/src/openai/types/responses/tool_choice_types.py
+++ b/src/openai/types/responses/tool_choice_types.py
@@ -8,7 +8,15 @@
 
 
 class ToolChoiceTypes(BaseModel):
-    type: Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+    type: Literal[
+        "file_search",
+        "web_search_preview",
+        "computer_use_preview",
+        "web_search_preview_2025_03_11",
+        "image_generation",
+        "code_interpreter",
+        "mcp",
+    ]
     """The type of hosted tool the model should use.
 
     Learn more about
@@ -19,4 +27,7 @@ class ToolChoiceTypes(BaseModel):
     - `file_search`
     - `web_search_preview`
     - `computer_use_preview`
+    - `code_interpreter`
+    - `mcp`
+    - `image_generation`
     """
diff --git a/src/openai/types/responses/tool_choice_types_param.py b/src/openai/types/responses/tool_choice_types_param.py
index b14f2a9eb0..175900750c 100644
--- a/src/openai/types/responses/tool_choice_types_param.py
+++ b/src/openai/types/responses/tool_choice_types_param.py
@@ -9,7 +9,15 @@
 
 class ToolChoiceTypesParam(TypedDict, total=False):
     type: Required[
-        Literal["file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11"]
+        Literal[
+            "file_search",
+            "web_search_preview",
+            "computer_use_preview",
+            "web_search_preview_2025_03_11",
+            "image_generation",
+            "code_interpreter",
+            "mcp",
+        ]
     ]
     """The type of hosted tool the model should use.
 
@@ -21,4 +29,7 @@ class ToolChoiceTypesParam(TypedDict, total=False):
     - `file_search`
     - `web_search_preview`
     - `computer_use_preview`
+    - `code_interpreter`
+    - `mcp`
+    - `image_generation`
     """
diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py
index 200c347005..e9da040908 100644
--- a/src/openai/types/responses/tool_param.py
+++ b/src/openai/types/responses/tool_param.py
@@ -2,8 +2,8 @@
 
 from __future__ import annotations
 
-from typing import Union
-from typing_extensions import TypeAlias
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .computer_tool_param import ComputerToolParam
 from .function_tool_param import FunctionToolParam
@@ -11,8 +11,174 @@
 from .file_search_tool_param import FileSearchToolParam
 from ..chat.chat_completion_tool_param import ChatCompletionToolParam
 
-__all__ = ["ToolParam"]
+__all__ = [
+    "ToolParam",
+    "Mcp",
+    "McpAllowedTools",
+    "McpAllowedToolsMcpAllowedToolsFilter",
+    "McpRequireApproval",
+    "McpRequireApprovalMcpToolApprovalFilter",
+    "McpRequireApprovalMcpToolApprovalFilterAlways",
+    "McpRequireApprovalMcpToolApprovalFilterNever",
+    "CodeInterpreter",
+    "CodeInterpreterContainer",
+    "CodeInterpreterContainerCodeInterpreterToolAuto",
+    "ImageGeneration",
+    "ImageGenerationInputImageMask",
+    "LocalShell",
+]
 
-ToolParam: TypeAlias = Union[FileSearchToolParam, FunctionToolParam, WebSearchToolParam, ComputerToolParam]
 
+class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False):
+    tool_names: List[str]
+    """List of allowed tool names."""
+
+
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter]
+
+
+class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
+    tool_names: List[str]
+    """List of tools that require approval."""
+
+
+class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
+    tool_names: List[str]
+    """List of tools that do not require approval."""
+
+
+class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
+    always: McpRequireApprovalMcpToolApprovalFilterAlways
+    """A list of tools that always require approval."""
+
+    never: McpRequireApprovalMcpToolApprovalFilterNever
+    """A list of tools that never require approval."""
+
+    tool_names: List[str]
+    """List of allowed tool names."""
+
+
+McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
+
+
+class Mcp(TypedDict, total=False):
+    server_label: Required[str]
+    """A label for this MCP server, used to identify it in tool calls."""
+
+    server_url: Required[str]
+    """The URL 
for the MCP server.""" + + type: Required[Literal["mcp"]] + """The type of the MCP tool. Always `mcp`.""" + + allowed_tools: Optional[McpAllowedTools] + """List of allowed tool names or a filter object.""" + + headers: Optional[Dict[str, str]] + """Optional HTTP headers to send to the MCP server. + + Use for authentication or other purposes. + """ + + require_approval: Optional[McpRequireApproval] + """Specify which of the MCP server's tools require approval.""" + + +class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): + type: Required[Literal["auto"]] + """Always `auto`.""" + + file_ids: List[str] + """An optional list of uploaded files to make available to your code.""" + + +CodeInterpreterContainer: TypeAlias = Union[str, CodeInterpreterContainerCodeInterpreterToolAuto] + + +class CodeInterpreter(TypedDict, total=False): + container: Required[CodeInterpreterContainer] + """The code interpreter container. + + Can be a container ID or an object that specifies uploaded file IDs to make + available to your code. + """ + + type: Required[Literal["code_interpreter"]] + """The type of the code interpreter tool. Always `code_interpreter`.""" + + +class ImageGenerationInputImageMask(TypedDict, total=False): + file_id: str + """File ID for the mask image.""" + + image_url: str + """Base64-encoded mask image.""" + + +class ImageGeneration(TypedDict, total=False): + type: Required[Literal["image_generation"]] + """The type of the image generation tool. Always `image_generation`.""" + + background: Literal["transparent", "opaque", "auto"] + """Background type for the generated image. + + One of `transparent`, `opaque`, or `auto`. Default: `auto`. + """ + + input_image_mask: ImageGenerationInputImageMask + """Optional mask for inpainting. + + Contains `image_url` (string, optional) and `file_id` (string, optional). + """ + + model: Literal["gpt-image-1"] + """The image generation model to use. Default: `gpt-image-1`.""" + + moderation: Literal["auto", "low"] + """Moderation level for the generated image. Default: `auto`.""" + + output_compression: int + """Compression level for the output image. Default: 100.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format of the generated image. + + One of `png`, `webp`, or `jpeg`. Default: `png`. + """ + + partial_images: int + """ + Number of partial images to generate in streaming mode, from 0 (default value) + to 3. + """ + + quality: Literal["low", "medium", "high", "auto"] + """The quality of the generated image. + + One of `low`, `medium`, `high`, or `auto`. Default: `auto`. + """ + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the generated image. + + One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`. + """ + + +class LocalShell(TypedDict, total=False): + type: Required[Literal["local_shell"]] + """The type of the local shell tool. 
Always `local_shell`.""" + + +ToolParam: TypeAlias = Union[ + FunctionToolParam, + FileSearchToolParam, + WebSearchToolParam, + ComputerToolParam, + Mcp, + CodeInterpreter, + ImageGeneration, + LocalShell, +] + + ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam] diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 3753af8fdb..d7f72ce50d 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -30,6 +30,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: response = client.responses.create( input="string", model="gpt-4o", + background=True, include=["file_search_call.results"], instructions="instructions", max_output_tokens=0, @@ -49,18 +50,11 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: tool_choice="none", tools=[ { - "type": "file_search", - "vector_store_ids": ["string"], - "filters": { - "key": "key", - "type": "eq", - "value": "string", - }, - "max_num_results": 0, - "ranking_options": { - "ranker": "auto", - "score_threshold": 0, - }, + "name": "name", + "parameters": {"foo": "bar"}, + "strict": True, + "type": "function", + "description": "description", } ], top_p=1, @@ -110,6 +104,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: input="string", model="gpt-4o", stream=True, + background=True, include=["file_search_call.results"], instructions="instructions", max_output_tokens=0, @@ -128,18 +123,11 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: tool_choice="none", tools=[ { - "type": "file_search", - "vector_store_ids": ["string"], - "filters": { - "key": "key", - "type": "eq", - "value": "string", - }, - "max_num_results": 0, - "ranking_options": { - "ranker": "auto", - "score_threshold": 0, - }, + "name": "name", + "parameters": {"foo": "bar"}, + "strict": True, + "type": "function", + "description": "description", } ], top_p=1, @@ -276,6 +264,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn response = await async_client.responses.create( input="string", model="gpt-4o", + background=True, include=["file_search_call.results"], instructions="instructions", max_output_tokens=0, @@ -295,18 +284,11 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn tool_choice="none", tools=[ { - "type": "file_search", - "vector_store_ids": ["string"], - "filters": { - "key": "key", - "type": "eq", - "value": "string", - }, - "max_num_results": 0, - "ranking_options": { - "ranker": "auto", - "score_threshold": 0, - }, + "name": "name", + "parameters": {"foo": "bar"}, + "strict": True, + "type": "function", + "description": "description", } ], top_p=1, @@ -356,6 +338,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn input="string", model="gpt-4o", stream=True, + background=True, include=["file_search_call.results"], instructions="instructions", max_output_tokens=0, @@ -374,18 +357,11 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn tool_choice="none", tools=[ { - "type": "file_search", - "vector_store_ids": ["string"], - "filters": { - "key": "key", - "type": "eq", - "value": "string", - }, - "max_num_results": 0, - "ranking_options": { - "ranker": "auto", - "score_threshold": 0, - }, + "name": "name", + "parameters": {"foo": "bar"}, + "strict": True, + "type": "function", + "description": "description", } ], top_p=1,
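# Editor's note: outside of the test fixtures above, the new hosted-tool dicts can be
# passed straight to `responses.create`; the server URL and label here are placeholders.
from openai import OpenAI

client = OpenAI()
response = client.responses.create(
    model="gpt-4o",
    input="What tools do you have available?",
    tools=[
        {
            "type": "mcp",
            "server_label": "example",
            "server_url": "https://example.com/mcp",
            "require_approval": "never",
        },
        {"type": "image_generation"},
        {"type": "code_interpreter", "container": {"type": "auto"}},
    ],
)
print(response.output_text)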