From 71058dd6f0a8e2e837f1b9edc91bc61a07b7837d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 21 May 2025 13:39:24 -0500
Subject: [PATCH] release: 1.81.0 (#2368)
* feat(api): add container endpoint
* release: 1.81.0
---------
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
---
.release-please-manifest.json | 2 +-
.stats.yml | 8 +-
CHANGELOG.md | 8 +
api.md | 37 ++
pyproject.toml | 2 +-
src/openai/__init__.py | 1 +
src/openai/_client.py | 38 ++
src/openai/_module_client.py | 8 +
src/openai/_version.py | 2 +-
.../lib/streaming/responses/_responses.py | 4 +
src/openai/resources/__init__.py | 14 +
src/openai/resources/containers/__init__.py | 33 ++
src/openai/resources/containers/containers.py | 511 +++++++++++++++++
.../resources/containers/files/__init__.py | 33 ++
.../resources/containers/files/content.py | 166 ++++++
 .../resources/containers/files/files.py      | 543 ++++++++++++++++++
src/openai/resources/responses/responses.py | 86 +++
src/openai/types/__init__.py | 5 +
src/openai/types/container_create_params.py | 29 +
src/openai/types/container_create_response.py | 40 ++
src/openai/types/container_list_params.py | 30 +
src/openai/types/container_list_response.py | 40 ++
.../types/container_retrieve_response.py | 40 ++
src/openai/types/containers/__init__.py | 9 +
.../types/containers/file_create_params.py | 17 +
.../types/containers/file_create_response.py | 30 +
.../types/containers/file_list_params.py | 30 +
.../types/containers/file_list_response.py | 30 +
.../containers/file_retrieve_response.py | 30 +
src/openai/types/containers/files/__init__.py | 3 +
.../responses/response_audio_delta_event.py | 3 +
.../responses/response_audio_done_event.py | 3 +
.../response_audio_transcript_delta_event.py | 3 +
.../response_audio_transcript_done_event.py | 3 +
..._code_interpreter_call_code_delta_event.py | 3 +
...e_code_interpreter_call_code_done_event.py | 3 +
...e_code_interpreter_call_completed_event.py | 3 +
...code_interpreter_call_in_progress_event.py | 3 +
...ode_interpreter_call_interpreting_event.py | 3 +
.../responses/response_completed_event.py | 3 +
.../response_content_part_added_event.py | 3 +
.../response_content_part_done_event.py | 3 +
.../types/responses/response_created_event.py | 3 +
.../types/responses/response_error_event.py | 3 +
.../types/responses/response_failed_event.py | 3 +
...sponse_file_search_call_completed_event.py | 3 +
...onse_file_search_call_in_progress_event.py | 3 +
...sponse_file_search_call_searching_event.py | 3 +
...nse_function_call_arguments_delta_event.py | 3 +
...onse_function_call_arguments_done_event.py | 3 +
...response_image_gen_call_completed_event.py | 3 +
...esponse_image_gen_call_generating_event.py | 7 +-
.../responses/response_in_progress_event.py | 3 +
.../responses/response_incomplete_event.py | 3 +
...response_mcp_call_arguments_delta_event.py | 3 +
.../response_mcp_call_arguments_done_event.py | 3 +
.../response_mcp_call_completed_event.py | 3 +
.../response_mcp_call_failed_event.py | 3 +
.../response_mcp_call_in_progress_event.py | 3 +
...response_mcp_list_tools_completed_event.py | 3 +
.../response_mcp_list_tools_failed_event.py | 3 +
...sponse_mcp_list_tools_in_progress_event.py | 3 +
.../response_output_item_added_event.py | 3 +
.../response_output_item_done_event.py | 3 +
...onse_output_text_annotation_added_event.py | 3 +
.../types/responses/response_queued_event.py | 3 +
.../response_reasoning_delta_event.py | 3 +
.../response_reasoning_done_event.py | 3 +
.../response_reasoning_summary_delta_event.py | 3 +
.../response_reasoning_summary_done_event.py | 3 +
...onse_reasoning_summary_part_added_event.py | 3 +
...ponse_reasoning_summary_part_done_event.py | 3 +
...onse_reasoning_summary_text_delta_event.py | 3 +
...ponse_reasoning_summary_text_done_event.py | 3 +
.../responses/response_refusal_delta_event.py | 3 +
.../responses/response_refusal_done_event.py | 3 +
.../response_text_annotation_delta_event.py | 3 +
.../responses/response_text_delta_event.py | 3 +
.../responses/response_text_done_event.py | 3 +
tests/api_resources/containers/__init__.py | 1 +
.../containers/files/__init__.py | 1 +
.../containers/files/test_content.py | 116 ++++
tests/api_resources/containers/test_files.py | 409 ++++++++++++++
tests/api_resources/test_containers.py | 333 +++++++++++
tests/api_resources/test_responses.py | 76 +++
 85 files changed, 2905 insertions(+), 11 deletions(-)
create mode 100644 src/openai/resources/containers/__init__.py
create mode 100644 src/openai/resources/containers/containers.py
create mode 100644 src/openai/resources/containers/files/__init__.py
create mode 100644 src/openai/resources/containers/files/content.py
create mode 100644 src/openai/resources/containers/files/files.py
create mode 100644 src/openai/types/container_create_params.py
create mode 100644 src/openai/types/container_create_response.py
create mode 100644 src/openai/types/container_list_params.py
create mode 100644 src/openai/types/container_list_response.py
create mode 100644 src/openai/types/container_retrieve_response.py
create mode 100644 src/openai/types/containers/__init__.py
create mode 100644 src/openai/types/containers/file_create_params.py
create mode 100644 src/openai/types/containers/file_create_response.py
create mode 100644 src/openai/types/containers/file_list_params.py
create mode 100644 src/openai/types/containers/file_list_response.py
create mode 100644 src/openai/types/containers/file_retrieve_response.py
create mode 100644 src/openai/types/containers/files/__init__.py
create mode 100644 tests/api_resources/containers/__init__.py
create mode 100644 tests/api_resources/containers/files/__init__.py
create mode 100644 tests/api_resources/containers/files/test_content.py
create mode 100644 tests/api_resources/containers/test_files.py
create mode 100644 tests/api_resources/test_containers.py
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 73077f4afb..7f7687b9f1 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.80.0"
+ ".": "1.81.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 4b4f19c91f..41319e5e5b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 101
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a5651cb97f86d1e2531af6aef8c5230f1ea350560fbae790ca2e481b30a6c217.yml
-openapi_spec_hash: 66a5104fd3bb43383cf919225df7a6fd
-config_hash: bb657c3fed232a56930035de3aaed936
+configured_endpoints: 111
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6af14840a810139bf407013167ce1c8fb21b6ef8eb0cc3db58b51af7d52c4b5a.yml
+openapi_spec_hash: 3241bde6b273cfec0035e522bd07985d
+config_hash: 7367b68a4e7db36885c1a886f57b17f6
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6517b7d1b7..09e88ffaee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 1.81.0 (2025-05-21)
+
+Full Changelog: [v1.80.0...v1.81.0](https://github.com/openai/openai-python/compare/v1.80.0...v1.81.0)
+
+### Features
+
+* **api:** add container endpoint ([054a210](https://github.com/openai/openai-python/commit/054a210289d7e0db22d2d2a61bbe4d4d9cc0cb47))
+
## 1.80.0 (2025-05-21)
Full Changelog: [v1.79.0...v1.80.0](https://github.com/openai/openai-python/compare/v1.79.0...v1.80.0)
diff --git a/api.md b/api.md
index 4eb3c09c66..57ac67f9f1 100644
--- a/api.md
+++ b/api.md
@@ -785,6 +785,7 @@ Methods:
- client.responses.create(\*\*params) -> Response
- client.responses.retrieve(response_id, \*\*params) -> Response
- client.responses.delete(response_id) -> None
+- client.responses.cancel(response_id) -> None
## InputItems
@@ -859,3 +860,39 @@ Methods:
- client.evals.runs.output_items.retrieve(output_item_id, \*, eval_id, run_id) -> OutputItemRetrieveResponse
- client.evals.runs.output_items.list(run_id, \*, eval_id, \*\*params) -> SyncCursorPage[OutputItemListResponse]
+
+# Containers
+
+Types:
+
+```python
+from openai.types import ContainerCreateResponse, ContainerRetrieveResponse, ContainerListResponse
+```
+
+Methods:
+
+- client.containers.create(\*\*params) -> ContainerCreateResponse
+- client.containers.retrieve(container_id) -> ContainerRetrieveResponse
+- client.containers.list(\*\*params) -> SyncCursorPage[ContainerListResponse]
+- client.containers.delete(container_id) -> None
+
+## Files
+
+Types:
+
+```python
+from openai.types.containers import FileCreateResponse, FileRetrieveResponse, FileListResponse
+```
+
+Methods:
+
+- client.containers.files.create(container_id, \*\*params) -> FileCreateResponse
+- client.containers.files.retrieve(file_id, \*, container_id) -> FileRetrieveResponse
+- client.containers.files.list(container_id, \*\*params) -> SyncCursorPage[FileListResponse]
+- client.containers.files.delete(file_id, \*, container_id) -> None
+
+### Content
+
+Methods:
+
+- client.containers.files.content.retrieve(file_id, \*, container_id) -> None
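For orientation, a minimal usage sketch of the surface documented above, assuming `OPENAI_API_KEY` is set; the container name and IDs are illustrative placeholders, not values from this patch:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Create a container, optionally seeding it with previously uploaded files.
container = client.containers.create(name="my-container")

# Retrieve it by ID, then page through all containers; SyncCursorPage
# follows the cursor across pages transparently.
fetched = client.containers.retrieve(container.id)
for c in client.containers.list(limit=20):
    print(c.id)

# Clean up; delete() returns None.
client.containers.delete(container.id)
```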
diff --git a/pyproject.toml b/pyproject.toml
index 3c3d246a18..48de070573 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.80.0"
+version = "1.81.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index 6b21a9af23..92beeb5da1 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -363,6 +363,7 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
batches as batches,
uploads as uploads,
responses as responses,
+ containers as containers,
embeddings as embeddings,
completions as completions,
fine_tuning as fine_tuning,
diff --git a/src/openai/_client.py b/src/openai/_client.py
index b251ab0917..4ed9a2f52e 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -46,6 +46,7 @@
batches,
uploads,
responses,
+ containers,
embeddings,
completions,
fine_tuning,
@@ -65,6 +66,7 @@
from .resources.moderations import Moderations, AsyncModerations
from .resources.uploads.uploads import Uploads, AsyncUploads
from .resources.responses.responses import Responses, AsyncResponses
+ from .resources.containers.containers import Containers, AsyncContainers
from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning
from .resources.vector_stores.vector_stores import VectorStores, AsyncVectorStores
@@ -244,6 +246,12 @@ def evals(self) -> Evals:
return Evals(self)
+ @cached_property
+ def containers(self) -> Containers:
+ from .resources.containers import Containers
+
+ return Containers(self)
+
@cached_property
def with_raw_response(self) -> OpenAIWithRawResponse:
return OpenAIWithRawResponse(self)
@@ -539,6 +547,12 @@ def evals(self) -> AsyncEvals:
return AsyncEvals(self)
+ @cached_property
+ def containers(self) -> AsyncContainers:
+ from .resources.containers import AsyncContainers
+
+ return AsyncContainers(self)
+
@cached_property
def with_raw_response(self) -> AsyncOpenAIWithRawResponse:
return AsyncOpenAIWithRawResponse(self)
@@ -757,6 +771,12 @@ def evals(self) -> evals.EvalsWithRawResponse:
return EvalsWithRawResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.ContainersWithRawResponse:
+ from .resources.containers import ContainersWithRawResponse
+
+ return ContainersWithRawResponse(self._client.containers)
+
class AsyncOpenAIWithRawResponse:
_client: AsyncOpenAI
@@ -854,6 +874,12 @@ def evals(self) -> evals.AsyncEvalsWithRawResponse:
return AsyncEvalsWithRawResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.AsyncContainersWithRawResponse:
+ from .resources.containers import AsyncContainersWithRawResponse
+
+ return AsyncContainersWithRawResponse(self._client.containers)
+
class OpenAIWithStreamedResponse:
_client: OpenAI
@@ -951,6 +977,12 @@ def evals(self) -> evals.EvalsWithStreamingResponse:
return EvalsWithStreamingResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.ContainersWithStreamingResponse:
+ from .resources.containers import ContainersWithStreamingResponse
+
+ return ContainersWithStreamingResponse(self._client.containers)
+
class AsyncOpenAIWithStreamedResponse:
_client: AsyncOpenAI
@@ -1048,6 +1080,12 @@ def evals(self) -> evals.AsyncEvalsWithStreamingResponse:
return AsyncEvalsWithStreamingResponse(self._client.evals)
+ @cached_property
+ def containers(self) -> containers.AsyncContainersWithStreamingResponse:
+ from .resources.containers import AsyncContainersWithStreamingResponse
+
+ return AsyncContainersWithStreamingResponse(self._client.containers)
+
Client = OpenAI
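The async classes mirror the sync wiring above. A hedged sketch of the `AsyncOpenAI.containers` path (the container name is a placeholder):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    container = await client.containers.create(name="async-scratch")
    # AsyncCursorPage supports `async for`, fetching pages lazily.
    async for c in client.containers.list(limit=10):
        print(c.id)
    await client.containers.delete(container.id)


asyncio.run(main())
```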
diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py
index dd601f9be9..fb7c754917 100644
--- a/src/openai/_module_client.py
+++ b/src/openai/_module_client.py
@@ -19,6 +19,7 @@
from .resources.moderations import Moderations
from .resources.uploads.uploads import Uploads
from .resources.responses.responses import Responses
+ from .resources.containers.containers import Containers
from .resources.fine_tuning.fine_tuning import FineTuning
from .resources.vector_stores.vector_stores import VectorStores
@@ -92,6 +93,12 @@ def __load__(self) -> Embeddings:
return _load_client().embeddings
+class ContainersProxy(LazyProxy["Containers"]):
+ @override
+ def __load__(self) -> Containers:
+ return _load_client().containers
+
+
class CompletionsProxy(LazyProxy["Completions"]):
@override
def __load__(self) -> Completions:
@@ -127,6 +134,7 @@ def __load__(self) -> VectorStores:
uploads: Uploads = UploadsProxy().__as_proxied__()
responses: Responses = ResponsesProxy().__as_proxied__()
embeddings: Embeddings = EmbeddingsProxy().__as_proxied__()
+containers: Containers = ContainersProxy().__as_proxied__()
completions: Completions = CompletionsProxy().__as_proxied__()
moderations: Moderations = ModerationsProxy().__as_proxied__()
fine_tuning: FineTuning = FineTuningProxy().__as_proxied__()
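`ContainersProxy` makes the new resource reachable at module level: the first attribute access lazily constructs the default client. A minimal sketch, assuming `OPENAI_API_KEY` is set:

```python
import openai

# Equivalent to OpenAI().containers.list(...) via the lazy proxy above.
for container in openai.containers.list(limit=5):
    print(container.id)
```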
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 7bf2bbc038..56a8bcaef4 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.80.0" # x-release-please-version
+__version__ = "1.81.0" # x-release-please-version
diff --git a/src/openai/lib/streaming/responses/_responses.py b/src/openai/lib/streaming/responses/_responses.py
index f8f4b64174..0e1e6c0e04 100644
--- a/src/openai/lib/streaming/responses/_responses.py
+++ b/src/openai/lib/streaming/responses/_responses.py
@@ -251,6 +251,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven
delta=event.delta,
item_id=event.item_id,
output_index=event.output_index,
+ sequence_number=event.sequence_number,
type="response.output_text.delta",
snapshot=content.text,
)
@@ -268,6 +269,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven
content_index=event.content_index,
item_id=event.item_id,
output_index=event.output_index,
+ sequence_number=event.sequence_number,
type="response.output_text.done",
text=event.text,
parsed=parse_text(event.text, text_format=self._text_format),
@@ -283,6 +285,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven
delta=event.delta,
item_id=event.item_id,
output_index=event.output_index,
+ sequence_number=event.sequence_number,
type="response.function_call_arguments.delta",
snapshot=output.arguments,
)
@@ -295,6 +298,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven
events.append(
build(
ResponseCompletedEvent,
+ sequence_number=event.sequence_number,
type="response.completed",
response=response,
)
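These hunks thread `sequence_number` from each raw server event into the snapshot events the streaming helper builds, so consumers can order or de-duplicate events. A hedged consumer sketch (model and prompt are placeholders):

```python
from openai import OpenAI

client = OpenAI()

with client.responses.stream(model="gpt-4.1", input="Say hello") as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            # sequence_number now survives into the snapshot event.
            print(event.sequence_number, event.delta)
```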
diff --git a/src/openai/resources/__init__.py b/src/openai/resources/__init__.py
index 8612dec797..82c9f037d9 100644
--- a/src/openai/resources/__init__.py
+++ b/src/openai/resources/__init__.py
@@ -72,6 +72,14 @@
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)
+from .containers import (
+ Containers,
+ AsyncContainers,
+ ContainersWithRawResponse,
+ AsyncContainersWithRawResponse,
+ ContainersWithStreamingResponse,
+ AsyncContainersWithStreamingResponse,
+)
from .embeddings import (
Embeddings,
AsyncEmbeddings,
@@ -198,4 +206,10 @@
"AsyncEvalsWithRawResponse",
"EvalsWithStreamingResponse",
"AsyncEvalsWithStreamingResponse",
+ "Containers",
+ "AsyncContainers",
+ "ContainersWithRawResponse",
+ "AsyncContainersWithRawResponse",
+ "ContainersWithStreamingResponse",
+ "AsyncContainersWithStreamingResponse",
]
diff --git a/src/openai/resources/containers/__init__.py b/src/openai/resources/containers/__init__.py
new file mode 100644
index 0000000000..dc1936780b
--- /dev/null
+++ b/src/openai/resources/containers/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from .containers import (
+ Containers,
+ AsyncContainers,
+ ContainersWithRawResponse,
+ AsyncContainersWithRawResponse,
+ ContainersWithStreamingResponse,
+ AsyncContainersWithStreamingResponse,
+)
+
+__all__ = [
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
+ "Containers",
+ "AsyncContainers",
+ "ContainersWithRawResponse",
+ "AsyncContainersWithRawResponse",
+ "ContainersWithStreamingResponse",
+ "AsyncContainersWithStreamingResponse",
+]
diff --git a/src/openai/resources/containers/containers.py b/src/openai/resources/containers/containers.py
new file mode 100644
index 0000000000..71e5e6b08d
--- /dev/null
+++ b/src/openai/resources/containers/containers.py
@@ -0,0 +1,511 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ...types import container_list_params, container_create_params
+from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from .files.files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from ...pagination import SyncCursorPage, AsyncCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.container_list_response import ContainerListResponse
+from ...types.container_create_response import ContainerCreateResponse
+from ...types.container_retrieve_response import ContainerRetrieveResponse
+
+__all__ = ["Containers", "AsyncContainers"]
+
+
+class Containers(SyncAPIResource):
+ @cached_property
+ def files(self) -> Files:
+ return Files(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ContainersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ContainersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ContainersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ContainersWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerCreateResponse:
+ """
+ Create Container
+
+ Args:
+ name: Name of the container to create.
+
+ expires_after: Container expiration time in seconds relative to the 'anchor' time.
+
+ file_ids: IDs of files to copy to the container.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/containers",
+ body=maybe_transform(
+ {
+ "name": name,
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ },
+ container_create_params.ContainerCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerRetrieveResponse:
+ """
+ Retrieve Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._get(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[ContainerListResponse]:
+ """List Containers
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/containers",
+ page=SyncCursorPage[ContainerListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ container_list_params.ContainerListParams,
+ ),
+ ),
+ model=ContainerListResponse,
+ )
+
+ def delete(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncContainers(AsyncAPIResource):
+ @cached_property
+ def files(self) -> AsyncFiles:
+ return AsyncFiles(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncContainersWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncContainersWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncContainersWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncContainersWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ expires_after: container_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
+ file_ids: List[str] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerCreateResponse:
+ """
+ Create Container
+
+ Args:
+ name: Name of the container to create.
+
+ expires_after: Container expiration time in seconds relative to the 'anchor' time.
+
+ file_ids: IDs of files to copy to the container.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/containers",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "expires_after": expires_after,
+ "file_ids": file_ids,
+ },
+ container_create_params.ContainerCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ContainerRetrieveResponse:
+ """
+ Retrieve Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return await self._get(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ContainerRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[ContainerListResponse, AsyncCursorPage[ContainerListResponse]]:
+ """List Containers
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get_api_list(
+ "/containers",
+ page=AsyncCursorPage[ContainerListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ container_list_params.ContainerListParams,
+ ),
+ ),
+ model=ContainerListResponse,
+ )
+
+ async def delete(
+ self,
+ container_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/containers/{container_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class ContainersWithRawResponse:
+ def __init__(self, containers: Containers) -> None:
+ self._containers = containers
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ containers.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithRawResponse:
+ return FilesWithRawResponse(self._containers.files)
+
+
+class AsyncContainersWithRawResponse:
+ def __init__(self, containers: AsyncContainers) -> None:
+ self._containers = containers
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ containers.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithRawResponse:
+ return AsyncFilesWithRawResponse(self._containers.files)
+
+
+class ContainersWithStreamingResponse:
+ def __init__(self, containers: Containers) -> None:
+ self._containers = containers
+
+ self.create = to_streamed_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ containers.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> FilesWithStreamingResponse:
+ return FilesWithStreamingResponse(self._containers.files)
+
+
+class AsyncContainersWithStreamingResponse:
+ def __init__(self, containers: AsyncContainers) -> None:
+ self._containers = containers
+
+ self.create = async_to_streamed_response_wrapper(
+ containers.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ containers.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ containers.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ containers.delete,
+ )
+
+ @cached_property
+ def files(self) -> AsyncFilesWithStreamingResponse:
+ return AsyncFilesWithStreamingResponse(self._containers.files)
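The wrapper classes at the bottom expose each method behind `.with_raw_response` and `.with_streaming_response`, as the property docstrings describe. A hedged sketch of both access paths (the container name is a placeholder):

```python
from openai import OpenAI

client = OpenAI()

# Raw access: returns the HTTP response up front; .parse() yields the typed model.
raw = client.containers.with_raw_response.create(name="scratch")
print(raw.headers.get("x-request-id"))
container = raw.parse()

# Streaming access: the response body is not read eagerly.
with client.containers.with_streaming_response.retrieve(container.id) as response:
    print(response.headers.get("content-type"))
```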
diff --git a/src/openai/resources/containers/files/__init__.py b/src/openai/resources/containers/files/__init__.py
new file mode 100644
index 0000000000..f71f7dbf55
--- /dev/null
+++ b/src/openai/resources/containers/files/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+
+__all__ = [
+ "Content",
+ "AsyncContent",
+ "ContentWithRawResponse",
+ "AsyncContentWithRawResponse",
+ "ContentWithStreamingResponse",
+ "AsyncContentWithStreamingResponse",
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
+]
diff --git a/src/openai/resources/containers/files/content.py b/src/openai/resources/containers/files/content.py
new file mode 100644
index 0000000000..1aa2d1729d
--- /dev/null
+++ b/src/openai/resources/containers/files/content.py
@@ -0,0 +1,166 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from .... import _legacy_response
+from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._base_client import make_request_options
+
+__all__ = ["Content", "AsyncContent"]
+
+
+class Content(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ContentWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Retrieve Container File Content
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._get(
+ f"/containers/{container_id}/files/{file_id}/content",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncContent(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncContentWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncContentWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncContentWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncContentWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Retrieve Container File Content
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._get(
+ f"/containers/{container_id}/files/{file_id}/content",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class ContentWithRawResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class AsyncContentWithRawResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ content.retrieve,
+ )
+
+
+class ContentWithStreamingResponse:
+ def __init__(self, content: Content) -> None:
+ self._content = content
+
+ self.retrieve = to_streamed_response_wrapper(
+ content.retrieve,
+ )
+
+
+class AsyncContentWithStreamingResponse:
+ def __init__(self, content: AsyncContent) -> None:
+ self._content = content
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ content.retrieve,
+ )
diff --git a/src/openai/resources/containers/files/files.py b/src/openai/resources/containers/files/files.py
new file mode 100644
index 0000000000..88b6594301
--- /dev/null
+++ b/src/openai/resources/containers/files/files.py
@@ -0,0 +1,543 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Mapping, cast
+from typing_extensions import Literal
+
+import httpx
+
+from .... import _legacy_response
+from .content import (
+ Content,
+ AsyncContent,
+ ContentWithRawResponse,
+ AsyncContentWithRawResponse,
+ ContentWithStreamingResponse,
+ AsyncContentWithStreamingResponse,
+)
+from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, FileTypes
+from ...._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ...._base_client import AsyncPaginator, make_request_options
+from ....types.containers import file_list_params, file_create_params
+from ....types.containers.file_list_response import FileListResponse
+from ....types.containers.file_create_response import FileCreateResponse
+from ....types.containers.file_retrieve_response import FileRetrieveResponse
+
+__all__ = ["Files", "AsyncFiles"]
+
+
+class Files(SyncAPIResource):
+ @cached_property
+ def content(self) -> Content:
+ return Content(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> FilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return FilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return FilesWithStreamingResponse(self)
+
+ def create(
+ self,
+ container_id: str,
+ *,
+ file: FileTypes | NotGiven = NOT_GIVEN,
+ file_id: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileCreateResponse:
+ """
+ Create a Container File
+
+ You can send either a multipart/form-data request with the raw file content, or
+ a JSON request with a file ID.
+
+ Args:
+ file: The File object (not file name) to be uploaded.
+
+          file_id: The ID of an already-uploaded file to copy into the container.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "file_id": file_id,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a multipart boundary, e.g. multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return self._post(
+            f"/containers/{container_id}/files",
+            body=maybe_transform(body, file_create_params.FileCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileCreateResponse,
+        )
+
+ def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileRetrieveResponse:
+ """
+ Retrieve Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return self._get(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileRetrieveResponse,
+ )
+
+ def list(
+ self,
+ container_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncCursorPage[FileListResponse]:
+ """List Container files
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._get_api_list(
+ f"/containers/{container_id}/files",
+ page=SyncCursorPage[FileListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ file_list_params.FileListParams,
+ ),
+ ),
+ model=FileListResponse,
+ )
+
+ def delete(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncFiles(AsyncAPIResource):
+ @cached_property
+ def content(self) -> AsyncContent:
+ return AsyncContent(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncFilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncFilesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ container_id: str,
+ *,
+ file: FileTypes | NotGiven = NOT_GIVEN,
+ file_id: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileCreateResponse:
+ """
+ Create a Container File
+
+ You can send either a multipart/form-data request with the raw file content, or
+ a JSON request with a file ID.
+
+ Args:
+ file: The File object (not file name) to be uploaded.
+
+          file_id: The ID of an already-uploaded file to copy into the container.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+        body = deepcopy_minimal(
+            {
+                "file": file,
+                "file_id": file_id,
+            }
+        )
+        files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+        # It should be noted that the actual Content-Type header that will be
+        # sent to the server will contain a multipart boundary, e.g. multipart/form-data; boundary=---abc--
+        extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+        return await self._post(
+            f"/containers/{container_id}/files",
+            body=await async_maybe_transform(body, file_create_params.FileCreateParams),
+            files=files,
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=FileCreateResponse,
+        )
+
+ async def retrieve(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileRetrieveResponse:
+ """
+ Retrieve Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return await self._get(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileRetrieveResponse,
+ )
+
+ def list(
+ self,
+ container_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[FileListResponse, AsyncCursorPage[FileListResponse]]:
+ """List Container files
+
+ Args:
+ after: A cursor for use in pagination.
+
+ `after` is an object ID that defines your place
+ in the list. For instance, if you make a list request and receive 100 objects,
+ ending with obj_foo, your subsequent call can include after=obj_foo in order to
+ fetch the next page of the list.
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ order and `desc` for descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ return self._get_api_list(
+ f"/containers/{container_id}/files",
+ page=AsyncCursorPage[FileListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ },
+ file_list_params.FileListParams,
+ ),
+ ),
+ model=FileListResponse,
+ )
+
+ async def delete(
+ self,
+ file_id: str,
+ *,
+ container_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """
+ Delete Container File
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not container_id:
+ raise ValueError(f"Expected a non-empty value for `container_id` but received {container_id!r}")
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/containers/{container_id}/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
+
+class FilesWithRawResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ files.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ files.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithRawResponse:
+ return ContentWithRawResponse(self._files.content)
+
+
+class AsyncFilesWithRawResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ files.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ files.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithRawResponse:
+ return AsyncContentWithRawResponse(self._files.content)
+
+
+class FilesWithStreamingResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = to_streamed_response_wrapper(
+ files.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ files.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> ContentWithStreamingResponse:
+ return ContentWithStreamingResponse(self._files.content)
+
+
+class AsyncFilesWithStreamingResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = async_to_streamed_response_wrapper(
+ files.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ files.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ files.delete,
+ )
+
+ @cached_property
+ def content(self) -> AsyncContentWithStreamingResponse:
+ return AsyncContentWithStreamingResponse(self._files.content)
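
A minimal usage sketch of the container-file list endpoint added above, for reviewers (assumes an `OpenAI` client configured via the `OPENAI_API_KEY` environment variable; the container ID is a placeholder):

    from openai import OpenAI

    client = OpenAI()

    # `after`, `limit`, and `order` map to the query parameters documented
    # in the method above; the result is a cursor page of FileListResponse.
    page = client.containers.files.list(
        container_id="container_id",  # placeholder ID
        limit=20,
        order="desc",
    )
    for file in page:  # iterating a cursor page should follow `after` cursors automatically
        print(file.id, file.path)
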
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index ad9576983f..4a456b82ea 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -948,6 +948,43 @@ def delete(
cast_to=NoneType,
)
+ def cancel(
+ self,
+ response_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """Cancels a model response with the given ID.
+
+ Only responses created with the
+ `background` parameter set to `true` can be cancelled.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not response_id:
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._post(
+ f"/responses/{response_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
class AsyncResponses(AsyncAPIResource):
@cached_property
@@ -1851,6 +1888,43 @@ async def delete(
cast_to=NoneType,
)
+ async def cancel(
+ self,
+ response_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> None:
+ """Cancels a model response with the given ID.
+
+ Only responses created with the
+ `background` parameter set to `true` can be cancelled.
+ [Learn more](https://platform.openai.com/docs/guides/background).
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not response_id:
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._post(
+ f"/responses/{response_id}/cancel",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NoneType,
+ )
+
class ResponsesWithRawResponse:
def __init__(self, responses: Responses) -> None:
@@ -1865,6 +1939,9 @@ def __init__(self, responses: Responses) -> None:
self.delete = _legacy_response.to_raw_response_wrapper(
responses.delete,
)
+ self.cancel = _legacy_response.to_raw_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> InputItemsWithRawResponse:
@@ -1884,6 +1961,9 @@ def __init__(self, responses: AsyncResponses) -> None:
self.delete = _legacy_response.async_to_raw_response_wrapper(
responses.delete,
)
+ self.cancel = _legacy_response.async_to_raw_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> AsyncInputItemsWithRawResponse:
@@ -1903,6 +1983,9 @@ def __init__(self, responses: Responses) -> None:
self.delete = to_streamed_response_wrapper(
responses.delete,
)
+ self.cancel = to_streamed_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> InputItemsWithStreamingResponse:
@@ -1922,6 +2005,9 @@ def __init__(self, responses: AsyncResponses) -> None:
self.delete = async_to_streamed_response_wrapper(
responses.delete,
)
+ self.cancel = async_to_streamed_response_wrapper(
+ responses.cancel,
+ )
@cached_property
def input_items(self) -> AsyncInputItemsWithStreamingResponse:
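
A hedged sketch of the new `responses.cancel` method. Per its docstring, only responses created with `background` set to true can be cancelled; the `background` parameter on `responses.create` and the model name below are assumptions, not part of this diff:

    from openai import OpenAI

    client = OpenAI()

    # Assumed: create a background response (see the cancel docstring above).
    response = client.responses.create(
        model="gpt-4o",  # placeholder model name
        input="Write a long report.",
        background=True,
    )

    # Cancel it via the endpoint added in this patch; the method returns None.
    client.responses.cancel(response.id)
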
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py
index bf5493fd62..453b26f555 100644
--- a/src/openai/types/__init__.py
+++ b/src/openai/types/__init__.py
@@ -56,19 +56,24 @@
from .upload_create_params import UploadCreateParams as UploadCreateParams
from .vector_store_deleted import VectorStoreDeleted as VectorStoreDeleted
from .audio_response_format import AudioResponseFormat as AudioResponseFormat
+from .container_list_params import ContainerListParams as ContainerListParams
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse
from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy
from .upload_complete_params import UploadCompleteParams as UploadCompleteParams
+from .container_create_params import ContainerCreateParams as ContainerCreateParams
+from .container_list_response import ContainerListResponse as ContainerListResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .moderation_create_params import ModerationCreateParams as ModerationCreateParams
from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams
+from .container_create_response import ContainerCreateResponse as ContainerCreateResponse
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse
from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams
from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams
from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams
+from .container_retrieve_response import ContainerRetrieveResponse as ContainerRetrieveResponse
from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam
from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam
from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse
diff --git a/src/openai/types/container_create_params.py b/src/openai/types/container_create_params.py
new file mode 100644
index 0000000000..bd27334933
--- /dev/null
+++ b/src/openai/types/container_create_params.py
@@ -0,0 +1,29 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ContainerCreateParams", "ExpiresAfter"]
+
+
+class ContainerCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """Name of the container to create."""
+
+ expires_after: ExpiresAfter
+ """Container expiration time in seconds relative to the 'anchor' time."""
+
+ file_ids: List[str]
+ """IDs of files to copy to the container."""
+
+
+class ExpiresAfter(TypedDict, total=False):
+ anchor: Required[Literal["last_active_at"]]
+ """Time anchor for the expiration time.
+
+ Currently only 'last_active_at' is supported.
+ """
+
+ minutes: Required[int]
+ """The number of minutes after the anchor before the container expires."""
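
The TypedDict above translates to keyword arguments on `client.containers.create`; a minimal sketch (the container name and file ID are placeholders):

    from openai import OpenAI

    client = OpenAI()

    container = client.containers.create(
        name="my-container",  # required
        expires_after={
            "anchor": "last_active_at",  # currently the only supported anchor
            "minutes": 20,
        },
        file_ids=["file_abc123"],  # optional; IDs of files to copy in
    )
    print(container.id, container.status)
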
diff --git a/src/openai/types/container_create_response.py b/src/openai/types/container_create_response.py
new file mode 100644
index 0000000000..c0ccc45a1c
--- /dev/null
+++ b/src/openai/types/container_create_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ContainerCreateResponse", "ExpiresAfter"]
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Optional[Literal["last_active_at"]] = None
+ """The reference point for the expiration."""
+
+ minutes: Optional[int] = None
+ """The number of minutes after the anchor before the container expires."""
+
+
+class ContainerCreateResponse(BaseModel):
+ id: str
+ """Unique identifier for the container."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the container was created."""
+
+ name: str
+ """Name of the container."""
+
+ object: str
+ """The type of this object."""
+
+ status: str
+ """Status of the container (e.g., active, deleted)."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """
+ The container will expire after this time period. The anchor is the reference
+ point for the expiration. The minutes is the number of minutes after the anchor
+ before the container expires.
+ """
diff --git a/src/openai/types/container_list_params.py b/src/openai/types/container_list_params.py
new file mode 100644
index 0000000000..4821a87d18
--- /dev/null
+++ b/src/openai/types/container_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["ContainerListParams"]
+
+
+class ContainerListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/src/openai/types/container_list_response.py b/src/openai/types/container_list_response.py
new file mode 100644
index 0000000000..2d9c11d8a4
--- /dev/null
+++ b/src/openai/types/container_list_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ContainerListResponse", "ExpiresAfter"]
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Optional[Literal["last_active_at"]] = None
+ """The reference point for the expiration."""
+
+ minutes: Optional[int] = None
+ """The number of minutes after the anchor before the container expires."""
+
+
+class ContainerListResponse(BaseModel):
+ id: str
+ """Unique identifier for the container."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the container was created."""
+
+ name: str
+ """Name of the container."""
+
+ object: str
+ """The type of this object."""
+
+ status: str
+ """Status of the container (e.g., active, deleted)."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """
+ The container will expire after this time period. The anchor is the reference
+ point for the expiration. The minutes is the number of minutes after the anchor
+ before the container expires.
+ """
diff --git a/src/openai/types/container_retrieve_response.py b/src/openai/types/container_retrieve_response.py
new file mode 100644
index 0000000000..eab291b34f
--- /dev/null
+++ b/src/openai/types/container_retrieve_response.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["ContainerRetrieveResponse", "ExpiresAfter"]
+
+
+class ExpiresAfter(BaseModel):
+ anchor: Optional[Literal["last_active_at"]] = None
+ """The reference point for the expiration."""
+
+ minutes: Optional[int] = None
+ """The number of minutes after the anchor before the container expires."""
+
+
+class ContainerRetrieveResponse(BaseModel):
+ id: str
+ """Unique identifier for the container."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the container was created."""
+
+ name: str
+ """Name of the container."""
+
+ object: str
+ """The type of this object."""
+
+ status: str
+ """Status of the container (e.g., active, deleted)."""
+
+ expires_after: Optional[ExpiresAfter] = None
+ """
+ The container will expire after this time period. The anchor is the reference
+ point for the expiration. The minutes is the number of minutes after the anchor
+ before the container expires.
+ """
diff --git a/src/openai/types/containers/__init__.py b/src/openai/types/containers/__init__.py
new file mode 100644
index 0000000000..7d555ad3a4
--- /dev/null
+++ b/src/openai/types/containers/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .file_list_params import FileListParams as FileListParams
+from .file_create_params import FileCreateParams as FileCreateParams
+from .file_list_response import FileListResponse as FileListResponse
+from .file_create_response import FileCreateResponse as FileCreateResponse
+from .file_retrieve_response import FileRetrieveResponse as FileRetrieveResponse
diff --git a/src/openai/types/containers/file_create_params.py b/src/openai/types/containers/file_create_params.py
new file mode 100644
index 0000000000..1e41330017
--- /dev/null
+++ b/src/openai/types/containers/file_create_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+ file: FileTypes
+ """The File object (not file name) to be uploaded."""
+
+ file_id: str
+ """Name of the file to create."""
diff --git a/src/openai/types/containers/file_create_response.py b/src/openai/types/containers/file_create_response.py
new file mode 100644
index 0000000000..4a652483fc
--- /dev/null
+++ b/src/openai/types/containers/file_create_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileCreateResponse"]
+
+
+class FileCreateResponse(BaseModel):
+ id: str
+ """Unique identifier for the file."""
+
+ bytes: int
+ """Size of the file in bytes."""
+
+ container_id: str
+ """The container this file belongs to."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the file was created."""
+
+ object: Literal["container.file"]
+ """The type of this object (`container.file`)."""
+
+ path: str
+ """Path of the file in the container."""
+
+ source: str
+ """Source of the file (e.g., `user`, `assistant`)."""
diff --git a/src/openai/types/containers/file_list_params.py b/src/openai/types/containers/file_list_params.py
new file mode 100644
index 0000000000..3565acaf36
--- /dev/null
+++ b/src/openai/types/containers/file_list_params.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["FileListParams"]
+
+
+class FileListParams(TypedDict, total=False):
+ after: str
+ """A cursor for use in pagination.
+
+ `after` is an object ID that defines your place in the list. For instance, if
+ you make a list request and receive 100 objects, ending with obj_foo, your
+ subsequent call can include after=obj_foo in order to fetch the next page of the
+ list.
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """Sort order by the `created_at` timestamp of the objects.
+
+ `asc` for ascending order and `desc` for descending order.
+ """
diff --git a/src/openai/types/containers/file_list_response.py b/src/openai/types/containers/file_list_response.py
new file mode 100644
index 0000000000..e5eee38d99
--- /dev/null
+++ b/src/openai/types/containers/file_list_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileListResponse"]
+
+
+class FileListResponse(BaseModel):
+ id: str
+ """Unique identifier for the file."""
+
+ bytes: int
+ """Size of the file in bytes."""
+
+ container_id: str
+ """The container this file belongs to."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the file was created."""
+
+ object: Literal["container.file"]
+ """The type of this object (`container.file`)."""
+
+ path: str
+ """Path of the file in the container."""
+
+ source: str
+ """Source of the file (e.g., `user`, `assistant`)."""
diff --git a/src/openai/types/containers/file_retrieve_response.py b/src/openai/types/containers/file_retrieve_response.py
new file mode 100644
index 0000000000..37fb0e43dd
--- /dev/null
+++ b/src/openai/types/containers/file_retrieve_response.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileRetrieveResponse"]
+
+
+class FileRetrieveResponse(BaseModel):
+ id: str
+ """Unique identifier for the file."""
+
+ bytes: int
+ """Size of the file in bytes."""
+
+ container_id: str
+ """The container this file belongs to."""
+
+ created_at: int
+ """Unix timestamp (in seconds) when the file was created."""
+
+ object: Literal["container.file"]
+ """The type of this object (`container.file`)."""
+
+ path: str
+ """Path of the file in the container."""
+
+ source: str
+ """Source of the file (e.g., `user`, `assistant`)."""
diff --git a/src/openai/types/containers/files/__init__.py b/src/openai/types/containers/files/__init__.py
new file mode 100644
index 0000000000..f8ee8b14b1
--- /dev/null
+++ b/src/openai/types/containers/files/__init__.py
@@ -0,0 +1,3 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
diff --git a/src/openai/types/responses/response_audio_delta_event.py b/src/openai/types/responses/response_audio_delta_event.py
index f3d77fac52..6fb7887b80 100644
--- a/src/openai/types/responses/response_audio_delta_event.py
+++ b/src/openai/types/responses/response_audio_delta_event.py
@@ -11,5 +11,8 @@ class ResponseAudioDeltaEvent(BaseModel):
delta: str
"""A chunk of Base64 encoded response audio bytes."""
+ sequence_number: int
+ """A sequence number for this chunk of the stream response."""
+
type: Literal["response.audio.delta"]
"""The type of the event. Always `response.audio.delta`."""
diff --git a/src/openai/types/responses/response_audio_done_event.py b/src/openai/types/responses/response_audio_done_event.py
index 5654f8e398..2592ae8dcd 100644
--- a/src/openai/types/responses/response_audio_done_event.py
+++ b/src/openai/types/responses/response_audio_done_event.py
@@ -8,5 +8,8 @@
class ResponseAudioDoneEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of the delta."""
+
type: Literal["response.audio.done"]
"""The type of the event. Always `response.audio.done`."""
diff --git a/src/openai/types/responses/response_audio_transcript_delta_event.py b/src/openai/types/responses/response_audio_transcript_delta_event.py
index 69b6660f3f..830c133d61 100644
--- a/src/openai/types/responses/response_audio_transcript_delta_event.py
+++ b/src/openai/types/responses/response_audio_transcript_delta_event.py
@@ -11,5 +11,8 @@ class ResponseAudioTranscriptDeltaEvent(BaseModel):
delta: str
"""The partial transcript of the audio response."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.audio.transcript.delta"]
"""The type of the event. Always `response.audio.transcript.delta`."""
diff --git a/src/openai/types/responses/response_audio_transcript_done_event.py b/src/openai/types/responses/response_audio_transcript_done_event.py
index 1a20319f83..e39f501cf0 100644
--- a/src/openai/types/responses/response_audio_transcript_done_event.py
+++ b/src/openai/types/responses/response_audio_transcript_done_event.py
@@ -8,5 +8,8 @@
class ResponseAudioTranscriptDoneEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.audio.transcript.done"]
"""The type of the event. Always `response.audio.transcript.done`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
index 7527238d06..f25b3f3cab 100644
--- a/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_code_delta_event.py
@@ -14,5 +14,8 @@ class ResponseCodeInterpreterCallCodeDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.code.delta"]
"""The type of the event. Always `response.code_interpreter_call.code.delta`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
index f84d4cf3e8..bf1868cf0f 100644
--- a/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_code_done_event.py
@@ -14,5 +14,8 @@ class ResponseCodeInterpreterCallCodeDoneEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.code.done"]
"""The type of the event. Always `response.code_interpreter_call.code.done`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_completed_event.py b/src/openai/types/responses/response_code_interpreter_call_completed_event.py
index b0cb73fb72..3a3a718971 100644
--- a/src/openai/types/responses/response_code_interpreter_call_completed_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_completed_event.py
@@ -15,5 +15,8 @@ class ResponseCodeInterpreterCallCompletedEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.completed"]
"""The type of the event. Always `response.code_interpreter_call.completed`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
index 64b739f308..d1c8230919 100644
--- a/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_in_progress_event.py
@@ -15,5 +15,8 @@ class ResponseCodeInterpreterCallInProgressEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.in_progress"]
"""The type of the event. Always `response.code_interpreter_call.in_progress`."""
diff --git a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
index 3100eac175..7f4d294f56 100644
--- a/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
+++ b/src/openai/types/responses/response_code_interpreter_call_interpreting_event.py
@@ -15,5 +15,8 @@ class ResponseCodeInterpreterCallInterpretingEvent(BaseModel):
output_index: int
"""The index of the output item that the code interpreter call is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.code_interpreter_call.interpreting"]
"""The type of the event. Always `response.code_interpreter_call.interpreting`."""
diff --git a/src/openai/types/responses/response_completed_event.py b/src/openai/types/responses/response_completed_event.py
index a944f248ef..8a2bd51f75 100644
--- a/src/openai/types/responses/response_completed_event.py
+++ b/src/openai/types/responses/response_completed_event.py
@@ -12,5 +12,8 @@ class ResponseCompletedEvent(BaseModel):
response: Response
"""Properties of the completed response."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.completed"]
"""The type of the event. Always `response.completed`."""
diff --git a/src/openai/types/responses/response_content_part_added_event.py b/src/openai/types/responses/response_content_part_added_event.py
index 93f5ec4b0c..11e0ac7c92 100644
--- a/src/openai/types/responses/response_content_part_added_event.py
+++ b/src/openai/types/responses/response_content_part_added_event.py
@@ -26,5 +26,8 @@ class ResponseContentPartAddedEvent(BaseModel):
part: Part
"""The content part that was added."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.content_part.added"]
"""The type of the event. Always `response.content_part.added`."""
diff --git a/src/openai/types/responses/response_content_part_done_event.py b/src/openai/types/responses/response_content_part_done_event.py
index 4ec0739877..e1b411bb45 100644
--- a/src/openai/types/responses/response_content_part_done_event.py
+++ b/src/openai/types/responses/response_content_part_done_event.py
@@ -26,5 +26,8 @@ class ResponseContentPartDoneEvent(BaseModel):
part: Part
"""The content part that is done."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.content_part.done"]
"""The type of the event. Always `response.content_part.done`."""
diff --git a/src/openai/types/responses/response_created_event.py b/src/openai/types/responses/response_created_event.py
index 7a524cec87..73a9d700d4 100644
--- a/src/openai/types/responses/response_created_event.py
+++ b/src/openai/types/responses/response_created_event.py
@@ -12,5 +12,8 @@ class ResponseCreatedEvent(BaseModel):
response: Response
"""The response that was created."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.created"]
"""The type of the event. Always `response.created`."""
diff --git a/src/openai/types/responses/response_error_event.py b/src/openai/types/responses/response_error_event.py
index 1b7e605d02..826c395125 100644
--- a/src/openai/types/responses/response_error_event.py
+++ b/src/openai/types/responses/response_error_event.py
@@ -18,5 +18,8 @@ class ResponseErrorEvent(BaseModel):
param: Optional[str] = None
"""The error parameter."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["error"]
"""The type of the event. Always `error`."""
diff --git a/src/openai/types/responses/response_failed_event.py b/src/openai/types/responses/response_failed_event.py
index 3e8f75d8c4..cdd3d7d808 100644
--- a/src/openai/types/responses/response_failed_event.py
+++ b/src/openai/types/responses/response_failed_event.py
@@ -12,5 +12,8 @@ class ResponseFailedEvent(BaseModel):
response: Response
"""The response that failed."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.failed"]
"""The type of the event. Always `response.failed`."""
diff --git a/src/openai/types/responses/response_file_search_call_completed_event.py b/src/openai/types/responses/response_file_search_call_completed_event.py
index 4b86083369..08e51b2d3f 100644
--- a/src/openai/types/responses/response_file_search_call_completed_event.py
+++ b/src/openai/types/responses/response_file_search_call_completed_event.py
@@ -14,5 +14,8 @@ class ResponseFileSearchCallCompletedEvent(BaseModel):
output_index: int
"""The index of the output item that the file search call is initiated."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.file_search_call.completed"]
"""The type of the event. Always `response.file_search_call.completed`."""
diff --git a/src/openai/types/responses/response_file_search_call_in_progress_event.py b/src/openai/types/responses/response_file_search_call_in_progress_event.py
index eb42e3dad6..63840a649f 100644
--- a/src/openai/types/responses/response_file_search_call_in_progress_event.py
+++ b/src/openai/types/responses/response_file_search_call_in_progress_event.py
@@ -14,5 +14,8 @@ class ResponseFileSearchCallInProgressEvent(BaseModel):
output_index: int
"""The index of the output item that the file search call is initiated."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.file_search_call.in_progress"]
"""The type of the event. Always `response.file_search_call.in_progress`."""
diff --git a/src/openai/types/responses/response_file_search_call_searching_event.py b/src/openai/types/responses/response_file_search_call_searching_event.py
index 3cd8905de6..706c8c57ad 100644
--- a/src/openai/types/responses/response_file_search_call_searching_event.py
+++ b/src/openai/types/responses/response_file_search_call_searching_event.py
@@ -14,5 +14,8 @@ class ResponseFileSearchCallSearchingEvent(BaseModel):
output_index: int
"""The index of the output item that the file search call is searching."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.file_search_call.searching"]
"""The type of the event. Always `response.file_search_call.searching`."""
diff --git a/src/openai/types/responses/response_function_call_arguments_delta_event.py b/src/openai/types/responses/response_function_call_arguments_delta_event.py
index 0989b7caeb..c6bc5dfad7 100644
--- a/src/openai/types/responses/response_function_call_arguments_delta_event.py
+++ b/src/openai/types/responses/response_function_call_arguments_delta_event.py
@@ -19,5 +19,8 @@ class ResponseFunctionCallArgumentsDeltaEvent(BaseModel):
The index of the output item that the function-call arguments delta is added to.
"""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.function_call_arguments.delta"]
"""The type of the event. Always `response.function_call_arguments.delta`."""
diff --git a/src/openai/types/responses/response_function_call_arguments_done_event.py b/src/openai/types/responses/response_function_call_arguments_done_event.py
index 1d805a57c6..875e7a6875 100644
--- a/src/openai/types/responses/response_function_call_arguments_done_event.py
+++ b/src/openai/types/responses/response_function_call_arguments_done_event.py
@@ -17,4 +17,7 @@ class ResponseFunctionCallArgumentsDoneEvent(BaseModel):
output_index: int
"""The index of the output item."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.function_call_arguments.done"]
diff --git a/src/openai/types/responses/response_image_gen_call_completed_event.py b/src/openai/types/responses/response_image_gen_call_completed_event.py
index fd499f909e..a554273ed0 100644
--- a/src/openai/types/responses/response_image_gen_call_completed_event.py
+++ b/src/openai/types/responses/response_image_gen_call_completed_event.py
@@ -14,5 +14,8 @@ class ResponseImageGenCallCompletedEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.image_generation_call.completed"]
"""The type of the event. Always 'response.image_generation_call.completed'."""
diff --git a/src/openai/types/responses/response_image_gen_call_generating_event.py b/src/openai/types/responses/response_image_gen_call_generating_event.py
index 6e7e3efe5c..74b4f57333 100644
--- a/src/openai/types/responses/response_image_gen_call_generating_event.py
+++ b/src/openai/types/responses/response_image_gen_call_generating_event.py
@@ -1,6 +1,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
from typing_extensions import Literal
from ..._models import BaseModel
@@ -15,8 +14,8 @@ class ResponseImageGenCallGeneratingEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of the image generation item being processed."""
+
type: Literal["response.image_generation_call.generating"]
"""The type of the event. Always 'response.image_generation_call.generating'."""
-
- sequence_number: Optional[int] = None
- """The sequence number of the image generation item being processed."""
diff --git a/src/openai/types/responses/response_in_progress_event.py b/src/openai/types/responses/response_in_progress_event.py
index 7d96cbb8ad..b82e10b357 100644
--- a/src/openai/types/responses/response_in_progress_event.py
+++ b/src/openai/types/responses/response_in_progress_event.py
@@ -12,5 +12,8 @@ class ResponseInProgressEvent(BaseModel):
response: Response
"""The response that is in progress."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.in_progress"]
"""The type of the event. Always `response.in_progress`."""
diff --git a/src/openai/types/responses/response_incomplete_event.py b/src/openai/types/responses/response_incomplete_event.py
index 742b789c7e..63c969a428 100644
--- a/src/openai/types/responses/response_incomplete_event.py
+++ b/src/openai/types/responses/response_incomplete_event.py
@@ -12,5 +12,8 @@ class ResponseIncompleteEvent(BaseModel):
response: Response
"""The response that was incomplete."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.incomplete"]
"""The type of the event. Always `response.incomplete`."""
diff --git a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py
index ad6738a3b8..d6651e6999 100644
--- a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py
+++ b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py
@@ -17,5 +17,8 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.arguments_delta"]
"""The type of the event. Always 'response.mcp_call.arguments_delta'."""
diff --git a/src/openai/types/responses/response_mcp_call_arguments_done_event.py b/src/openai/types/responses/response_mcp_call_arguments_done_event.py
index 4095cedb0f..a7ce46ad36 100644
--- a/src/openai/types/responses/response_mcp_call_arguments_done_event.py
+++ b/src/openai/types/responses/response_mcp_call_arguments_done_event.py
@@ -17,5 +17,8 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.arguments_done"]
"""The type of the event. Always 'response.mcp_call.arguments_done'."""
diff --git a/src/openai/types/responses/response_mcp_call_completed_event.py b/src/openai/types/responses/response_mcp_call_completed_event.py
index 63b1b65b31..009fbc3c60 100644
--- a/src/openai/types/responses/response_mcp_call_completed_event.py
+++ b/src/openai/types/responses/response_mcp_call_completed_event.py
@@ -8,5 +8,8 @@
class ResponseMcpCallCompletedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.completed"]
"""The type of the event. Always 'response.mcp_call.completed'."""
diff --git a/src/openai/types/responses/response_mcp_call_failed_event.py b/src/openai/types/responses/response_mcp_call_failed_event.py
index 1f94f4d17e..e6edc6ded5 100644
--- a/src/openai/types/responses/response_mcp_call_failed_event.py
+++ b/src/openai/types/responses/response_mcp_call_failed_event.py
@@ -8,5 +8,8 @@
class ResponseMcpCallFailedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.failed"]
"""The type of the event. Always 'response.mcp_call.failed'."""
diff --git a/src/openai/types/responses/response_mcp_call_in_progress_event.py b/src/openai/types/responses/response_mcp_call_in_progress_event.py
index a90508a13c..401c316851 100644
--- a/src/openai/types/responses/response_mcp_call_in_progress_event.py
+++ b/src/openai/types/responses/response_mcp_call_in_progress_event.py
@@ -14,5 +14,8 @@ class ResponseMcpCallInProgressEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_call.in_progress"]
"""The type of the event. Always 'response.mcp_call.in_progress'."""
diff --git a/src/openai/types/responses/response_mcp_list_tools_completed_event.py b/src/openai/types/responses/response_mcp_list_tools_completed_event.py
index c6a921b5bc..6290c3cf9f 100644
--- a/src/openai/types/responses/response_mcp_list_tools_completed_event.py
+++ b/src/openai/types/responses/response_mcp_list_tools_completed_event.py
@@ -8,5 +8,8 @@
class ResponseMcpListToolsCompletedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_list_tools.completed"]
"""The type of the event. Always 'response.mcp_list_tools.completed'."""
diff --git a/src/openai/types/responses/response_mcp_list_tools_failed_event.py b/src/openai/types/responses/response_mcp_list_tools_failed_event.py
index 639a2356db..1f6e325b36 100644
--- a/src/openai/types/responses/response_mcp_list_tools_failed_event.py
+++ b/src/openai/types/responses/response_mcp_list_tools_failed_event.py
@@ -8,5 +8,8 @@
class ResponseMcpListToolsFailedEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_list_tools.failed"]
"""The type of the event. Always 'response.mcp_list_tools.failed'."""
diff --git a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py
index 41c2334fee..236e5fe6e7 100644
--- a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py
+++ b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py
@@ -8,5 +8,8 @@
class ResponseMcpListToolsInProgressEvent(BaseModel):
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.mcp_list_tools.in_progress"]
"""The type of the event. Always 'response.mcp_list_tools.in_progress'."""
diff --git a/src/openai/types/responses/response_output_item_added_event.py b/src/openai/types/responses/response_output_item_added_event.py
index 7344fb9a6c..7cd2a3946d 100644
--- a/src/openai/types/responses/response_output_item_added_event.py
+++ b/src/openai/types/responses/response_output_item_added_event.py
@@ -15,5 +15,8 @@ class ResponseOutputItemAddedEvent(BaseModel):
output_index: int
"""The index of the output item that was added."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_item.added"]
"""The type of the event. Always `response.output_item.added`."""
diff --git a/src/openai/types/responses/response_output_item_done_event.py b/src/openai/types/responses/response_output_item_done_event.py
index a0a871a019..37d3694cf7 100644
--- a/src/openai/types/responses/response_output_item_done_event.py
+++ b/src/openai/types/responses/response_output_item_done_event.py
@@ -15,5 +15,8 @@ class ResponseOutputItemDoneEvent(BaseModel):
output_index: int
"""The index of the output item that was marked done."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_item.done"]
"""The type of the event. Always `response.output_item.done`."""
diff --git a/src/openai/types/responses/response_output_text_annotation_added_event.py b/src/openai/types/responses/response_output_text_annotation_added_event.py
index 8e9e340b6b..ce96790c92 100644
--- a/src/openai/types/responses/response_output_text_annotation_added_event.py
+++ b/src/openai/types/responses/response_output_text_annotation_added_event.py
@@ -23,5 +23,8 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_text_annotation.added"]
"""The type of the event. Always 'response.output_text_annotation.added'."""
diff --git a/src/openai/types/responses/response_queued_event.py b/src/openai/types/responses/response_queued_event.py
index 90981d60d6..40257408a4 100644
--- a/src/openai/types/responses/response_queued_event.py
+++ b/src/openai/types/responses/response_queued_event.py
@@ -12,5 +12,8 @@ class ResponseQueuedEvent(BaseModel):
response: Response
"""The full response object that is queued."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.queued"]
"""The type of the event. Always 'response.queued'."""
diff --git a/src/openai/types/responses/response_reasoning_delta_event.py b/src/openai/types/responses/response_reasoning_delta_event.py
index 5520c45c73..f37d3d370c 100644
--- a/src/openai/types/responses/response_reasoning_delta_event.py
+++ b/src/openai/types/responses/response_reasoning_delta_event.py
@@ -20,5 +20,8 @@ class ResponseReasoningDeltaEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.reasoning.delta"]
"""The type of the event. Always 'response.reasoning.delta'."""
diff --git a/src/openai/types/responses/response_reasoning_done_event.py b/src/openai/types/responses/response_reasoning_done_event.py
index 8b059f469f..9f8b127d7e 100644
--- a/src/openai/types/responses/response_reasoning_done_event.py
+++ b/src/openai/types/responses/response_reasoning_done_event.py
@@ -17,6 +17,9 @@ class ResponseReasoningDoneEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
text: str
"""The finalized reasoning text."""
diff --git a/src/openai/types/responses/response_reasoning_summary_delta_event.py b/src/openai/types/responses/response_reasoning_summary_delta_event.py
index 1f52d042af..519a4f24ac 100644
--- a/src/openai/types/responses/response_reasoning_summary_delta_event.py
+++ b/src/openai/types/responses/response_reasoning_summary_delta_event.py
@@ -20,6 +20,9 @@ class ResponseReasoningSummaryDeltaEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the output item."""
diff --git a/src/openai/types/responses/response_reasoning_summary_done_event.py b/src/openai/types/responses/response_reasoning_summary_done_event.py
index f3f9f5428c..98bcf9cb9d 100644
--- a/src/openai/types/responses/response_reasoning_summary_done_event.py
+++ b/src/openai/types/responses/response_reasoning_summary_done_event.py
@@ -14,6 +14,9 @@ class ResponseReasoningSummaryDoneEvent(BaseModel):
output_index: int
"""The index of the output item in the response's output array."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the output item."""
diff --git a/src/openai/types/responses/response_reasoning_summary_part_added_event.py b/src/openai/types/responses/response_reasoning_summary_part_added_event.py
index fd11520170..dc755b253a 100644
--- a/src/openai/types/responses/response_reasoning_summary_part_added_event.py
+++ b/src/openai/types/responses/response_reasoning_summary_part_added_event.py
@@ -25,6 +25,9 @@ class ResponseReasoningSummaryPartAddedEvent(BaseModel):
part: Part
"""The summary part that was added."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
diff --git a/src/openai/types/responses/response_reasoning_summary_part_done_event.py b/src/openai/types/responses/response_reasoning_summary_part_done_event.py
index 7f30189a49..7cc0b56d66 100644
--- a/src/openai/types/responses/response_reasoning_summary_part_done_event.py
+++ b/src/openai/types/responses/response_reasoning_summary_part_done_event.py
@@ -25,6 +25,9 @@ class ResponseReasoningSummaryPartDoneEvent(BaseModel):
part: Part
"""The completed summary part."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
diff --git a/src/openai/types/responses/response_reasoning_summary_text_delta_event.py b/src/openai/types/responses/response_reasoning_summary_text_delta_event.py
index 6d0cbd8265..96652991b6 100644
--- a/src/openai/types/responses/response_reasoning_summary_text_delta_event.py
+++ b/src/openai/types/responses/response_reasoning_summary_text_delta_event.py
@@ -17,6 +17,9 @@ class ResponseReasoningSummaryTextDeltaEvent(BaseModel):
output_index: int
"""The index of the output item this summary text delta is associated with."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
diff --git a/src/openai/types/responses/response_reasoning_summary_text_done_event.py b/src/openai/types/responses/response_reasoning_summary_text_done_event.py
index 15b894c75b..b35b82316a 100644
--- a/src/openai/types/responses/response_reasoning_summary_text_done_event.py
+++ b/src/openai/types/responses/response_reasoning_summary_text_done_event.py
@@ -14,6 +14,9 @@ class ResponseReasoningSummaryTextDoneEvent(BaseModel):
output_index: int
"""The index of the output item this summary text is associated with."""
+ sequence_number: int
+ """The sequence number of this event."""
+
summary_index: int
"""The index of the summary part within the reasoning summary."""
diff --git a/src/openai/types/responses/response_refusal_delta_event.py b/src/openai/types/responses/response_refusal_delta_event.py
index 04dcdf1c8c..03c903ed28 100644
--- a/src/openai/types/responses/response_refusal_delta_event.py
+++ b/src/openai/types/responses/response_refusal_delta_event.py
@@ -20,5 +20,8 @@ class ResponseRefusalDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the refusal text is added to."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.refusal.delta"]
"""The type of the event. Always `response.refusal.delta`."""
diff --git a/src/openai/types/responses/response_refusal_done_event.py b/src/openai/types/responses/response_refusal_done_event.py
index a9b6f4b055..61fd51aab0 100644
--- a/src/openai/types/responses/response_refusal_done_event.py
+++ b/src/openai/types/responses/response_refusal_done_event.py
@@ -20,5 +20,8 @@ class ResponseRefusalDoneEvent(BaseModel):
refusal: str
"""The refusal text that is finalized."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.refusal.done"]
"""The type of the event. Always `response.refusal.done`."""
diff --git a/src/openai/types/responses/response_text_annotation_delta_event.py b/src/openai/types/responses/response_text_annotation_delta_event.py
index 4f2582282a..43d70bacac 100644
--- a/src/openai/types/responses/response_text_annotation_delta_event.py
+++ b/src/openai/types/responses/response_text_annotation_delta_event.py
@@ -75,5 +75,8 @@ class ResponseTextAnnotationDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the text annotation was added to."""
+ sequence_number: int
+ """The sequence number of this event."""
+
type: Literal["response.output_text.annotation.added"]
"""The type of the event. Always `response.output_text.annotation.added`."""
diff --git a/src/openai/types/responses/response_text_delta_event.py b/src/openai/types/responses/response_text_delta_event.py
index 751a5e2a19..7e4aec7024 100644
--- a/src/openai/types/responses/response_text_delta_event.py
+++ b/src/openai/types/responses/response_text_delta_event.py
@@ -20,5 +20,8 @@ class ResponseTextDeltaEvent(BaseModel):
output_index: int
"""The index of the output item that the text delta was added to."""
+ sequence_number: int
+ """The sequence number for this event."""
+
type: Literal["response.output_text.delta"]
"""The type of the event. Always `response.output_text.delta`."""
diff --git a/src/openai/types/responses/response_text_done_event.py b/src/openai/types/responses/response_text_done_event.py
index 9b5c5e020c..0d5ed4dd19 100644
--- a/src/openai/types/responses/response_text_done_event.py
+++ b/src/openai/types/responses/response_text_done_event.py
@@ -17,6 +17,9 @@ class ResponseTextDoneEvent(BaseModel):
output_index: int
"""The index of the output item that the text content is finalized."""
+ sequence_number: int
+ """The sequence number for this event."""
+
text: str
"""The text content that is finalized."""
diff --git a/tests/api_resources/containers/__init__.py b/tests/api_resources/containers/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/containers/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/containers/files/__init__.py b/tests/api_resources/containers/files/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/containers/files/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/containers/files/test_content.py b/tests/api_resources/containers/files/test_content.py
new file mode 100644
index 0000000000..470353e18d
--- /dev/null
+++ b/tests/api_resources/containers/files/test_content.py
@@ -0,0 +1,116 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestContent:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ content = client.containers.files.content.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert content is None
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ content = response.parse()
+ assert content is None
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.containers.files.content.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ content = response.parse()
+ assert content is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ client.containers.files.content.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
+
+
+class TestAsyncContent:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ content = await async_client.containers.files.content.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert content is None
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ content = response.parse()
+ assert content is None
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.content.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ content = await response.parse()
+ assert content is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.content.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ await async_client.containers.files.content.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
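
Outside these mocked tests, the typed `retrieve` returns None, so fetching the actual file bytes goes through the raw-response wrapper; a hedged sketch (the `.content` attribute on the legacy raw response is an assumption):

    from openai import OpenAI

    client = OpenAI()

    response = client.containers.files.content.with_raw_response.retrieve(
        file_id="file_id",  # placeholders, as in the tests above
        container_id="container_id",
    )
    data = response.content  # raw bytes, assuming the legacy response exposes `.content`
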
diff --git a/tests/api_resources/containers/test_files.py b/tests/api_resources/containers/test_files.py
new file mode 100644
index 0000000000..6edcc7973a
--- /dev/null
+++ b/tests/api_resources/containers/test_files.py
@@ -0,0 +1,409 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+from openai.types.containers import (
+ FileListResponse,
+ FileCreateResponse,
+ FileRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestFiles:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ file = client.containers.files.create(
+ container_id="container_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ file = client.containers.files.create(
+ container_id="container_id",
+ file=b"raw file contents",
+ file_id="file_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.create(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.create(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.create(
+ container_id="",
+ )
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ file = client.containers.files.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ client.containers.files.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ file = client.containers.files.list(
+ container_id="container_id",
+ )
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ file = client.containers.files.list(
+ container_id="container_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.list(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.list(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert_matches_type(SyncCursorPage[FileListResponse], file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.list(
+ container_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ file = client.containers.files.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert file is None
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert file is None
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.containers.files.with_streaming_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = response.parse()
+ assert file is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ client.containers.files.with_raw_response.delete(
+ file_id="",
+ container_id="container_id",
+ )
+
+
+class TestAsyncFiles:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.create(
+ container_id="container_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.create(
+ container_id="container_id",
+ file=b"raw file contents",
+ file_id="file_id",
+ )
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.create(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.create(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert_matches_type(FileCreateResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.create(
+ container_id="",
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.retrieve(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert_matches_type(FileRetrieveResponse, file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.retrieve(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ await async_client.containers.files.with_raw_response.retrieve(
+ file_id="",
+ container_id="container_id",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.list(
+ container_id="container_id",
+ )
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.list(
+ container_id="container_id",
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.list(
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.list(
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.list(
+ container_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ file = await async_client.containers.files.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+ assert file is None
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ file = response.parse()
+ assert file is None
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.files.with_streaming_response.delete(
+ file_id="file_id",
+ container_id="container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ file = await response.parse()
+ assert file is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.files.with_raw_response.delete(
+ file_id="file_id",
+ container_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
+ await async_client.containers.files.with_raw_response.delete(
+ file_id="",
+ container_id="container_id",
+ )
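
For reviewers, a hedged end-to-end sketch of the container-files surface covered above (create, list, retrieve, delete). All ids and the byte payload are placeholders taken from the test fixtures:

    from openai import OpenAI

    client = OpenAI()

    # Upload raw bytes into an existing container (placeholder id).
    created = client.containers.files.create(
        container_id="container_id",
        file=b"raw file contents",
    )
    print(created)

    # SyncCursorPage supports iteration, so the `after`/`limit` cursor
    # params asserted in the tests are handled automatically here.
    for file in client.containers.files.list(container_id="container_id", order="asc"):
        print(file)

    # Fetch metadata for one file, then remove it; delete returns None.
    meta = client.containers.files.retrieve(
        file_id="file_id",
        container_id="container_id",
    )
    client.containers.files.delete(file_id="file_id", container_id="container_id")

Both `file` and `file_id` appear to be optional on create, which is why the minimal `test_method_create` call sends neither while the `_with_all_params` variant pins down the full signature.
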
diff --git a/tests/api_resources/test_containers.py b/tests/api_resources/test_containers.py
new file mode 100644
index 0000000000..be9787c4d6
--- /dev/null
+++ b/tests/api_resources/test_containers.py
@@ -0,0 +1,333 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types import (
+ ContainerListResponse,
+ ContainerCreateResponse,
+ ContainerRetrieveResponse,
+)
+from openai.pagination import SyncCursorPage, AsyncCursorPage
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestContainers:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ container = client.containers.create(
+ name="name",
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ container = client.containers.create(
+ name="name",
+ expires_after={
+ "anchor": "last_active_at",
+ "minutes": 0,
+ },
+ file_ids=["string"],
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ container = client.containers.retrieve(
+ "container_id",
+ )
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.retrieve(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.retrieve(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ container = client.containers.list()
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ container = client.containers.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert_matches_type(SyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ container = client.containers.delete(
+ "container_id",
+ )
+ assert container is None
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.containers.with_raw_response.delete(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert container is None
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.containers.with_streaming_response.delete(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = response.parse()
+ assert container is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ client.containers.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncContainers:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.create(
+ name="name",
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.create(
+ name="name",
+ expires_after={
+ "anchor": "last_active_at",
+ "minutes": 0,
+ },
+ file_ids=["string"],
+ )
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.create(
+ name="name",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.create(
+ name="name",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert_matches_type(ContainerCreateResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.retrieve(
+ "container_id",
+ )
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.retrieve(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.retrieve(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert_matches_type(ContainerRetrieveResponse, container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.list()
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.list(
+ after="after",
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert_matches_type(AsyncCursorPage[ContainerListResponse], container, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ container = await async_client.containers.delete(
+ "container_id",
+ )
+ assert container is None
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.containers.with_raw_response.delete(
+ "container_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ container = response.parse()
+ assert container is None
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.containers.with_streaming_response.delete(
+ "container_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ container = await response.parse()
+ assert container is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
+ await async_client.containers.with_raw_response.delete(
+ "",
+ )
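
And a minimal lifecycle sketch for the top-level containers resource; the `expires_after` values come straight from the test fixtures and are not recommendations:

    from openai import OpenAI

    client = OpenAI()

    container = client.containers.create(
        name="name",
        expires_after={"anchor": "last_active_at", "minutes": 0},  # fixture values
        file_ids=["string"],  # placeholder file ids
    )

    # list() paginates with a cursor; iterating the page fetches items lazily.
    for c in client.containers.list(order="asc"):
        print(c)

    retrieved = client.containers.retrieve("container_id")  # placeholder id
    client.containers.delete("container_id")  # returns None on success
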
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index d7f72ce50d..0d33de4a15 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -247,6 +247,44 @@ def test_path_params_delete(self, client: OpenAI) -> None:
"",
)
+ @parametrize
+ def test_method_cancel(self, client: OpenAI) -> None:
+ response = client.responses.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+ assert response is None
+
+ @parametrize
+ def test_raw_response_cancel(self, client: OpenAI) -> None:
+ http_response = client.responses.with_raw_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert response is None
+
+ @parametrize
+ def test_streaming_response_cancel(self, client: OpenAI) -> None:
+ with client.responses.with_streaming_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ ) as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = http_response.parse()
+ assert response is None
+
+ assert cast(Any, http_response.is_closed) is True
+
+ @parametrize
+ def test_path_params_cancel(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+ client.responses.with_raw_response.cancel(
+ "",
+ )
+
class TestAsyncResponses:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
@@ -480,3 +518,41 @@ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
await async_client.responses.with_raw_response.delete(
"",
)
+
+ @parametrize
+ async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.responses.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+ assert response is None
+
+ @parametrize
+ async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
+ http_response = await async_client.responses.with_raw_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ )
+
+ assert http_response.is_closed is True
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+ response = http_response.parse()
+ assert response is None
+
+ @parametrize
+ async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.responses.with_streaming_response.cancel(
+ "resp_677efb5139a88190b512bc3fef8e535d",
+ ) as http_response:
+ assert not http_response.is_closed
+ assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ response = await http_response.parse()
+ assert response is None
+
+ assert cast(Any, http_response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"):
+ await async_client.responses.with_raw_response.cancel(
+ "",
+ )