Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 8640fd8

Browse files
fix(api): add missing reasoning effort + model enums (openai#2096)
1 parent b99c35c commit 8640fd8
Copy full SHA for 8640fd8

File tree

Expand file tree / Collapse file tree

12 files changed

+268
-30
lines changed
Filter options
Expand file tree / Collapse file tree

12 files changed

+268
-30
lines changed

‎.stats.yml

Copy file name to clipboard
+1 −1 — Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 69
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-fc5dbc19505b0035f9e7f88868619f4fb519b048bde011f6154f3132d4be71fb.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7c699d4503077d06a4a44f52c0c1f902d19a87c766b8be75b97c8dfd484ad4aa.yml

‎src/openai/resources/beta/assistants.py

Copy file name to clipboardExpand all lines: src/openai/resources/beta/assistants.py
+104 −2 — Lines changed: 104 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@ def create(
6161
instructions: Optional[str] | NotGiven = NOT_GIVEN,
6262
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
6363
name: Optional[str] | NotGiven = NOT_GIVEN,
64+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
6465
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
6566
temperature: Optional[float] | NotGiven = NOT_GIVEN,
6667
tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -97,6 +98,13 @@ def create(
9798
9899
name: The name of the assistant. The maximum length is 256 characters.
99100
101+
reasoning_effort: **o1 and o3-mini models only**
102+
103+
Constrains effort on reasoning for
104+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
105+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
106+
result in faster responses and fewer tokens used on reasoning in a response.
107+
100108
response_format: Specifies the format that the model must output. Compatible with
101109
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
102110
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -155,6 +163,7 @@ def create(
155163
"instructions": instructions,
156164
"metadata": metadata,
157165
"name": name,
166+
"reasoning_effort": reasoning_effort,
158167
"response_format": response_format,
159168
"temperature": temperature,
160169
"tool_resources": tool_resources,
@@ -210,8 +219,42 @@ def update(
210219
description: Optional[str] | NotGiven = NOT_GIVEN,
211220
instructions: Optional[str] | NotGiven = NOT_GIVEN,
212221
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
213-
model: str | NotGiven = NOT_GIVEN,
222+
model: Union[
223+
str,
224+
Literal[
225+
"o3-mini",
226+
"o3-mini-2025-01-31",
227+
"o1",
228+
"o1-2024-12-17",
229+
"gpt-4o",
230+
"gpt-4o-2024-11-20",
231+
"gpt-4o-2024-08-06",
232+
"gpt-4o-2024-05-13",
233+
"gpt-4o-mini",
234+
"gpt-4o-mini-2024-07-18",
235+
"gpt-4-turbo",
236+
"gpt-4-turbo-2024-04-09",
237+
"gpt-4-0125-preview",
238+
"gpt-4-turbo-preview",
239+
"gpt-4-1106-preview",
240+
"gpt-4-vision-preview",
241+
"gpt-4",
242+
"gpt-4-0314",
243+
"gpt-4-0613",
244+
"gpt-4-32k",
245+
"gpt-4-32k-0314",
246+
"gpt-4-32k-0613",
247+
"gpt-3.5-turbo",
248+
"gpt-3.5-turbo-16k",
249+
"gpt-3.5-turbo-0613",
250+
"gpt-3.5-turbo-1106",
251+
"gpt-3.5-turbo-0125",
252+
"gpt-3.5-turbo-16k-0613",
253+
],
254+
]
255+
| NotGiven = NOT_GIVEN,
214256
name: Optional[str] | NotGiven = NOT_GIVEN,
257+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
215258
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
216259
temperature: Optional[float] | NotGiven = NOT_GIVEN,
217260
tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -249,6 +292,13 @@ def update(
249292
250293
name: The name of the assistant. The maximum length is 256 characters.
251294
295+
reasoning_effort: **o1 and o3-mini models only**
296+
297+
Constrains effort on reasoning for
298+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
299+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
300+
result in faster responses and fewer tokens used on reasoning in a response.
301+
252302
response_format: Specifies the format that the model must output. Compatible with
253303
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
254304
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -309,6 +359,7 @@ def update(
309359
"metadata": metadata,
310360
"model": model,
311361
"name": name,
362+
"reasoning_effort": reasoning_effort,
312363
"response_format": response_format,
313364
"temperature": temperature,
314365
"tool_resources": tool_resources,
@@ -451,6 +502,7 @@ async def create(
451502
instructions: Optional[str] | NotGiven = NOT_GIVEN,
452503
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
453504
name: Optional[str] | NotGiven = NOT_GIVEN,
505+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
454506
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
455507
temperature: Optional[float] | NotGiven = NOT_GIVEN,
456508
tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -487,6 +539,13 @@ async def create(
487539
488540
name: The name of the assistant. The maximum length is 256 characters.
489541
542+
reasoning_effort: **o1 and o3-mini models only**
543+
544+
Constrains effort on reasoning for
545+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
546+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
547+
result in faster responses and fewer tokens used on reasoning in a response.
548+
490549
response_format: Specifies the format that the model must output. Compatible with
491550
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
492551
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -545,6 +604,7 @@ async def create(
545604
"instructions": instructions,
546605
"metadata": metadata,
547606
"name": name,
607+
"reasoning_effort": reasoning_effort,
548608
"response_format": response_format,
549609
"temperature": temperature,
550610
"tool_resources": tool_resources,
@@ -600,8 +660,42 @@ async def update(
600660
description: Optional[str] | NotGiven = NOT_GIVEN,
601661
instructions: Optional[str] | NotGiven = NOT_GIVEN,
602662
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
603-
model: str | NotGiven = NOT_GIVEN,
663+
model: Union[
664+
str,
665+
Literal[
666+
"o3-mini",
667+
"o3-mini-2025-01-31",
668+
"o1",
669+
"o1-2024-12-17",
670+
"gpt-4o",
671+
"gpt-4o-2024-11-20",
672+
"gpt-4o-2024-08-06",
673+
"gpt-4o-2024-05-13",
674+
"gpt-4o-mini",
675+
"gpt-4o-mini-2024-07-18",
676+
"gpt-4-turbo",
677+
"gpt-4-turbo-2024-04-09",
678+
"gpt-4-0125-preview",
679+
"gpt-4-turbo-preview",
680+
"gpt-4-1106-preview",
681+
"gpt-4-vision-preview",
682+
"gpt-4",
683+
"gpt-4-0314",
684+
"gpt-4-0613",
685+
"gpt-4-32k",
686+
"gpt-4-32k-0314",
687+
"gpt-4-32k-0613",
688+
"gpt-3.5-turbo",
689+
"gpt-3.5-turbo-16k",
690+
"gpt-3.5-turbo-0613",
691+
"gpt-3.5-turbo-1106",
692+
"gpt-3.5-turbo-0125",
693+
"gpt-3.5-turbo-16k-0613",
694+
],
695+
]
696+
| NotGiven = NOT_GIVEN,
604697
name: Optional[str] | NotGiven = NOT_GIVEN,
698+
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
605699
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
606700
temperature: Optional[float] | NotGiven = NOT_GIVEN,
607701
tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -639,6 +733,13 @@ async def update(
639733
640734
name: The name of the assistant. The maximum length is 256 characters.
641735
736+
reasoning_effort: **o1 and o3-mini models only**
737+
738+
Constrains effort on reasoning for
739+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
740+
supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
741+
result in faster responses and fewer tokens used on reasoning in a response.
742+
642743
response_format: Specifies the format that the model must output. Compatible with
643744
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
644745
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
@@ -699,6 +800,7 @@ async def update(
699800
"metadata": metadata,
700801
"model": model,
701802
"name": name,
803+
"reasoning_effort": reasoning_effort,
702804
"response_format": response_format,
703805
"temperature": temperature,
704806
"tool_resources": tool_resources,

‎src/openai/resources/beta/chat/completions.py

Copy file name to clipboardExpand all lines: src/openai/resources/beta/chat/completions.py
+4 −4 — Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ def parse(
8383
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
8484
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
8585
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
86-
reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
86+
reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
8787
seed: Optional[int] | NotGiven = NOT_GIVEN,
8888
service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
8989
stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
@@ -228,7 +228,7 @@ def stream(
228228
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
229229
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
230230
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
231-
reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
231+
reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
232232
seed: Optional[int] | NotGiven = NOT_GIVEN,
233233
service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
234234
stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
@@ -358,7 +358,7 @@ async def parse(
358358
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
359359
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
360360
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
361-
reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
361+
reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
362362
seed: Optional[int] | NotGiven = NOT_GIVEN,
363363
service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
364364
stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
@@ -503,7 +503,7 @@ def stream(
503503
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
504504
prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
505505
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
506-
reasoning_effort: ChatCompletionReasoningEffort | NotGiven = NOT_GIVEN,
506+
reasoning_effort: Optional[ChatCompletionReasoningEffort] | NotGiven = NOT_GIVEN,
507507
seed: Optional[int] | NotGiven = NOT_GIVEN,
508508
service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
509509
stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.