Skip to content

Commit e689216

Browse files
release: 1.90.0 (#2420)
* feat(api): make model and inputs not required to create response
* release: 1.90.0

---------

Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
1 parent ca9363d commit e689216

File tree

8 files changed

+165
-189
lines changed

8 files changed

+165
-189
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "1.89.0"
2+
".": "1.90.0"
33
}

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 111
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9e41d2d5471d2c28bff0d616f4476f5b0e6c541ef4cb51bdaaef5fdf5e13c8b2.yml
3-
openapi_spec_hash: 86f765e18d00e32cf2ce9db7ab84d946
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f411a68f272b8be0ab0c266043da33228687b9b2d76896724e3cef797de9563d.yml
3+
openapi_spec_hash: 89bf866ea95ecfb3d76c8833237047d6
44
config_hash: dc5515e257676a27cb1ace1784aa92b3

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,13 @@
11
# Changelog
22

3+
## 1.90.0 (2025-06-20)
4+
5+
Full Changelog: [v1.89.0...v1.90.0](https://github.com/openai/openai-python/compare/v1.89.0...v1.90.0)
6+
7+
### Features
8+
9+
* **api:** make model and inputs not required to create response ([11bd62e](https://github.com/openai/openai-python/commit/11bd62eb7e46eec748edaf2e0cecf253ffc1202c))
10+
311
## 1.89.0 (2025-06-20)
412

513
Full Changelog: [v1.88.0...v1.89.0](https://github.com/openai/openai-python/compare/v1.88.0...v1.89.0)

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "1.89.0"
3+
version = "1.90.0"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"

src/openai/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "1.89.0" # x-release-please-version
4+
__version__ = "1.90.0" # x-release-please-version

src/openai/resources/responses/responses.py

Lines changed: 117 additions & 119 deletions
Large diffs are not rendered by default.

src/openai/types/responses/response_create_params.py

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -26,27 +26,6 @@
2626

2727

2828
class ResponseCreateParamsBase(TypedDict, total=False):
29-
input: Required[Union[str, ResponseInputParam]]
30-
"""Text, image, or file inputs to the model, used to generate a response.
31-
32-
Learn more:
33-
34-
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
35-
- [Image inputs](https://platform.openai.com/docs/guides/images)
36-
- [File inputs](https://platform.openai.com/docs/guides/pdf-files)
37-
- [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
38-
- [Function calling](https://platform.openai.com/docs/guides/function-calling)
39-
"""
40-
41-
model: Required[ResponsesModel]
42-
"""Model ID used to generate the response, like `gpt-4o` or `o3`.
43-
44-
OpenAI offers a wide range of models with different capabilities, performance
45-
characteristics, and price points. Refer to the
46-
[model guide](https://platform.openai.com/docs/models) to browse and compare
47-
available models.
48-
"""
49-
5029
background: Optional[bool]
5130
"""Whether to run the model response in the background.
5231
@@ -72,6 +51,18 @@ class ResponseCreateParamsBase(TypedDict, total=False):
7251
in code interpreter tool call items.
7352
"""
7453

54+
input: Union[str, ResponseInputParam]
55+
"""Text, image, or file inputs to the model, used to generate a response.
56+
57+
Learn more:
58+
59+
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
60+
- [Image inputs](https://platform.openai.com/docs/guides/images)
61+
- [File inputs](https://platform.openai.com/docs/guides/pdf-files)
62+
- [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
63+
- [Function calling](https://platform.openai.com/docs/guides/function-calling)
64+
"""
65+
7566
instructions: Optional[str]
7667
"""A system (or developer) message inserted into the model's context.
7768
@@ -97,6 +88,15 @@ class ResponseCreateParamsBase(TypedDict, total=False):
9788
a maximum length of 512 characters.
9889
"""
9990

91+
model: ResponsesModel
92+
"""Model ID used to generate the response, like `gpt-4o` or `o3`.
93+
94+
OpenAI offers a wide range of models with different capabilities, performance
95+
characteristics, and price points. Refer to the
96+
[model guide](https://platform.openai.com/docs/models) to browse and compare
97+
available models.
98+
"""
99+
100100
parallel_tool_calls: Optional[bool]
101101
"""Whether to allow the model to run tool calls in parallel."""
102102

tests/api_resources/test_responses.py

Lines changed: 14 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -21,22 +21,19 @@ class TestResponses:
2121

2222
@parametrize
2323
def test_method_create_overload_1(self, client: OpenAI) -> None:
24-
response = client.responses.create(
25-
input="string",
26-
model="gpt-4o",
27-
)
24+
response = client.responses.create()
2825
assert_matches_type(Response, response, path=["response"])
2926

3027
@parametrize
3128
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
3229
response = client.responses.create(
33-
input="string",
34-
model="gpt-4o",
3530
background=True,
3631
include=["file_search_call.results"],
32+
input="string",
3733
instructions="instructions",
3834
max_output_tokens=0,
3935
metadata={"foo": "string"},
36+
model="gpt-4o",
4037
parallel_tool_calls=True,
4138
previous_response_id="previous_response_id",
4239
prompt={
@@ -72,10 +69,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
7269

7370
@parametrize
7471
def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
75-
http_response = client.responses.with_raw_response.create(
76-
input="string",
77-
model="gpt-4o",
78-
)
72+
http_response = client.responses.with_raw_response.create()
7973

8074
assert http_response.is_closed is True
8175
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -84,10 +78,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
8478

8579
@parametrize
8680
def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
87-
with client.responses.with_streaming_response.create(
88-
input="string",
89-
model="gpt-4o",
90-
) as http_response:
81+
with client.responses.with_streaming_response.create() as http_response:
9182
assert not http_response.is_closed
9283
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
9384

@@ -99,23 +90,21 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
9990
@parametrize
10091
def test_method_create_overload_2(self, client: OpenAI) -> None:
10192
response_stream = client.responses.create(
102-
input="string",
103-
model="gpt-4o",
10493
stream=True,
10594
)
10695
response_stream.response.close()
10796

10897
@parametrize
10998
def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
11099
response_stream = client.responses.create(
111-
input="string",
112-
model="gpt-4o",
113100
stream=True,
114101
background=True,
115102
include=["file_search_call.results"],
103+
input="string",
116104
instructions="instructions",
117105
max_output_tokens=0,
118106
metadata={"foo": "string"},
107+
model="gpt-4o",
119108
parallel_tool_calls=True,
120109
previous_response_id="previous_response_id",
121110
prompt={
@@ -151,8 +140,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
151140
@parametrize
152141
def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
153142
response = client.responses.with_raw_response.create(
154-
input="string",
155-
model="gpt-4o",
156143
stream=True,
157144
)
158145

@@ -163,8 +150,6 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
163150
@parametrize
164151
def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
165152
with client.responses.with_streaming_response.create(
166-
input="string",
167-
model="gpt-4o",
168153
stream=True,
169154
) as response:
170155
assert not response.is_closed
@@ -358,22 +343,19 @@ class TestAsyncResponses:
358343

359344
@parametrize
360345
async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
361-
response = await async_client.responses.create(
362-
input="string",
363-
model="gpt-4o",
364-
)
346+
response = await async_client.responses.create()
365347
assert_matches_type(Response, response, path=["response"])
366348

367349
@parametrize
368350
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
369351
response = await async_client.responses.create(
370-
input="string",
371-
model="gpt-4o",
372352
background=True,
373353
include=["file_search_call.results"],
354+
input="string",
374355
instructions="instructions",
375356
max_output_tokens=0,
376357
metadata={"foo": "string"},
358+
model="gpt-4o",
377359
parallel_tool_calls=True,
378360
previous_response_id="previous_response_id",
379361
prompt={
@@ -409,10 +391,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
409391

410392
@parametrize
411393
async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
412-
http_response = await async_client.responses.with_raw_response.create(
413-
input="string",
414-
model="gpt-4o",
415-
)
394+
http_response = await async_client.responses.with_raw_response.create()
416395

417396
assert http_response.is_closed is True
418397
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -421,10 +400,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
421400

422401
@parametrize
423402
async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
424-
async with async_client.responses.with_streaming_response.create(
425-
input="string",
426-
model="gpt-4o",
427-
) as http_response:
403+
async with async_client.responses.with_streaming_response.create() as http_response:
428404
assert not http_response.is_closed
429405
assert http_response.http_request.headers.get("X-Stainless-Lang") == "python"
430406

@@ -436,23 +412,21 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
436412
@parametrize
437413
async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
438414
response_stream = await async_client.responses.create(
439-
input="string",
440-
model="gpt-4o",
441415
stream=True,
442416
)
443417
await response_stream.response.aclose()
444418

445419
@parametrize
446420
async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
447421
response_stream = await async_client.responses.create(
448-
input="string",
449-
model="gpt-4o",
450422
stream=True,
451423
background=True,
452424
include=["file_search_call.results"],
425+
input="string",
453426
instructions="instructions",
454427
max_output_tokens=0,
455428
metadata={"foo": "string"},
429+
model="gpt-4o",
456430
parallel_tool_calls=True,
457431
previous_response_id="previous_response_id",
458432
prompt={
@@ -488,8 +462,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
488462
@parametrize
489463
async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
490464
response = await async_client.responses.with_raw_response.create(
491-
input="string",
492-
model="gpt-4o",
493465
stream=True,
494466
)
495467

@@ -500,8 +472,6 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
500472
@parametrize
501473
async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
502474
async with async_client.responses.with_streaming_response.create(
503-
input="string",
504-
model="gpt-4o",
505475
stream=True,
506476
) as response:
507477
assert not response.is_closed

0 commit comments

Comments (0)