
Commit ec9aa5a

feat(api): OpenAPI spec update via Stainless API (#219)

1 parent ff2d1b6 · commit ec9aa5a

6 files changed: +52 additions, -50 deletions


.stats.yml

Lines changed: 1 addition & 1 deletion

@@ -1,2 +1,2 @@
 configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-9cff8ea13f14bd0899df69243fe78b4f88d4d0172263aa260af1ea66a7d0484e.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-d853690356bd7363560a181b7acd421d0fbc1b95800423a0382b2c248edaf87b.yml

src/prompt_foundry_python_sdk/resources/prompts.py

Lines changed: 8 additions & 6 deletions

@@ -230,9 +230,10 @@ def get_parameters(
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ModelParameters:
         """
-        Fetches the model configuration parameters for a specified prompt, including
-        penalty settings, response format, and the model messages rendered with the
-        given variables mapped to the set LLM provider.
+        Fetches the configured model parameters and messages rendered with the provided
+        variables mapped to the set LLM provider. This endpoint abstracts the need to
+        handle mapping between different providers, while still allowing direct calls to
+        the providers.

         Args:
           append_messages: Appended the the end of the configured prompt messages before running the
@@ -477,9 +478,10 @@ async def get_parameters(
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ModelParameters:
         """
-        Fetches the model configuration parameters for a specified prompt, including
-        penalty settings, response format, and the model messages rendered with the
-        given variables mapped to the set LLM provider.
+        Fetches the configured model parameters and messages rendered with the provided
+        variables mapped to the set LLM provider. This endpoint abstracts the need to
+        handle mapping between different providers, while still allowing direct calls to
+        the providers.

         Args:
           append_messages: Appended the the end of the configured prompt messages before running the
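
For orientation, a minimal calling sketch for this endpoint. Only client.prompts.get_parameters and its ModelParameters return type are taken from the diff above; the client construction, the prompt ID, and the variables keyword are assumptions about the surrounding SDK.

# Minimal calling sketch. Assumptions: the PromptFoundry(api_key=...) constructor,
# the "1212121" prompt ID, and the variables= keyword; only get_parameters()
# returning ModelParameters is confirmed by the diff above.
from prompt_foundry_python_sdk import PromptFoundry

client = PromptFoundry(api_key="my-api-key")

# Render the configured prompt for whichever provider is set on it; the SDK
# performs the provider-specific mapping described in the new docstring.
model_parameters = client.prompts.get_parameters(
    "1212121",
    variables={"customer_name": "Ada"},
)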

src/prompt_foundry_python_sdk/types/prompt_configuration.py

Lines changed: 5 additions & 5 deletions

@@ -98,17 +98,17 @@ class Parameters(BaseModel):
     max_tokens: Optional[float] = FieldInfo(alias="maxTokens", default=None)
     """Example: 100"""

-    api_model_name: str = FieldInfo(alias="modelName")
-    """Example: "gpt-3.5-turbo" """
-
-    api_model_provider: Literal["ANTHROPIC", "OPENAI"] = FieldInfo(alias="modelProvider")
-    """The provider of the provided model."""
+    name: str
+    """The name of the model for the provider."""

     parallel_tool_calls: bool = FieldInfo(alias="parallelToolCalls")

     presence_penalty: float = FieldInfo(alias="presencePenalty")
     """Example: 0"""

+    provider: Literal["ANTHROPIC", "OPENAI"]
+    """The LLM model provider."""
+
     response_format: Literal["JSON", "TEXT"] = FieldInfo(alias="responseFormat")
     """Example: PromptResponseFormat.TEXT"""
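
For readers unfamiliar with the alias mechanics above: the attribute name is what Python code reads, while the alias is the camelCase wire name. A self-contained toy sketch of the same pattern follows; it is not the SDK's actual class, and it uses plain pydantic Field rather than the generated code's FieldInfo import.

from typing import Literal, Optional

from pydantic import BaseModel, Field

# Toy stand-in for the generated Parameters model (not the SDK class).
class ToyParameters(BaseModel):
    # Aliased field: "maxTokens" on the wire, max_tokens in Python.
    max_tokens: Optional[float] = Field(alias="maxTokens", default=None)
    # The renamed fields need no alias: wire name and attribute now match.
    # (pydantic reserves the model_ attribute prefix, which is likely why the
    # old attribute was api_model_name rather than model_name.)
    name: str
    provider: Literal["ANTHROPIC", "OPENAI"]

payload = {"maxTokens": 100, "name": "gpt-3.5-turbo", "provider": "OPENAI"}
params = ToyParameters.model_validate(payload)
print(params.name, params.provider)  # -> gpt-3.5-turbo OPENAI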

src/prompt_foundry_python_sdk/types/prompt_create_params.py

Lines changed: 5 additions & 5 deletions

@@ -106,17 +106,17 @@ class Parameters(TypedDict, total=False):
     max_tokens: Required[Annotated[Optional[float], PropertyInfo(alias="maxTokens")]]
     """Example: 100"""

-    model_name: Required[Annotated[str, PropertyInfo(alias="modelName")]]
-    """Example: "gpt-3.5-turbo" """
-
-    model_provider: Required[Annotated[Literal["ANTHROPIC", "OPENAI"], PropertyInfo(alias="modelProvider")]]
-    """The provider of the provided model."""
+    name: Required[str]
+    """The name of the model for the provider."""

     parallel_tool_calls: Required[Annotated[bool, PropertyInfo(alias="parallelToolCalls")]]

     presence_penalty: Required[Annotated[float, PropertyInfo(alias="presencePenalty")]]
     """Example: 0"""

+    provider: Required[Literal["ANTHROPIC", "OPENAI"]]
+    """The LLM model provider."""
+
     response_format: Required[Annotated[Literal["JSON", "TEXT"], PropertyInfo(alias="responseFormat")]]
     """Example: PromptResponseFormat.TEXT"""
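
On the request side, callers now pass provider and name in the parameters dict. A sketch of a create call with the new keys; it assumes the client from the earlier sketch, the values mirror the updated test fixtures further down, the messages list is abbreviated, and keys outside this hunk's context (e.g. tools) are omitted.

# Request-side sketch with the renamed keys; assumes `client` from the earlier
# sketch. Values mirror the updated test fixtures below; keys not visible in
# this diff's context are omitted for brevity.
prompt = client.prompts.create(
    messages=[],        # abbreviated; real calls pass configured messages
    name="my-prompt",   # hypothetical prompt name
    parameters={
        "provider": "ANTHROPIC",    # was "model_provider"
        "name": "name",             # was "model_name"
        "max_tokens": 100,
        "parallel_tool_calls": False,
        "presence_penalty": 0,
        "response_format": "JSON",
        "temperature": 0,
        "top_p": 0,
    },
)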

src/prompt_foundry_python_sdk/types/prompt_update_params.py

Lines changed: 5 additions & 5 deletions

@@ -106,17 +106,17 @@ class Parameters(TypedDict, total=False):
     max_tokens: Required[Annotated[Optional[float], PropertyInfo(alias="maxTokens")]]
     """Example: 100"""

-    model_name: Required[Annotated[str, PropertyInfo(alias="modelName")]]
-    """Example: "gpt-3.5-turbo" """
-
-    model_provider: Required[Annotated[Literal["ANTHROPIC", "OPENAI"], PropertyInfo(alias="modelProvider")]]
-    """The provider of the provided model."""
+    name: Required[str]
+    """The name of the model for the provider."""

     parallel_tool_calls: Required[Annotated[bool, PropertyInfo(alias="parallelToolCalls")]]

     presence_penalty: Required[Annotated[float, PropertyInfo(alias="presencePenalty")]]
     """Example: 0"""

+    provider: Required[Literal["ANTHROPIC", "OPENAI"]]
+    """The LLM model provider."""
+
     response_format: Required[Annotated[Literal["JSON", "TEXT"], PropertyInfo(alias="responseFormat")]]
     """Example: PromptResponseFormat.TEXT"""

tests/api_resources/test_prompts.py

Lines changed: 28 additions & 28 deletions

@@ -83,8 +83,8 @@ def test_method_create(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -162,8 +162,8 @@ def test_raw_response_create(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -245,8 +245,8 @@ def test_streaming_response_create(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -331,8 +331,8 @@ def test_method_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -411,8 +411,8 @@ def test_raw_response_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -495,8 +495,8 @@ def test_streaming_response_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -582,8 +582,8 @@ def test_path_params_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -918,8 +918,8 @@ async def test_method_create(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -997,8 +997,8 @@ async def test_raw_response_create(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1080,8 +1080,8 @@ async def test_streaming_response_create(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1166,8 +1166,8 @@ async def test_method_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1246,8 +1246,8 @@ async def test_raw_response_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1330,8 +1330,8 @@ async def test_streaming_response_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1417,8 +1417,8 @@ async def test_path_params_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "model_provider": "ANTHROPIC",
-                "model_name": "modelName",
+                "provider": "ANTHROPIC",
+                "name": "name",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
