
feat(api): update via SDK Studio #218

Merged (1 commit, Aug 7, 2024)

2 changes: 1 addition & 1 deletion .stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 22
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-34edf740524e434708905ba916368bd4b1b335aa95cc8c26883f25d3dfbdd221.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-9cff8ea13f14bd0899df69243fe78b4f88d4d0172263aa260af1ea66a7d0484e.yml

14 changes: 6 additions & 8 deletions src/prompt_foundry_python_sdk/resources/prompts.py
@@ -230,10 +230,9 @@ def get_parameters(
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ModelParameters:
         """
-        Fetches the configured model parameters and messages rendered with the provided
-        variables mapped to the set LLM provider. This endpoint abstracts the need to
-        handle mapping between different providers, while still allowing direct calls to
-        the providers.
+        Fetches the model configuration parameters for a specified prompt, including
+        penalty settings, response format, and the model messages rendered with the
+        given variables mapped to the set LLM provider.

         Args:
           append_messages: Appended the the end of the configured prompt messages before running the
@@ -478,10 +477,9 @@ async def get_parameters(
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ModelParameters:
         """
-        Fetches the configured model parameters and messages rendered with the provided
-        variables mapped to the set LLM provider. This endpoint abstracts the need to
-        handle mapping between different providers, while still allowing direct calls to
-        the providers.
+        Fetches the model configuration parameters for a specified prompt, including
+        penalty settings, response format, and the model messages rendered with the
+        given variables mapped to the set LLM provider.

         Args:
           append_messages: Appended the the end of the configured prompt messages before running the
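
For orientation (not part of the diff): a minimal sketch of calling the reworded endpoint, using the PromptFoundry client class that appears in the updated tests below. The import path, api_key argument, prompt id, and variables mapping are assumptions for illustration.

# Hypothetical usage sketch; the id, api_key, and variables values are placeholders.
from prompt_foundry_python_sdk import PromptFoundry

client = PromptFoundry(api_key="my-api-key")  # constructor argument assumed

# Returns a ModelParameters object: penalty settings, response format, and the
# prompt's messages rendered with the supplied variables, mapped to the
# configured LLM provider.
model_parameters = client.prompts.get_parameters(
    id="1212121",                 # hypothetical prompt id
    variables={"name": "world"},  # hypothetical template variables
)
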
10 changes: 5 additions & 5 deletions src/prompt_foundry_python_sdk/types/prompt_configuration.py
@@ -98,17 +98,17 @@ class Parameters(BaseModel):
     max_tokens: Optional[float] = FieldInfo(alias="maxTokens", default=None)
     """Example: 100"""

-    name: str
-    """The name of the model for the provider."""
+    api_model_name: str = FieldInfo(alias="modelName")
+    """Example: "gpt-3.5-turbo" """
+
+    api_model_provider: Literal["ANTHROPIC", "OPENAI"] = FieldInfo(alias="modelProvider")
+    """The provider of the provided model."""

     parallel_tool_calls: bool = FieldInfo(alias="parallelToolCalls")

     presence_penalty: float = FieldInfo(alias="presencePenalty")
     """Example: 0"""

-    provider: Literal["ANTHROPIC", "OPENAI"]
-    """The LLM model provider."""
-
     response_format: Literal["JSON", "TEXT"] = FieldInfo(alias="responseFormat")
     """Example: PromptResponseFormat.TEXT"""
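
The rename leans on Pydantic field aliases: the Python attribute changes while the camelCase wire key does not. A self-contained sketch of the same pattern with plain Pydantic (the generated code uses its own FieldInfo helper, so this mirrors rather than reproduces it):

from typing import Literal, Optional

from pydantic import BaseModel, Field


class Parameters(BaseModel):
    # Attribute renamed in this PR; the JSON wire key is still "modelName".
    api_model_name: str = Field(alias="modelName")
    api_model_provider: Literal["ANTHROPIC", "OPENAI"] = Field(alias="modelProvider")
    max_tokens: Optional[float] = Field(alias="maxTokens", default=None)


params = Parameters.model_validate(
    {"modelName": "gpt-3.5-turbo", "modelProvider": "OPENAI"}
)
print(params.api_model_name)             # -> gpt-3.5-turbo
print(params.model_dump(by_alias=True))  # serializes back to the camelCase keys
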
10 changes: 5 additions & 5 deletions src/prompt_foundry_python_sdk/types/prompt_create_params.py
@@ -106,17 +106,17 @@ class Parameters(TypedDict, total=False):
     max_tokens: Required[Annotated[Optional[float], PropertyInfo(alias="maxTokens")]]
     """Example: 100"""

-    name: Required[str]
-    """The name of the model for the provider."""
+    model_name: Required[Annotated[str, PropertyInfo(alias="modelName")]]
+    """Example: "gpt-3.5-turbo" """
+
+    model_provider: Required[Annotated[Literal["ANTHROPIC", "OPENAI"], PropertyInfo(alias="modelProvider")]]
+    """The provider of the provided model."""

     parallel_tool_calls: Required[Annotated[bool, PropertyInfo(alias="parallelToolCalls")]]

     presence_penalty: Required[Annotated[float, PropertyInfo(alias="presencePenalty")]]
     """Example: 0"""

-    provider: Required[Literal["ANTHROPIC", "OPENAI"]]
-    """The LLM model provider."""
-
     response_format: Required[Annotated[Literal["JSON", "TEXT"], PropertyInfo(alias="responseFormat")]]
     """Example: PromptResponseFormat.TEXT"""
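
In practice, a create payload now takes model_provider and model_name. A hypothetical sketch mirroring the placeholder values in the updated tests below; the client import, message shape, and any omitted required fields are assumptions:

from prompt_foundry_python_sdk import PromptFoundry

client = PromptFoundry(api_key="my-api-key")  # constructor argument assumed

prompt = client.prompts.create(
    name="name",
    messages=[{"content": "Hello, world", "role": "user"}],  # message shape assumed
    parameters={
        "model_provider": "ANTHROPIC",  # previously "provider"
        "model_name": "modelName",      # previously "name"
        "response_format": "JSON",
        "temperature": 0,
        "top_p": 0,
        "max_tokens": 100,
        "parallel_tool_calls": False,
        "presence_penalty": 0,
    },
)
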
10 changes: 5 additions & 5 deletions src/prompt_foundry_python_sdk/types/prompt_update_params.py
@@ -106,17 +106,17 @@ class Parameters(TypedDict, total=False):
     max_tokens: Required[Annotated[Optional[float], PropertyInfo(alias="maxTokens")]]
     """Example: 100"""

-    name: Required[str]
-    """The name of the model for the provider."""
+    model_name: Required[Annotated[str, PropertyInfo(alias="modelName")]]
+    """Example: "gpt-3.5-turbo" """
+
+    model_provider: Required[Annotated[Literal["ANTHROPIC", "OPENAI"], PropertyInfo(alias="modelProvider")]]
+    """The provider of the provided model."""

     parallel_tool_calls: Required[Annotated[bool, PropertyInfo(alias="parallelToolCalls")]]

     presence_penalty: Required[Annotated[float, PropertyInfo(alias="presencePenalty")]]
     """Example: 0"""

-    provider: Required[Literal["ANTHROPIC", "OPENAI"]]
-    """The LLM model provider."""
-
     response_format: Required[Annotated[Literal["JSON", "TEXT"], PropertyInfo(alias="responseFormat")]]
     """Example: PromptResponseFormat.TEXT"""
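
prompt_update_params.py carries the identical rename, so updates send the same keys. A hypothetical async variant using the AsyncPromptFoundry client exercised in the tests below; the same caveats about assumed shapes apply:

import asyncio

from prompt_foundry_python_sdk import AsyncPromptFoundry


async def main() -> None:
    client = AsyncPromptFoundry(api_key="my-api-key")  # constructor argument assumed
    await client.prompts.update(
        id="1212121",  # hypothetical prompt id
        name="name",
        messages=[{"content": "Hello, world", "role": "user"}],  # message shape assumed
        parameters={
            "model_provider": "ANTHROPIC",
            "model_name": "modelName",
            "response_format": "JSON",
            "temperature": 0,
            "top_p": 0,
            "max_tokens": 100,
            "parallel_tool_calls": False,
            "presence_penalty": 0,
        },
    )


asyncio.run(main())
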
56 changes: 28 additions & 28 deletions tests/api_resources/test_prompts.py
@@ -83,8 +83,8 @@ def test_method_create(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -162,8 +162,8 @@ def test_raw_response_create(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -245,8 +245,8 @@ def test_streaming_response_create(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -331,8 +331,8 @@ def test_method_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -411,8 +411,8 @@ def test_raw_response_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -495,8 +495,8 @@ def test_streaming_response_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -582,8 +582,8 @@ def test_path_params_update(self, client: PromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -918,8 +918,8 @@ async def test_method_create(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -997,8 +997,8 @@ async def test_raw_response_create(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1080,8 +1080,8 @@ async def test_streaming_response_create(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1166,8 +1166,8 @@ async def test_method_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1246,8 +1246,8 @@ async def test_raw_response_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1330,8 +1330,8 @@ async def test_streaming_response_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,
@@ -1417,8 +1417,8 @@ async def test_path_params_update(self, async_client: AsyncPromptFoundry) -> None:
             ],
             name="name",
             parameters={
-                "provider": "ANTHROPIC",
-                "name": "name",
+                "model_provider": "ANTHROPIC",
+                "model_name": "modelName",
                 "response_format": "JSON",
                 "temperature": 0,
                 "top_p": 0,