Skip to content

Commit 346ab1b

Browse files
lievan
authored
fix(llmobs): fix parsing for NotGiven in beta oai clients (DataDog#13315)
ddtrace is throwing when users use `client.beta.chat.completions.stream`. This is because the beta client passes `NOT_GIVEN` explicitly as a keyword argument to an endpoint that we do support tracing for, and when we extract it out we attempt to call `get` on it. We don't usually bump into this issue since `NOT_GIVEN` is never passed in as an explicit keyword argument by an actual library user The fix accounts for the `NOT_GIVEN` cause and treats it the same as if `stream_options` was `None`. We also add a regression test to make sure `client.beta.chat.completions.stream` does not throw Previous error ``` packages/ddtrace/contrib/internal/openai/_endpoint_hooks.py, line 78, in handle_request         self._record_request(pin, integration, instance, span, args, kwargs)     File /deps/pysetup/.venv/lib/python3.12/site-packages/ddtrace/contrib/internal/openai/_endpoint_hooks.py, line 252, in _record_request         if kwargs.get("stream_options", {}).get("include_usage", None) is not None: AttributeError: 'NotGiven' object has no attribute 'get' ``` ## Checklist - [x] PR author has checked that all the criteria below are met - The PR description includes an overview of the change - The PR description articulates the motivation for the change - The change includes tests OR the PR description describes a testing strategy - The PR description notes risks associated with the change, if any - Newly-added code is easy to change - The change follows the [library release note guidelines](https://ddtrace.readthedocs.io/en/stable/releasenotes.html) - The change includes or references documentation updates if necessary - Backport labels are set (if [applicable](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting)) ## Reviewer Checklist - [x] Reviewer has checked that all the criteria below are met - Title is accurate - All changes are related to the pull request's stated goal - Avoids breaking 
[API](https://ddtrace.readthedocs.io/en/stable/versioning.html#interfaces) changes - Testing strategy adequately addresses listed risks - Newly-added code is easy to change - Release note makes sense to a user of the library - If necessary, author has acknowledged and discussed the performance implications of this PR as reported in the benchmarks PR comment - Backport labels are set in a manner that is consistent with the [release branch maintenance policy](https://ddtrace.readthedocs.io/en/latest/contributing.html#backporting) Co-authored-by: lievan <[email protected]>
1 parent 26a6d09 commit 346ab1b

File tree

3 files changed

+44
-2
lines changed

3 files changed

+44
-2
lines changed

ddtrace/contrib/internal/openai/_endpoint_hooks.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -249,11 +249,13 @@ def _record_request(self, pin, integration, instance, span, args, kwargs):
249249
span.set_tag_str("openai.request.messages.%d.role" % idx, str(role))
250250
span.set_tag_str("openai.request.messages.%d.name" % idx, str(name))
251251
if parse_version(OPENAI_VERSION) >= (1, 26) and kwargs.get("stream"):
252-
if kwargs.get("stream_options", {}).get("include_usage", None) is not None:
252+
stream_options = kwargs.get("stream_options", {})
253+
if not isinstance(stream_options, dict):
254+
stream_options = {}
255+
if stream_options.get("include_usage", None) is not None:
253256
# Only perform token chunk auto-extraction if this option is not explicitly set
254257
return
255258
span._set_ctx_item("_dd.auto_extract_token_chunk", True)
256-
stream_options = kwargs.get("stream_options", {})
257259
stream_options["include_usage"] = True
258260
kwargs["stream_options"] = stream_options
259261

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
---
2+
fixes:
3+
- |
4+
LLM Observability: This fix resolves an issue where using `client.beta.chat.completions.stream` with LLM Observability enabled caused an attribute error.
5+
- |
6+
openai: This fix resolves an issue where using `client.beta.chat.completions.stream` with openai patching caused an attribute error.
7+

tests/contrib/openai/test_openai_llmobs.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -387,6 +387,39 @@ def test_chat_completion_stream_tokens(self, openai, ddtrace_global_config, mock
387387
)
388388
)
389389

390+
@pytest.mark.skipif(
391+
parse_version(openai_module.version.VERSION) < (1, 40, 0),
392+
reason="`client.beta.chat.completions.stream` available in 1.40.0+",
393+
)
394+
def test_chat_completion_stream_tokens_beta(self, openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer):
395+
"""Assert that streamed token chunk extraction logic works when options are not explicitly passed from user."""
396+
with get_openai_vcr(subdirectory_name="v1").use_cassette("chat_completion_streamed_tokens.yaml"):
397+
model = "gpt-3.5-turbo"
398+
resp_model = model
399+
input_messages = [{"role": "user", "content": "Who won the world series in 2020?"}]
400+
expected_completion = "The Los Angeles Dodgers won the World Series in 2020."
401+
client = openai.OpenAI()
402+
with client.beta.chat.completions.stream(model=model, messages=input_messages) as stream:
403+
for chunk in stream:
404+
if hasattr(chunk, "chunk") and hasattr(chunk.chunk, "model"):
405+
resp_model = chunk.chunk.model
406+
span = mock_tracer.pop_traces()[0][0]
407+
assert mock_llmobs_writer.enqueue.call_count == 1
408+
llmobs_span_event = mock_llmobs_writer.enqueue.call_args_list[0][0][0]
409+
assert llmobs_span_event["meta"]["metadata"]["stream_options"]["include_usage"] is True
410+
mock_llmobs_writer.enqueue.assert_called_with(
411+
_expected_llmobs_llm_span_event(
412+
span,
413+
model_name=resp_model,
414+
model_provider="openai",
415+
input_messages=input_messages,
416+
output_messages=[{"content": expected_completion, "role": "assistant"}],
417+
metadata=mock.ANY,
418+
token_metrics={"input_tokens": 17, "output_tokens": 19, "total_tokens": 36},
419+
tags={"ml_app": "<ml-app-name>", "service": "tests.contrib.openai"},
420+
)
421+
)
422+
390423
def test_chat_completion_function_call(self, openai, ddtrace_global_config, mock_llmobs_writer, mock_tracer):
391424
"""Test that function call chat completion calls are recorded as LLMObs events correctly."""
392425
with get_openai_vcr(subdirectory_name="v1").use_cassette("chat_completion_function_call.yaml"):

0 commit comments

Comments
 (0)