Commit 86d1b05

Merge pull request #660 from UiPath/fix/logs
fix: eval logs
2 parents 112452a + 4bfbfae commit 86d1b05

8 files changed: +1028 -968 lines changed
pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [project]
 name = "uipath"
-version = "2.1.75"
+version = "2.1.76"
 description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.10"

src/uipath/_cli/_evals/_console_progress_reporter.py

Lines changed: 36 additions & 36 deletions
@@ -75,15 +75,10 @@ def _display_successful_evaluation(self, eval_name: str, eval_results) -> None:
             result.append(" - No evaluators", style="dim")
         self.console.print(result)

-    def _extract_error_message(self, eval_item_payload) -> str:
-        """Extract clean error message from evaluation item."""
-        if hasattr(eval_item_payload, "_error_message"):
-            error_message = getattr(eval_item_payload, "_error_message", None)
-            if error_message:
-                return str(error_message) or "Execution failed"
-        return "Execution failed"
-
-    def _display_failed_evaluation(self, eval_name: str, error_msg: str) -> None:
+    def _extract_error_message(self, payload: EvalRunUpdatedEvent) -> str:
+        return str(payload.exception_details.exception) or "Execution failed"  # type: ignore
+
+    def _display_failed_evaluation(self, eval_name: str) -> None:
         """Display results for a failed evaluation."""
         from rich.text import Text

@@ -92,11 +87,6 @@ def _display_failed_evaluation(self, eval_name: str, error_msg: str) -> None:
         result.append(eval_name, style="bold white")
         self.console.print(result)

-        error_text = Text()
-        error_text.append(" ", style="")
-        error_text.append(error_msg, style="red")
-        self.console.print(error_text)
-
     def start_display(self):
         """Start the display."""
         if not self.display_started:

@@ -122,37 +112,47 @@ async def handle_create_eval_run(self, payload: EvalRunCreatedEvent) -> None:
         except Exception as e:
             logger.error(f"Failed to handle create eval run event: {e}")

+    def _display_logs_panel(self, eval_name: str, logs, error_msg: str = "") -> None:
+        """Display execution logs panel with optional exception at the end."""
+        self.console.print(
+            Rule(
+                f"[dim italic]Execution Logs: {eval_name}[/dim italic]",
+                style="dim",
+                align="center",
+            )
+        )
+
+        if logs:
+            for record in logs:
+                self.console.print(f" [dim]{record.getMessage()}[/dim]")
+        elif not error_msg:
+            self.console.print(" [dim italic]No execution logs[/dim italic]")
+
+        if error_msg:
+            self.console.print(f" [red]{error_msg}[/red]")
+
+        self.console.print(Rule(style="dim"))
+
     async def handle_update_eval_run(self, payload: EvalRunUpdatedEvent) -> None:
         """Handle evaluation run updates."""
         try:
             if payload.success:
-                # Store results for final display
                 self.eval_results_by_name[payload.eval_item.name] = payload.eval_results
                 self._display_successful_evaluation(
                     payload.eval_item.name, payload.eval_results
                 )
+                self._display_logs_panel(payload.eval_item.name, payload.logs)
             else:
-                error_msg = self._extract_error_message(payload.eval_item)
-                self._display_failed_evaluation(payload.eval_item.name, error_msg)
-
-                logs = payload.logs
-
-                self.console.print(
-                    Rule(
-                        f"[dim italic]Execution Logs: {payload.eval_item.name}[/dim italic]",
-                        style="dim",
-                        align="center",
-                    )
-                )
-
-                if len(logs) > 0:
-                    for record in logs:
-                        log_line = f" [dim]{record.getMessage()}[/dim]"
-                        self.console.print(log_line)
-                else:
-                    self.console.print(" [dim italic]No execution logs[/dim italic]")
-
-                self.console.print(Rule(style="dim"))
+                error_msg = self._extract_error_message(payload)
+                self._display_failed_evaluation(payload.eval_item.name)
+
+                if payload.exception_details.runtime_exception:  # type: ignore
+                    self._display_logs_panel(
+                        payload.eval_item.name, payload.logs, error_msg
+                    )
+                else:
+                    self.console.print(f" [red]{error_msg}[/red]")
+                self.console.print()
         except Exception as e:
             logger.error(f"Console reporter error: {e}")

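For orientation, the failure-rendering branch above condenses to the sketch below. This is illustrative only, not the actual ConsoleProgressReporter method; it assumes logs is a list of logging.LogRecord objects, as in the real event.

import logging

from rich.console import Console
from rich.rule import Rule

console = Console()

def render_failed_run(
    eval_name: str,
    logs: list[logging.LogRecord],
    error_msg: str,
    runtime_exception: bool,
) -> None:
    """Sketch: runtime failures show captured logs plus the error; other failures show only the error."""
    if runtime_exception:
        console.print(Rule(f"[dim italic]Execution Logs: {eval_name}[/dim italic]", style="dim"))
        for record in logs:
            console.print(f" [dim]{record.getMessage()}[/dim]")
        console.print(f" [red]{error_msg}[/red]")
        console.print(Rule(style="dim"))
    else:
        console.print(f" [red]{error_msg}[/red]")
    console.print()
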
src/uipath/_cli/_evals/_models/_exceptions.py

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+class EvaluationRuntimeException(Exception):
+    def __init__(self, spans, logs, root_exception):
+        self.spans = spans
+        self.logs = logs
+        self.root_exception = root_exception

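The new exception is a carrier: the runtime attaches the spans and logs captured up to the point of failure and re-raises, so the reporter can still render execution logs for a crashed run. A minimal sketch of the intended wrap-and-unwrap pattern follows; the two helper functions are hypothetical stand-ins (not SDK APIs), and the import path is the one inferred from the diff.

from uipath._cli._evals._models._exceptions import EvaluationRuntimeException

def run_eval_item() -> None:
    """Hypothetical stand-in for the agent execution call."""
    raise RuntimeError("agent crashed")

def collect_execution_data() -> tuple[list, list]:
    """Hypothetical stand-in for draining the span and log exporters."""
    return [], []

try:
    try:
        run_eval_item()
    except Exception as e:
        spans, logs = collect_execution_data()
        # Captured telemetry travels with the root exception to the caller.
        raise EvaluationRuntimeException(spans=spans, logs=logs, root_exception=e) from e
except EvaluationRuntimeException as err:
    # Downstream, handle_update_eval_run unwraps root_exception and renders the logs.
    print(f"root cause: {err.root_exception}; captured {len(err.logs)} log records")
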
src/uipath/_cli/_evals/_runtime.py

Lines changed: 51 additions & 25 deletions
@@ -11,6 +11,7 @@

 from ..._events._event_bus import EventBus
 from ..._events._events import (
+    EvalItemExceptionDetails,
     EvalRunCreatedEvent,
     EvalRunUpdatedEvent,
     EvalSetRunCreatedEvent,

@@ -31,6 +32,7 @@
 from .._utils._eval_set import EvalHelpers
 from ._evaluator_factory import EvaluatorFactory
 from ._models._evaluation_set import EvaluationItem, EvaluationSet
+from ._models._exceptions import EvaluationRuntimeException
 from ._models._output import (
     EvaluationResultDto,
     EvaluationRunResult,

@@ -232,8 +234,7 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
                     wait_for_completion=False,
                 )
             except Exception as e:
-                error_msg = str(e)
-                eval_item._error_message = error_msg  # type: ignore[attr-defined]
+                exception_details = EvalItemExceptionDetails(exception=e)

                 for evaluator in evaluators:
                     evaluator_counts[evaluator.id] += 1

@@ -242,18 +243,28 @@
                         0.0 - evaluator_averages[evaluator.id]
                     ) / count

+                eval_run_updated_event = EvalRunUpdatedEvent(
+                    execution_id=self.execution_id,
+                    eval_item=eval_item,
+                    eval_results=[],
+                    success=False,
+                    agent_output={},
+                    agent_execution_time=0.0,
+                    exception_details=exception_details,
+                    spans=[],
+                    logs=[],
+                )
+                if isinstance(e, EvaluationRuntimeException):
+                    eval_run_updated_event.spans = e.spans
+                    eval_run_updated_event.logs = e.logs
+                    eval_run_updated_event.exception_details.exception = (  # type: ignore
+                        e.root_exception
+                    )
+                    eval_run_updated_event.exception_details.runtime_exception = True  # type: ignore
+
                 await event_bus.publish(
                     EvaluationEvents.UPDATE_EVAL_RUN,
-                    EvalRunUpdatedEvent(
-                        execution_id=self.execution_id,
-                        eval_item=eval_item,
-                        eval_results=[],
-                        success=False,
-                        agent_output={},
-                        agent_execution_time=0.0,
-                        spans=[],
-                        logs=[],
-                    ),
+                    eval_run_updated_event,
                     wait_for_completion=False,
                 )

@@ -274,6 +285,17 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
         )
         return self.context.result

+    def _get_and_clear_execution_data(
+        self, execution_id: str
+    ) -> tuple[List[ReadableSpan], list[logging.LogRecord]]:
+        spans = self.span_exporter.get_spans(execution_id)
+        self.span_exporter.clear(execution_id)
+
+        logs = self.logs_exporter.get_logs(execution_id)
+        self.logs_exporter.clear(execution_id)
+
+        return spans, logs
+
     async def execute_runtime(
         self, eval_item: EvaluationItem
     ) -> UiPathEvalRunExecutionOutput:

@@ -284,6 +306,9 @@ async def execute_runtime(
             is_eval_run=True,
             log_handler=self._setup_execution_logging(eval_item_id),
         )
+        if runtime_context.execution_id is None:
+            raise ValueError("execution_id must be set for eval runs")
+
         attributes = {
             "evalId": eval_item.id,
             "span_type": "eval",

@@ -292,21 +317,22 @@
             attributes["execution.id"] = runtime_context.execution_id

         start_time = time()
-
-        result = await self.factory.execute_in_root_span(
-            runtime_context, root_span=eval_item.name, attributes=attributes
-        )
+        try:
+            result = await self.factory.execute_in_root_span(
+                runtime_context, root_span=eval_item.name, attributes=attributes
+            )
+        except Exception as e:
+            spans, logs = self._get_and_clear_execution_data(
+                runtime_context.execution_id
+            )
+            raise EvaluationRuntimeException(
+                spans=spans,
+                logs=logs,
+                root_exception=e,
+            ) from e

         end_time = time()
-
-        if runtime_context.execution_id is None:
-            raise ValueError("execution_id must be set for eval runs")
-
-        spans = self.span_exporter.get_spans(runtime_context.execution_id)
-        self.span_exporter.clear(runtime_context.execution_id)
-
-        logs = self.logs_exporter.get_logs(runtime_context.execution_id)
-        self.logs_exporter.clear(runtime_context.execution_id)
+        spans, logs = self._get_and_clear_execution_data(runtime_context.execution_id)

         if result is None:
             raise ValueError("Execution result cannot be None for eval runs")

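The _get_and_clear_execution_data helper added above centralizes a drain-and-reset pattern so telemetry from one eval item never leaks into the next, and the same drain now runs on both the success and the failure path. A standalone illustration of that pattern, using a simplified in-memory exporter rather than the SDK's real span/log exporters:

from collections import defaultdict
from typing import Any

class InMemoryExporter:
    """Simplified stand-in: collects items per execution id, like the eval span/log exporters."""

    def __init__(self) -> None:
        self._items: dict[str, list[Any]] = defaultdict(list)

    def add(self, execution_id: str, item: Any) -> None:
        self._items[execution_id].append(item)

    def get(self, execution_id: str) -> list[Any]:
        return list(self._items.get(execution_id, []))

    def clear(self, execution_id: str) -> None:
        self._items.pop(execution_id, None)

def get_and_clear(exporter: InMemoryExporter, execution_id: str) -> list[Any]:
    # Drain exactly once per eval item, whether the run succeeded or raised.
    items = exporter.get(execution_id)
    exporter.clear(execution_id)
    return items
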
src/uipath/_cli/cli_eval.py

Lines changed: 20 additions & 5 deletions
@@ -17,6 +17,7 @@
     UiPathRuntimeFactory,
 )
 from uipath._cli._runtime._runtime import UiPathScriptRuntime
+from uipath._cli._utils._constants import UIPATH_PROJECT_ID
 from uipath._cli._utils._folders import get_personal_workspace_key_async
 from uipath._cli.middlewares import Middlewares
 from uipath._events._event_bus import EventBus

@@ -39,6 +40,22 @@ def type_cast_value(self, ctx, value):
             raise click.BadParameter(value) from e


+def setup_reporting_prereq(no_report: bool) -> bool:
+    if no_report:
+        return False
+
+    if not os.getenv(UIPATH_PROJECT_ID, False):
+        console.warning(
+            "UIPATH_PROJECT_ID environment variable not set. Results will no be reported to Studio Web."
+        )
+        return False
+    if not os.getenv("UIPATH_FOLDER_KEY"):
+        os.environ["UIPATH_FOLDER_KEY"] = asyncio.run(
+            get_personal_workspace_key_async()
+        )
+    return True
+
+
 @click.command()
 @click.argument("entrypoint", required=False)
 @click.argument("eval_set", required=False)

@@ -79,10 +96,7 @@ def eval(
         workers: Number of parallel workers for running evaluations
         no_report: Do not report the evaluation results
     """
-    if not no_report and not os.getenv("UIPATH_FOLDER_KEY"):
-        os.environ["UIPATH_FOLDER_KEY"] = asyncio.run(
-            get_personal_workspace_key_async()
-        )
+    should_register_progress_reporter = setup_reporting_prereq(no_report)

     result = Middlewares.next(
         "eval",

@@ -92,6 +106,7 @@
         no_report=no_report,
         workers=workers,
         execution_output_file=output_file,
+        register_progress_reporter=should_register_progress_reporter,
     )

     if result.error_message:

@@ -100,7 +115,7 @@
     if result.should_continue:
         event_bus = EventBus()

-        if not no_report:
+        if should_register_progress_reporter:
             progress_reporter = StudioWebProgressReporter(LlmOpsHttpExporter())
             asyncio.run(progress_reporter.subscribe_to_eval_runtime_events(event_bus))

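The new setup_reporting_prereq gate has three outcomes: the user opted out, the project id is missing (warn and skip reporting), or reporting is possible once a folder key is resolved. A simplified sketch of that decision logic; it assumes the UIPATH_PROJECT_ID constant resolves to an environment variable of the same name and stubs out the folder-key lookup and CLI console:

import os

def should_report(no_report: bool, resolve_folder_key=lambda: "personal-workspace-key") -> bool:
    """Sketch of the gating logic; the real version warns via the CLI console and calls get_personal_workspace_key_async."""
    if no_report:
        return False  # user explicitly opted out of reporting
    if not os.getenv("UIPATH_PROJECT_ID"):
        # Without a project id there is nowhere to report to.
        print("UIPATH_PROJECT_ID not set; results will not be reported to Studio Web.")
        return False
    if not os.getenv("UIPATH_FOLDER_KEY"):
        # Resolve the folder key once so the Studio Web reporter can use it later.
        os.environ["UIPATH_FOLDER_KEY"] = resolve_folder_key()
    return True
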
src/uipath/_events/_events.py

Lines changed: 16 additions & 2 deletions
@@ -1,9 +1,9 @@
 import enum
 import logging
-from typing import Any, List, Union
+from typing import Any, List, Optional, Union

 from opentelemetry.sdk.trace import ReadableSpan
-from pydantic import BaseModel, ConfigDict
+from pydantic import BaseModel, ConfigDict, model_validator

 from uipath._cli._evals._models._evaluation_set import EvaluationItem
 from uipath.eval.models import EvalItemResult

@@ -29,6 +29,13 @@ class EvalRunCreatedEvent(BaseModel):
     eval_item: EvaluationItem


+class EvalItemExceptionDetails(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    runtime_exception: bool = False
+    exception: Exception
+
+
 class EvalRunUpdatedEvent(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)


@@ -40,6 +47,13 @@ class EvalRunUpdatedEvent(BaseModel):
     agent_execution_time: float
     spans: List[ReadableSpan]
     logs: List[logging.LogRecord]
+    exception_details: Optional[EvalItemExceptionDetails] = None
+
+    @model_validator(mode="after")
+    def validate_exception_details(self):
+        if not self.success and self.exception_details is None:
+            raise ValueError("exception_details must be provided when success is False")
+        return self


 class EvalSetRunUpdatedEvent(BaseModel):

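The model_validator added above makes it impossible to construct a failed run event without exception details. A standalone sketch of the same pattern, trimmed to the relevant fields rather than the full EvalRunUpdatedEvent:

from typing import Optional

from pydantic import BaseModel, ConfigDict, ValidationError, model_validator

class ExceptionDetails(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    runtime_exception: bool = False
    exception: Exception

class RunUpdated(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    success: bool
    exception_details: Optional[ExceptionDetails] = None

    @model_validator(mode="after")
    def _require_details_on_failure(self):
        if not self.success and self.exception_details is None:
            raise ValueError("exception_details must be provided when success is False")
        return self

RunUpdated(success=True)  # fine: a successful run needs no exception details
try:
    RunUpdated(success=False)  # rejected: failure without details
except ValidationError as err:
    print(err.errors()[0]["msg"])
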
src/uipath/telemetry/_track.py

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@
 import os
 from functools import wraps
 from importlib.metadata import version
-from logging import INFO, LogRecord, getLogger
+from logging import WARNING, LogRecord, getLogger
 from typing import Any, Callable, Dict, Optional, Union

 from azure.monitor.opentelemetry import configure_azure_monitor

@@ -102,7 +102,7 @@ def _initialize():
         )

         _logger.addHandler(_AzureMonitorOpenTelemetryEventHandler())
-        _logger.setLevel(INFO)
+        _logger.setLevel(WARNING)

         _TelemetryClient._initialized = True
     except Exception:
