2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "uipath"
version = "2.1.75"
version = "2.1.76"
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
readme = { file = "README.md", content-type = "text/markdown" }
requires-python = ">=3.10"
72 changes: 36 additions & 36 deletions src/uipath/_cli/_evals/_console_progress_reporter.py
@@ -75,15 +75,10 @@ def _display_successful_evaluation(self, eval_name: str, eval_results) -> None:
result.append(" - No evaluators", style="dim")
self.console.print(result)

def _extract_error_message(self, eval_item_payload) -> str:
"""Extract clean error message from evaluation item."""
if hasattr(eval_item_payload, "_error_message"):
error_message = getattr(eval_item_payload, "_error_message", None)
if error_message:
return str(error_message) or "Execution failed"
return "Execution failed"

def _display_failed_evaluation(self, eval_name: str, error_msg: str) -> None:
def _extract_error_message(self, payload: EvalRunUpdatedEvent) -> str:
return str(payload.exception_details.exception) or "Execution failed" # type: ignore

def _display_failed_evaluation(self, eval_name: str) -> None:
"""Display results for a failed evaluation."""
from rich.text import Text

@@ -92,11 +87,6 @@ def _display_failed_evaluation(self, eval_name: str, error_msg: str) -> None:
result.append(eval_name, style="bold white")
self.console.print(result)

error_text = Text()
error_text.append(" ", style="")
error_text.append(error_msg, style="red")
self.console.print(error_text)

def start_display(self):
"""Start the display."""
if not self.display_started:
@@ -122,37 +112,47 @@ async def handle_create_eval_run(self, payload: EvalRunCreatedEvent) -> None:
except Exception as e:
logger.error(f"Failed to handle create eval run event: {e}")

def _display_logs_panel(self, eval_name: str, logs, error_msg: str = "") -> None:
"""Display execution logs panel with optional exception at the end."""
self.console.print(
Rule(
f"[dim italic]Execution Logs: {eval_name}[/dim italic]",
style="dim",
align="center",
)
)

if logs:
for record in logs:
self.console.print(f" [dim]{record.getMessage()}[/dim]")
elif not error_msg:
self.console.print(" [dim italic]No execution logs[/dim italic]")

if error_msg:
self.console.print(f" [red]{error_msg}[/red]")

self.console.print(Rule(style="dim"))

async def handle_update_eval_run(self, payload: EvalRunUpdatedEvent) -> None:
"""Handle evaluation run updates."""
try:
if payload.success:
# Store results for final display
self.eval_results_by_name[payload.eval_item.name] = payload.eval_results
self._display_successful_evaluation(
payload.eval_item.name, payload.eval_results
)
self._display_logs_panel(payload.eval_item.name, payload.logs)
else:
error_msg = self._extract_error_message(payload.eval_item)
self._display_failed_evaluation(payload.eval_item.name, error_msg)

logs = payload.logs

self.console.print(
Rule(
f"[dim italic]Execution Logs: {payload.eval_item.name}[/dim italic]",
style="dim",
align="center",
)
)

if len(logs) > 0:
for record in logs:
log_line = f" [dim]{record.getMessage()}[/dim]"
self.console.print(log_line)
else:
self.console.print(" [dim italic]No execution logs[/dim italic]")

self.console.print(Rule(style="dim"))
error_msg = self._extract_error_message(payload)
self._display_failed_evaluation(payload.eval_item.name)

if payload.exception_details.runtime_exception: # type: ignore
self._display_logs_panel(
payload.eval_item.name, payload.logs, error_msg
)
else:
self.console.print(f" [red]{error_msg}[/red]")
self.console.print()
except Exception as e:
logger.error(f"Console reporter error: {e}")

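A minimal usage sketch of the reworked failure path (the reporter class name is not shown in this diff, so the reporter instance and the EvaluationItem are taken as given here):

import logging

from uipath._events._events import EvalItemExceptionDetails, EvalRunUpdatedEvent

async def demo_failure(reporter, eval_item) -> None:
    # success=False now requires exception_details (enforced by the validator added in _events.py below).
    event = EvalRunUpdatedEvent(
        execution_id="exec-1",
        eval_item=eval_item,
        eval_results=[],
        success=False,
        agent_output={},
        agent_execution_time=0.0,
        spans=[],
        logs=[logging.LogRecord("demo", logging.INFO, __file__, 1, "step executed", None, None)],
        exception_details=EvalItemExceptionDetails(
            exception=RuntimeError("boom"),
            runtime_exception=True,  # True routes through the logs panel; False prints only the red error line
        ),
    )
    # Prints the failed-eval header, then the execution-logs panel ending with the error in red.
    await reporter.handle_update_eval_run(event)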
5 changes: 5 additions & 0 deletions src/uipath/_cli/_evals/_models/_exceptions.py
@@ -0,0 +1,5 @@
class EvaluationRuntimeException(Exception):
def __init__(self, spans, logs, root_exception):
self.spans = spans
self.logs = logs
self.root_exception = root_exception
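The new exception is a thin carrier: it lets the runtime re-raise a failure without losing the spans and logs collected before the crash. A minimal sketch of the wrap-and-unwrap pattern, mirroring its use in _runtime.py below:

from uipath._cli._evals._models._exceptions import EvaluationRuntimeException

def flaky_step():
    raise ValueError("agent crashed")

try:
    try:
        flaky_step()
    except Exception as e:
        # Attach whatever telemetry was captured before the failure.
        raise EvaluationRuntimeException(spans=[], logs=[], root_exception=e) from e
except EvaluationRuntimeException as exc:
    print(exc.root_exception)             # the original error
    print(len(exc.spans), len(exc.logs))  # telemetry travels with it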
76 changes: 51 additions & 25 deletions src/uipath/_cli/_evals/_runtime.py
@@ -11,6 +11,7 @@

from ..._events._event_bus import EventBus
from ..._events._events import (
EvalItemExceptionDetails,
EvalRunCreatedEvent,
EvalRunUpdatedEvent,
EvalSetRunCreatedEvent,
@@ -31,6 +32,7 @@
from .._utils._eval_set import EvalHelpers
from ._evaluator_factory import EvaluatorFactory
from ._models._evaluation_set import EvaluationItem, EvaluationSet
from ._models._exceptions import EvaluationRuntimeException
from ._models._output import (
EvaluationResultDto,
EvaluationRunResult,
@@ -232,8 +234,7 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
wait_for_completion=False,
)
except Exception as e:
error_msg = str(e)
eval_item._error_message = error_msg # type: ignore[attr-defined]
exception_details = EvalItemExceptionDetails(exception=e)

for evaluator in evaluators:
evaluator_counts[evaluator.id] += 1
@@ -242,18 +243,28 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
0.0 - evaluator_averages[evaluator.id]
) / count

eval_run_updated_event = EvalRunUpdatedEvent(
execution_id=self.execution_id,
eval_item=eval_item,
eval_results=[],
success=False,
agent_output={},
agent_execution_time=0.0,
exception_details=exception_details,
spans=[],
logs=[],
)
if isinstance(e, EvaluationRuntimeException):
eval_run_updated_event.spans = e.spans
eval_run_updated_event.logs = e.logs
eval_run_updated_event.exception_details.exception = ( # type: ignore
e.root_exception
)
eval_run_updated_event.exception_details.runtime_exception = True # type: ignore

await event_bus.publish(
EvaluationEvents.UPDATE_EVAL_RUN,
EvalRunUpdatedEvent(
execution_id=self.execution_id,
eval_item=eval_item,
eval_results=[],
success=False,
agent_output={},
agent_execution_time=0.0,
spans=[],
logs=[],
),
eval_run_updated_event,
wait_for_completion=False,
)

@@ -274,6 +285,17 @@ async def execute(self) -> Optional[UiPathRuntimeResult]:
)
return self.context.result

def _get_and_clear_execution_data(
self, execution_id: str
) -> tuple[List[ReadableSpan], list[logging.LogRecord]]:
spans = self.span_exporter.get_spans(execution_id)
self.span_exporter.clear(execution_id)

logs = self.logs_exporter.get_logs(execution_id)
self.logs_exporter.clear(execution_id)

return spans, logs

async def execute_runtime(
self, eval_item: EvaluationItem
) -> UiPathEvalRunExecutionOutput:
@@ -284,6 +306,9 @@ async def execute_runtime(
is_eval_run=True,
log_handler=self._setup_execution_logging(eval_item_id),
)
if runtime_context.execution_id is None:
raise ValueError("execution_id must be set for eval runs")

attributes = {
"evalId": eval_item.id,
"span_type": "eval",
@@ -292,21 +317,22 @@ async def execute_runtime(
attributes["execution.id"] = runtime_context.execution_id

start_time = time()

result = await self.factory.execute_in_root_span(
runtime_context, root_span=eval_item.name, attributes=attributes
)
try:
result = await self.factory.execute_in_root_span(
runtime_context, root_span=eval_item.name, attributes=attributes
)
except Exception as e:
spans, logs = self._get_and_clear_execution_data(
runtime_context.execution_id
)
raise EvaluationRuntimeException(
spans=spans,
logs=logs,
root_exception=e,
) from e

end_time = time()

if runtime_context.execution_id is None:
raise ValueError("execution_id must be set for eval runs")

spans = self.span_exporter.get_spans(runtime_context.execution_id)
self.span_exporter.clear(runtime_context.execution_id)

logs = self.logs_exporter.get_logs(runtime_context.execution_id)
self.logs_exporter.clear(runtime_context.execution_id)
spans, logs = self._get_and_clear_execution_data(runtime_context.execution_id)

if result is None:
raise ValueError("Execution result cannot be None for eval runs")
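The new _get_and_clear_execution_data helper assumes exporters that buffer spans and logs per execution_id and expose get_*/clear methods; only those method names are visible in this diff. A hypothetical in-memory logs exporter with that shape, for illustration only:

import logging
from collections import defaultdict
from typing import Dict, List


class InMemoryLogsExporter:
    """Stand-in with the interface the helper relies on; not the SDK's implementation."""

    def __init__(self) -> None:
        self._logs: Dict[str, List[logging.LogRecord]] = defaultdict(list)

    def emit(self, execution_id: str, record: logging.LogRecord) -> None:
        # Buffer each record under the execution it belongs to.
        self._logs[execution_id].append(record)

    def get_logs(self, execution_id: str) -> List[logging.LogRecord]:
        return list(self._logs.get(execution_id, []))

    def clear(self, execution_id: str) -> None:
        self._logs.pop(execution_id, None)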
25 changes: 20 additions & 5 deletions src/uipath/_cli/cli_eval.py
@@ -17,6 +17,7 @@
UiPathRuntimeFactory,
)
from uipath._cli._runtime._runtime import UiPathScriptRuntime
from uipath._cli._utils._constants import UIPATH_PROJECT_ID
from uipath._cli._utils._folders import get_personal_workspace_key_async
from uipath._cli.middlewares import Middlewares
from uipath._events._event_bus import EventBus
@@ -39,6 +40,22 @@ def type_cast_value(self, ctx, value):
raise click.BadParameter(value) from e


def setup_reporting_prereq(no_report: bool) -> bool:
if no_report:
return False

if not os.getenv(UIPATH_PROJECT_ID, False):
console.warning(
"UIPATH_PROJECT_ID environment variable not set. Results will no be reported to Studio Web."
)
return False
if not os.getenv("UIPATH_FOLDER_KEY"):
os.environ["UIPATH_FOLDER_KEY"] = asyncio.run(
get_personal_workspace_key_async()
)
return True


@click.command()
@click.argument("entrypoint", required=False)
@click.argument("eval_set", required=False)
@@ -79,10 +96,7 @@ def eval(
workers: Number of parallel workers for running evaluations
no_report: Do not report the evaluation results
"""
if not no_report and not os.getenv("UIPATH_FOLDER_KEY"):
os.environ["UIPATH_FOLDER_KEY"] = asyncio.run(
get_personal_workspace_key_async()
)
should_register_progress_reporter = setup_reporting_prereq(no_report)

result = Middlewares.next(
"eval",
@@ -92,6 +106,7 @@
no_report=no_report,
workers=workers,
execution_output_file=output_file,
register_progress_reporter=should_register_progress_reporter,
)

if result.error_message:
@@ -100,7 +115,7 @@ def eval(
if result.should_continue:
event_bus = EventBus()

if not no_report:
if should_register_progress_reporter:
progress_reporter = StudioWebProgressReporter(LlmOpsHttpExporter())
asyncio.run(progress_reporter.subscribe_to_eval_runtime_events(event_bus))

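A rough pytest-style sketch of how setup_reporting_prereq is expected to behave, assuming the UIPATH_PROJECT_ID constant resolves to the environment variable of the same spelling (neither test is part of this PR):

import uipath._cli.cli_eval as cli_eval


def test_no_report_flag_short_circuits():
    # --no-report always disables Studio Web reporting.
    assert cli_eval.setup_reporting_prereq(no_report=True) is False


def test_missing_project_id_skips_reporting(monkeypatch):
    # Without UIPATH_PROJECT_ID a warning is printed and reporting is skipped,
    # so no personal-workspace folder-key lookup is attempted.
    monkeypatch.delenv("UIPATH_PROJECT_ID", raising=False)
    assert cli_eval.setup_reporting_prereq(no_report=False) is False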
18 changes: 16 additions & 2 deletions src/uipath/_events/_events.py
@@ -1,9 +1,9 @@
import enum
import logging
from typing import Any, List, Union
from typing import Any, List, Optional, Union

from opentelemetry.sdk.trace import ReadableSpan
from pydantic import BaseModel, ConfigDict
from pydantic import BaseModel, ConfigDict, model_validator

from uipath._cli._evals._models._evaluation_set import EvaluationItem
from uipath.eval.models import EvalItemResult
@@ -29,6 +29,13 @@ class EvalRunCreatedEvent(BaseModel):
eval_item: EvaluationItem


class EvalItemExceptionDetails(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)

runtime_exception: bool = False
exception: Exception


class EvalRunUpdatedEvent(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -40,6 +47,13 @@ class EvalRunUpdatedEvent(BaseModel):
agent_execution_time: float
spans: List[ReadableSpan]
logs: List[logging.LogRecord]
exception_details: Optional[EvalItemExceptionDetails] = None

@model_validator(mode="after")
def validate_exception_details(self):
if not self.success and self.exception_details is None:
raise ValueError("exception_details must be provided when success is False")
return self


class EvalSetRunUpdatedEvent(BaseModel):
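A self-contained mirror of the new validator, trimmed to the two fields it touches, to show what it accepts and rejects (the real EvalRunUpdatedEvent carries the additional fields listed above):

from typing import Optional

import pydantic
from pydantic import BaseModel, ConfigDict, model_validator


class ExceptionDetails(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)
    runtime_exception: bool = False
    exception: Exception


class RunUpdated(BaseModel):
    success: bool
    exception_details: Optional[ExceptionDetails] = None

    @model_validator(mode="after")
    def validate_exception_details(self):
        if not self.success and self.exception_details is None:
            raise ValueError("exception_details must be provided when success is False")
        return self


RunUpdated(success=True)                                                                   # ok
RunUpdated(success=False, exception_details=ExceptionDetails(exception=ValueError("x")))   # ok
try:
    RunUpdated(success=False)                                                              # rejected by the validator
except pydantic.ValidationError as err:
    print(err)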
4 changes: 2 additions & 2 deletions src/uipath/telemetry/_track.py
@@ -2,7 +2,7 @@
import os
from functools import wraps
from importlib.metadata import version
from logging import INFO, LogRecord, getLogger
from logging import WARNING, LogRecord, getLogger
from typing import Any, Callable, Dict, Optional, Union

from azure.monitor.opentelemetry import configure_azure_monitor
@@ -102,7 +102,7 @@ def _initialize():
)

_logger.addHandler(_AzureMonitorOpenTelemetryEventHandler())
_logger.setLevel(INFO)
_logger.setLevel(WARNING)

_TelemetryClient._initialized = True
except Exception:
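Raising the logger level from INFO to WARNING means telemetry records logged at INFO are dropped by the logger itself and never reach the Azure Monitor handler; only WARNING and above are forwarded. The gating is plain stdlib behavior, sketched here with a stream handler in place of the Azure one:

import logging

logger = logging.getLogger("telemetry-demo")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.WARNING)

logger.info("tracked: command started")    # filtered out by the logger's level
logger.warning("tracked: command failed")  # still handed to the handler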