diff --git a/packages/uipath-openai-agents/pyproject.toml b/packages/uipath-openai-agents/pyproject.toml index ca036cc..7a93493 100644 --- a/packages/uipath-openai-agents/pyproject.toml +++ b/packages/uipath-openai-agents/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "uipath-openai-agents" -version = "0.0.3" +version = "0.0.4" description = "UiPath OpenAI Agents SDK" readme = "README.md" requires-python = ">=3.11" diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/.agent/CLI_REFERENCE.md b/packages/uipath-openai-agents/samples/agent-as-tools/.agent/CLI_REFERENCE.md index a58faad..065b31a 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/.agent/CLI_REFERENCE.md +++ b/packages/uipath-openai-agents/samples/agent-as-tools/.agent/CLI_REFERENCE.md @@ -59,8 +59,10 @@ uv run uipath init --infer-bindings | `--input-file` | value | `Sentinel.UNSET` | Alias for '-f/--file' arguments | | `--output-file` | value | `Sentinel.UNSET` | File path where the output will be written | | `--trace-file` | value | `Sentinel.UNSET` | File path where the trace spans will be written (JSON Lines format) | +| `--state-file` | value | `Sentinel.UNSET` | File path where the state file is stored for persisting execution state. If not provided, a temporary file will be used. | | `--debug` | flag | false | Enable debugging with debugpy. The process will wait for a debugger to attach. 
| | `--debug-port` | value | `5678` | Port for the debug server (default: 5678) | +| `--keep-state-file` | flag | false | Keep the temporary state file even when not resuming and no job id is provided | **Usage Examples:** @@ -99,6 +101,10 @@ uv run uipath run --resume enable_mocker_cache: Enable caching for LLM mocker responses report_coverage: Report evaluation coverage model_settings_id: Model settings ID to override agent settings + trace_file: File path where traces will be written in JSONL format + max_llm_concurrency: Maximum concurrent LLM requests + input_overrides: Input field overrides mapping (direct field override with deep merge) + resume: Resume execution from a previous suspended state **Arguments:** @@ -120,6 +126,8 @@ uv run uipath run --resume | `--report-coverage` | flag | false | Report evaluation coverage | | `--model-settings-id` | value | `"default"` | Model settings ID from evaluation set to override agent settings (default: 'default') | | `--trace-file` | value | `Sentinel.UNSET` | File path where traces will be written in JSONL format | +| `--max-llm-concurrency` | value | `20` | Maximum concurrent LLM requests (default: 20) | +| `--resume` | flag | false | Resume execution from a previous suspended state | **Usage Examples:** @@ -226,6 +234,53 @@ The `uipath.json` file is automatically generated by `uipath init` and defines y The UiPath CLI provides commands for interacting with UiPath platform services. These commands allow you to manage buckets, assets, jobs, and other resources. +### `uipath assets` + +Manage UiPath assets. + + Assets are key-value pairs that store configuration data, credentials, + and settings used by automation processes. 
+ + \b + Examples: + # List all assets in a folder + uipath assets list --folder-path "Shared" + + # List with filter + uipath assets list --filter "ValueType eq 'Text'" + + # List with ordering + uipath assets list --orderby "Name asc" + + +**Subcommands:** + +**`uipath assets list`** + +List assets in a folder. + + \b + Examples: + uipath assets list + uipath assets list --folder-path "Shared" + uipath assets list --filter "ValueType eq 'Text'" + uipath assets list --filter "Name eq 'MyAsset'" + uipath assets list --orderby "Name asc" + uipath assets list --top 50 --skip 100 + + +Options: +- `--filter`: OData $filter expression (default: `Sentinel.UNSET`) +- `--orderby`: OData $orderby expression (default: `Sentinel.UNSET`) +- `--top`: Maximum number of items to return (default: 100, max: 1000) (default: `100`) +- `--skip`: Number of items to skip (default: `0`) +- `--folder-path`: Folder path (e.g., "Shared"). Can also be set via UIPATH_FOLDER_PATH environment variable. (default: `Sentinel.UNSET`) +- `--folder-key`: Folder key (UUID) (default: `Sentinel.UNSET`) +- `--format`: Output format (overrides global) (default: `Sentinel.UNSET`) +- `--output`, `-o`: Output file (overrides global) (default: `Sentinel.UNSET`) + +--- + ### `uipath buckets` Manage UiPath storage buckets and files. diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/.agent/SDK_REFERENCE.md b/packages/uipath-openai-agents/samples/agent-as-tools/.agent/SDK_REFERENCE.md index 8373939..3904a5b 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/.agent/SDK_REFERENCE.md +++ b/packages/uipath-openai-agents/samples/agent-as-tools/.agent/SDK_REFERENCE.md @@ -16,6 +16,25 @@ sdk = UiPath() sdk = UiPath(base_url="https://cloud.uipath.com/...", secret="your_token") ``` +### Agenthub + +Agenthub service + +```python +# Fetch available models from LLM Gateway discovery endpoint. 
+sdk.agenthub.get_available_llm_models(headers: dict[str, Any] | None=None) -> list[uipath.platform.agenthub.agenthub.LlmModel] + +# Asynchronously fetch available models from LLM Gateway discovery endpoint. +sdk.agenthub.get_available_llm_models_async(headers: dict[str, Any] | None=None) -> list[uipath.platform.agenthub.agenthub.LlmModel] + +# Start a system agent job. +sdk.agenthub.invoke_system_agent(agent_name: str, entrypoint: str, input_arguments: dict[str, Any] | None=None, folder_key: str | None=None, folder_path: str | None=None, headers: dict[str, Any] | None=None) -> str + +# Asynchronously start a system agent and return the job. +sdk.agenthub.invoke_system_agent_async(agent_name: str, entrypoint: str, input_arguments: dict[str, Any] | None=None, folder_key: str | None=None, folder_path: str | None=None, headers: dict[str, Any] | None=None) -> str + +``` + ### Api Client Api Client service @@ -31,6 +50,12 @@ service = sdk.api_client Assets service ```python +# List assets using OData API with offset-based pagination. +sdk.assets.list(folder_path: Optional[str]=None, folder_key: Optional[str]=None, filter: Optional[str]=None, orderby: Optional[str]=None, skip: int=0, top: int=100) -> uipath.platform.common.paging.PagedResult[uipath.platform.orchestrator.assets.Asset] + +# Asynchronously list assets using OData API with offset-based pagination. +sdk.assets.list_async(folder_path: Optional[str]=None, folder_key: Optional[str]=None, filter: Optional[str]=None, orderby: Optional[str]=None, skip: int=0, top: int=100) -> uipath.platform.common.paging.PagedResult[uipath.platform.orchestrator.assets.Asset] + # Retrieve an asset by its name. 
sdk.assets.retrieve(name: str, folder_key: Optional[str]=None, folder_path: Optional[str]=None) -> uipath.platform.orchestrator.assets.UserAsset | uipath.platform.orchestrator.assets.Asset @@ -340,12 +365,24 @@ sdk.documents.retrieve_ixp_extraction_result(project_id: str, tag: str, operatio # Asynchronous version of the [`retrieve_ixp_extraction_result`][uipath.platform.documents._documents_service.DocumentsService.retrieve_ixp_extraction_result] method. sdk.documents.retrieve_ixp_extraction_result_async(project_id: str, tag: str, operation_id: str) -> uipath.platform.documents.documents.ExtractionResponseIXP +# Retrieve the result of an IXP create validate extraction action operation (single-shot, non-blocking). +sdk.documents.retrieve_ixp_extraction_validation_result(project_id: str, tag: str, operation_id: str) -> uipath.platform.documents.documents.ValidateExtractionAction + +# Asynchronous version of the [`retrieve_ixp_extraction_validation_result`][uipath.platform.documents._documents_service.DocumentsService.retrieve_ixp_extraction_validation_result] method. +sdk.documents.retrieve_ixp_extraction_validation_result_async(project_id: str, tag: str, operation_id: str) -> uipath.platform.documents.documents.ValidateExtractionAction + # Start an IXP extraction process without waiting for results (non-blocking). sdk.documents.start_ixp_extraction(project_name: str, tag: str, file: Union[IO[bytes], bytes, str, NoneType]=None, file_path: Optional[str]=None) -> uipath.platform.documents.documents.StartExtractionResponse # Asynchronous version of the [`start_ixp_extraction`][uipath.platform.documents._documents_service.DocumentsService.start_ixp_extraction] method. sdk.documents.start_ixp_extraction_async(project_name: str, tag: str, file: Union[IO[bytes], bytes, str, NoneType]=None, file_path: Optional[str]=None) -> uipath.platform.documents.documents.StartExtractionResponse +# Start an IXP extraction validation action without waiting for results (non-blocking). 
+sdk.documents.start_ixp_extraction_validation(action_title: str, action_priority: uipath.platform.documents.documents.StartOperationResponse + +# Asynchronous version of the [`start_ixp_extraction_validation`][uipath.platform.documents._documents_service.DocumentsService.start_ixp_extraction_validation] method. +sdk.documents.start_ixp_extraction_validation_async(action_title: str, action_priority: uipath.platform.documents.documents.StartOperationResponse + ``` ### Entities @@ -505,7 +542,7 @@ Llm service ```python # Generate chat completions using UiPath's normalized LLM Gateway API. -sdk.llm.chat_completions(messages: list[dict[str, str]] | list[tuple[str, str]], model: str="gpt-4o-mini-2024-07-18", max_tokens: int=4096, temperature: float=0, n: int=1, frequency_penalty: float=0, presence_penalty: float=0, top_p: float | None=1, top_k: int | None=None, tools: list[uipath.platform.chat.llm_gateway.ToolDefinition] | None=None, tool_choice: Union[uipath.platform.chat.llm_gateway.AutoToolChoice, uipath.platform.chat.llm_gateway.RequiredToolChoice, uipath.platform.chat.llm_gateway.SpecificToolChoice, Literal['auto', 'none'], NoneType]=None, response_format: dict[str, Any] | type[pydantic.main.BaseModel] | None=None, api_version: str="2024-08-01-preview") +sdk.llm.chat_completions(messages: list[dict[str, str]] | list[tuple[str, str]], model: str="gpt-4.1-mini-2025-04-14", max_tokens: int=4096, temperature: float=0, n: int=1, frequency_penalty: float=0, presence_penalty: float=0, top_p: float | None=1, top_k: int | None=None, tools: list[uipath.platform.chat.llm_gateway.ToolDefinition] | None=None, tool_choice: Union[uipath.platform.chat.llm_gateway.AutoToolChoice, uipath.platform.chat.llm_gateway.RequiredToolChoice, uipath.platform.chat.llm_gateway.SpecificToolChoice, Literal['auto', 'none'], NoneType]=None, response_format: dict[str, Any] | type[pydantic.main.BaseModel] | None=None, api_version: str="2024-08-01-preview") ``` @@ -515,7 +552,7 @@ Llm Openai service 
```python # Generate chat completions using UiPath's LLM Gateway service. -sdk.llm_openai.chat_completions(messages: list[dict[str, str]], model: str="gpt-4o-mini-2024-07-18", max_tokens: int=4096, temperature: float=0, response_format: dict[str, Any] | type[pydantic.main.BaseModel] | None=None, api_version: str="2024-10-21") +sdk.llm_openai.chat_completions(messages: list[dict[str, str]], model: str="gpt-4.1-mini-2025-04-14", max_tokens: int=4096, temperature: float=0, response_format: dict[str, Any] | type[pydantic.main.BaseModel] | None=None, api_version: str="2024-10-21") # Generate text embeddings using UiPath's LLM Gateway service. sdk.llm_openai.embeddings(input: str, embedding_model: str="text-embedding-ada-002", openai_api_version: str="2024-10-21") diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/.claude/commands/eval.md b/packages/uipath-openai-agents/samples/agent-as-tools/.claude/commands/eval.md new file mode 100644 index 0000000..15fa04f --- /dev/null +++ b/packages/uipath-openai-agents/samples/agent-as-tools/.claude/commands/eval.md @@ -0,0 +1,287 @@ +--- +allowed-tools: Bash, Read, Write, Edit, Glob +description: Create and run agent evaluations +--- + +I'll help you create and run evaluations for your UiPath agent. + +## Step 1: Check project setup + +Let me check your project structure: + +!ls -la evaluations/ entry-points.json 2>/dev/null || echo "NEEDS_SETUP" + +# Check if schemas might be stale (main.py newer than entry-points.json) +!if [ -f main.py ] && [ -f entry-points.json ] && [ main.py -nt entry-points.json ]; then echo "SCHEMAS_MAY_BE_STALE"; fi + +### If NEEDS_SETUP + +If `entry-points.json` doesn't exist, initialize the project first: + +!uv run uipath init + +Then re-run this skill. + +### If SCHEMAS_MAY_BE_STALE + +Your `main.py` is newer than `entry-points.json`. Refresh schemas: + +!uv run uipath init --no-agents-md-override + +## Step 2: What would you like to do? + +1. 
**Create new eval set** - Set up evaluations from scratch +2. **Add test case** - Add a test to existing eval set +3. **Run evaluations** - Execute tests and see results +4. **Analyze failures** - Debug failing tests + +--- + +## Creating an Eval Set + +First, create the directory structure: + +!mkdir -p evaluations/eval-sets evaluations/evaluators + +Read the agent's Input/Output schema from entry-points.json to understand the data types. + +### Evaluator Selection Guide + +| If your output is... | Use this evaluator | evaluatorTypeId | +|---------------------|-------------------|-----------------| +| Exact string/number | `ExactMatchEvaluator` | `uipath-exact-match` | +| Contains key phrases | `ContainsEvaluator` | `uipath-contains` | +| Semantically correct | `LLMJudgeOutputEvaluator` | `uipath-llm-judge-output-semantic-similarity` | +| JSON with numbers | `JsonSimilarityEvaluator` | `uipath-json-similarity` | + +### Step 1: Create Evaluator Config Files + +**Each evaluator needs a JSON config file** in `evaluations/evaluators/`. 
+ +**ExactMatchEvaluator** (`evaluations/evaluators/exact-match.json`): +```json +{ + "version": "1.0", + "id": "ExactMatchEvaluator", + "name": "ExactMatchEvaluator", + "description": "Checks for exact output match", + "evaluatorTypeId": "uipath-exact-match", + "evaluatorConfig": { + "name": "ExactMatchEvaluator", + "targetOutputKey": "*" + } +} +``` + +**LLMJudgeOutputEvaluator** (`evaluations/evaluators/llm-judge-output.json`): +```json +{ + "version": "1.0", + "id": "LLMJudgeOutputEvaluator", + "name": "LLMJudgeOutputEvaluator", + "description": "Uses LLM to judge semantic similarity", + "evaluatorTypeId": "uipath-llm-judge-output-semantic-similarity", + "evaluatorConfig": { + "name": "LLMJudgeOutputEvaluator", + "model": "gpt-4o-mini-2024-07-18" + } +} +``` + +**JsonSimilarityEvaluator** (`evaluations/evaluators/json-similarity.json`): +```json +{ + "version": "1.0", + "id": "JsonSimilarityEvaluator", + "name": "JsonSimilarityEvaluator", + "description": "Compares JSON structures", + "evaluatorTypeId": "uipath-json-similarity", + "evaluatorConfig": { + "name": "JsonSimilarityEvaluator", + "targetOutputKey": "*" + } +} +``` + +**ContainsEvaluator** (`evaluations/evaluators/contains.json`): +```json +{ + "version": "1.0", + "id": "ContainsEvaluator", + "name": "ContainsEvaluator", + "description": "Checks if output contains text", + "evaluatorTypeId": "uipath-contains", + "evaluatorConfig": { + "name": "ContainsEvaluator" + } +} +``` + +### Step 2: Create Eval Set + +**Eval Set Template** (`evaluations/eval-sets/default.json`): +```json +{ + "version": "1.0", + "id": "default-eval-set", + "name": "Default Evaluation Set", + "evaluatorRefs": ["ExactMatchEvaluator"], + "evaluations": [ + { + "id": "test-1", + "name": "Test description", + "inputs": { + "field": "value" + }, + "evaluationCriterias": { + "ExactMatchEvaluator": { + "expectedOutput": { + "result": "expected value" + } + } + } + } + ] +} +``` + +**Important notes:** +- `evaluatorRefs` must list ALL 
evaluators used in any test case +- Each evaluator in `evaluatorRefs` needs a matching JSON config in `evaluations/evaluators/` +- `evaluationCriterias` keys must match entries in `evaluatorRefs` +- Use `expectedOutput` for most evaluators +- LLM evaluators need `model` in their config. Available models are defined in the SDK's `ChatModels` class (`uipath.platform.chat.ChatModels`): + - `gpt-4o-mini-2024-07-18` (recommended for cost-efficiency) + - `gpt-4o-2024-08-06` (higher quality, higher cost) + - `o3-mini-2025-01-31` (latest reasoning model) + - Model availability varies by region and tenant configuration + - Check your UiPath Automation Cloud portal under AI Trust Layer for available models in your region + +--- + +## Adding a Test Case + +When adding a test to an existing eval set: + +1. Read the existing eval set +2. Check which evaluators are in `evaluatorRefs` +3. Add the new test to `evaluations` array +4. If using a new evaluator, add it to `evaluatorRefs` + +### Test Case Template + +```json +{ + "id": "test-{n}", + "name": "Description of what this tests", + "inputs": { }, + "evaluationCriterias": { + "EvaluatorName": { + "expectedOutput": { } + } + } +} +``` + +--- + +## Running Evaluations + +First, read entry-points.json to get the entrypoint name (e.g., `main`): + +!uv run uipath eval main evaluations/eval-sets/default.json --output-file eval-results.json + +**Note:** Replace `main` with your actual entrypoint from entry-points.json. 
+ +### Analyze Results + +After running, read `eval-results.json` and show: +- Pass/fail summary table +- For failures: expected vs actual output +- Suggestions for fixing or changing evaluators + +### Results Format + +```json +{ + "evaluationSetResults": [{ + "evaluationRunResults": [ + { + "evaluationId": "test-1", + "evaluatorId": "ExactMatchEvaluator", + "result": { "score": 1.0 }, + "errorMessage": null + } + ] + }] +} +``` + +- Score 1.0 = PASS +- Score < 1.0 = FAIL (show expected vs actual) +- errorMessage present = ERROR (show message) + +--- + +## Evaluator Reference + +### Deterministic Evaluators + +**ExactMatchEvaluator** - Exact output matching +```json +"ExactMatchEvaluator": { + "expectedOutput": { "result": "exact value" } +} +``` + +**ContainsEvaluator** - Output contains substring +```json +"ContainsEvaluator": { + "searchText": "must contain this" +} +``` + +**JsonSimilarityEvaluator** - JSON comparison with tolerance +```json +"JsonSimilarityEvaluator": { + "expectedOutput": { "value": 10.0 } +} +``` + +### LLM-Based Evaluators + +**LLMJudgeOutputEvaluator** - Semantic correctness +```json +"LLMJudgeOutputEvaluator": { + "expectedOutput": { "summary": "Expected semantic meaning" } +} +``` + +**LLMJudgeTrajectoryEvaluator** - Validate agent reasoning +```json +"LLMJudgeTrajectoryEvaluator": { + "expectedAgentBehavior": "The agent should first fetch data, then process it" +} +``` + +--- + +## Common Issues + +### "No evaluations found" +- Check `evaluations/eval-sets/` directory exists +- Verify JSON file is valid + +### Evaluator not found +- Each evaluator needs a JSON config file in `evaluations/evaluators/` +- Config file must have correct `evaluatorTypeId` (see templates above) +- Config file must have `name` field at root level +- LLM evaluators need `model` in `evaluatorConfig` + +### Evaluator skipped +- Ensure evaluator is listed in root `evaluatorRefs` array +- Check evaluator config file exists in `evaluations/evaluators/` + +### 
Schema mismatch +- Run `uv run uipath init --no-agents-md-override` to refresh schemas +- Check `entry-points.json` matches your Input/Output models diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/.claude/commands/new-agent.md b/packages/uipath-openai-agents/samples/agent-as-tools/.claude/commands/new-agent.md new file mode 100644 index 0000000..b1d0518 --- /dev/null +++ b/packages/uipath-openai-agents/samples/agent-as-tools/.claude/commands/new-agent.md @@ -0,0 +1,103 @@ +--- +allowed-tools: Bash, Read, Write, Edit, Glob +description: Create a new UiPath coded agent from a description +--- + +I'll help you create a new UiPath coded agent. + +## Step 1: Check existing project + +Let me check if this is an existing UiPath project: + +!ls uipath.json main.py 2>/dev/null || echo "NEW_PROJECT" + +## Step 2: Gather requirements + +**What should this agent do?** + +Please describe: + +- What inputs it needs (e.g., "a file path and bucket name") +- What it should accomplish (e.g., "process CSV data") +- What outputs it should return (e.g., "total count and status") + +I'll generate the agent structure based on your description. + +## Step 3: Generate agent + +After you describe the agent, I will: + +1. Create `main.py` with Input/Output Pydantic models and `async def main()` +2. Add entrypoint to `uipath.json` under `"functions": {"agent_name": "main.py:main"}` +3. Run `uv run uipath init --no-agents-md-override` to generate schemas + +**Template structure** (from .agent/REQUIRED_STRUCTURE.md): + +```python +from pydantic import BaseModel +from uipath.platform import UiPath + +class Input(BaseModel): + """Input fields for the agent.""" + # Fields based on your description + pass + + +class Output(BaseModel): + """Output fields returned by the agent.""" + # Fields based on your description + pass + + +async def main(input: Input) -> Output: + """Main entry point for the agent. + + Args: + input: The input data for the agent. 
+ + Returns: + The output data from the agent. + """ + + uipath = UiPath() + + # TODO: Implement agent logic + return Output() +``` + +**Important notes:** + +- Use `async def main` - many SDK methods are async +- Initialize `UiPath()` inside the function, not at module level +- After creating main.py, add entrypoint to `uipath.json` under `"functions"` + +## Step 4: Update entry-point schemas + +After creating main.py, regenerate the schemas: + +!uv run uipath init --no-agents-md-override + +## Step 5: Verify + +Quick test to verify the setup: + +!uv run uipath run main '{}' 2>&1 | head -30 + +## Summary + +Once complete, you'll have: + +| File | Purpose | +| ------------------- | ----------------------------------- | +| `main.py` | Agent code with Input/Output models | +| `uipath.json` | Project configuration | +| `entry-points.json` | Entry point schemas | +| `bindings.json` | Resource bindings | +| `.agent/` | SDK and CLI reference docs | + +**Next steps:** + +1. Implement your logic in `main()` +2. Test: `uv run uipath run main '{"field": "value"}'` +3. Create evaluation sets in `evaluations/eval-sets/` for evaluations +4.
Evaluate: `uv run uipath eval main evaluations/eval-sets/default.json` diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/CLAUDE.md b/packages/uipath-openai-agents/samples/agent-as-tools/CLAUDE.md index 43c994c..eef4bd2 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/CLAUDE.md +++ b/packages/uipath-openai-agents/samples/agent-as-tools/CLAUDE.md @@ -1 +1 @@ -@AGENTS.md +@AGENTS.md \ No newline at end of file diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/agent.mermaid b/packages/uipath-openai-agents/samples/agent-as-tools/agent.mermaid index d15e4f6..a2280a6 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/agent.mermaid +++ b/packages/uipath-openai-agents/samples/agent-as-tools/agent.mermaid @@ -1,15 +1,18 @@ flowchart TB __start__(__start__) orchestrator_agent(orchestrator_agent) - translate_to_spanish(translate_to_spanish) - translate_to_french(translate_to_french) - translate_to_italian(translate_to_italian) + spanish_agent(spanish_agent) + french_agent(french_agent) + italian_agent(italian_agent) + orchestrator_agent_tools(tools) __end__(__end__) + orchestrator_agent --> |translate_to_spanish|spanish_agent + spanish_agent --> orchestrator_agent + orchestrator_agent --> |translate_to_french|french_agent + french_agent --> orchestrator_agent + orchestrator_agent --> |translate_to_italian|italian_agent + italian_agent --> orchestrator_agent + orchestrator_agent --> orchestrator_agent_tools + orchestrator_agent_tools --> orchestrator_agent __start__ --> |input|orchestrator_agent - orchestrator_agent --> |tool_call|translate_to_spanish - translate_to_spanish --> |tool_result|orchestrator_agent - orchestrator_agent --> |tool_call|translate_to_french - translate_to_french --> |tool_result|orchestrator_agent - orchestrator_agent --> |tool_call|translate_to_italian - translate_to_italian --> |tool_result|orchestrator_agent orchestrator_agent --> |output|__end__ diff --git
a/packages/uipath-openai-agents/samples/agent-as-tools/entry-points.json b/packages/uipath-openai-agents/samples/agent-as-tools/entry-points.json new file mode 100644 index 0000000..5de6258 --- /dev/null +++ b/packages/uipath-openai-agents/samples/agent-as-tools/entry-points.json @@ -0,0 +1,189 @@ +{ + "$schema": "https://cloud.uipath.com/draft/2024-12/entry-point", + "$id": "entry-points.json", + "entryPoints": [ + { + "filePath": "agent", + "uniqueId": "aa358456-b985-4a8d-abf8-280d13d1ef60", + "type": "agent", + "input": { + "type": "object", + "properties": { + "messages": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "object" + } + } + ], + "title": "Messages", + "description": "User messages to send to the agent" + }, + "user_id": { + "default": "anonymous", + "description": "User identifier for logging", + "title": "User Id", + "type": "string" + }, + "preferred_formality": { + "default": "formal", + "description": "Translation formality level: 'formal' or 'informal'", + "title": "Preferred Formality", + "type": "string" + } + }, + "required": [ + "messages" + ] + }, + "output": { + "type": "object", + "properties": { + "original_text": { + "description": "The original English text", + "title": "Original Text", + "type": "string" + }, + "translations": { + "additionalProperties": { + "type": "string" + }, + "description": "Dictionary mapping language names to translated text", + "title": "Translations", + "type": "object" + }, + "languages_used": { + "description": "List of languages that were translated to", + "items": { + "type": "string" + }, + "title": "Languages Used", + "type": "array" + } + }, + "required": [ + "original_text", + "translations", + "languages_used" + ], + "title": "TranslationOutput", + "description": "Output model for the translation orchestrator." 
+ }, + "graph": { + "nodes": [ + { + "id": "__start__", + "name": "__start__", + "type": "__start__", + "subgraph": null, + "metadata": null + }, + { + "id": "orchestrator_agent", + "name": "orchestrator_agent", + "type": "node", + "subgraph": null, + "metadata": null + }, + { + "id": "spanish_agent", + "name": "spanish_agent", + "type": "node", + "subgraph": null, + "metadata": null + }, + { + "id": "french_agent", + "name": "french_agent", + "type": "node", + "subgraph": null, + "metadata": null + }, + { + "id": "italian_agent", + "name": "italian_agent", + "type": "node", + "subgraph": null, + "metadata": null + }, + { + "id": "orchestrator_agent_tools", + "name": "tools", + "type": "tool", + "subgraph": null, + "metadata": { + "tool_names": [ + "get_translation_preferences" + ], + "tool_count": 1 + } + }, + { + "id": "__end__", + "name": "__end__", + "type": "__end__", + "subgraph": null, + "metadata": null + } + ], + "edges": [ + { + "source": "orchestrator_agent", + "target": "spanish_agent", + "label": "translate_to_spanish" + }, + { + "source": "spanish_agent", + "target": "orchestrator_agent", + "label": null + }, + { + "source": "orchestrator_agent", + "target": "french_agent", + "label": "translate_to_french" + }, + { + "source": "french_agent", + "target": "orchestrator_agent", + "label": null + }, + { + "source": "orchestrator_agent", + "target": "italian_agent", + "label": "translate_to_italian" + }, + { + "source": "italian_agent", + "target": "orchestrator_agent", + "label": null + }, + { + "source": "orchestrator_agent", + "target": "orchestrator_agent_tools", + "label": null + }, + { + "source": "orchestrator_agent_tools", + "target": "orchestrator_agent", + "label": null + }, + { + "source": "__start__", + "target": "orchestrator_agent", + "label": "input" + }, + { + "source": "orchestrator_agent", + "target": "__end__", + "label": "output" + } + ] + } + } + ] +} \ No newline at end of file diff --git 
a/packages/uipath-openai-agents/samples/agent-as-tools/input.json index e276566..ac54243 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/input.json +++ b/packages/uipath-openai-agents/samples/agent-as-tools/input.json @@ -1,3 +1,4 @@ { - "messages": "Tell me a joke" + "messages": "Translate in all languages. I am three", + "preferred_formality": "informal" } \ No newline at end of file diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/main.py b/packages/uipath-openai-agents/samples/agent-as-tools/main.py index c0a52fe..56195aa 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/main.py +++ b/packages/uipath-openai-agents/samples/agent-as-tools/main.py @@ -1,4 +1,4 @@ -from agents import Agent, AgentOutputSchema +from agents import Agent, AgentOutputSchema, RunContextWrapper, function_tool from agents.models import _openai_shared from pydantic import BaseModel, Field @@ -10,13 +10,28 @@ The frontline agent receives a user message and then picks which agents to call, as tools. In this case, it picks from a set of translation agents. -This sample demonstrates parameter inference - the Input/Output Pydantic models -are automatically extracted to generate rich schemas for UiPath integration. +This sample demonstrates: +- Parameter inference: Input/Output Pydantic models are automatically extracted +- Context passing: A Context model provides data accessible to tools (not sent to LLM) Based on: https://github.com/openai/openai-agents-python/blob/main/examples/agent_patterns/tools.py """ +class Context(BaseModel): + """Context data accessible to tools (not sent to LLM). + + The 'messages' field is always separate and goes to the LLM. + These fields are passed to tools via RunContextWrapper.
+ """ + + user_id: str = Field(default="anonymous", description="User identifier for logging") + preferred_formality: str = Field( + default="formal", + description="Translation formality level: 'formal' or 'informal'", + ) + + class TranslationOutput(BaseModel): """Output model for the translation orchestrator.""" @@ -29,7 +44,16 @@ class TranslationOutput(BaseModel): ) -def main() -> Agent: +@function_tool +def get_translation_preferences(ctx: RunContextWrapper[Context]) -> str: + """Get the user's translation preferences from context.""" + return ( + f"User {ctx.context.user_id} prefers {ctx.context.preferred_formality} " + f"translations." + ) + + +def main() -> Agent[Context]: """Configure UiPath OpenAI client and return the orchestrator agent.""" # Configure UiPath OpenAI client for agent execution # This routes all OpenAI API calls through UiPath's LLM Gateway @@ -38,21 +62,21 @@ def main() -> Agent: _openai_shared.set_default_openai_client(uipath_openai_client.async_client) # Define specialized translation agents - spanish_agent = Agent( + spanish_agent = Agent[Context]( name="spanish_agent", instructions="You translate the user's message to Spanish", handoff_description="An english to spanish translator", model=MODEL, ) - french_agent = Agent( + french_agent = Agent[Context]( name="french_agent", instructions="You translate the user's message to French", handoff_description="An english to french translator", model=MODEL, ) - italian_agent = Agent( + italian_agent = Agent[Context]( name="italian_agent", instructions="You translate the user's message to Italian", handoff_description="An english to italian translator", @@ -63,14 +87,16 @@ def main() -> Agent: # Uses output_type for structured outputs (native OpenAI Agents pattern) # Note: Using AgentOutputSchema with strict_json_schema=False because # dict[str, str] is not compatible with OpenAI's strict JSON schema mode - orchestrator_agent = Agent( + orchestrator_agent = Agent[Context]( 
name="orchestrator_agent", instructions=( "You are a translation agent. You use the tools given to you to translate. " "If asked for multiple translations, you call the relevant tools in order. " - "You never translate on your own, you always use the provided tools." + "You never translate on your own, you always use the provided tools. " + "Before translating, check the user's preferences using get_translation_preferences." ), tools=[ + get_translation_preferences, spanish_agent.as_tool( tool_name="translate_to_spanish", tool_description="Translate the user's message to Spanish", diff --git a/packages/uipath-openai-agents/samples/agent-as-tools/uv.lock b/packages/uipath-openai-agents/samples/agent-as-tools/uv.lock index 0b10b46..8a74683 100644 --- a/packages/uipath-openai-agents/samples/agent-as-tools/uv.lock +++ b/packages/uipath-openai-agents/samples/agent-as-tools/uv.lock @@ -2027,7 +2027,7 @@ wheels = [ [[package]] name = "uipath-openai-agents" -version = "0.0.1" +version = "0.0.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiosqlite" }, @@ -2037,9 +2037,9 @@ dependencies = [ { name = "uipath" }, { name = "uipath-runtime" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/50/0c/76e3fa61c8b2aa552d659ed3b6840bbf980a3e5fd33cc7cbfb62e4176885/uipath_openai_agents-0.0.1.tar.gz", hash = "sha256:e2e357e78da30b7c3e5388fd41832fa20d866b12271e6e078e57d08b178ce64b", size = 660107, upload-time = "2026-01-20T14:57:28.73Z" } +sdist = { url = "https://files.pythonhosted.org/packages/43/48/34e6e156489cf60e0537e81690bb5885c73aa1b207ea3c8276da3dd53f45/uipath_openai_agents-0.0.3.tar.gz", hash = "sha256:665d18077c67e83a98c68d3c34c2fe7a551ad92f93d7bea803e2f264019dfddd", size = 661842, upload-time = "2026-02-04T07:15:31.558Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/dd/ae/c0fb2d2ee854064c4aaa121ed96e5e010c475765e62a1ba2073362c74081/uipath_openai_agents-0.0.1-py3-none-any.whl", hash = 
"sha256:61fd3e0b34ce902484d89607369adb476697c8d51bde96f49c7dea5e5d0c7ba6", size = 31634, upload-time = "2026-01-20T14:57:27.441Z" }, + { url = "https://files.pythonhosted.org/packages/3e/0f/af58b3409a8c55b654124e123609bb8b7b11c400ddaf299802f29e5d2860/uipath_openai_agents-0.0.3-py3-none-any.whl", hash = "sha256:fce3d6409c6e6ddf6df7b5a620b3c367e17267ae686c8a911a6459cf77da730a", size = 24989, upload-time = "2026-02-04T07:15:29.689Z" }, ] [[package]] diff --git a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/__init__.py b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/__init__.py index 9aab7d5..b83dc8d 100644 --- a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/__init__.py +++ b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/__init__.py @@ -54,6 +54,14 @@ def __getattr__(name): from .runtime import UiPathOpenAIAgentRuntime return UiPathOpenAIAgentRuntime + if name == "get_agent_context_type": + from .context import get_agent_context_type + + return get_agent_context_type + if name == "parse_input_to_context": + from .context import parse_input_to_context + + return parse_input_to_context raise AttributeError(f"module {__name__!r} has no attribute {name!r}") @@ -63,4 +71,6 @@ def __getattr__(name): "get_agent_schema", "UiPathOpenAIAgentRuntimeFactory", "UiPathOpenAIAgentRuntime", + "get_agent_context_type", + "parse_input_to_context", ] diff --git a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/context.py b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/context.py new file mode 100644 index 0000000..186547d --- /dev/null +++ b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/context.py @@ -0,0 +1,65 @@ +"""Context type detection utilities for OpenAI Agents.""" + +import inspect +from typing import Any, get_args, get_origin + +from agents import Agent +from pydantic import BaseModel + + +def get_agent_context_type(agent: Agent) -> type[BaseModel] | None: + 
"""Extract the context type from Agent[TContext] generic parameter.""" + context_type = None + + # Check __orig_class__ (set when instantiating with type parameter) + orig_class = getattr(agent, "__orig_class__", None) + if orig_class: + args = get_args(orig_class) + if args: + context_type = args[0] + + # Check class-level __orig_bases__ for subclassed agents + if context_type is None: + for base in getattr(agent.__class__, "__orig_bases__", []): + origin = get_origin(base) + if origin and _is_agent_class(origin): + args = get_args(base) + if args: + context_type = args[0] + break + + if context_type and _is_pydantic_model(context_type): + return context_type + return None + + +def parse_input_to_context( + input_dict: dict[str, Any] | None, context_type: type[BaseModel] +) -> BaseModel: + """Parse input dict into a Pydantic context model (excludes 'messages' field).""" + data = dict(input_dict) if input_dict else {} + data.pop("messages", None) # messages is separate, not part of context + try: + return context_type.model_validate(data) + except Exception as e: + raise ValueError(f"Failed to parse context: {e}") + + +def _is_agent_class(cls: Any) -> bool: + try: + return cls is Agent or (inspect.isclass(cls) and issubclass(cls, Agent)) + except TypeError: + return False + + +def _is_pydantic_model(type_hint: Any) -> bool: + try: + return inspect.isclass(type_hint) and issubclass(type_hint, BaseModel) + except TypeError: + return False + + +__all__ = [ + "get_agent_context_type", + "parse_input_to_context", +] diff --git a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/runtime.py b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/runtime.py index 7a67556..3a06d69 100644 --- a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/runtime.py +++ b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/runtime.py @@ -5,6 +5,7 @@ from uuid import uuid4 from agents import Agent, Runner +from pydantic import BaseModel from 
uipath.runtime import ( UiPathExecuteOptions, UiPathRuntimeResult, @@ -20,6 +21,7 @@ from uipath.runtime.schema import UiPathRuntimeSchema from ._serialize import serialize_output +from .context import get_agent_context_type, parse_input_to_context from .errors import UiPathOpenAIAgentsErrorCode, UiPathOpenAIAgentsRuntimeError from .schema import get_agent_schema, get_entrypoints_schema @@ -47,6 +49,9 @@ def __init__( self.runtime_id: str = runtime_id or "default" self.entrypoint: str | None = entrypoint + # Detect context type from agent's generic parameter + self._context_type: type[BaseModel] | None = get_agent_context_type(agent) + async def execute( self, input: dict[str, Any] | None = None, @@ -121,13 +126,14 @@ async def _run_agent( Yields: Runtime events if stream_events=True, then final result """ - agent_input = self._prepare_agent_input(input) + # Prepare input and context + agent_input, context = self._prepare_agent_input_and_context(input) # Run the agent with streaming if events requested if stream_events: # Use streaming for events async for event_or_result in self._run_agent_streamed( - agent_input, options, stream_events + agent_input, context, options, stream_events ): yield event_or_result else: @@ -135,12 +141,14 @@ async def _run_agent( result = await Runner.run( starting_agent=self.agent, input=agent_input, + context=context, ) yield self._create_success_result(result.final_output) async def _run_agent_streamed( self, agent_input: str | list[Any], + context: Any | None, options: UiPathExecuteOptions | UiPathStreamOptions | None, stream_events: bool, ) -> AsyncGenerator[UiPathRuntimeEvent | UiPathRuntimeResult, None]: @@ -149,6 +157,7 @@ async def _run_agent_streamed( Args: agent_input: Prepared agent input (string or list of messages) + context: Optional context object (Pydantic model instance) options: Execution/stream options stream_events: Whether to yield streaming events to caller @@ -160,6 +169,7 @@ async def _run_agent_streamed( result 
= Runner.run_streamed( starting_agent=self.agent, input=agent_input, + context=context, ) # Stream events from the agent @@ -218,21 +228,32 @@ def _convert_stream_event_to_runtime_event( # Filter out raw response events (too granular) return None - def _prepare_agent_input(self, input: dict[str, Any] | None) -> str | list[Any]: + def _prepare_agent_input_and_context( + self, input: dict[str, Any] | None + ) -> tuple[str | list[Any], Any | None]: """ - Prepare agent input from UiPath input dictionary. + Prepare agent input and context from UiPath input dictionary. + - 'messages' field is always extracted as the LLM input + - If agent has a context type, remaining fields are parsed into Pydantic model """ if not input: - return "" + return "", None + # Extract messages (always goes to LLM) messages = input.get("messages", "") - - if isinstance(messages, (str, list)): - return messages - - # Fallback to empty string for unexpected types - return "" + if not isinstance(messages, (str, list)): + messages = "" + + # If agent has a context type, parse remaining fields into context + context = None + if self._context_type is not None: + try: + context = parse_input_to_context(input, self._context_type) + except ValueError: + pass # Fallback to no context if parsing fails + + return messages, context def _serialize_message(self, message: Any) -> dict[str, Any]: """ diff --git a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/schema.py b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/schema.py index a794173..d45c8a0 100644 --- a/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/schema.py +++ b/packages/uipath-openai-agents/src/uipath_openai_agents/runtime/schema.py @@ -11,6 +11,44 @@ UiPathRuntimeNode, ) +from .context import get_agent_context_type + + +def _extract_agent_from_tool(tool: Any) -> Agent | None: + """ + Extract an Agent from a tool that was created via Agent.as_tool(). 
+ + The agent is stored deep in the closure chain of the tool's on_invoke_tool function. + """ + if not hasattr(tool, "on_invoke_tool"): + return None + + try: + func = tool.on_invoke_tool + if not hasattr(func, "__closure__") or not func.__closure__: + return None + + # First level: get _on_invoke_tool_impl + impl = func.__closure__[0].cell_contents + if not callable(impl) or not hasattr(impl, "__closure__") or not impl.__closure__: + return None + + # Second level: get run_agent function + run_agent = impl.__closure__[1].cell_contents + if not callable(run_agent) or not hasattr(run_agent, "__closure__") or not run_agent.__closure__: + return None + + # Third level: find the Agent in run_agent's closure + for cell in run_agent.__closure__: + content = cell.cell_contents + if isinstance(content, Agent): + return content + + except (IndexError, AttributeError): + pass + + return None + def _is_pydantic_model(type_hint: Any) -> bool: """ @@ -45,39 +83,50 @@ def get_entrypoints_schema(agent: Agent) -> dict[str, Any]: """ Extract input/output schema from an OpenAI Agent. - Uses the agent's native output_type attribute for schema extraction. - - Args: - agent: An OpenAI Agent instance - - Returns: - Dictionary with input and output schemas + Input schema always includes 'messages' field, plus context fields if defined. + Output schema uses agent's output_type attribute. 
""" - schema = { - "input": {"type": "object", "properties": {}, "required": []}, - "output": {"type": "object", "properties": {}, "required": []}, + # Messages field is always required + messages_schema: dict[str, Any] = { + "anyOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "object"}}, + ], + "title": "Messages", + "description": "User messages to send to the agent", } - # Extract input schema - check agent's context type or use default messages - # For OpenAI Agents, input is typically messages (string or list of message objects) - schema["input"] = { - "type": "object", - "properties": { - "messages": { - "anyOf": [ - {"type": "string"}, - { - "type": "array", - "items": {"type": "object"}, - }, - ], - "title": "Messages", - "description": "User messages to send to the agent", - } + input_properties: dict[str, Any] = {"messages": messages_schema} + input_required: list[str] = ["messages"] + + schema: dict[str, Any] = { + "input": { + "type": "object", + "properties": input_properties, + "required": input_required, }, - "required": ["messages"], + "output": {"type": "object", "properties": {}, "required": []}, } + # Add context fields if agent has a context type (Agent[MyContext]) + context_type = get_agent_context_type(agent) + if context_type is not None and _is_pydantic_model(context_type): + try: + adapter = TypeAdapter(context_type) + context_schema = adapter.json_schema() + unpacked = _resolve_refs(context_schema) + + # Merge context properties with messages + context_props = _process_nullable_types(unpacked.get("properties", {})) + input_properties.update(context_props) + + # Add context required fields (messages is already required) + for field in unpacked.get("required", []): + if field not in input_required: + input_required.append(field) + except Exception: + pass + # Extract output schema - Agent's output_type (native OpenAI Agents pattern) output_type = getattr(agent, "output_type", None) output_extracted = False @@ -177,41 
+226,52 @@ def _add_agent_and_tools(current_agent: Agent) -> None: ) ) - # Process tools - separate agent-tools from regular tools + # Process tools - separate agent-tools from regular function tools tools = getattr(current_agent, "tools", None) or [] - agent_tools: list[Agent] = [] + agent_tools: list[tuple[str, Agent]] = [] # (tool_name, agent) regular_tools: list[Any] = [] for tool in tools: if isinstance(tool, Agent): - agent_tools.append(tool) + # Direct Agent instance + agent_name_str: str = getattr(tool, "name", None) or "agent" + agent_tools.append((agent_name_str, tool)) else: - regular_tools.append(tool) + # Check if this is an agent wrapped via .as_tool() + wrapped_agent = _extract_agent_from_tool(tool) + if wrapped_agent is not None: + # Use the tool's name (e.g., "translate_to_spanish") for the edge + tool_name_str: str = ( + _get_tool_name(tool) or getattr(wrapped_agent, "name", None) or "agent" + ) + agent_tools.append((tool_name_str, wrapped_agent)) + else: + regular_tools.append(tool) - # Process agent-tools (agents used as tools) - for tool_agent in agent_tools: - tool_agent_name = getattr(tool_agent, "name", _get_tool_name(tool_agent)) - if tool_agent_name and tool_agent_name not in visited: + # Process agent-tools (agents used as tools via .as_tool()) + for tool_name, tool_agent in agent_tools: + tool_agent_name = getattr(tool_agent, "name", "agent") + if tool_agent_name not in visited: # Recursively process agent-tool _add_agent_and_tools(tool_agent) - # Add edges for agent-tool - edges.append( - UiPathRuntimeEdge( - source=agent_name, - target=tool_agent_name, - label="tool_call", - ) + # Add edges for agent-tool (even if already visited, we need edges) + edges.append( + UiPathRuntimeEdge( + source=agent_name, + target=tool_agent_name, + label=tool_name, ) - edges.append( - UiPathRuntimeEdge( - source=tool_agent_name, - target=agent_name, - label="tool_result", - ) + ) + edges.append( + UiPathRuntimeEdge( + source=tool_agent_name, + 
target=agent_name, + label=None, ) + ) - # Process regular tools - aggregate into single tools node + # Process regular function tools - aggregate into single tools node if regular_tools: tool_names = [_get_tool_name(tool) for tool in regular_tools] tool_names = [name for name in tool_names if name] # Filter out None values diff --git a/packages/uipath-openai-agents/tests/test_context.py b/packages/uipath-openai-agents/tests/test_context.py new file mode 100644 index 0000000..d8a0f7c --- /dev/null +++ b/packages/uipath-openai-agents/tests/test_context.py @@ -0,0 +1,98 @@ +"""Tests for context type detection and handling.""" + +from typing import Any + +import pytest +from pydantic import BaseModel + +from uipath_openai_agents.runtime.context import ( + get_agent_context_type, + parse_input_to_context, +) + + +class UserContext(BaseModel): + """Context model with user info.""" + + user_id: str + tier: str = "standard" + + +class OptionalContext(BaseModel): + """Context model with all optional fields.""" + + session_id: str | None = None + metadata: dict[str, Any] | None = None + + +class TestParseInputToContext: + """Tests for parse_input_to_context function.""" + + def test_parse_context_excludes_messages(self) -> None: + """Test that 'messages' field is excluded from context.""" + input_dict = { + "messages": "Hello world", + "user_id": "user_123", + "tier": "premium", + } + context = parse_input_to_context(input_dict, UserContext) + assert isinstance(context, UserContext) + + assert context.user_id == "user_123" + assert context.tier == "premium" + assert not hasattr(context, "messages") + + def test_parse_context_with_defaults(self) -> None: + """Test parsing with default values.""" + input_dict = {"messages": "Hello", "user_id": "user_456"} + context = parse_input_to_context(input_dict, UserContext) + assert isinstance(context, UserContext) + + assert context.user_id == "user_456" + assert context.tier == "standard" # default value + + def 
test_parse_all_optional_context(self) -> None: + """Test parsing context with all optional fields.""" + input_dict = {"messages": "Test message"} + context = parse_input_to_context(input_dict, OptionalContext) + assert isinstance(context, OptionalContext) + + assert context.session_id is None + assert context.metadata is None + + def test_parse_empty_input(self): + """Test parsing empty input with required fields fails.""" + with pytest.raises(ValueError): + parse_input_to_context({}, UserContext) + + def test_parse_none_input(self): + """Test parsing None input with required fields fails.""" + with pytest.raises(ValueError): + parse_input_to_context(None, UserContext) + + def test_parse_messages_only_input(self): + """Test that messages-only input fails when context has required fields.""" + with pytest.raises(ValueError): + parse_input_to_context({"messages": "Hello"}, UserContext) + + +class TestIntegration: + """Integration tests for context handling.""" + + def test_full_flow_with_context(self) -> None: + """Test the full flow: messages separate from context.""" + input_dict = { + "messages": "What are my features?", + "user_id": "user_789", + "tier": "enterprise", + } + + # Messages should be extracted separately (done by runtime) + messages = input_dict.get("messages", "") + assert messages == "What are my features?" + + # Context should exclude messages + context = parse_input_to_context(input_dict, UserContext) + assert isinstance(context, UserContext) + assert context.user_id == "user_789" + assert context.tier == "enterprise" diff --git a/packages/uipath-openai-agents/uv.lock b/packages/uipath-openai-agents/uv.lock index 49867ff..7a5d525 100644 --- a/packages/uipath-openai-agents/uv.lock +++ b/packages/uipath-openai-agents/uv.lock @@ -2318,7 +2318,7 @@ wheels = [ [[package]] name = "uipath-openai-agents" -version = "0.0.3" +version = "0.0.4" source = { editable = "." } dependencies = [ { name = "aiosqlite" },