150 changes: 56 additions & 94 deletions packages/uipath-openai-agents/samples/agent-as-tools/main.py
@@ -1,9 +1,8 @@
import dotenv
from agents import Agent, AgentOutputSchema, Runner, trace
from agents import Agent, AgentOutputSchema
from agents.models import _openai_shared
from pydantic import BaseModel, Field
from uipath.tracing import traced

dotenv.load_dotenv()
from uipath_openai_agents.chat import UiPathChatOpenAI

"""
This example shows the agents-as-tools pattern adapted for UiPath coded agents.
@@ -39,95 +38,58 @@ class TranslationOutput(BaseModel):
)


spanish_agent = Agent(
name="spanish_agent",
instructions="You translate the user's message to Spanish",
handoff_description="An english to spanish translator",
)

french_agent = Agent(
name="french_agent",
instructions="You translate the user's message to French",
handoff_description="An english to french translator",
)

italian_agent = Agent(
name="italian_agent",
instructions="You translate the user's message to Italian",
handoff_description="An english to italian translator",
)

# Orchestrator agent that uses other agents as tools
# Uses output_type for structured outputs (native OpenAI Agents pattern)
# Note: Using AgentOutputSchema with strict_json_schema=False because
# dict[str, str] is not compatible with OpenAI's strict JSON schema mode
orchestrator_agent = Agent(
name="orchestrator_agent",
instructions=(
"You are a translation agent. You use the tools given to you to translate. "
"If asked for multiple translations, you call the relevant tools in order. "
"You never translate on your own, you always use the provided tools."
),
tools=[
spanish_agent.as_tool(
tool_name="translate_to_spanish",
tool_description="Translate the user's message to Spanish",
),
french_agent.as_tool(
tool_name="translate_to_french",
tool_description="Translate the user's message to French",
),
italian_agent.as_tool(
tool_name="translate_to_italian",
tool_description="Translate the user's message to Italian",
def main() -> Agent:
"""Configure UiPath OpenAI client and return the orchestrator agent."""
# Configure UiPath OpenAI client for agent execution
# This routes all OpenAI API calls through UiPath's LLM Gateway
uipath_openai_client = UiPathChatOpenAI(model_name="gpt-4o-2024-11-20")
_openai_shared.set_default_openai_client(uipath_openai_client.async_client)

# Define specialized translation agents
spanish_agent = Agent(
name="spanish_agent",
instructions="You translate the user's message to Spanish",
handoff_description="An english to spanish translator",
)

french_agent = Agent(
name="french_agent",
instructions="You translate the user's message to French",
handoff_description="An english to french translator",
)

italian_agent = Agent(
name="italian_agent",
instructions="You translate the user's message to Italian",
handoff_description="An english to italian translator",
)

# Orchestrator agent that uses other agents as tools
# Uses output_type for structured outputs (native OpenAI Agents pattern)
# Note: Using AgentOutputSchema with strict_json_schema=False because
# dict[str, str] is not compatible with OpenAI's strict JSON schema mode
orchestrator_agent = Agent(
name="orchestrator_agent",
instructions=(
"You are a translation agent. You use the tools given to you to translate. "
"If asked for multiple translations, you call the relevant tools in order. "
"You never translate on your own, you always use the provided tools."
),
],
output_type=AgentOutputSchema(TranslationOutput, strict_json_schema=False),
)


@traced(name="Translation Orchestrator Main")
async def main(input_data: TranslationInput) -> TranslationOutput:
"""
Main function to orchestrate translations using agent-as-tools pattern.

This function demonstrates parameter inference - the Input/Output models
are automatically extracted to generate schemas for UiPath workflows.

Args:
input_data: Input containing text and target languages

Returns:
TranslationOutput: Result containing translations for requested languages
"""
print(f"\nTranslating: '{input_data.text}'")
print(f"Target languages: {', '.join(input_data.target_languages)}\n")

# Build the prompt based on requested languages
language_list = ", ".join(input_data.target_languages)
prompt = f"Translate this text to {language_list}: {input_data.text}"

with trace("Translation Orchestrator"):
# Run the orchestrator agent
result = await Runner.run(
starting_agent=orchestrator_agent,
input=[{"content": prompt, "role": "user"}],
)

# Extract translations from the response
# In a real implementation, you'd parse the structured response
final_response = result.final_output
print(f"\nAgent response: {final_response}\n")

# For demonstration, create structured output
# In production, you'd parse the agent's structured response
translations = {}
for lang in input_data.target_languages:
# Placeholder - in real usage, extract from agent response
translations[lang] = f"[Translation to {lang}]"

return TranslationOutput(
original_text=input_data.text,
translations=translations,
languages_used=input_data.target_languages,
tools=[
spanish_agent.as_tool(
tool_name="translate_to_spanish",
tool_description="Translate the user's message to Spanish",
),
french_agent.as_tool(
tool_name="translate_to_french",
tool_description="Translate the user's message to French",
),
italian_agent.as_tool(
tool_name="translate_to_italian",
tool_description="Translate the user's message to Italian",
),
],
output_type=AgentOutputSchema(TranslationOutput, strict_json_schema=False),
)

return orchestrator_agent
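With the entry point now returning the agent instead of running it, the removed `Runner.run` call moves out of the sample. For local testing, a minimal driver along these lines should still work; this is a sketch, not part of the PR, and it assumes the file is importable as `main` while reusing the `Runner.run` call shape from the deleted code:

```python
# Illustrative local driver for the refactored agent-as-tools sample.
# In production the UiPath runtime imports main() via the agents config instead.
import asyncio

from agents import Runner

from main import main  # the entry point defined above


async def run_locally() -> None:
    orchestrator = main()  # configures the UiPath client and builds the agent graph
    result = await Runner.run(
        starting_agent=orchestrator,
        input=[{"content": "Translate this to Spanish and French: Hello!", "role": "user"}],
    )
    # final_output is a TranslationOutput because output_type is set on the orchestrator
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(run_locally())
```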
@@ -1,5 +1,5 @@
{
"agents": {
"agent": "main.py:orchestrator_agent"
"agent": "main.py:main"
}
}
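The config entry now names a callable (`main.py:main`) rather than a module-level `Agent`. The loader itself is not part of this diff; purely as an illustration of why both forms can work, a `module.py:attribute` reference could be resolved roughly like this (hypothetical helper, not UiPath's actual implementation):

```python
# Hypothetical entry-point resolver, for illustration only; the real UiPath
# loader is not shown in this PR. It demonstrates why "main.py:main" (a
# callable that builds the agent) and "main.py:orchestrator_agent" (a
# module-level Agent) can both be supported.
import importlib.util

from agents import Agent


def resolve_agent(entry_point: str) -> Agent:
    module_path, _, attr_name = entry_point.partition(":")
    spec = importlib.util.spec_from_file_location("agent_module", module_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    target = getattr(module, attr_name)
    # Agent instances are not callable, so call factories and return agents as-is.
    return target() if callable(target) else target
```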
54 changes: 16 additions & 38 deletions packages/uipath-openai-agents/samples/rag-assistant/main.py
@@ -9,12 +9,11 @@
- Streaming responses support
"""

import dotenv
from agents import Agent, Runner
from agents import Agent
from agents.models import _openai_shared
from pydantic import BaseModel, Field
from uipath.tracing import traced

dotenv.load_dotenv()
from uipath_openai_agents.chat import UiPathChatOpenAI


# Required Input/Output models for UiPath coded agents
@@ -31,46 +30,25 @@ class Output(BaseModel):
agent_used: str = Field(description="The name of the agent that answered")


# Define the assistant agent
# Model defaults to gpt-4.1 which automatically maps to gpt-4o-2024-11-20
assistant_agent = Agent(
name="assistant_agent",
instructions="""You are a helpful AI assistant that provides clear, concise answers.
def main() -> Agent:
"""Configure UiPath OpenAI client and return the assistant agent."""
# Configure UiPath OpenAI client for agent execution
# This routes all OpenAI API calls through UiPath's LLM Gateway
uipath_openai_client = UiPathChatOpenAI(model_name="gpt-4o-2024-11-20")
_openai_shared.set_default_openai_client(uipath_openai_client.async_client)

# Define the assistant agent
# Model defaults to gpt-4.1 which automatically maps to gpt-4o-2024-11-20
assistant_agent = Agent(
name="assistant_agent",
instructions="""You are a helpful AI assistant that provides clear, concise answers.

Your capabilities:
- Answer questions accurately
- Provide well-structured responses
- Be helpful and informative

Always aim for clarity and accuracy in your responses.""",
)


@traced(name="RAG Assistant Main")
async def main(input_data: Input) -> Output:
"""Main function for RAG assistant using OpenAI Agents SDK.

This function demonstrates the basic OpenAI Agents pattern with UiPath integration.

Args:
input_data: Input containing the question to ask

Returns:
Output: Result containing the answer and agent used
"""
print(f"\n🔍 Question: {input_data.question}\n")

# Run the assistant agent (non-streaming for simplicity)
result = await Runner.run(
starting_agent=assistant_agent,
input=[{"content": input_data.question, "role": "user"}],
)

# Extract the final response
final_response = result.final_output
agent_used = result.current_agent.name

print(f"\n💬 Answer: {final_response}")
print(f"✅ Agent used: {agent_used}\n")

return Output(answer=final_response, agent_used=agent_used)
return assistant_agent
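Since the sample no longer runs the agent itself, answering a question locally falls to the caller; a minimal sketch reusing the `Runner.run` pattern from the deleted code (illustrative, assuming the file is importable as `main`):

```python
# Illustrative local driver for the RAG assistant sample; the UiPath runtime
# normally invokes main() through the agents config instead.
import asyncio

from agents import Runner

from main import Output, main


async def ask(question: str) -> Output:
    assistant = main()  # sets the UiPath client as default and builds the agent
    result = await Runner.run(
        starting_agent=assistant,
        input=[{"content": question, "role": "user"}],
    )
    return Output(answer=str(result.final_output), agent_used=assistant.name)


if __name__ == "__main__":
    print(asyncio.run(ask("What does UiPath's LLM Gateway do?")))
```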
@@ -1,5 +1,5 @@
{
"agents": {
"agent": "main.py:assistant_agent"
"agent": "main.py:main"
}
}
114 changes: 34 additions & 80 deletions packages/uipath-openai-agents/samples/triage-agent/main.py
@@ -1,12 +1,8 @@
import asyncio

import dotenv
from agents import Agent, RawResponsesStreamEvent, Runner, trace
from openai.types.responses import ResponseContentPartDoneEvent, ResponseTextDeltaEvent
from agents import Agent
from agents.models import _openai_shared
from pydantic import BaseModel
from uipath.tracing import traced

dotenv.load_dotenv()
from uipath_openai_agents.chat import UiPathChatOpenAI

"""
This example shows the handoffs/routing pattern adapted for UiPath coded agents.
@@ -31,76 +27,34 @@ class Output(BaseModel):
agent_used: str


# Define specialized agents for different languages
french_agent = Agent(
name="french_agent",
instructions="You only speak French",
)

spanish_agent = Agent(
name="spanish_agent",
instructions="You only speak Spanish",
)

english_agent = Agent(
name="english_agent",
instructions="You only speak English",
)

# Triage agent routes to appropriate language agent
triage_agent = Agent(
name="triage_agent",
instructions="Handoff to the appropriate agent based on the language of the request.",
handoffs=[french_agent, spanish_agent, english_agent],
)


@traced(name="Language Routing Agent Main")
async def main(input_data: Input) -> Output:
"""Main function to run the language routing agent.

Args:
input_data: Input model with a message for the agent.

Returns:
Output: Result containing the agent's response and which agent was used.
"""
print(f"\nProcessing message: {input_data.message}")

with trace("Language Routing Agent"):
# Run the agent with streaming
result = Runner.run_streamed(
triage_agent,
input=[{"content": input_data.message, "role": "user"}],
)

# Collect the response
response_parts = []
async for event in result.stream_events():
if not isinstance(event, RawResponsesStreamEvent):
continue
data = event.data
if isinstance(data, ResponseTextDeltaEvent):
print(data.delta, end="", flush=True)
response_parts.append(data.delta)
elif isinstance(data, ResponseContentPartDoneEvent):
print()

# Get the final response and agent used
final_response = "".join(response_parts)
agent_used = result.current_agent.name

print(f"\n\nAgent used: {agent_used}")
return Output(response=final_response, agent_used=agent_used)


if __name__ == "__main__":
# Example usage with different languages:
# 1. English message
# asyncio.run(main(Input(message="Hello, how are you?")))

# 2. French message
# asyncio.run(main(Input(message="Bonjour, comment allez-vous?")))

# 3. Spanish message
asyncio.run(main(Input(message="Hola, ¿cómo estás?")))
def main() -> Agent:
"""Configure UiPath OpenAI client and return the triage agent."""
# Configure UiPath OpenAI client for agent execution
# This routes all OpenAI API calls through UiPath's LLM Gateway
uipath_openai_client = UiPathChatOpenAI(model_name="gpt-4o-2024-11-20")
_openai_shared.set_default_openai_client(uipath_openai_client.async_client)

# Define specialized agents for different languages
french_agent = Agent(
name="french_agent",
instructions="You only speak French",
)

spanish_agent = Agent(
name="spanish_agent",
instructions="You only speak Spanish",
)

english_agent = Agent(
name="english_agent",
instructions="You only speak English",
)

# Triage agent routes to appropriate language agent
triage_agent = Agent(
name="triage_agent",
instructions="Handoff to the appropriate agent based on the language of the request.",
handoffs=[french_agent, spanish_agent, english_agent],
)

return triage_agent
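The streaming driver that was removed from this sample can still be reproduced outside the entry point if needed; a rough sketch that mirrors the deleted code (illustrative, assuming the file is importable as `main`):

```python
# Illustrative streaming driver for the triage sample, mirroring the removed
# Runner.run_streamed loop; the UiPath runtime normally imports main() via
# the agents config instead of running this script.
import asyncio

from agents import RawResponsesStreamEvent, Runner
from openai.types.responses import ResponseTextDeltaEvent

from main import main


async def route(message: str) -> str:
    triage_agent = main()  # configures the UiPath client and builds the handoff graph
    result = Runner.run_streamed(
        triage_agent,
        input=[{"content": message, "role": "user"}],
    )
    parts: list[str] = []
    async for event in result.stream_events():
        if isinstance(event, RawResponsesStreamEvent) and isinstance(
            event.data, ResponseTextDeltaEvent
        ):
            parts.append(event.data.delta)
    print(f"Agent used: {result.current_agent.name}")
    return "".join(parts)


if __name__ == "__main__":
    print(asyncio.run(route("Hola, ¿cómo estás?")))
```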
@@ -1,5 +1,5 @@
{
"agents": {
"agent": "main.py:triage_agent"
"agent": "main.py:main"
}
}