Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions packages/uipath-llamaindex/samples/chat-agent/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# Literature Chat Agent

An AI assistant using LlamaIndex and Tavily search for literature research and recommendations.

## Requirements

- Python 3.11+
- OpenAI API key
- Tavily API key

## Installation

```bash
uv venv -p 3.11 .venv
source .venv/bin/activate # On Windows: .venv\Scripts\activate
uv sync
```

Set your API keys as environment variables in .env

```bash
OPENAI_API_KEY=your_openai_api_key
TAVILY_API_KEY=your_tavily_api_key
```

## Usage

```bash
uipath run agent '{"user_msg": "Tell me about 1984 by George Orwell"}'
```
6 changes: 6 additions & 0 deletions packages/uipath-llamaindex/samples/chat-agent/agent.mermaid
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
flowchart TB
__start__(__start__)
chat(chat)
__end__(__end__)
__start__ --> |ChatInput|chat
chat --> |StopEvent|__end__
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"workflows": {
"agent": "main.py:agent"
}
}
35 changes: 35 additions & 0 deletions packages/uipath-llamaindex/samples/chat-agent/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import os

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
from llama_index.tools.tavily_research import TavilyToolSpec

# LLM used for every agent turn.
# NOTE(review): OpenAI() presumably reads OPENAI_API_KEY from the environment
# (see README) — confirm against the deployment's .env handling.
llm = OpenAI(model="gpt-4o-mini")
# Tavily web-search tool; raises KeyError at import time if TAVILY_API_KEY is unset.
tavily_tool = TavilyToolSpec(api_key=os.environ["TAVILY_API_KEY"])

# System prompt fixing the assistant's persona: a literature-research agent
# with five enumerated capabilities. Kept as one parenthesized literal so the
# exact wording sent to the model is visible in one place.
SYSTEM_PROMPT = (
    "You are an advanced AI assistant specializing in book research and literature analysis. "
    "Your primary functions are:\n\n"
    "1. Book Information Research: Gather comprehensive information about books, including plot summaries, "
    "themes, publishing details, sales performance, critical reception, and awards.\n"
    "2. Author Research: Provide detailed information about authors, translators, editors, and other "
    "publishing industry professionals.\n"
    "3. Book Recommendations: Suggest books based on user preferences, genres, themes, or similar books "
    "they have enjoyed.\n"
    "4. Publishing Industry Analysis: Analyze trends, bestseller data, genre popularity, and insights "
    "from the literary world.\n"
    "5. Book Trivia and Facts: Share interesting facts, behind-the-scenes stories, and trivia about "
    "books, authors, and the publishing industry.\n\n"
    "Use the search tool for recent or factual information. "
    "Remember previous messages and maintain context across the discussion."
)

# Module-level agent instance referenced by llama_index.json ("main.py:agent")
# and by chat() below; equipped only with the Tavily search tools.
agent = FunctionAgent(
    tools=tavily_tool.to_tool_list(),
    llm=llm,
    system_prompt=SYSTEM_PROMPT
)

async def chat(user_input: str) -> str:
    """Send *user_input* to the literature agent and return its reply as text."""
    return str(await agent.run(user_msg=user_input))
18 changes: 18 additions & 0 deletions packages/uipath-llamaindex/samples/chat-agent/pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
[project]
name = "chat-agent"
version = "0.0.1"
description = "chat-agent"
authors = [{ name = "John Doe", email = "john.doe@myemail.com" }]
dependencies = [
    "uipath-llamaindex>=0.5.0, <0.6.0",
    "llama-index-llms-openai>=0.6.18",
    "llama-index-tools-tavily-research>=0.4.2",
]
requires-python = ">=3.11"


[dependency-groups]
dev = [
"uipath-dev>=0.0.19",
]
14 changes: 14 additions & 0 deletions packages/uipath-llamaindex/samples/chat-agent/uipath.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
{
"$schema": "https://cloud.uipath.com/draft/2024-12/uipath",
"runtimeOptions": {
"isConversational": false
},
"packOptions": {
"fileExtensionsIncluded": [],
"filesIncluded": [],
"filesExcluded": [],
"directoriesExcluded": [],
"includeUvLock": true
},
"functions": {}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
"""Re-export the chat message mapper as this package's public API."""

from uipath_llamaindex.runtime.chat.messages import UiPathChatMessagesMapper

__all__ = ["UiPathChatMessagesMapper"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,261 @@
import logging
from datetime import datetime, timezone
from typing import Any
from uuid import uuid4

from llama_index.core.agent.workflow.workflow_events import (
AgentOutput,
AgentStream,
ToolCall,
ToolCallResult,
)
from uipath.core.chat import (
UiPathConversationContentPartChunkEvent,
UiPathConversationContentPartEndEvent,
UiPathConversationContentPartEvent,
UiPathConversationContentPartStartEvent,
UiPathConversationMessageEndEvent,
UiPathConversationMessageEvent,
UiPathConversationMessageStartEvent,
UiPathConversationToolCallEndEvent,
UiPathConversationToolCallEvent,
UiPathConversationToolCallStartEvent,
)

# Module-level logger, namespaced to this module per stdlib convention.
logger = logging.getLogger(__name__)


class UiPathChatMessagesMapper:
    """Stateful mapper that converts LlamaIndex agent events to UiPath message events.

    Maintains state across events to properly track:
    - The current AI message ID (generated per agent turn, since LlamaIndex
      doesn't provide one)
    - Pending tool calls per message ID for correct message_end timing
    """

    def __init__(self, runtime_id: str) -> None:
        self.runtime_id = runtime_id
        # ID of the in-flight assistant message, or None between turns.
        self._current_message_id: str | None = None
        # message_id -> set of tool_ids still pending completion
        self._pending_tool_calls: dict[str, set[str]] = {}
        # tool_id -> message_id for correlating ToolCallResult with its parent AI message
        self._tool_id_to_message_id: dict[str, str] = {}

    @staticmethod
    def map_input(input: dict[str, Any]) -> dict[str, Any]:
        """Map UiPath chat message format to LlamaIndex expected format.

        If the input already has 'user_msg', return it unchanged.
        If the input has a 'messages' array (UiPath chat format), extract the
        text content from the first message's contentParts and map it to
        'user_msg'. Anything unrecognized is passed through untouched.
        """
        if "user_msg" in input:
            return input

        messages = input.get("messages")
        if not messages or not isinstance(messages, list):
            return input

        first_message = messages[0]
        content_parts = first_message.get("contentParts", [])

        # Keep only non-empty inline text/plain parts.
        text_parts = [
            part["data"]["inline"]
            for part in content_parts
            if part.get("mimeType") == "text/plain"
            and isinstance(part.get("data"), dict)
            and part["data"].get("inline")
        ]

        if text_parts:
            return {"user_msg": " ".join(text_parts)}

        return input

    def get_timestamp(self) -> str:
        """Format current time as ISO 8601 UTC with milliseconds: 2025-01-04T10:30:00.123Z"""
        return (
            datetime.now(timezone.utc)
            .isoformat(timespec="milliseconds")
            .replace("+00:00", "Z")
        )

    def get_content_part_id(self, message_id: str) -> str:
        """Derive the (single) content-part ID deterministically from the message ID."""
        return f"chunk-{message_id}-0"

    async def map_event(
        self,
        event: AgentStream | AgentOutput | ToolCall | ToolCallResult,
    ) -> list[UiPathConversationMessageEvent] | None:
        """Convert a LlamaIndex agent event into UiPath conversation message events.

        Returns a list of events to emit, or None if the event should be skipped.
        """
        if isinstance(event, AgentStream):
            return self._map_agent_stream(event)

        if isinstance(event, AgentOutput):
            return self._map_agent_output(event)

        # ToolCallResult subclasses ToolCall in llama_index's workflow events,
        # so it must be checked BEFORE the plain ToolCall check. With the
        # checks reversed, the ToolCall branch swallows every result: tool
        # calls never get their end event and the parent message is never
        # closed.
        if isinstance(event, ToolCallResult):
            return self._map_tool_call_result(event)

        # ToolCall start is handled via AgentOutput to have the message_id available
        if isinstance(event, ToolCall):
            return None

        return None

    def _map_agent_stream(
        self, event: AgentStream
    ) -> list[UiPathConversationMessageEvent] | None:
        """Emit message_start on the first chunk of a turn, then one chunk per delta."""
        events: list[UiPathConversationMessageEvent] = []

        # First stream chunk of a new agent turn: generate a fresh message ID
        if self._current_message_id is None:
            self._current_message_id = str(uuid4())
            events.append(self._create_message_start_event(self._current_message_id))

        if event.delta:
            events.append(
                self._create_content_chunk_event(self._current_message_id, event.delta)
            )

        return events if events else None

    def _map_agent_output(
        self, event: AgentOutput
    ) -> list[UiPathConversationMessageEvent] | None:
        """Close the turn: either start tool calls or end the message outright."""
        message_id = self._current_message_id
        # Reset for the next turn regardless of outcome
        self._current_message_id = None

        # No streaming happened for this turn; nothing to correlate with.
        if message_id is None:
            return None

        events: list[UiPathConversationMessageEvent] = []

        if event.tool_calls:
            # Emit a tool_call_start event for each tool call and track them as pending
            pending: set[str] = set()
            for tool_call in event.tool_calls:
                self._tool_id_to_message_id[tool_call.tool_id] = message_id
                pending.add(tool_call.tool_id)
                events.append(
                    self._create_tool_call_start_event(
                        message_id=message_id,
                        tool_call_id=tool_call.tool_id,
                        tool_name=tool_call.tool_name,
                        input=tool_call.tool_kwargs,
                    )
                )
            self._pending_tool_calls[message_id] = pending
            # message_end will be emitted once the last ToolCallResult comes in
        else:
            # No tool calls: this is the final text response, close the message now
            events.append(self._create_message_end_event(message_id))

        return events if events else None

    def _map_tool_call_result(
        self, event: ToolCallResult
    ) -> list[UiPathConversationMessageEvent] | None:
        """Emit tool_call_end; also end the parent message once all its tools finish."""
        message_id = self._tool_id_to_message_id.pop(event.tool_id, None)
        if message_id is None:
            logger.warning(
                "ToolCallResult received for unknown tool_id '%s' — skipping.",
                event.tool_id,
            )
            return None

        output = event.tool_output.content if event.tool_output else None

        events: list[UiPathConversationMessageEvent] = [
            self._create_tool_call_end_event(
                message_id=message_id,
                tool_call_id=event.tool_id,
                output=output,
            )
        ]

        # Close the message once all tool calls for it have completed
        pending = self._pending_tool_calls.get(message_id)
        if pending is not None:
            pending.discard(event.tool_id)
            if not pending:
                del self._pending_tool_calls[message_id]
                events.append(self._create_message_end_event(message_id))

        return events

    # ── Factory helpers ────────────────────────────────────────────────────────

    def _create_message_start_event(
        self, message_id: str
    ) -> UiPathConversationMessageEvent:
        """Build the assistant message_start event with its text/plain content part."""
        return UiPathConversationMessageEvent(
            message_id=message_id,
            start=UiPathConversationMessageStartEvent(
                role="assistant", timestamp=self.get_timestamp()
            ),
            content_part=UiPathConversationContentPartEvent(
                content_part_id=self.get_content_part_id(message_id),
                start=UiPathConversationContentPartStartEvent(mime_type="text/plain"),
            ),
        )

    def _create_content_chunk_event(
        self, message_id: str, text: str
    ) -> UiPathConversationMessageEvent:
        """Build a streaming text chunk event for the message's content part."""
        return UiPathConversationMessageEvent(
            message_id=message_id,
            content_part=UiPathConversationContentPartEvent(
                content_part_id=self.get_content_part_id(message_id),
                chunk=UiPathConversationContentPartChunkEvent(data=text),
            ),
        )

    def _create_message_end_event(
        self, message_id: str
    ) -> UiPathConversationMessageEvent:
        """Build the event that ends both the content part and the message."""
        return UiPathConversationMessageEvent(
            message_id=message_id,
            end=UiPathConversationMessageEndEvent(),
            content_part=UiPathConversationContentPartEvent(
                content_part_id=self.get_content_part_id(message_id),
                end=UiPathConversationContentPartEndEvent(),
            ),
        )

    def _create_tool_call_start_event(
        self, message_id: str, tool_call_id: str, tool_name: str, input: dict[str, Any]
    ) -> UiPathConversationMessageEvent:
        """Build a tool_call_start event carrying the tool name and its kwargs."""
        return UiPathConversationMessageEvent(
            message_id=message_id,
            tool_call=UiPathConversationToolCallEvent(
                tool_call_id=tool_call_id,
                start=UiPathConversationToolCallStartEvent(
                    tool_name=tool_name,
                    timestamp=self.get_timestamp(),
                    input=input,
                ),
            ),
        )

    def _create_tool_call_end_event(
        self, message_id: str, tool_call_id: str, output: str | None
    ) -> UiPathConversationMessageEvent:
        """Build a tool_call_end event carrying the tool's textual output (if any)."""
        return UiPathConversationMessageEvent(
            message_id=message_id,
            tool_call=UiPathConversationToolCallEvent(
                tool_call_id=tool_call_id,
                end=UiPathConversationToolCallEndEvent(
                    timestamp=self.get_timestamp(),
                    output=output,
                ),
            ),
        )


# Explicit public surface of this module.
__all__ = ["UiPathChatMessagesMapper"]
Loading
Loading