Examples › Frameworks
LlamaIndex Integration
Using daimon.email with LlamaIndex agents
Overview
Integrate daimon.email with LlamaIndex to build agents that can query email data as part of RAG (Retrieval-Augmented Generation) pipelines, enabling intelligent email search and analysis.
Info
Email + RAG: Index your emails, query them with natural language, and build agents that can both retrieve email context and perform email operations.
Installation
npm install llamaindex daimon-email
# or
pip install llama-index daimon-email

Complete Implementation
import json
import os
import time

from llama_index.core import VectorStoreIndex, Document, Settings
from llama_index.core.tools import FunctionTool
from llama_index.agent.openai import OpenAIAgent
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI

from daimon_email import DaimonClient
# Initialize the daimon.email client; reads DAIMON_API_KEY from the environment.
daimon_client = DaimonClient(api_key=os.getenv('DAIMON_API_KEY'))
# Configure LlamaIndex global defaults: every index, query engine, and agent
# built below inherits this LLM and embedding model unless overridden.
Settings.llm = OpenAI(model="gpt-4")
Settings.embed_model = OpenAIEmbedding()
# Tool 1: Create Inbox
def create_inbox(username: str) -> str:
    """Create a new email inbox and return its address and id as JSON."""
    # Timestamp-based client id keeps repeated tool calls distinct.
    created = daimon_client.inboxes.create(
        username=username,
        client_id=f'llamaindex-{int(time.time())}'
    )
    result = created['result']
    payload = {
        'email': result['address'],
        'inbox_id': result['id']
    }
    return json.dumps(payload)
# Tool 2: Check Messages
def check_messages(inbox_id: str) -> str:
    """Check for new messages in an inbox; returns a JSON list of summaries."""
    messages = daimon_client.inboxes.messages.list(inbox_id)
    summaries = []
    # Cap the payload at the first 20 messages to keep the tool output small.
    for msg in messages[:20]:
        summaries.append({
            'from': msg['from'],
            'subject': msg['subject'],
            # Prefer the stripped reply body when the API provides one.
            'body': msg.get('reply_body') or msg['body'],
            'received_at': msg['received_at']
        })
    return json.dumps(summaries)
# Tool 3: Send Email
def send_email(inbox_id: str, to: str, subject: str, body: str) -> str:
    """Send an email from an inbox; returns a JSON success/error envelope."""
    message = {
        'to': to,
        'subject': subject,
        'body': body
    }
    try:
        result = daimon_client.inboxes.send(inbox_id, message)
        return json.dumps({'success': True, 'message_id': result['message_id']})
    except Exception as error:
        # Report the failure to the agent instead of raising, so the
        # conversation can continue and the model can react to the error.
        return json.dumps({'success': False, 'error': str(error)})
# Tool 4: Index Emails
def index_emails(inbox_id: str) -> VectorStoreIndex:
    """Create a vector index from inbox emails for semantic search."""
    messages = daimon_client.inboxes.messages.list(inbox_id)
    # One Document per message: headers inlined into the text so the
    # embedding captures sender and subject, raw fields kept as metadata.
    documents = []
    for msg in messages:
        documents.append(Document(
            text=f"From: {msg['from']}\nSubject: {msg['subject']}\n\n{msg['body']}",
            metadata={
                'message_id': msg['id'],
                'from': msg['from'],
                'subject': msg['subject'],
                'received_at': msg['received_at']
            }
        ))
    return VectorStoreIndex.from_documents(documents)
# Wrap the plain functions as LlamaIndex FunctionTools (name/description
# are derived from each function's signature and docstring).
create_inbox_tool = FunctionTool.from_defaults(fn=create_inbox)
check_messages_tool = FunctionTool.from_defaults(fn=check_messages)
send_email_tool = FunctionTool.from_defaults(fn=send_email)

# Build an agent that can call the email tools.
agent = OpenAIAgent.from_tools(
    [create_inbox_tool, check_messages_tool, send_email_tool],
    verbose=True
)

# Example 1: Basic email operations
response = agent.chat("Create an inbox with username 'llama-agent' and check for messages")
print(response)

# Example 2: Email RAG pipeline
inbox_id = 'inb_abc123'
index = index_emails(inbox_id)

# Query engine for semantic search over the indexed emails.
query_engine = index.as_query_engine()

# Query emails with natural language
response = query_engine.query("What are the action items from my recent emails?")
print(response)

# Find specific information
response = query_engine.query("Who sent me emails about the Q4 budget?")
print(response)

import {
Document,
VectorStoreIndex,
OpenAI,
OpenAIAgent
} from 'llamaindex';
import { DaimonClient } from 'daimon-email';
// Initialize clients: daimon.email for inbox operations, OpenAI as the agent LLM.
const daimonClient = new DaimonClient({ apiKey: process.env.DAIMON_API_KEY });
const llm = new OpenAI({ model: 'gpt-4' });
// Tool: create a new inbox and report its address/id as a JSON string.
async function createInbox(username: string): Promise<string> {
  const created = await daimonClient.inboxes.create({
    username,
    // Timestamp keeps repeated tool calls distinct.
    clientId: `llamaindex-${Date.now()}`
  });
  const summary = {
    email: created.result.address,
    inboxId: created.result.id
  };
  return JSON.stringify(summary);
}
// Tool: list up to 20 recent messages from an inbox as a JSON summary.
async function checkMessages(inboxId: string): Promise<string> {
  const messages = await daimonClient.inboxes.messages.list(inboxId);
  const summaries = [];
  for (const msg of messages.slice(0, 20)) {
    summaries.push({
      from: msg.from,
      subject: msg.subject,
      // Prefer the stripped reply body when the API provides one.
      body: msg.replyBody || msg.body,
      receivedAt: msg.receivedAt
    });
  }
  return JSON.stringify(summaries);
}
// Build a vector index over an inbox's messages for semantic search.
async function indexEmails(inboxId: string): Promise<VectorStoreIndex> {
  const messages = await daimonClient.inboxes.messages.list(inboxId);

  // One Document per message: headers inlined into the text so the
  // embedding captures sender and subject, raw fields kept as metadata.
  const documents = [];
  for (const msg of messages) {
    documents.push(new Document({
      text: `From: ${msg.from}\nSubject: ${msg.subject}\n\n${msg.body}`,
      metadata: {
        messageId: msg.id,
        from: msg.from,
        subject: msg.subject,
        receivedAt: msg.receivedAt
      }
    }));
  }

  return VectorStoreIndex.fromDocuments(documents);
}
// Assemble the agent with the email tools.
// NOTE(review): the `{ name, fn }` tool shape here is schematic — confirm
// against the installed llamaindex version, which may require FunctionTool.
const agent = new OpenAIAgent({
  tools: [
    { name: 'create_inbox', fn: createInbox },
    { name: 'check_messages', fn: checkMessages }
  ],
  llm
});

// Query emails
const inboxId = 'inb_abc123';
const index = await indexEmails(inboxId);
const queryEngine = index.asQueryEngine();
const response = await queryEngine.query('What are my recent action items?');
console.log(response.toString());

Use Case: Email Search & Analysis
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
# Build comprehensive email index
def build_email_knowledge_base(inbox_id: str):
    """Index all emails for semantic search."""
    messages = daimon_client.inboxes.messages.list(inbox_id)
    documents = []
    for msg in messages:
        # Flatten extracted link lists into comma-separated strings.
        links = ', '.join([link['url'] for link in msg.get('links', [])])
        cta_links = ', '.join([cta['url'] for cta in msg.get('cta_links', [])])
        # Create rich document with metadata; string lines start at column 0
        # so the embedded text carries no stray indentation.
        doc = Document(
            text=f"""
Email from {msg['from']}
Date: {msg['received_at']}
Subject: {msg['subject']}
{msg['body']}
Links: {links}
CTA Links: {cta_links}
""",
            metadata={
                'message_id': msg['id'],
                'from': msg['from'],
                'subject': msg['subject'],
                'received_at': msg['received_at'],
                'has_attachments': len(msg.get('attachments', [])) > 0
            }
        )
        documents.append(doc)
    # Create index
    return VectorStoreIndex.from_documents(documents)
# Use the index
inbox_id = 'inb_abc123'
index = build_email_knowledge_base(inbox_id)

# Retriever returns the 5 most similar emails for each query.
retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=5,
)
query_engine = RetrieverQueryEngine.from_args(
    retriever=retriever,
    llm=Settings.llm
)

# Complex queries
response = query_engine.query(
    "Summarize all emails from john@example.com about the project deadline"
)
print(response)
response = query_engine.query(
    "Find all emails with confirmation links that I haven't clicked yet"
)
print(response)

Use Case: Automated Email Assistant
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import QueryEngineTool
# Build email query tool
inbox_id = 'inb_abc123'
email_index = build_email_knowledge_base(inbox_id)
email_query_engine = email_index.as_query_engine()

# Expose the RAG query engine to the agent as just another tool.
email_query_tool = QueryEngineTool.from_defaults(
    query_engine=email_query_engine,
    name="email_search",
    description="Search and analyze emails using natural language queries"
)

# Create agent with email operations + search
agent = OpenAIAgent.from_tools(
    [
        create_inbox_tool,
        check_messages_tool,
        send_email_tool,
        email_query_tool
    ],
    verbose=True
)

# Agent can now search AND perform operations
response = agent.chat("""
Search my emails for any action items from my manager.
Then draft a response summarizing what I've completed.
""")
print(response)

Advanced: Multi-Modal Email Analysis
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.schema import ImageDocument
# For emails with attachments (images)
def index_email_with_images(inbox_id: str):
    """Index message text plus any image attachments for multi-modal search."""
    messages = daimon_client.inboxes.messages.list(inbox_id)
    documents = []
    for msg in messages:
        # Text document for every message.
        documents.append(Document(
            text=f"{msg['subject']}\n\n{msg['body']}",
            metadata={'message_id': msg['id']}
        ))
        # Image documents for image attachments only; other types are skipped.
        for attachment in msg.get('attachments', []):
            if not attachment['content_type'].startswith('image/'):
                continue
            documents.append(ImageDocument(
                image_url=attachment['url'],
                metadata={
                    'message_id': msg['id'],
                    'filename': attachment['filename']
                }
            ))
    # Multi-modal index
    index = VectorStoreIndex.from_documents(documents)
    return index

Use Case: Email Thread Analysis
# Analyze conversation threads
def analyze_thread(thread_id: str):
"""Analyze an email thread with LlamaIndex."""
thread = daimon_client.threads.get(thread_id)
messages = thread['messages']
# Create thread document
thread_text = "\n\n---\n\n".join([
f"From: {m['from']}\nDate: {m['received_at']}\n\n{m['body']}"
for m in messages
])
doc = Document(
text=thread_text,
metadata={
'thread_id': thread_id,
'participants': list(set([m['from'] for m in messages])),
'message_count': len(messages)
}
)
# Query the thread
index = VectorStoreIndex.from_documents([doc])
query_engine = index.as_query_engine()
# Summarize
summary = query_engine.query("Summarize this email thread in 3 bullet points")
print(summary)
# Extract action items
actions = query_engine.query("What are the action items from this thread?")
print(actions)
return {
'summary': str(summary),
'action_items': str(actions)
    }

Chat Engine for Email Conversations
from llama_index.core.chat_engine import ContextChatEngine
# Build chat engine over emails; "context" mode retrieves relevant emails
# and feeds them to the LLM as context for each chat turn.
inbox_id = 'inb_abc123'
index = build_email_knowledge_base(inbox_id)
chat_engine = index.as_chat_engine(
    chat_mode="context",
    verbose=True
)

# Have a multi-turn conversation about the indexed emails; the engine
# keeps chat history, so follow-ups like "them" resolve correctly.
response = chat_engine.chat("What emails did I receive today?")
print(response)

response = chat_engine.chat("Are any of them urgent?")
print(response)

response = chat_engine.chat("Draft a response to the most urgent one")
print(response)

Streaming Responses
from llama_index.core.response.streaming_response import StreamingResponse
# Stream query results
query_engine = index.as_query_engine(streaming=True)
response = query_engine.query("Summarize my emails from this week")
# Stream tokens as they arrive
for text in response.response_gen:
    print(text, end="", flush=True)

Persistent Index
from llama_index.core.storage.storage_context import StorageContext
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.vector_stores.chroma import ChromaVectorStore
import chromadb
# Use ChromaDB for persistent storage so the index survives process restarts.
chroma_client = chromadb.PersistentClient(path="./email_index")
# get_or_create_collection is idempotent: re-running this script reuses the
# existing collection instead of raising because "emails" already exists
# (create_collection fails on a duplicate name in a persistent store).
chroma_collection = chroma_client.get_or_create_collection("emails")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Build index with persistence; embeddings are written under ./email_index.
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context
)

# Later: reload the index from the persisted vector store without re-embedding.
index = VectorStoreIndex.from_vector_store(
    vector_store,
    storage_context=storage_context
)