Examples › Inbound
Support Bot
AI-powered customer support via email
Overview
A support bot provides AI-powered customer support via email, maintaining conversation context across threads and escalating to humans when needed. This example demonstrates thread management, context retention, and intelligent response generation.
Info
This pattern handles 70-80% of common support queries autonomously, reducing response time from hours to seconds.
How It Works
- Receive support requests via webhook
- Extract conversation context from thread history
- Use LLM to generate helpful responses
- Track conversation state and escalate complex issues
- Maintain thread continuity for multi-turn conversations
Complete Implementation
import { DaimonClient } from 'daimon-email';
import OpenAI from 'openai';
import express from 'express';
// Module-level clients shared by every handler below.
const client = new DaimonClient({ apiKey: process.env.DAIMON_API_KEY });
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const app = express();
// Webhook payloads arrive as JSON; parse request bodies up front.
app.use(express.json());
/**
 * One prior message flattened out of a thread
 * (getConversationContext builds these records).
 */
interface ThreadMessage {
  from: string;
  body: string;
  /** Taken from the API's receivedAt field — presumably an ISO timestamp; confirm against SDK docs. */
  timestamp: string;
}

/**
 * Conversation state carried between webhook invocations.
 */
interface ConversationContext {
  /** Daimon thread this conversation belongs to. */
  threadId: string;
  /** Prior messages in the thread (was `any[]`; typed so consumers get checking). */
  messageHistory: ThreadMessage[];
  /** Issue classification from thread metadata, 'unknown' if unset. */
  issueType: string;
  /** Whether the bot has marked the issue resolved. */
  resolved: boolean;
  /** Whether the conversation was handed to a human. */
  escalated: boolean;
}
// Step 1: Set up support inbox and webhook
// Step 1: Set up support inbox and webhook
/**
 * Provisions the support inbox and registers the inbound webhook.
 *
 * @returns the created inbox, a client authenticated with the
 *   inbox-scoped API key, and the webhook registration. The webhook
 *   was previously created but discarded; it is now returned so
 *   callers can verify or tear it down. Existing destructurers of
 *   `{ inbox, authedClient }` are unaffected.
 */
async function setupSupportBot() {
  const inbox = await client.inboxes.create({
    username: 'support',
    clientId: 'support-bot-v1'
  });
  console.log(`Support inbox: ${inbox.address}`);

  // Webhook management requires the inbox-scoped key.
  const authedClient = new DaimonClient({ apiKey: inbox.apiKey });
  const webhook = await authedClient.webhooks.create({
    endpointUrl: 'https://your-support-bot.com/webhook',
    events: ['message.received'],
    inboxId: inbox.id
  });

  return { inbox, authedClient, webhook };
}
// Step 2: Handle incoming support requests
// Step 2: Handle incoming support requests
// Express 4 does not catch rejected promises from async handlers, so a
// failure inside handleSupportRequest previously surfaced as an unhandled
// rejection and the webhook provider would keep re-delivering the event.
app.post('/webhook', async (req, res) => {
  const { event, message } = req.body ?? {};
  try {
    if (event === 'message.received') {
      await handleSupportRequest(message);
    }
  } catch (err) {
    // Ack anyway: log the failure rather than bounce a 500 back to the
    // provider, which would trigger redelivery of the same event.
    console.error('Failed to process webhook event:', err);
  }
  res.status(200).send('OK');
});
// Step 3: Process support request with context
// Step 3: Process support request with context
/**
 * Orchestrates one inbound support message: load the thread context,
 * ask the LLM for a reply, then either escalate to a human or answer
 * on the thread and record conversation state.
 */
async function handleSupportRequest(message: any) {
  console.log(`Support request from ${message.from}: ${message.subject}`);

  // Pull the full thread history so the model sees earlier turns.
  const ctx = await getConversationContext(message);

  // The model drafts the reply and decides whether a human is needed.
  const aiReply = await generateSupportResponse(message, ctx);

  if (aiReply.shouldEscalate) {
    // Hand off and stop — no automated answer is sent.
    await escalateToHuman(message, ctx, aiReply.reason);
    return;
  }

  // Reply on the same thread; clientId keeps the send idempotent.
  await client.inboxes.messages.reply(message.inboxId, message.id, {
    body: aiReply.text,
    clientId: `support-reply-${message.id}`
  });

  // Persist per-thread bookkeeping for later turns.
  await client.threads.update(message.threadId, {
    metadata: {
      last_bot_response: new Date().toISOString(),
      turns: ctx.messageHistory.length + 1,
      resolved: aiReply.resolved
    }
  });

  console.log(`Sent support response to ${message.from}`);
}
// Step 4: Get conversation context from thread
// Step 4: Get conversation context from thread
/**
 * Fetches the thread (including its messages) and flattens it into a
 * ConversationContext for the LLM prompt.
 */
async function getConversationContext(message: any): Promise<ConversationContext> {
  const thread = await client.threads.get(message.threadId, {
    includeMessages: true
  });

  // Prefer the stripped reply body when present; fall back to the raw body.
  const messageHistory = thread.messages.map((msg: any) => {
    return {
      from: msg.from,
      body: msg.replyBody || msg.body,
      timestamp: msg.receivedAt
    };
  });

  const meta = thread.metadata;
  return {
    threadId: message.threadId,
    messageHistory,
    issueType: meta?.issueType || 'unknown',
    resolved: meta?.resolved || false,
    escalated: meta?.escalated || false
  };
}
// Step 5: Generate intelligent response
// Step 5: Generate intelligent response
/**
 * Asks the LLM for a support reply in structured JSON form.
 *
 * @returns the parsed object `{ text, resolved, shouldEscalate, reason }`.
 * @throws Error when the model returns no content — the OpenAI SDK types
 *   `message.content` as `string | null`, and the previous code passed it
 *   straight to JSON.parse, producing a confusing failure on null.
 */
async function generateSupportResponse(message: any, context: ConversationContext) {
  const conversationHistory = context.messageHistory
    .map(msg => `${msg.from}: ${msg.body}`)
    .join('\n\n');

  const prompt = `You are a helpful customer support assistant. Based on this conversation:
Previous messages:
${conversationHistory}
New message from customer:
${message.replyBody || message.body}
Generate a helpful response. Also determine:
1. Can you fully resolve this issue? (true/false)
2. Should this be escalated to a human? (true/false)
3. If escalating, why?
Return JSON:
{
"text": "your response here",
"resolved": true/false,
"shouldEscalate": true/false,
"reason": "escalation reason or null"
}`;

  const completion = await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [
      {
        role: 'system',
        content: 'You are a professional customer support agent. Be helpful, concise, and empathetic.'
      },
      {
        role: 'user',
        content: prompt
      }
    ],
    // Forces the model to emit a single JSON object.
    response_format: { type: 'json_object' }
  });

  const content = completion.choices[0]?.message?.content;
  if (!content) {
    throw new Error('LLM returned an empty completion');
  }
  return JSON.parse(content);
}
// Step 6: Escalate complex issues to humans
// Step 6: Escalate complex issues to humans
/**
 * Routes a conversation to a human agent: emails the internal support
 * address, tells the customer a human will follow up, and marks the
 * thread as escalated in its metadata.
 */
async function escalateToHuman(message: any, context: ConversationContext, reason: string) {
  console.log(`Escalating to human: ${reason}`);
  // Send escalation notification
  await client.inboxes.send(message.inboxId, {
    to: 'human-support@yourcompany.com',
    subject: `[ESCALATED] ${message.subject}`,
    body: `Support bot escalated this conversation.
Reason: ${reason}
Original message:
From: ${message.from}
Subject: ${message.subject}
Thread: ${context.threadId}
View full conversation: https://app.daimon.email/threads/${context.threadId}`,
    clientId: `escalation-${message.id}`
  });
  // Notify customer
  // clientId makes this notice idempotent if the webhook is redelivered.
  await client.inboxes.messages.reply(message.inboxId, message.id, {
    body: `Thank you for your patience. I've forwarded your request to our specialist team. A human agent will respond within 2 hours.`,
    clientId: `escalation-notice-${message.id}`
  });
  // Mark thread as escalated
  await client.threads.update(context.threadId, {
    metadata: {
      escalated: true,
      escalated_at: new Date().toISOString(),
      escalation_reason: reason
    }
  });
}
// Start the support bot
// Start the support bot
// Provision the inbox/webhook first, then begin accepting deliveries.
setupSupportBot().then(() => {
  app.listen(3000, () => {
    console.log('Support bot running on port 3000');
  });
});

from daimon_email import DaimonClient
from openai import OpenAI
from flask import Flask, request
import os
import json
from datetime import datetime
from typing import Dict, List
# Module-level clients shared by every handler below.
client = DaimonClient(api_key=os.environ.get('DAIMON_API_KEY'))
openai_client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
app = Flask(__name__)
# Step 1: Set up support inbox and webhook
# Step 1: Set up support inbox and webhook
def setup_support_bot():
    """Provision the support inbox and register the inbound webhook.

    Returns:
        tuple: ``(inbox, authed_client)`` — the created inbox and a
        client authenticated with the inbox-scoped API key.
    """
    inbox = client.inboxes.create(
        username='support',
        client_id='support-bot-v1'
    )
    print(f"Support inbox: {inbox.address}")
    # Webhook management requires the inbox-scoped key.
    authed_client = DaimonClient(api_key=inbox.api_key)
    # The registration result is not needed afterwards, so it is not
    # bound (the previous `webhook = ...` local was never used).
    authed_client.webhooks.create(
        endpoint_url='https://your-support-bot.com/webhook',
        events=['message.received'],
        inbox_id=inbox.id
    )
    return inbox, authed_client
# Step 2: Handle incoming support requests
# Step 2: Handle incoming support requests
@app.post('/webhook')
def handle_webhook():
    """Webhook endpoint: dispatch ``message.received`` events.

    Always acknowledges with 200 so the provider does not re-deliver
    the event after a processing failure; errors are logged instead.
    """
    # get_json(silent=True) returns None on a bad/missing JSON body
    # instead of aborting with a 400/415 like `request.json` does.
    data = request.get_json(silent=True) or {}
    event = data.get('event')
    message = data.get('message')
    if event == 'message.received':
        try:
            handle_support_request(message)
        except Exception:
            # Re-raising would return a 500 and trigger provider retries.
            app.logger.exception('Failed to process support request')
    return 'OK', 200
# Step 3: Process support request with context
# Step 3: Process support request with context
def handle_support_request(message: Dict):
    """Handle one inbound support message end to end.

    Loads the thread context, asks the LLM for a reply, then either
    escalates to a human or replies on the thread and records state.
    """
    print(f"Support request from {message['from']}: {message['subject']}")

    # Full thread history so the model sees earlier turns.
    ctx = get_conversation_context(message)

    # The model drafts the reply and decides whether a human is needed.
    ai_reply = generate_support_response(message, ctx)

    if ai_reply['shouldEscalate']:
        # Hand off and stop — no automated answer is sent.
        escalate_to_human(message, ctx, ai_reply['reason'])
        return

    # Reply on the same thread; client_id keeps the send idempotent.
    client.inboxes.messages.reply(
        message['inbox_id'],
        message['id'],
        body=ai_reply['text'],
        client_id=f"support-reply-{message['id']}"
    )

    # Persist per-thread bookkeeping for later turns.
    client.threads.update(message['thread_id'], {
        'metadata': {
            'last_bot_response': datetime.now().isoformat(),
            'turns': len(ctx['message_history']) + 1,
            'resolved': ai_reply['resolved']
        }
    })
    print(f"Sent support response to {message['from']}")
# Step 4: Get conversation context from thread
# Step 4: Get conversation context from thread
def get_conversation_context(message: Dict) -> Dict:
    """Fetch the thread (with messages) and flatten it into a context dict."""
    thread = client.threads.get(
        message['thread_id'],
        include_messages=True
    )

    # Prefer the stripped reply body when present; fall back to the raw body.
    history: List[Dict] = []
    for msg in thread['messages']:
        history.append({
            'from': msg['from'],
            'body': msg.get('reply_body') or msg.get('body'),
            'timestamp': msg['received_at']
        })

    meta = thread.get('metadata', {})
    return {
        'thread_id': message['thread_id'],
        'message_history': history,
        'issue_type': meta.get('issue_type', 'unknown'),
        'resolved': meta.get('resolved', False),
        'escalated': meta.get('escalated', False)
    }
# Step 5: Generate intelligent response
# Step 5: Generate intelligent response
def generate_support_response(message: Dict, context: Dict):
    """Ask the LLM for a support reply in structured JSON form.

    Returns:
        dict: the parsed object with keys ``text``, ``resolved``,
        ``shouldEscalate`` and ``reason``.

    Raises:
        ValueError: if the model returns no content. The SDK types
        ``message.content`` as ``Optional[str]``; the previous code
        passed it straight to ``json.loads``, which raises a
        confusing TypeError on None.
    """
    conversation_history = '\n\n'.join([
        f"{msg['from']}: {msg['body']}"
        for msg in context['message_history']
    ])
    prompt = f"""You are a helpful customer support assistant. Based on this conversation:
Previous messages:
{conversation_history}
New message from customer:
{message.get('reply_body') or message.get('body')}
Generate a helpful response. Also determine:
1. Can you fully resolve this issue? (true/false)
2. Should this be escalated to a human? (true/false)
3. If escalating, why?
Return JSON:
{{
"text": "your response here",
"resolved": true/false,
"shouldEscalate": true/false,
"reason": "escalation reason or null"
}}"""
    completion = openai_client.chat.completions.create(
        model='gpt-4',
        messages=[
            {
                'role': 'system',
                'content': 'You are a professional customer support agent. Be helpful, concise, and empathetic.'
            },
            {
                'role': 'user',
                'content': prompt
            }
        ],
        # Forces the model to emit a single JSON object.
        response_format={'type': 'json_object'}
    )
    content = completion.choices[0].message.content
    if not content:
        raise ValueError('LLM returned an empty completion')
    return json.loads(content)
# Step 6: Escalate complex issues to humans
# Step 6: Escalate complex issues to humans
def escalate_to_human(message: Dict, context: Dict, reason: str):
    """Route a conversation to a human agent.

    Emails the internal support address, tells the customer a human
    will follow up, and marks the thread as escalated in its metadata.
    """
    print(f"Escalating to human: {reason}")
    # Send escalation notification
    client.inboxes.send(message['inbox_id'], {
        'to': 'human-support@yourcompany.com',
        'subject': f"[ESCALATED] {message['subject']}",
        'body': f"""Support bot escalated this conversation.
Reason: {reason}
Original message:
From: {message['from']}
Subject: {message['subject']}
Thread: {context['thread_id']}
View full conversation: https://app.daimon.email/threads/{context['thread_id']}""",
        'client_id': f"escalation-{message['id']}"
    })
    # Notify customer
    # client_id makes this notice idempotent if the webhook is redelivered.
    client.inboxes.messages.reply(
        message['inbox_id'],
        message['id'],
        body="Thank you for your patience. I've forwarded your request to our specialist team. A human agent will respond within 2 hours.",
        client_id=f"escalation-notice-{message['id']}"
    )
    # Mark thread as escalated
    client.threads.update(context['thread_id'], {
        'metadata': {
            'escalated': True,
            'escalated_at': datetime.now().isoformat(),
            'escalation_reason': reason
        }
    })
# Start the support bot
# Start the support bot
if __name__ == '__main__':
    # Provision the inbox/webhook, then serve webhook deliveries.
    setup_support_bot()
    app.run(port=3000)

Key Features
- Thread-aware: Maintains conversation context across multiple messages
- Smart escalation: Recognizes when human intervention is needed
- Metadata tracking: Stores conversation state in thread metadata
- Idempotent responses: Prevents duplicate replies