Code Examples
Ready-to-use examples showcasing Yohanun's intelligent processing and capabilities
Intelligent Customer Support Python
Sentiment Analysis
Auto-Routing
Memory
import requests
import json
class YohanunIntelligentSupport:
    """Client for the Yohanun AI support API: chat processing plus memory storage."""

    def __init__(self, api_key, base_url, timeout=30):
        """
        Args:
            api_key: Yohanun API key, sent via the X-API-Key header.
            base_url: Base URL of the Yohanun API (no trailing slash).
            timeout: Per-request timeout in seconds. `requests` has no
                default timeout, so without this a dead server would hang
                the caller indefinitely.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.timeout = timeout
        self.headers = {
            'X-API-Key': api_key,
            'Content-Type': 'application/json'
        }

    def process_customer_message(self, user_id, message, context=None):
        """Process customer message with AI chat system.

        Returns the decoded JSON response from POST /api/ai/chat.
        """
        payload = {
            "message": message,
            "context": {
                "user_id": user_id,
                # One session id per customer so the service can correlate turns.
                "session_id": f"support_{user_id}",
                **(context or {})
            }
        }
        response = requests.post(
            f"{self.base_url}/api/ai/chat",
            headers=self.headers,
            json=payload,
            timeout=self.timeout
        )
        return response.json()

    def store_memory(self, content, context, metadata=None):
        """Store customer interaction in memory via POST /api/memory/memories."""
        memory_payload = {
            "content": content,
            "context": context,
            "tags": ["support", "customer_interaction"],
            "metadata": metadata or {}
        }
        response = requests.post(
            f"{self.base_url}/api/memory/memories",
            headers=self.headers,
            json=memory_payload,
            timeout=self.timeout
        )
        return response.json()
# Usage Example: wire the client to a local Yohanun instance.
# NOTE(review): the API key below is a sample — keep real keys out of source.
support = YohanunIntelligentSupport(
    api_key="sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ",
    base_url="http://localhost:8000"
)
# Process customer messages
result = support.process_customer_message(
    "customer_123",
    "I'm really frustrated! The app crashed during checkout!",
    {"platform": "mobile", "app_version": "2.1.0"}
)
# NOTE(review): assumes the chat response always contains these keys —
# verify against the API's error shape before relying on direct indexing.
print("AI Response:", result["response"])
print("Intent:", result["intent"])
print("Model Used:", result["model_used"])
# Store the interaction in memory
support.store_memory(
    content=f"Customer frustrated with app crash: {result['response']}",
    context="support:customer_123",
    metadata={"priority": "high", "issue_type": "crash"}
)
Memory & Learning System Python
Persistent Memory
Context Learning
class YohanunMemoryManager:
    """Client for the Yohanun persistent-memory API (store, search, stats)."""

    def __init__(self, api_key, base_url, timeout=30):
        """
        Args:
            api_key: Yohanun API key, sent via the X-API-Key header.
            base_url: Base URL of the Yohanun API (no trailing slash).
            timeout: Per-request timeout in seconds. `requests` never times
                out by default, so an unreachable server would block forever.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.timeout = timeout
        self.headers = {
            'X-API-Key': api_key,
            'Content-Type': 'application/json'
        }

    def store_memory(self, content, context, metadata=None):
        """Store contextual memory; returns the decoded JSON response."""
        payload = {
            "content": content,
            "context": context,
            "tags": ["memory"],
            "metadata": metadata or {}
        }
        response = requests.post(
            f"{self.base_url}/api/memory/memories",
            headers=self.headers,
            json=payload,
            timeout=self.timeout
        )
        return response.json()

    def search_memory(self, query, limit=5):
        """Search stored memories, returning at most `limit` matches."""
        payload = {
            "query": query,
            "limit": limit
        }
        response = requests.post(
            f"{self.base_url}/api/memory/search",
            headers=self.headers,
            json=payload,
            timeout=self.timeout
        )
        return response.json()

    def get_stats(self):
        """Get memory system statistics from GET /api/memory/stats."""
        response = requests.get(
            f"{self.base_url}/api/memory/stats",
            headers=self.headers,
            timeout=self.timeout
        )
        return response.json()
# Usage Example: store a preference, then retrieve it by semantic search.
# NOTE(review): sample API key — keep real credentials out of source.
memory = YohanunMemoryManager(
    api_key="sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ",
    base_url="http://localhost:8000"
)
# Store user preferences
result = memory.store_memory(
    content="User Alice prefers morning meetings and uses iPhone",
    context="user_preferences_alice",
    metadata={"category": "preferences", "importance": "high"}
)
# Later, search for relevant context
search_results = memory.search_memory(
    query="Alice meeting preferences",
    limit=5
)
# Read "results" defensively once; the original indexed it directly on the
# last line, which raised KeyError exactly when the key was missing.
found = search_results.get("results", [])
print("Memory stored:", result.get("memory_id", "Success"))
print("Found memories:", len(found))
print("Search results:", found)
Intelligent Chat Widget JavaScript
Real-time Intelligence
Browser Compatible
// Lightweight browser chat client for the Yohanun AI chat endpoint.
class YohanunIntelligentChat {
    constructor(apiKey, baseUrl) {
        this.apiKey = apiKey;
        this.baseUrl = baseUrl;
        this.userId = this.getUserId();
    }

    // Return a stable anonymous user id, creating and persisting one in
    // localStorage on first visit so context survives page reloads.
    getUserId() {
        let userId = localStorage.getItem('yohanun_user_id');
        if (!userId) {
            // substr() is deprecated; slice(2, 11) yields the same 9 characters.
            userId = 'user_' + Math.random().toString(36).slice(2, 11);
            localStorage.setItem('yohanun_user_id', userId);
        }
        return userId;
    }

    // POST the message to /api/ai/chat and return the parsed JSON result.
    async processMessage(message) {
        const response = await fetch(`${this.baseUrl}/api/ai/chat`, {
            method: 'POST',
            headers: {
                'X-API-Key': this.apiKey,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                message: message,
                context: {
                    user_id: this.userId,
                    session_id: `chat_${Date.now()}`,
                    platform: 'web'
                }
            })
        });
        // fetch() only rejects on network failure; surface HTTP errors too so
        // sendMessage() shows its fallback instead of rendering `undefined`.
        if (!response.ok) {
            throw new Error(`Chat request failed: ${response.status}`);
        }
        return await response.json();
    }

    // Send a message and render the AI reply.
    // NOTE(review): showTyping/hideTyping/addMessage/showEscalationNotice are
    // UI hooks not defined in this snippet — they must be provided elsewhere
    // (e.g. by a subclass or mixin) or sendMessage() will throw.
    async sendMessage(message) {
        this.showTyping();
        try {
            const result = await this.processMessage(message);
            this.addMessage(result.response, 'ai');
            // Check if response indicates frustration or escalation needed
            if (result.intent && result.intent.includes('frustrated')) {
                this.showEscalationNotice();
            }
        } catch (error) {
            console.error('Chat error:', error);
            this.addMessage('Sorry, I encountered an error.', 'ai');
        } finally {
            this.hideTyping();
        }
    }
}
// Initialize the chat
// NOTE(review): sample credentials — never ship a raw API key to the browser
// in production; proxy chat requests through your own backend instead.
const chat = new YohanunIntelligentChat(
    'sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ',
    'http://localhost:8000'
);
Rules Management JavaScript
Dynamic Rules
Automation
// CRUD-style client for a Yohanun application's dynamic rules.
class YohanunRulesManager {
    constructor(apiKey, clientId, baseUrl) {
        this.apiKey = apiKey;
        this.clientId = clientId;
        this.baseUrl = baseUrl;
        this.headers = {
            'X-API-Key': apiKey,
            'Content-Type': 'application/json'
        };
    }

    // Build the rules endpoint URL for this client, with an optional suffix.
    _rulesUrl(suffix = '') {
        return `${this.baseUrl}/dev/app/${this.clientId}/rules${suffix}`;
    }

    // Create a new rule and return the server's JSON reply.
    async createRule(ruleName, conditions, action, actionData) {
        const payload = {
            name: ruleName,
            conditions: conditions,
            action: action,
            action_data: actionData
        };
        const res = await fetch(this._rulesUrl(), {
            method: 'POST',
            headers: this.headers,
            body: JSON.stringify(payload)
        });
        const created = await res.json();
        return created;
    }

    // List all rules for this client.
    async getRules() {
        const res = await fetch(this._rulesUrl(), { headers: this.headers });
        const rules = await res.json();
        return rules;
    }

    // Activate an existing rule by id.
    async activateRule(ruleId) {
        const res = await fetch(this._rulesUrl(`/${ruleId}/activate`), {
            method: 'PUT',
            headers: this.headers
        });
        const outcome = await res.json();
        return outcome;
    }
}
// Example: Create priority escalation rule
const rulesManager = new YohanunRulesManager(
    'yohanun_your_api_key_here',
    'your_client_id',
    'https://api.yohanun.ai'
);
// createRule() returns a promise; attach handlers so a failed request is
// reported instead of becoming an unhandled promise rejection.
rulesManager.createRule(
    "Priority Customer Escalation",
    {
        "sentiment": "negative",
        // presumably Mongo-style comparison syntax ($gte) — verify with the API docs
        "confidence": {"$gte": 0.8}
    },
    "escalate_to_human",
    {
        "team": "senior_support",
        "priority": "high"
    }
).then(rule => console.log('Rule created:', rule))
 .catch(err => console.error('Rule creation failed:', err));
Quick API Testing cURL
1. AI Chat Request
# Send a chat message; "context" carries the user and session identifiers.
$ curl -X POST http://localhost:8000/api/ai/chat \
  -H "X-API-Key: sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ" \
  -H "Content-Type: application/json" \
  -d '{
    "message": "I need help with a billing question",
    "context": {
      "user_id": "customer_123",
      "session_id": "support_session_456"
    }
  }'
2. Store Memory
# Persist a memory entry with tags and free-form metadata.
$ curl -X POST http://localhost:8000/api/memory/memories \
  -H "X-API-Key: sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ" \
  -H "Content-Type: application/json" \
  -d '{
    "content": "Customer prefers email notifications over SMS",
    "context": "user_preferences_customer_123",
    "tags": ["preferences", "notifications"],
    "metadata": {
      "category": "preferences",
      "importance": "medium"
    }
  }'
3. Search Memory
# Search stored memories; "limit" caps the number of results returned.
$ curl -X POST http://localhost:8000/api/memory/search \
  -H "X-API-Key: sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ" \
  -H "Content-Type: application/json" \
  -d '{
    "query": "notification preferences",
    "limit": 5
  }'
4. Get Memory Statistics
# Read aggregate memory-system statistics (no request body needed).
$ curl -X GET http://localhost:8000/api/memory/stats \
  -H "X-API-Key: sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ"
Production-Ready Intelligent Application Advanced
Full Stack Integration
Production Ready
Enterprise Features
AI + Memory
import asyncio
import logging
import aiohttp
import json
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, List, Optional
# Production-ready configuration
@dataclass
class SILEConfig:
    """Connection and runtime settings for the SILE API client."""
    api_key: str  # credential sent via the X-API-Key header
    base_url: str = "http://localhost:8000"  # API root, no trailing slash
    timeout: int = 30  # total per-request timeout, seconds
    max_retries: int = 3  # NOTE(review): not consumed by the visible code — confirm intended use
    log_level: str = "INFO"  # name passed to logging.basicConfig via getattr(logging, ...)
class IntelligentCustomerPlatform:
    """
    Production-ready intelligent customer platform powered by SILE.

    Features: AI chat, persistent memory, sentiment analysis, auto-escalation.
    Call :meth:`initialize` before processing messages and :meth:`close` when
    done (the aiohttp session must be released).
    """

    def __init__(self, config: "SILEConfig"):
        self.config = config
        # aiohttp session is created in initialize() so __init__ stays sync.
        self.session = None
        self.logger = self._setup_logging()
        self.headers = {
            'X-API-Key': config.api_key,
            'Content-Type': 'application/json'
        }

    def _setup_logging(self) -> logging.Logger:
        """Configure structured logging for production and return the logger."""
        logging.basicConfig(
            level=getattr(logging, self.config.log_level),
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        return logging.getLogger('intelligent_platform')

    async def initialize(self):
        """Initialize the async session and validate the API connection.

        Raises:
            ConnectionError: if the health endpoint responds with non-200.
        """
        self.session = aiohttp.ClientSession(
            timeout=aiohttp.ClientTimeout(total=self.config.timeout),
            headers=self.headers
        )
        # Validate API connection before accepting traffic.
        try:
            async with self.session.get(f"{self.config.base_url}/api/health") as response:
                if response.status == 200:
                    # (log markers were mojibake in the scraped source; plain text here)
                    self.logger.info("SILE API connection validated")
                else:
                    raise ConnectionError(f"API health check failed: {response.status}")
        except Exception as e:
            self.logger.error(f"Failed to connect to SILE API: {e}")
            raise

    async def process_customer_message(
        self,
        customer_id: str,
        message: str,
        context: Optional[Dict] = None
    ) -> Dict:
        """Process a customer message with full AI intelligence.

        Pipeline: recall relevant memories -> AI chat -> store the interaction
        -> auto-escalate if needed. Never raises: on failure it returns a dict
        with ``success=False`` and a human-readable fallback response.
        """
        self.logger.info(f"Processing message from customer {customer_id}")
        try:
            # Step 1: Get relevant memories for context
            memories = await self.search_customer_memories(customer_id, message)
            # Step 2: Process with AI chat system
            chat_payload = {
                "message": message,
                "context": {
                    "user_id": customer_id,
                    # Timestamped session id keeps each exchange distinct.
                    "session_id": f"support_{customer_id}_{datetime.now().timestamp()}",
                    "platform": "customer_platform",
                    "relevant_memories": memories,
                    **(context or {})
                }
            }
            async with self.session.post(
                f"{self.config.base_url}/api/ai/chat",
                json=chat_payload
            ) as response:
                if response.status == 200:
                    result = await response.json()
                else:
                    raise Exception(f"AI chat failed: {response.status}")
            # Step 3: Store interaction in memory (best-effort; never fatal)
            await self.store_interaction_memory(customer_id, message, result)
            # Step 4: Check for escalation needs
            if self._needs_escalation(result):
                await self.trigger_escalation(customer_id, result)
            self.logger.info(f"Successfully processed message for {customer_id}")
            return {
                "success": True,
                "response": result["response"],
                "intent": result.get("intent"),
                "model_used": result.get("model_used"),
                "escalated": self._needs_escalation(result),
                "timestamp": datetime.now().isoformat()
            }
        except Exception as e:
            self.logger.error(f"Error processing message: {e}")
            return {
                "success": False,
                "error": str(e),
                "fallback_response": "I'm experiencing technical difficulties. Let me connect you with a human agent."
            }

    async def search_customer_memories(self, customer_id: str, query: str) -> List[Dict]:
        """Search for relevant customer memories to enhance context.

        Best-effort: returns an empty list on any failure so chat proceeds.
        """
        try:
            search_payload = {
                "query": f"customer:{customer_id} {query}",
                "limit": 5
            }
            async with self.session.post(
                f"{self.config.base_url}/api/memory/search",
                json=search_payload
            ) as response:
                if response.status == 200:
                    data = await response.json()
                    return data.get("results", [])
                else:
                    self.logger.warning(f"Memory search failed: {response.status}")
                    return []
        except Exception as e:
            self.logger.warning(f"Memory search error: {e}")
            return []

    async def store_interaction_memory(self, customer_id: str, message: str, ai_result: Dict):
        """Store customer interaction for future context (best-effort)."""
        memory_payload = {
            "content": f"Customer: {message}\nAI: {ai_result.get('response', '')}",
            "context": f"customer_support_{customer_id}",
            "tags": ["support", "interaction", ai_result.get("intent", "general")],
            "metadata": {
                "customer_id": customer_id,
                "intent": ai_result.get("intent"),
                "model_used": ai_result.get("model_used"),
                "timestamp": datetime.now().isoformat()
            }
        }
        try:
            async with self.session.post(
                f"{self.config.base_url}/api/memory/memories",
                json=memory_payload
            ) as response:
                if response.status == 200:
                    self.logger.debug(f"Stored memory for customer {customer_id}")
        except Exception as e:
            # Memory storage must never break the main chat flow.
            self.logger.warning(f"Failed to store memory: {e}")

    def _needs_escalation(self, ai_result: Dict) -> bool:
        """Return True when the detected intent suggests human escalation."""
        intent = ai_result.get("intent", "").lower()
        escalation_intents = ["frustrated", "angry", "complaint", "refund", "cancel"]
        return any(trigger in intent for trigger in escalation_intents)

    async def trigger_escalation(self, customer_id: str, ai_result: Dict):
        """Trigger escalation to a human agent.

        Currently log-only; in production, integrate with a ticketing
        system, Slack, etc.
        """
        self.logger.info(f"Escalating customer {customer_id} to human agent")

    async def get_memory_stats(self) -> Dict:
        """Get memory system statistics; empty dict on non-200 responses."""
        async with self.session.get(f"{self.config.base_url}/api/memory/stats") as response:
            if response.status == 200:
                return await response.json()
            return {}

    async def close(self):
        """Clean up resources (close the aiohttp session if it was created)."""
        if self.session:
            await self.session.close()
        self.logger.info("Platform shut down cleanly")
# Production usage example
async def main():
    """Example of production deployment: run a few scripted interactions.

    (Print markers were mojibake in the scraped source; reconstructed as
    plain readable text.)
    """
    config = SILEConfig(
        api_key="sile_nidgbNEmjc9zmPx6lo9VLwTKGR9qHmuPgphMpeh3ltQ",
        base_url="http://localhost:8000",
        timeout=30
    )
    platform = IntelligentCustomerPlatform(config)
    try:
        await platform.initialize()
        # Example customer interactions
        scenarios = [
            ("cust_001", "Hi, I'm having trouble logging into my account"),
            ("cust_002", "I'm really frustrated! My order is late and customer service is terrible!"),
            ("cust_001", "Actually, I figured out the login issue. Thanks!")
        ]
        for customer_id, message in scenarios:
            print(f"\nProcessing: {customer_id}: {message}")
            result = await platform.process_customer_message(customer_id, message)
            if result["success"]:
                print(f"AI Response: {result['response']}")
                print(f"Intent: {result['intent']}")
                print(f"Model: {result['model_used']}")
                if result["escalated"]:
                    print("ESCALATED to human agent")
            else:
                print(f"Error: {result['error']}")
                print(f"Fallback: {result['fallback_response']}")
        # Show memory statistics
        stats = await platform.get_memory_stats()
        print(f"\nMemory Stats: {stats.get('total_memories', 0)} memories stored")
    finally:
        # Always release the aiohttp session, even when a scenario fails.
        await platform.close()


if __name__ == "__main__":
    asyncio.run(main())
Production Architecture
Application Flow
1
Customer message received
2
Search relevant memories
3
AI processes with context
4
Store interaction memory
5
Auto-escalate if needed
Key Features
AI-powered responses
Persistent customer memory
Intent detection
Auto-escalation
Error handling
Async performance