AI Integration System
Overview
PadawanForge implements an AI integration system that provides intelligent content generation, NPC interactions, and adaptive learning. The system draws on multiple AI models and providers to deliver personalized experiences and dynamically generated content.
Architecture
Core Components
- AIService: Central AI service management
- NpcAgent: Intelligent NPC behavior and interactions
- Model Management: Multi-provider AI model handling
- Content Generation: Dynamic statement and content creation
- Adaptive Learning: Performance-based difficulty adjustment
AI Features
- Multi-Provider Support: OpenAI, Anthropic, Cloudflare AI, and local models
- Model Selection: Adaptive model choice based on task requirements
- Content Generation: Educational statements and interactive content
- NPC Intelligence: Dynamic NPC behavior and conversations
- Performance Optimization: Efficient AI request handling
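The code in the following sections relies on a handful of shared types (AIRequest, AIResponse, AIModel, and related interfaces) that are not reproduced in this document. The sketch below is inferred from how the snippets use them and is intended as a reading aid rather than the authoritative definitions; the actual types in the codebase may carry additional fields.
// Minimal type sketch, inferred from usage in the snippets below (illustrative only)
type AITaskType = 'content_generation' | 'conversation' | 'analysis';

interface AIMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

interface AIRequest {
  id: string;
  taskType: AITaskType;
  messages: AIMessage[];
  modelId?: string;     // explicit model override
  temperature?: number;
  maxTokens?: number;
}

interface AIUsage {
  tokens: number;
  cost: number;
}

interface AIResponse {
  content: string;
  model: string;
  provider: string;
  usage: AIUsage;
  metadata?: Record<string, unknown>;
}

interface AIModel {
  id: string;
  provider: string;
  capabilities: string[]; // e.g. 'content_generation', 'conversation'
  performance: number;    // relative quality score used when ranking models
  maxTokens: number;
  costPerToken: number;
}

interface AIProvider {
  generate(request: AIRequest, model: AIModel): Promise<AIResponse>;
}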
AIService Implementation
Core AIService Class
import { AIModel, AIProvider, AIRequest, AIResponse } from '@/lib/services/AIService';
// Central AI service for managing multiple providers and models
class AIService {
private providers: Map<string, AIProvider>;
private models: Map<string, AIModel>;
private defaultProvider: string;
private circuitBreaker: CircuitBreaker;
constructor(config: AIServiceConfig) {
this.providers = new Map();
this.models = new Map();
this.defaultProvider = config.defaultProvider;
this.circuitBreaker = new CircuitBreaker();
this.initializeProviders(config.providers);
}
// Initialize AI providers
private initializeProviders(providerConfigs: AIProviderConfig[]): void {
for (const config of providerConfigs) {
const provider = this.createProvider(config);
this.providers.set(config.name, provider);
// Register provider's models
for (const model of config.models) {
this.models.set(model.id, {
...model,
provider: config.name
});
}
}
}
// Create AI provider instance
private createProvider(config: AIProviderConfig): AIProvider {
switch (config.type) {
case 'openai':
return new OpenAIProvider(config);
case 'anthropic':
return new AnthropicProvider(config);
case 'cloudflare':
return new CloudflareAIProvider(config);
case 'local':
return new LocalAIProvider(config);
default:
throw new Error(`Unsupported provider type: ${config.type}`);
}
}
// Generate content using AI
async generateContent(request: AIRequest): Promise<AIResponse> {
const model = this.selectModel(request);
const provider = this.providers.get(model.provider);
if (!provider) {
throw new Error(`Provider not found: ${model.provider}`);
}
try {
const response = await this.circuitBreaker.execute(() =>
provider.generate(request, model)
);
return this.processResponse(response, request);
} catch (error) {
return this.handleError(error, request);
}
}
// Select appropriate model for request
private selectModel(request: AIRequest): AIModel {
// Check if specific model is requested
if (request.modelId) {
const model = this.models.get(request.modelId);
if (model) return model;
}
// Select model based on task type
switch (request.taskType) {
case 'content_generation':
return this.selectContentModel(request);
case 'conversation':
return this.selectConversationModel(request);
case 'analysis':
return this.selectAnalysisModel(request);
default:
return this.getDefaultModel();
}
}
// Select model for content generation
private selectContentModel(request: AIRequest): AIModel {
const contentModels = Array.from(this.models.values())
.filter(model => model.capabilities.includes('content_generation'))
.sort((a, b) => b.performance - a.performance);
return contentModels[0] || this.getDefaultModel();
}
// Select model for conversation
private selectConversationModel(request: AIRequest): AIModel {
const conversationModels = Array.from(this.models.values())
.filter(model => model.capabilities.includes('conversation'))
.sort((a, b) => b.performance - a.performance);
return conversationModels[0] || this.getDefaultModel();
}
// Process AI response
private processResponse(response: any, request: AIRequest): AIResponse {
return {
content: response.content,
model: response.model,
provider: response.provider,
usage: response.usage,
metadata: {
taskType: request.taskType,
timestamp: new Date(),
requestId: request.id
}
};
}
// Handle AI errors
private handleError(error: any, request: AIRequest): AIResponse {
console.error('AI request failed:', error);
// Log error for monitoring
this.logError(error, request);
// Return fallback response
return {
content: this.getFallbackContent(request),
model: 'fallback',
provider: 'system',
usage: { tokens: 0, cost: 0 },
metadata: {
taskType: request.taskType,
timestamp: new Date(),
requestId: request.id,
error: error.message
}
};
}
}
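AIService wraps every provider call in a CircuitBreaker, which is referenced above but not shown. A minimal sketch of one way to implement it follows; the failure threshold and cool-down period are illustrative defaults, not the platform's actual configuration.
// Illustrative circuit breaker: opens after repeated failures and retries after a cool-down
class CircuitBreaker {
  private failures = 0;
  private openedAt: number | null = null;

  constructor(
    private readonly failureThreshold = 5,    // assumed default
    private readonly resetTimeoutMs = 30_000  // assumed default
  ) {}

  async execute<T>(operation: () => Promise<T>): Promise<T> {
    if (this.isOpen()) {
      throw new Error('Circuit breaker is open; skipping AI call');
    }
    try {
      const result = await operation();
      this.failures = 0; // a success closes the circuit again
      return result;
    } catch (error) {
      this.failures++;
      if (this.failures >= this.failureThreshold) {
        this.openedAt = Date.now();
      }
      throw error;
    }
  }

  private isOpen(): boolean {
    if (this.openedAt === null) return false;
    if (Date.now() - this.openedAt >= this.resetTimeoutMs) {
      // Cool-down elapsed: move to half-open and allow a trial request
      this.openedAt = null;
      this.failures = 0;
      return false;
    }
    return true;
  }
}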
AI Provider Implementations
import OpenAI from 'openai';

// OpenAI Provider
class OpenAIProvider implements AIProvider {
private config: OpenAIConfig;
private client: OpenAI;
constructor(config: OpenAIConfig) {
this.config = config;
this.client = new OpenAI({
apiKey: config.apiKey,
baseURL: config.baseURL
});
}
async generate(request: AIRequest, model: AIModel): Promise<AIResponse> {
const completion = await this.client.chat.completions.create({
model: model.id,
messages: this.formatMessages(request.messages),
temperature: request.temperature || 0.7,
max_tokens: request.maxTokens || 1000,
stream: false
});
return {
content: completion.choices[0].message.content ?? '',
model: model.id,
provider: 'openai',
usage: {
tokens: completion.usage?.total_tokens || 0,
cost: this.calculateCost(completion.usage, model)
}
};
}
private formatMessages(messages: AIMessage[]): any[] {
return messages.map(msg => ({
role: msg.role,
content: msg.content
}));
}
private calculateCost(usage: any, model: AIModel): number {
// Cost is token usage times the model's per-token price; usage can be missing on some responses
return (usage?.total_tokens ?? 0) * model.costPerToken;
}
}
// Cloudflare AI Provider
class CloudflareAIProvider implements AIProvider {
private config: CloudflareAIConfig;
private ai: any;
constructor(config: CloudflareAIConfig) {
this.config = config;
this.ai = config.ai;
}
async generate(request: AIRequest, model: AIModel): Promise<AIResponse> {
const response = await this.ai.run(model.id, {
messages: request.messages,
temperature: request.temperature || 0.7,
max_tokens: request.maxTokens || 1000
});
return {
content: response.response,
model: model.id,
provider: 'cloudflare',
usage: {
tokens: response.usage?.total_tokens || 0,
cost: 0 // per-request cost is not tracked for Workers AI in this integration
}
};
}
}
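The following usage sketch shows how the service might be configured with a single provider and asked for content. The AIServiceConfig shape, model ID, and pricing values are assumptions for illustration; substitute the real configuration used by the platform.
// Hypothetical configuration; field names mirror how AIService consumes them above
const aiService = new AIService({
  defaultProvider: 'openai',
  providers: [
    {
      name: 'openai',
      type: 'openai',
      apiKey: process.env.OPENAI_API_KEY ?? '', // illustrative; load from your secret store
      models: [
        {
          id: 'gpt-4o-mini',                    // example model ID
          capabilities: ['content_generation', 'conversation'],
          performance: 0.8,
          maxTokens: 4096,
          costPerToken: 0.0000006               // example pricing, not actual rates
        }
      ]
    }
  ]
});

const response = await aiService.generateContent({
  id: crypto.randomUUID(),
  taskType: 'content_generation',
  messages: [
    { role: 'system', content: 'You generate short educational statements.' },
    { role: 'user', content: 'Write one true/false statement about photosynthesis.' }
  ],
  maxTokens: 200
});

console.log(response.content, response.usage);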
NpcAgent Implementation
Core NpcAgent Class
import { NPCConfig, NPCResponse, NPCContext } from '@/lib/services/NpcAgent';
// Intelligent NPC agent for dynamic interactions
class NpcAgent {
private config: NPCConfig;
private aiService: AIService;
private memory: NPCMemory;
private personality: NPCPersonality;
constructor(config: NPCConfig, aiService: AIService) {
this.config = config;
this.aiService = aiService;
this.memory = new NPCMemory(config.memorySize);
this.personality = new NPCPersonality(config.personality);
}
// Generate NPC response
async generateResponse(context: NPCContext): Promise<NPCResponse> {
try {
// Update memory with context
this.memory.addInteraction(context);
// Build conversation history
const history = this.memory.getRecentInteractions(10);
// Generate response using AI
const aiRequest: AIRequest = {
id: generateUUID(),
taskType: 'conversation',
messages: this.buildMessages(context, history),
temperature: this.personality.getResponseTemperature(),
maxTokens: 500
};
const aiResponse = await this.aiService.generateContent(aiRequest);
// Process and format response
const response = this.processResponse(aiResponse, context);
// Update memory with response
this.memory.addResponse(response);
return response;
} catch (error) {
return this.generateFallbackResponse(context);
}
}
// Build messages for AI request
private buildMessages(context: NPCContext, history: NPCInteraction[]): AIMessage[] {
const messages: AIMessage[] = [];
// Add system prompt
messages.push({
role: 'system',
content: this.buildSystemPrompt(context)
});
// Add conversation history
for (const interaction of history) {
messages.push({
role: 'user',
content: interaction.userMessage
});
if (interaction.npcResponse) {
messages.push({
role: 'assistant',
content: interaction.npcResponse
});
}
}
// Add current context
messages.push({
role: 'user',
content: context.userMessage
});
return messages;
}
// Build system prompt for NPC
private buildSystemPrompt(context: NPCContext): string {
return `
You are ${this.config.name}, a ${this.config.role} in the PadawanForge cognitive training platform.
Personality: ${this.personality.getDescription()}
Knowledge: ${this.config.knowledge}
Goals: ${this.config.goals}
Guidelines:
- Stay in character at all times
- Provide helpful and educational responses
- Adapt your communication style to the user's level
- Use your knowledge to enhance the learning experience
- Be encouraging and supportive
- Keep responses concise but informative
Current context: the learner is at level ${context.userLevel} and the topic is ${context.topic}.
`.trim();
}
// Process AI response
private processResponse(aiResponse: AIResponse, context: NPCContext): NPCResponse {
return {
id: generateUUID(),
content: aiResponse.content,
npcId: this.config.id,
timestamp: new Date(),
context: {
userLevel: context.userLevel,
topic: context.topic,
mood: this.analyzeMood(aiResponse.content)
},
metadata: {
model: aiResponse.model,
provider: aiResponse.provider,
tokens: aiResponse.usage.tokens
}
};
}
// Analyze response mood
private analyzeMood(content: string): string {
const positiveWords = ['great', 'excellent', 'wonderful', 'amazing', 'fantastic'];
const negativeWords = ['difficult', 'challenging', 'hard', 'confusing', 'frustrating'];
const lowerContent = content.toLowerCase();
const positiveCount = positiveWords.filter(word => lowerContent.includes(word)).length;
const negativeCount = negativeWords.filter(word => lowerContent.includes(word)).length;
if (positiveCount > negativeCount) return 'positive';
if (negativeCount > positiveCount) return 'negative';
return 'neutral';
}
// Generate fallback response
private generateFallbackResponse(context: NPCContext): NPCResponse {
const fallbackResponses = [
"I'm having trouble processing that right now. Could you rephrase your question?",
"Let me think about that for a moment...",
"That's an interesting question. Let me help you with that.",
"I'd be happy to help you with that topic."
];
const randomResponse = fallbackResponses[Math.floor(Math.random() * fallbackResponses.length)];
return {
id: generateUUID(),
content: randomResponse,
npcId: this.config.id,
timestamp: new Date(),
context: {
userLevel: context.userLevel,
topic: context.topic,
mood: 'neutral'
},
metadata: {
model: 'fallback',
provider: 'system',
tokens: 0
}
};
}
}
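A usage sketch for NpcAgent. The NPCConfig fields are inferred from how the class reads them above (name, role, knowledge, goals, personality, memorySize) and are illustrative; the aiService instance is the one configured in the earlier sketch.
// Hypothetical NPC definition; shape follows NpcAgent's usage above
const mentorConfig = {
  id: 'npc-mentor-1',
  name: 'Master Orin',
  role: 'training mentor',
  knowledge: 'Cognitive training exercises and study strategies',
  goals: 'Guide learners through exercises and keep them motivated',
  memorySize: 100,
  personality: {
    traits: [
      { name: 'enthusiasm', intensity: 0.8, description: 'Encouraging and upbeat' },
      { name: 'creativity', intensity: 0.4, description: 'Uses varied examples' }
    ],
    communicationStyle: { description: 'Warm, concise, and practical' },
    knowledgeAreas: ['memory', 'focus', 'logic']
  }
};

const mentor = new NpcAgent(mentorConfig, aiService); // aiService from the earlier sketch

const reply = await mentor.generateResponse({
  userMessage: 'How do I improve my recall between sessions?',
  userLevel: 4,
  topic: 'memory',
  sessionId: 'session-abc'
});

console.log(reply.content, reply.context.mood);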
NPC Memory System
// NPC memory for maintaining conversation context
class NPCMemory {
private interactions: NPCInteraction[];
private maxSize: number;
constructor(maxSize: number = 100) {
this.interactions = [];
this.maxSize = maxSize;
}
// Add interaction to memory
addInteraction(context: NPCContext): void {
const interaction: NPCInteraction = {
id: generateUUID(),
timestamp: new Date(),
userMessage: context.userMessage,
userLevel: context.userLevel,
topic: context.topic,
npcResponse: null
};
this.interactions.push(interaction);
this.trimMemory();
}
// Add NPC response to memory
addResponse(response: NPCResponse): void {
if (this.interactions.length > 0) {
const lastInteraction = this.interactions[this.interactions.length - 1];
lastInteraction.npcResponse = response.content;
}
}
// Get recent interactions
getRecentInteractions(count: number): NPCInteraction[] {
return this.interactions.slice(-count);
}
// Get interactions by topic
getInteractionsByTopic(topic: string): NPCInteraction[] {
return this.interactions.filter(interaction =>
interaction.topic === topic
);
}
// Get user level progression
getUserLevelProgression(): UserLevelProgression {
const levels = this.interactions.map(i => i.userLevel);
const uniqueLevels = [...new Set(levels)];
return {
currentLevel: levels[levels.length - 1] || 1,
levelsSeen: uniqueLevels,
progressionRate: this.calculateProgressionRate()
};
}
// Trim memory to prevent overflow
private trimMemory(): void {
if (this.interactions.length > this.maxSize) {
this.interactions = this.interactions.slice(-this.maxSize);
}
}
// Calculate user progression rate
private calculateProgressionRate(): number {
if (this.interactions.length < 2) return 0;
const recentInteractions = this.interactions.slice(-10);
const levelChanges = recentInteractions.filter((interaction, index) => {
if (index === 0) return false;
return interaction.userLevel > recentInteractions[index - 1].userLevel;
});
return levelChanges.length / recentInteractions.length;
}
}
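A brief sketch of how the memory accumulates a session: record the user's turn, attach the NPC reply, then inspect recent history and progression. All values are illustrative.
const memory = new NPCMemory(50);

// Record a user turn
memory.addInteraction({
  userMessage: 'What is a mnemonic?',
  userLevel: 2,
  topic: 'memory',
  sessionId: 'session-abc'
});

// Attach the NPC's reply to that turn
memory.addResponse({
  id: crypto.randomUUID(),
  content: 'A mnemonic is a memory aid, such as an acronym or a rhyme.',
  npcId: 'npc-mentor-1',
  timestamp: new Date(),
  context: { userLevel: 2, topic: 'memory', mood: 'neutral' },
  metadata: { model: 'gpt-4o-mini', provider: 'openai', tokens: 24 }
});

console.log(memory.getRecentInteractions(5).length); // 1
console.log(memory.getUserLevelProgression());       // { currentLevel: 2, levelsSeen: [2], progressionRate: 0 }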
NPC Personality System
// NPC personality for consistent character behavior
class NPCPersonality {
private traits: PersonalityTrait[];
private communicationStyle: CommunicationStyle;
private knowledgeAreas: string[];
constructor(config: NPCPersonalityConfig) {
this.traits = config.traits;
this.communicationStyle = config.communicationStyle;
this.knowledgeAreas = config.knowledgeAreas;
}
// Get personality description
getDescription(): string {
const traitDescriptions = this.traits.map(trait => trait.description);
const styleDescription = this.communicationStyle.description;
return `${traitDescriptions.join(', ')}. ${styleDescription}`;
}
// Get response temperature based on personality
getResponseTemperature(): number {
const baseTemperature = 0.7;
const creativityTrait = this.traits.find(t => t.name === 'creativity');
if (creativityTrait) {
return baseTemperature + (creativityTrait.intensity * 0.3);
}
return baseTemperature;
}
// Adapt communication to user level
adaptToUserLevel(userLevel: number): string {
if (userLevel <= 3) {
return "Use simple language and provide clear explanations.";
} else if (userLevel <= 7) {
return "Use moderate complexity and encourage deeper thinking.";
} else {
return "Use advanced concepts and challenge the user's understanding.";
}
}
// Get personality-based response modifiers
getResponseModifiers(): ResponseModifiers {
return {
enthusiasm: this.getEnthusiasmLevel(),
formality: this.getFormalityLevel(),
detail: this.getDetailLevel()
};
}
private getEnthusiasmLevel(): number {
const enthusiasmTrait = this.traits.find(t => t.name === 'enthusiasm');
return enthusiasmTrait?.intensity || 0.5;
}
private getFormalityLevel(): number {
const formalityTrait = this.traits.find(t => t.name === 'formality');
return formalityTrait?.intensity || 0.5;
}
private getDetailLevel(): number {
const detailTrait = this.traits.find(t => t.name === 'detail');
return detailTrait?.intensity || 0.5;
}
}
Content Generation System
Statement Generation
// Generate educational statements
class StatementGenerator {
private aiService: AIService;
private topics: Topic[];
private difficultyLevels: DifficultyLevel[];
constructor(aiService: AIService) {
this.aiService = aiService;
this.topics = this.loadTopics();
this.difficultyLevels = this.loadDifficultyLevels();
}
// Generate statements for a topic
async generateStatements(topic: string, difficulty: string, count: number = 10): Promise<Statement[]> {
const statements: Statement[] = [];
for (let i = 0; i < count; i++) {
const statement = await this.generateSingleStatement(topic, difficulty);
if (statement) {
statements.push(statement);
}
}
return statements;
}
// Generate single statement
private async generateSingleStatement(topic: string, difficulty: string): Promise<Statement | null> {
const prompt = this.buildStatementPrompt(topic, difficulty);
const aiRequest: AIRequest = {
id: generateUUID(),
taskType: 'content_generation',
messages: [
{
role: 'system',
content: prompt
}
],
temperature: 0.8,
maxTokens: 200
};
try {
const response = await this.aiService.generateContent(aiRequest);
return this.parseStatement(response.content, topic, difficulty, response.model);
} catch (error) {
console.error('Failed to generate statement:', error);
return null;
}
}
// Build statement generation prompt
private buildStatementPrompt(topic: string, difficulty: string): string {
const difficultyConfig = this.difficultyLevels.find(d => d.name === difficulty);
return `
Generate a ${difficulty} difficulty true/false statement about ${topic}.
Requirements:
- The statement should be educational and factual
- Difficulty level: ${difficultyConfig?.description || difficulty}
- Target audience: Students learning about ${topic}
- Statement should be clear and unambiguous
- Include the correct answer (true or false)
Format your response as:
Statement: [The statement text]
Answer: [true/false]
Explanation: [Brief explanation of why the answer is correct]
Generate one statement now:
`.trim();
}
// Parse AI response into statement
private parseStatement(content: string, topic: string, difficulty: string, aiModel: string): Statement | null {
try {
const lines = content.split('\n');
let statementText = '';
let answer = '';
let explanation = '';
for (const line of lines) {
if (line.startsWith('Statement:')) {
statementText = line.replace('Statement:', '').trim();
} else if (line.startsWith('Answer:')) {
answer = line.replace('Answer:', '').trim().toLowerCase();
} else if (line.startsWith('Explanation:')) {
explanation = line.replace('Explanation:', '').trim();
}
}
if (statementText && answer && (answer === 'true' || answer === 'false')) {
return {
id: generateUUID(),
text: statementText,
answer: answer === 'true',
explanation,
topic,
difficulty,
createdAt: new Date(),
metadata: {
generated: true,
aiModel, // record the model that actually generated this statement
confidence: 0.9
}
};
}
} catch (error) {
console.error('Failed to parse statement:', error);
}
return null;
}
}
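A usage sketch for the generator. The 'medium' difficulty name is an example and must match one of the configured difficulty levels; generated output varies from run to run.
const generator = new StatementGenerator(aiService); // aiService from the earlier sketch

// Statements are generated sequentially, one AI request per statement
const statements = await generator.generateStatements('photosynthesis', 'medium', 3);

for (const s of statements) {
  console.log(`${s.text} -> ${s.answer ? 'true' : 'false'} (${s.difficulty})`);
}

// Raw model output in the format parseStatement expects:
// Statement: Photosynthesis converts light energy into chemical energy stored in glucose.
// Answer: true
// Explanation: The light-dependent and light-independent reactions together produce glucose.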
Model Management
Model Selection and Optimization
// AI model management and optimization
class ModelManager {
private models: Map<string, AIModel>;
private performanceMetrics: Map<string, ModelPerformance>;
private costTracker: CostTracker;
constructor() {
this.models = new Map();
this.performanceMetrics = new Map();
this.costTracker = new CostTracker();
}
// Register a new model
registerModel(model: AIModel): void {
this.models.set(model.id, model);
this.performanceMetrics.set(model.id, {
totalRequests: 0,
successfulRequests: 0,
averageResponseTime: 0,
totalCost: 0,
lastUsed: null
});
}
// Select best model for task
selectModel(taskType: string, requirements: ModelRequirements): AIModel {
const candidates = Array.from(this.models.values())
.filter(model => this.meetsRequirements(model, requirements))
.sort((a, b) => this.calculateScore(b, taskType) - this.calculateScore(a, taskType));
return candidates[0] || this.getDefaultModel();
}
// Check if model meets requirements
private meetsRequirements(model: AIModel, requirements: ModelRequirements): boolean {
return (
model.capabilities.includes(requirements.taskType) &&
model.maxTokens >= requirements.maxTokens &&
model.costPerToken <= requirements.maxCostPerToken
);
}
// Calculate model score for selection
private calculateScore(model: AIModel, taskType: string): number {
const performance = this.performanceMetrics.get(model.id);
if (!performance) return 0;
const successRate = performance.successfulRequests / Math.max(performance.totalRequests, 1);
const costEfficiency = 1 / (model.costPerToken + 0.001);
const speedScore = 1 / (performance.averageResponseTime + 1);
return (successRate * 0.4) + (costEfficiency * 0.3) + (speedScore * 0.3);
}
// Update model performance metrics
updateMetrics(modelId: string, metrics: ModelMetrics): void {
const performance = this.performanceMetrics.get(modelId);
if (!performance) return;
performance.totalRequests++;
performance.lastUsed = new Date();
if (metrics.success) {
performance.successfulRequests++;
}
// Update average response time
const totalTime = performance.averageResponseTime * (performance.totalRequests - 1);
performance.averageResponseTime = (totalTime + metrics.responseTime) / performance.totalRequests;
// Update cost
performance.totalCost += metrics.cost;
}
// Get model recommendations
getRecommendations(taskType: string): ModelRecommendation[] {
const recommendations: ModelRecommendation[] = [];
for (const model of this.models.values()) {
if (model.capabilities.includes(taskType)) {
const performance = this.performanceMetrics.get(model.id);
const score = this.calculateScore(model, taskType);
recommendations.push({
modelId: model.id,
score,
performance: performance || {
totalRequests: 0,
successfulRequests: 0,
averageResponseTime: 0,
totalCost: 0,
lastUsed: null
},
costPerToken: model.costPerToken
});
}
}
return recommendations.sort((a, b) => b.score - a.score);
}
}
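A usage sketch covering registration, selection, and metric updates. The model values, budget thresholds, and timings are illustrative.
const modelManager = new ModelManager();

// Register an illustrative model (example values, not real pricing)
modelManager.registerModel({
  id: 'gpt-4o-mini',
  provider: 'openai',
  capabilities: ['content_generation', 'conversation'],
  performance: 0.8,
  maxTokens: 4096,
  costPerToken: 0.0000006
});

// Pick a model for content generation within a token and cost budget
const chosen = modelManager.selectModel('content_generation', {
  taskType: 'content_generation',
  maxTokens: 1000,
  maxCostPerToken: 0.000001
});

// Record the outcome of a request so future selections reflect observed performance
modelManager.updateMetrics(chosen.id, {
  success: true,
  responseTime: 820, // milliseconds
  cost: 0.0004
});

console.log(modelManager.getRecommendations('content_generation'));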
Integration Examples
API Integration
// AI service API endpoints
export async function POST(request: Request, locals: any) {
const url = new URL(request.url);
const body = await request.json();
if (url.pathname === '/api/ai/generate') {
const aiService = new AIService(locals.runtime.env);
const aiRequest: AIRequest = {
id: generateUUID(),
taskType: body.taskType,
messages: body.messages,
temperature: body.temperature,
maxTokens: body.maxTokens
};
try {
const response = await aiService.generateContent(aiRequest);
return new Response(JSON.stringify(response), {
status: 200,
headers: { 'Content-Type': 'application/json' }
});
} catch (error) {
return new Response(JSON.stringify({ error: error.message }), {
status: 500,
headers: { 'Content-Type': 'application/json' }
});
}
}
if (url.pathname === '/api/ai/npc/chat') {
const npcAgent = new NpcAgent(body.npcConfig, locals.runtime.env.aiService);
const context: NPCContext = {
userMessage: body.message,
userLevel: body.userLevel,
topic: body.topic,
sessionId: body.sessionId
};
try {
const response = await npcAgent.generateResponse(context);
return new Response(JSON.stringify(response), {
status: 200,
headers: { 'Content-Type': 'application/json' }
});
} catch (error) {
return new Response(JSON.stringify({ error: error.message }), {
status: 500,
headers: { 'Content-Type': 'application/json' }
});
}
}
return new Response('Not found', { status: 404 });
}
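A client-side sketch that calls the content generation endpoint defined above; the payload mirrors the fields the handler forwards into AIRequest.
// Call the /api/ai/generate endpoint from application code
async function requestStatement(topic: string): Promise<AIResponse> {
  const res = await fetch('/api/ai/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      taskType: 'content_generation',
      messages: [
        { role: 'system', content: 'You generate short educational statements.' },
        { role: 'user', content: `Write one true/false statement about ${topic}.` }
      ],
      temperature: 0.8,
      maxTokens: 200
    })
  });
  if (!res.ok) {
    throw new Error(`AI request failed with status ${res.status}`);
  }
  return res.json();
}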
Component Integration
// React component for AI chat
import { useState } from 'react';
import type { NPCConfig } from '@/lib/services/NpcAgent';
function NPCChat({ npcConfig, userLevel }: { npcConfig: NPCConfig; userLevel: number }) {
const [messages, setMessages] = useState<ChatMessage[]>([]);
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const sendMessage = async () => {
if (!input.trim()) return;
const userMessage: ChatMessage = {
id: generateUUID(),
content: input,
sender: 'user',
timestamp: new Date()
};
setMessages(prev => [...prev, userMessage]);
setInput('');
setLoading(true);
try {
const response = await fetch('/api/ai/npc/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
npcConfig,
message: input,
userLevel,
topic: 'general',
sessionId: 'session-123'
})
});
const npcResponse = await response.json();
const npcMessage: ChatMessage = {
id: npcResponse.id,
content: npcResponse.content,
sender: 'npc',
timestamp: new Date(npcResponse.timestamp)
};
setMessages(prev => [...prev, npcMessage]);
} catch (error) {
console.error('Failed to get NPC response:', error);
} finally {
setLoading(false);
}
};
return (
<div className="npc-chat">
<div className="chat-messages">
{messages.map(message => (
<div key={message.id} className={`message ${message.sender}`}>
<div className="message-content">{message.content}</div>
<div className="message-timestamp">
{message.timestamp.toLocaleTimeString()}
</div>
</div>
))}
{loading && (
<div className="message npc">
<div className="message-content">Thinking...</div>
</div>
)}
</div>
<div className="chat-input">
<input
type="text"
value={input}
onChange={(e) => setInput(e.target.value)}
onKeyDown={(e) => e.key === 'Enter' && sendMessage()}
placeholder="Type your message..."
disabled={loading}
/>
<button onClick={sendMessage} disabled={loading || !input.trim()}>
Send
</button>
</div>
</div>
);
}
Testing
AI System Testing
describe('AI Integration System', () => {
let aiService: AIService;
let npcAgent: NpcAgent;
beforeEach(() => {
aiService = new AIService(mockConfig);
npcAgent = new NpcAgent(mockNPCConfig, aiService);
});
it('should generate content successfully', async () => {
const request: AIRequest = {
id: 'test-request',
taskType: 'content_generation',
messages: [
{ role: 'system', content: 'Generate a test statement' },
{ role: 'user', content: 'Create a statement about science' }
]
};
const response = await aiService.generateContent(request);
expect(response.content).toBeDefined();
expect(response.model).toBeDefined();
expect(response.provider).toBeDefined();
});
it('should generate NPC response', async () => {
const context: NPCContext = {
userMessage: 'Hello, how are you?',
userLevel: 5,
topic: 'general',
sessionId: 'test-session'
};
const response = await npcAgent.generateResponse(context);
expect(response.content).toBeDefined();
expect(response.npcId).toBe(mockNPCConfig.id);
expect(response.timestamp).toBeInstanceOf(Date);
});
it('should handle AI errors gracefully', async () => {
// Mock AI service to throw error
jest.spyOn(aiService, 'generateContent').mockRejectedValue(new Error('AI service unavailable'));
const context: NPCContext = {
userMessage: 'Test message',
userLevel: 3,
topic: 'general',
sessionId: 'test-session'
};
const response = await npcAgent.generateResponse(context);
expect(response.content).toBeDefined();
expect(response.metadata.model).toBe('fallback');
});
});
This AI integration system provides intelligent content generation, dynamic NPC interactions, and adaptive learning capabilities, backed by fallback responses, circuit breaking, and performance-aware model selection.