Update src/ai_system.py
src/ai_system.py  (+321 -72)  CHANGED

import os
import time
import logging
import asyncio
import json
import random  # used by the fallback and error response helpers below
import requests
from datetime import datetime
from typing import Dict, List, Optional, Any, Tuple

try:
    from llama_cpp import Llama
except ImportError:
    Llama = None

try:
    from huggingface_hub import hf_hub_download
except ImportError:
    hf_hub_download = None

from .supabase_integration import AdvancedSupabaseIntegration
from .security_system import AdvancedSecuritySystem
from .monitoring_system import ComprehensiveMonitor

class SaemsTunesAISystem:
    """
    Main AI system for Saem's Tunes music education and streaming platform.
    Handles user queries with context from the Supabase database.
    """

    def __init__(
        self,
        supabase_integration: AdvancedSupabaseIntegration,
        security_system: AdvancedSecuritySystem,
        monitor: ComprehensiveMonitor,
        model_name: str = "microsoft/Phi-3.5-mini-instruct",
        model_repo: str = "Thetima4/Phi-3.5-mini-instruct-Q4_K_M-GGUF",
        model_file: str = "Phi-3.5-mini-instruct-q4_k_m.gguf",
        max_response_length: int = 500,
        temperature: float = 0.7,
        top_p: float = 0.9
    ):
        self.supabase = supabase_integration
        self.security = security_system
        self.monitor = monitor
        self.model_name = model_name
        self.model_repo = model_repo
        self.model_file = model_file
        self.max_response_length = max_response_length
        self.temperature = temperature
        self.top_p = top_p

        self.model = None
        self.model_loaded = False
        self.model_path = None

        self.setup_logging()
        self.load_model()

    def setup_logging(self):
        """Set up logging for the AI system"""
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)

    def load_model(self):
        """Load the Phi-3.5-mini-instruct model"""
        try:
            self.logger.info(f"🔄 Loading {self.model_name} model...")

            # Use a local copy of the model file if one exists
            local_path = f"./models/{self.model_file}"
            if os.path.exists(local_path):
                self.model_path = local_path
                self.logger.info(f"✅ Found local model: {local_path}")
            else:
                # Otherwise download the GGUF file from the Hugging Face Hub
                if hf_hub_download is None:
                    self.logger.error("❌ huggingface_hub not available for model download")
                    return

                self.logger.info(f"📥 Downloading model from {self.model_repo}")
                self.model_path = hf_hub_download(
                    repo_id=self.model_repo,
                    filename=self.model_file,
                    cache_dir="./models",
                    local_dir_use_symlinks=False
                )
                self.logger.info(f"✅ Model downloaded: {self.model_path}")

            # Load the model
            if Llama is None:
                self.logger.error("❌ llama-cpp-python not available for model loading")
                return

            self.model = Llama(
                model_path=self.model_path,
                n_ctx=4096,    # Context window
                n_threads=4,   # CPU threads
                n_batch=512,
                verbose=False,
                use_mlock=False,
                use_mmap=True
            )

            # Smoke-test the model with a tiny completion before marking it ready
            self.model.create_completion(
                "Hello",
                max_tokens=10,
                temperature=0.1
            )

            self.model_loaded = True
            self.logger.info("✅ Model loaded and tested successfully!")

        except Exception as e:
            self.logger.error(f"❌ Error loading model: {e}")
            self.model_loaded = False

    def process_query(
        self,
        query: str,
        user_id: str,
        conversation_id: Optional[str] = None
    ) -> str:
        """
        Process a user query and generate a response with context from Supabase.

        Args:
            query: User's question
            user_id: Unique user identifier
            conversation_id: Optional conversation ID for context

        Returns:
            AI-generated response
        """
        if not self.model_loaded:
            self.logger.warning("Model not loaded, returning fallback response")
            return self.get_fallback_response(query)

        try:
            start_time = time.time()

            # Get comprehensive context from Supabase
            context = self.supabase.get_music_context(query, user_id)

            # Build an enhanced prompt with that context
            prompt = self.build_enhanced_prompt(query, context, user_id)

            # Generate the response
            response = self.model.create_completion(
                prompt,
                max_tokens=self.max_response_length,
                temperature=self.temperature,
                top_p=self.top_p,
                stop=["<|end|>", "</s>", "###", "Human:", "Assistant:"],
                echo=False,
                stream=False
            )

            processing_time = time.time() - start_time

            # Extract and clean up the response text
            response_text = response['choices'][0]['text'].strip()
            response_text = self.clean_response(response_text)

            # Record metrics
            self.record_metrics(
                query=query,
                response=response_text,
                processing_time=processing_time,
                user_id=user_id,
                conversation_id=conversation_id,
                context_used=context,
                success=True
            )

            self.logger.info(f"✅ Query processed in {processing_time:.2f}s: {query[:50]}...")

            return response_text

        except Exception as e:
            self.logger.error(f"❌ Error processing query: {e}")

            # Record error metrics
            self.record_metrics(
                query=query,
                response="",
                processing_time=0,
                user_id=user_id,
                conversation_id=conversation_id,
                error_message=str(e),
                success=False
            )

            return self.get_error_response(e)

    def build_enhanced_prompt(
        self,
        query: str,
        context: Dict[str, Any],
        user_id: str
    ) -> str:
        """
        Build a comprehensive prompt with context from the Saem's Tunes platform.

        Args:
            query: User's question
            context: Context from the Supabase database
            user_id: User identifier for personalization

        Returns:
            Formatted prompt for the model
        """

        # System prompt with platform context
        system_prompt = f"""<|system|>
You are the AI assistant for Saem's Tunes, a comprehensive music education and streaming platform.

PLATFORM OVERVIEW:
🎵 **Music Streaming**: {context.get('stats', {}).get('track_count', 0)}+ tracks, {context.get('stats', {}).get('artist_count', 0)}+ artists
📚 **Education**: Courses, lessons, quizzes, and learning paths
👥 **Community**: User profiles, favorites, social features
🎨 **Creator Tools**: Music upload, artist analytics, promotion tools
💎 **Premium**: Subscription-based premium features

PLATFORM STATISTICS:
- Total Tracks: {context.get('stats', {}).get('track_count', 0)}
- Total Artists: {context.get('stats', {}).get('artist_count', 0)}
- Total Users: {context.get('stats', {}).get('user_count', 0)}
- Total Courses: {context.get('stats', {}).get('course_count', 0)}

CURRENT CONTEXT:
{context.get('summary', 'General platform information')}

POPULAR CONTENT:
{self.format_popular_content(context)}

RESPONSE GUIDELINES:
1. Be passionate about music and education
2. Provide specific, actionable information
3. Reference platform features when relevant
4. Keep responses concise (under {self.max_response_length} words)
5. Be encouraging and supportive
6. If unsure, guide users to relevant platform sections
7. Personalize responses when user context is available

PLATFORM FEATURES TO MENTION:
- Music streaming and discovery
- Educational courses and learning paths
- Playlist creation and sharing
- Artist tools and music upload
- Community features and social interaction
- Premium subscription benefits
- Mobile app availability

ANSWER THE USER'S QUESTION:<|end|>
"""

        # User query
        user_prompt = f"<|user|>\n{query}<|end|>\n<|assistant|>\n"

        return system_prompt + user_prompt

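    # NOTE: the builder above emits the Phi-3 instruct chat format, i.e.
    #   <|system|>\n{platform context}<|end|>
    #   <|user|>\n{query}<|end|>
    #   <|assistant|>\n
    # The stop list passed to create_completion in process_query
    # ("<|end|>", "</s>", ...) halts generation at that turn boundary.
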
    def format_popular_content(self, context: Dict[str, Any]) -> str:
        """Format popular content for the prompt"""
        content_lines = []

        # Popular tracks
        if context.get('tracks'):
            content_lines.append("🎵 Popular Tracks:")
            for track in context['tracks'][:3]:
                content_lines.append(f"  - {track.get('title', 'Unknown')} by {track.get('artist', 'Unknown')}")

        # Popular artists
        if context.get('artists'):
            content_lines.append("👨‍🎤 Popular Artists:")
            for artist in context['artists'][:3]:
                content_lines.append(f"  - {artist.get('name', 'Unknown')} ({artist.get('genre', 'Various')})")

        # Recent courses
        if context.get('courses'):
            content_lines.append("📚 Recent Courses:")
            for course in context['courses'][:2]:
                content_lines.append(f"  - {course.get('title', 'Unknown')} ({course.get('level', 'All Levels')})")

        return "\n".join(content_lines) if content_lines else "No specific content data available"

    def clean_response(self, response: str) -> str:
        """Clean and format the AI response"""
        # Drop a trailing incomplete sentence, but only when the response
        # does not already end with terminal punctuation
        if response and response[-1] not in ['.', '!', '?'] and '.' in response:
            response = response.rsplit('.', 1)[0] + '.'

        # Remove any markdown bold/underline formatting if present
        response = response.replace('**', '').replace('__', '')

        # Ensure the response ends with proper punctuation
        if response and response[-1] not in ['.', '!', '?']:
            response += '.'

        return response.strip()

    def record_metrics(
        self,
        query: str,
        response: str,
        processing_time: float,
        user_id: str,
        conversation_id: Optional[str] = None,
        context_used: Optional[Dict] = None,
        error_message: Optional[str] = None,
        success: bool = True
    ):
        """Record metrics for monitoring and analytics"""
        metrics = {
            'model_name': 'phi3.5-mini-Q4_K_M',
            'processing_time_ms': processing_time * 1000,
            # Whitespace word counts serve as a cheap token-count approximation
            'input_tokens': len(query.split()),
            'output_tokens': len(response.split()) if response else 0,
            'total_tokens': len(query.split()) + (len(response.split()) if response else 0),
            'success': success,
            'user_id': user_id,
            'conversation_id': conversation_id,
            'timestamp': datetime.now(),
            'query_length': len(query),
            'response_length': len(response) if response else 0
        }

        if error_message:
            metrics['error_message'] = error_message

        if context_used:
            metrics['context_used'] = {
                'has_tracks': bool(context_used.get('tracks')),
                'has_artists': bool(context_used.get('artists')),
                'has_courses': bool(context_used.get('courses')),
                'context_summary': context_used.get('summary', '')
            }

        self.monitor.record_inference(metrics)

    def get_fallback_response(self, query: str) -> str:
        """Get a fallback response for when the model is unavailable"""
        fallback_responses = [
            "I'd love to help you with that! Our platform offers comprehensive music streaming and education features. ",
            "That's a great question about Saem's Tunes! We have extensive music content and educational resources available. ",
            "I appreciate your question about our music platform! Let me share some information about our features. "
        ]

        # Pick a context-specific answer based on keywords in the query
        query_lower = query.lower()

        if any(term in query_lower for term in ['playlist', 'create', 'make']):
            base_response = "You can create playlists by going to the Library section and clicking 'Create New Playlist'. "
        elif any(term in query_lower for term in ['course', 'learn', 'education']):
            base_response = "We offer various music courses for different skill levels in our Education section. "
        elif any(term in query_lower for term in ['upload', 'artist', 'create']):
            base_response = "Artists can upload their music through the Creator Studio after verification. "
        elif any(term in query_lower for term in ['premium', 'subscribe', 'payment']):
            base_response = "Our premium subscription offers ad-free listening, offline downloads, and exclusive content. "
        else:
            base_response = "Our platform combines music streaming with comprehensive educational resources. "

        # Lead with a generic opener, then give the keyword-specific information
        return random.choice(fallback_responses) + base_response

    def get_error_response(self, error: Exception) -> str:
        """Get a user-friendly error response"""
        error_responses = [
            "I apologize, but I'm having trouble accessing the full information right now. ",
            "I'm experiencing some technical difficulties at the moment. ",
            "I'm unable to process your request completely due to a temporary issue. "
        ]

        base_response = "Please try again in a few moments, or contact support if the issue persists."

        return random.choice(error_responses) + base_response

    def is_healthy(self) -> bool:
        """Check if AI system is healthy and ready"""
        return self.model_loaded and self.supabase.is_connected()

    def get_system_info(self) -> Dict[str, Any]:
        """Get system information for monitoring"""
        return {
            "model_loaded": self.model_loaded,
            "model_name": self.model_name,
            "model_path": self.model_path,
            "max_response_length": self.max_response_length,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "supabase_connected": self.supabase.is_connected()
        }

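For reference, a minimal usage sketch (not part of the diff). The stub classes are hypothetical stand-ins for AdvancedSupabaseIntegration, AdvancedSecuritySystem, and ComprehensiveMonitor, implementing only the methods this file actually calls (get_music_context, is_connected, record_inference); it assumes the repository's src package is importable as src.ai_system. If llama-cpp-python or the GGUF download is unavailable, process_query degrades to the keyword-based fallback, so the sketch still prints a response.

# Hypothetical smoke test for SaemsTunesAISystem; the stubs below stand in
# for the real sibling modules, which are outside this diff.
import logging

from src.ai_system import SaemsTunesAISystem  # assumed package layout

class StubSupabase:
    def get_music_context(self, query, user_id):
        # Shape matches what build_enhanced_prompt and format_popular_content read
        return {
            "stats": {"track_count": 120, "artist_count": 30,
                      "user_count": 500, "course_count": 12},
            "summary": "User is browsing the Education section",
            "tracks": [], "artists": [], "courses": [],
        }

    def is_connected(self):
        return True

class StubSecurity:
    pass  # ai_system.py stores but never calls the security system directly

class StubMonitor:
    def record_inference(self, metrics):
        logging.info("metrics: %s", metrics)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Note: constructing the system may trigger a multi-GB model download
    # if huggingface_hub is installed and no local GGUF file exists.
    ai = SaemsTunesAISystem(StubSupabase(), StubSecurity(), StubMonitor())
    print(ai.process_query("How do I create a playlist?", user_id="demo-user"))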