"""
AI Historical Simulation Platform - Main Integration Class

This module provides the complete production-ready platform that integrates all subsystems
into a cohesive AI Historical Simulation system supporting 1000+ concurrent users with
<200ms response times.
"""

import asyncio
import json
import logging
import time
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple, Set
import numpy as np
from dataclasses import dataclass, asdict
from contextlib import asynccontextmanager

# Core system imports
from ..hdc.core import HDCOperations
from ..personality.encoder import PersonalityEncoder
from ..personality.models import PersonalityTraits
from ..memory.manager import MemoryManager
from ..memory.hierarchy import MemoryHierarchy
from ..consciousness.models import ConsciousnessState
from ..consciousness.engine import ConsciousnessEngine
from ..dialogue.generator import DialogueGenerator
from ..reconstruction.historical_reconstructor import HistoricalReconstructor
from ..config.settings import Config

# Platform components
from .session_manager import SessionManager
from .database_manager import DatabaseManager  
from .performance_monitor import PerformanceMonitor

logger = logging.getLogger(__name__)


@dataclass
class PlatformHealth:
    """Point-in-time snapshot of overall system health.

    Produced by ``HistoricalSimulationPlatform.get_platform_health``; fields
    are instantaneous readings taken at ``last_check``.
    """
    is_healthy: bool  # aggregate verdict: initialized, error rate < 5%, latency < 200ms, sessions < 90% capacity
    uptime_seconds: float  # seconds elapsed since platform startup
    active_sessions: int  # number of currently active user sessions
    loaded_figures: int  # historical figures currently cached in memory
    memory_usage_mb: float  # process memory usage in megabytes
    avg_response_time_ms: float  # average response latency in milliseconds
    error_rate: float  # failed-request fraction (0.05 == 5% threshold in the health check)
    last_check: datetime  # when this snapshot was taken

@dataclass
class HistoricalFigure:
    """Enhanced historical figure representation.

    Bundles identity, the encoded HDC personality, contextual data, and live
    simulation state (consciousness plus usage statistics). Instances are
    cached by ``name`` on the platform.
    """
    name: str  # display name; also the platform cache key
    personality_id: str  # unique id: normalized name + load timestamp
    personality_vector: np.ndarray  # HDC-encoded personality vector
    traits: PersonalityTraits  # structured personality traits
    biography: Dict[str, Any]  # biographical data loaded from the database
    cultural_context: Dict[str, Any]  # cultural background key/value pairs
    historical_period: str  # era label; 'unknown' when unspecified in the data
    consciousness_state: ConsciousnessState  # mutable state, updated each conversation turn
    created_at: datetime  # when this figure object was constructed
    last_active: Optional[datetime] = None  # timestamp of the most recent conversation
    total_conversations: int = 0  # lifetime conversation counter
    avg_response_quality: float = 0.0  # average response quality; updater not visible in this module
    
    def to_dict(self) -> Dict[str, Any]:
        """Convert to a dictionary representation.

        Omits ``personality_vector`` and ``consciousness_state`` (presumably
        to keep the result serializable — confirm); datetimes are rendered as
        ISO-8601 strings.
        """
        return {
            'name': self.name,
            'personality_id': self.personality_id,
            'traits': asdict(self.traits),
            'biography': self.biography,
            'cultural_context': self.cultural_context,
            'historical_period': self.historical_period,
            'created_at': self.created_at.isoformat(),
            'last_active': self.last_active.isoformat() if self.last_active else None,
            'total_conversations': self.total_conversations,
            'avg_response_quality': self.avg_response_quality
        }


class HistoricalSimulationPlatform:
    """
    Production-ready AI Historical Simulation Platform.
    
    Integrates all subsystems including HDC operations, personality encoding,
    memory management, consciousness simulation, dialogue generation, and
    historical reconstruction into a scalable platform supporting 1000+
    concurrent users with <200ms response times.
    """
    
    def __init__(self, 
                 hdc_dimension: int = 10000,
                 config: Optional[Config] = None,
                 max_concurrent_sessions: int = 1000):
        """
        Initialize the complete platform.
        
        Wires every subsystem together; asynchronous setup (component
        ``initialize`` hooks, background tasks, default figures) is deferred
        to :meth:`initialize`.
        
        Args:
            hdc_dimension: HDC vector dimension
            config: Platform configuration
            max_concurrent_sessions: Maximum concurrent user sessions
        """
        self.config = config or Config()
        self.hdc_dimension = hdc_dimension
        self.max_concurrent_sessions = max_concurrent_sessions
        
        # Core system components -- all downstream components share this one
        # HDCOperations instance, so it must be constructed first.
        self.hdc_ops = HDCOperations(dimension=hdc_dimension)
        self.personality_encoder = PersonalityEncoder(self.hdc_ops)
        self.memory_manager = MemoryManager(self.hdc_ops)
        self.memory_hierarchy = MemoryHierarchy(self.hdc_ops)
        self.consciousness_engine = ConsciousnessEngine(self.hdc_ops)
        self.dialogue_generator = DialogueGenerator(self.hdc_ops)
        self.historical_reconstructor = HistoricalReconstructor()
        
        # Platform components (the session manager keeps a back-reference to
        # the platform itself).
        self.session_manager = SessionManager(self, max_concurrent_sessions)
        self.database_manager = DatabaseManager()
        self.performance_monitor = PerformanceMonitor()
        
        # Platform state
        self.historical_figures: Dict[str, HistoricalFigure] = {}  # keyed by figure name
        self.figure_name_to_id: Dict[str, str] = {}  # figure name -> personality_id
        self.active_conversations: Dict[str, Dict] = {}
        self.system_metrics: Dict[str, Any] = {}
        
        # Performance optimization: cache maps key -> (response, stored_at);
        # entries expire after cache_ttl (see _get_cached_response).
        self.response_cache: Dict[str, Tuple[str, datetime]] = {}
        self.cache_ttl = timedelta(minutes=5)
        # NOTE(review): asyncio.Queue/Event are created here, possibly outside a
        # running loop; on Python < 3.10 they bind the loop at construction --
        # confirm the platform is constructed inside the intended event loop.
        self.batch_processing_queue: asyncio.Queue = asyncio.Queue()
        
        # System status
        self.is_initialized = False
        self.startup_time = datetime.now()
        self.shutdown_event = asyncio.Event()
        
        # Background tasks (strong references; see _start_background_tasks)
        self.background_tasks: Set[asyncio.Task] = set()
        
        logger.info(f"HistoricalSimulationPlatform initialized with dimension {hdc_dimension}")
    
    async def initialize(self) -> None:
        """Initialize all platform components and systems.

        Idempotent: returns immediately if already initialized. Core
        components are initialized concurrently; components lacking an
        ``initialize`` hook are skipped via a no-op.

        Raises:
            Exception: the first failure raised by any component initializer
                (previously these were silently swallowed because the gather
                used ``return_exceptions=True`` without inspecting results).
        """
        if self.is_initialized:
            return
        
        logger.info("Initializing AI Historical Simulation Platform")
        start_time = time.time()
        
        try:
            # Initialize core components in parallel. return_exceptions=True keeps
            # one failure from cancelling the siblings, but the results must then
            # be inspected -- otherwise errors vanish and the platform would report
            # a successful startup with broken subsystems.
            results = await asyncio.gather(
                self.hdc_ops.initialize() if hasattr(self.hdc_ops, 'initialize') else asyncio.sleep(0),
                self.memory_manager.initialize(),
                self.memory_hierarchy.initialize(),
                self.consciousness_engine.initialize() if hasattr(self.consciousness_engine, 'initialize') else asyncio.sleep(0),
                self.dialogue_generator.initialize() if hasattr(self.dialogue_generator, 'initialize') else asyncio.sleep(0),
                self.database_manager.initialize(),
                self.performance_monitor.initialize(),
                return_exceptions=True
            )
            for result in results:
                if isinstance(result, BaseException):
                    raise result
            
            # Initialize session manager (after core components are up)
            await self.session_manager.initialize()
            
            # Start background monitoring tasks
            await self._start_background_tasks()
            
            # Load default historical figures
            await self._load_default_figures()
            
            initialization_time = time.time() - start_time
            
            self.is_initialized = True
            logger.info(f"Platform initialization completed in {initialization_time:.2f}s")
            
            # Record initial metrics
            await self.performance_monitor.record_metric("initialization_time", initialization_time)
            
        except Exception as e:
            logger.error(f"Platform initialization failed: {e}")
            raise
    
    async def shutdown(self) -> None:
        """Graceful platform shutdown.

        Signals the background loops to stop, cancels them (their sleeps can
        last up to ten minutes, so waiting for a natural exit could stall
        shutdown), then shuts down subsystems best-effort and persists final
        metrics.
        """
        logger.info("Shutting down AI Historical Simulation Platform")
        
        # Signal shutdown to background tasks
        self.shutdown_event.set()
        
        # Cancel outstanding background tasks so pending asyncio.sleep() calls
        # do not delay shutdown; cancellations are absorbed by return_exceptions.
        for task in self.background_tasks:
            task.cancel()
        if self.background_tasks:
            await asyncio.gather(*self.background_tasks, return_exceptions=True)
        
        # Shut down components best-effort; one failure must not block the rest.
        await asyncio.gather(
            self.session_manager.shutdown(),
            self.database_manager.shutdown(),
            self.performance_monitor.shutdown(),
            self.memory_manager.cleanup(),
            self.memory_hierarchy.cleanup(),
            return_exceptions=True
        )
        
        # Save final metrics
        await self._save_system_metrics()
        
        logger.info("Platform shutdown completed")
    
    async def load_historical_figure(self, figure_name: str, 
                                   force_reload: bool = False) -> HistoricalFigure:
        """
        Load and initialize a historical figure with complete personality encoding.
        
        Pipeline: database fetch -> trait construction -> advanced personality
        encoding -> consciousness state creation -> memory seeding -> cache and
        persist. Figures are cached by display name, so repeat calls are cheap
        unless ``force_reload`` is set.
        
        Args:
            figure_name: Name of the historical figure
            force_reload: Force reload even if already loaded
            
        Returns:
            Fully initialized historical figure
        
        Raises:
            ValueError: If the database has no record for ``figure_name``.
        """
        # Lazily bring the platform up if the caller skipped initialize().
        if not self.is_initialized:
            await self.initialize()
        
        # Check cache first (keyed by display name)
        if not force_reload and figure_name in self.historical_figures:
            return self.historical_figures[figure_name]
        
        logger.info(f"Loading historical figure: {figure_name}")
        start_time = time.time()
        
        try:
            # Load figure data from database
            figure_data = await self.database_manager.load_figure_data(figure_name)
            if not figure_data:
                raise ValueError(f"No data found for figure: {figure_name}")
            
            # Create personality traits from data
            traits = PersonalityTraits.from_dict(figure_data.get('traits', {}))
            
            # Encode complete personality using advanced reconstruction
            personality_vector = await self._encode_advanced_personality(
                traits, 
                figure_data.get('cultural_context', {}),
                figure_data.get('historical_context', {}),
                figure_data.get('biographical_data', {})
            )
            
            # Create consciousness state
            consciousness_state = await self.consciousness_engine.create_consciousness_state(
                personality_vector=personality_vector,
                cultural_context=figure_data.get('cultural_context', {}),
                historical_period=figure_data.get('historical_period', 'unknown')
            )
            
            # Create historical figure.
            # NOTE(review): the id embeds int(time.time()), so a force_reload mints
            # a fresh personality_id while memories stored earlier stay keyed under
            # the old id -- confirm that orphaning is intended.
            personality_id = f"{figure_name.lower().replace(' ', '_')}_{int(time.time())}"
            
            figure = HistoricalFigure(
                name=figure_name,
                personality_id=personality_id,
                personality_vector=personality_vector,
                traits=traits,
                biography=figure_data.get('biography', {}),
                cultural_context=figure_data.get('cultural_context', {}),
                historical_period=figure_data.get('historical_period', 'unknown'),
                consciousness_state=consciousness_state,
                created_at=datetime.now()
            )
            
            # Initialize memory systems for this figure (biographical, trait
            # and cultural memories).
            await self._initialize_figure_memory_systems(figure, figure_data)
            
            # Store in platform cache
            self.historical_figures[figure_name] = figure
            self.figure_name_to_id[figure_name] = personality_id
            
            # Store in database
            await self.database_manager.store_figure(figure)
            
            load_time = time.time() - start_time
            logger.info(f"Successfully loaded {figure_name} in {load_time:.2f}s")
            
            # Record performance metrics
            await self.performance_monitor.record_metric("figure_load_time", load_time)
            await self.performance_monitor.record_event("figure_loaded", figure_name)
            
            return figure
            
        except Exception as e:
            logger.error(f"Failed to load figure '{figure_name}': {e}")
            raise
    
    async def generate_response(self, 
                              session_id: str,
                              figure_name: str, 
                              user_input: str,
                              context: Optional[Dict[str, Any]] = None) -> str:
        """
        Generate high-quality contextual response from historical figure.
        
        Flow: validate session -> load figure -> check response cache ->
        encode input -> retrieve memories -> update consciousness -> generate
        dialogue -> store interaction -> record metrics.
        
        Args:
            session_id: User session identifier
            figure_name: Name of the historical figure
            user_input: User's input message
            context: Optional conversation context
            
        Returns:
            Generated response with <200ms target response time
        
        Raises:
            ValueError: If ``session_id`` does not resolve to a session.
        """
        start_time = time.time()
        
        try:
            # Validate session
            session = await self.session_manager.get_session(session_id)
            if not session:
                raise ValueError(f"Invalid session: {session_id}")
            
            # Load figure if needed (cached after first load)
            figure = await self.load_historical_figure(figure_name)
            
            # Check response cache for similar queries.
            # NOTE(review): hash() on str is salted per process, so keys are not
            # stable across restarts, and hash collisions would serve a wrong
            # response -- consider a content digest instead.
            cache_key = f"{figure_name}:{hash(user_input)}"
            cached_response = self._get_cached_response(cache_key)
            if cached_response:
                # NOTE(review): cache hits return before the session-activity
                # update, memory storage and figure-stat updates below -- confirm
                # that skipping those on a hit is intended.
                await self.performance_monitor.record_metric("cache_hit", 1)
                return cached_response
            
            # Update session activity
            await self.session_manager.update_session_activity(session_id, figure_name, user_input)
            
            # Encode user input with cultural and temporal adaptation
            input_encoding = await self._encode_contextual_input(
                user_input, figure, context or {}
            )
            
            # Retrieve relevant memories with advanced relevance scoring
            relevant_memories = await self._retrieve_contextual_memories(
                figure, input_encoding, max_memories=10
            )
            
            # Update consciousness state based on conversation; the updated
            # state is written back onto the cached figure.
            updated_consciousness = await self.consciousness_engine.update_state(
                figure.consciousness_state,
                user_input=user_input,
                context=context or {},
                memories=relevant_memories
            )
            figure.consciousness_state = updated_consciousness
            
            # Generate response using advanced dialogue system
            response = await self.dialogue_generator.generate_response(
                personality_vector=figure.personality_vector,
                consciousness_state=figure.consciousness_state,
                user_input=user_input,
                relevant_memories=relevant_memories,
                cultural_context=figure.cultural_context,
                conversation_history=session.get_conversation_history(figure_name)
            )
            
            # Store interaction in memory hierarchy (both input and response)
            await self._store_conversation_memory(figure, user_input, response, context or {})
            
            # Update figure statistics
            figure.last_active = datetime.now()
            figure.total_conversations += 1
            
            # Cache response
            self._cache_response(cache_key, response)
            
            # Record performance metrics
            response_time = time.time() - start_time
            await self.performance_monitor.record_metric("response_time", response_time * 1000)  # ms
            await self.performance_monitor.record_event("response_generated", figure_name)
            
            # Ensure <200ms target met (logged only; the response is still returned)
            if response_time > 0.2:
                logger.warning(f"Response time {response_time:.3f}s exceeded 200ms target for {figure_name}")
            
            return response
            
        except Exception as e:
            logger.error(f"Failed to generate response: {e}")
            await self.performance_monitor.record_event("response_error", str(e))
            raise
    
    async def create_cross_temporal_conversation(self,
                                               session_id: str,
                                               figure_names: List[str],
                                               topic: str,
                                               rounds: int = 3) -> List[Dict[str, Any]]:
        """
        Create sophisticated multi-figure conversation across time periods.
        
        Runs ``rounds`` full passes over the figures in the given order; each
        figure responds to a prompt derived from the previous speaker's
        response, and every exchange is appended to the transcript.
        
        Args:
            session_id: Session identifier
            figure_names: List of historical figures to include
            topic: Conversation topic
            rounds: Number of conversation rounds
            
        Returns:
            Complete conversation transcript with metadata
        """
        logger.info(f"Creating cross-temporal conversation: {', '.join(figure_names)} on '{topic}'")
        
        # Load all figures (cached figures return immediately)
        figures = []
        for name in figure_names:
            figure = await self.load_historical_figure(name)
            figures.append(figure)
        
        conversation = []
        # NOTE(review): this derived id is passed to generate_response, which
        # validates sessions via session_manager.get_session -- confirm such
        # derived ids resolve to a session rather than raising ValueError.
        cross_temporal_session_id = f"{session_id}_cross_temporal_{int(time.time())}"
        
        # Create enhanced conversation context shared by every exchange
        conversation_context = {
            'topic': topic,
            'participants': figure_names,
            'cross_temporal': True,
            'historical_periods': [fig.historical_period for fig in figures],
            'cultural_contexts': [fig.cultural_context for fig in figures]
        }
        
        # Initialize conversation with sophisticated prompt
        current_input = self._create_cross_temporal_prompt(topic, figures)
        
        for round_num in range(rounds):
            for i, figure in enumerate(figures):
                # Add temporal context awareness for this speaker
                temporal_context = self._create_temporal_context(figure, figures, conversation)
                
                response = await self.generate_response(
                    session_id=cross_temporal_session_id,
                    figure_name=figure.name,
                    user_input=current_input,
                    context={**conversation_context, **temporal_context}
                )
                
                exchange = {
                    'figure': figure.name,
                    'historical_period': figure.historical_period,
                    'input': current_input,
                    'response': response,
                    'round': round_num + 1,
                    'timestamp': datetime.now().isoformat(),
                    # snapshot of the speaker's state after responding
                    'consciousness_state': asdict(figure.consciousness_state),
                    'cultural_adaptations': temporal_context.get('cultural_adaptations', {})
                }
                
                conversation.append(exchange)
                
                # Prepare input for next participant (remaining figures in this
                # round; empty slice for the round's last speaker).
                current_input = self._create_response_prompt(figure, response, figures[i+1:])
        
        # Record cross-temporal conversation metrics
        await self.performance_monitor.record_event(
            "cross_temporal_conversation", 
            f"{len(figures)}figures_{rounds}rounds"
        )
        
        return conversation
    
    async def get_platform_health(self) -> PlatformHealth:
        """Build a point-in-time health snapshot of the platform.

        Health requires: platform initialized, error rate below 5%, average
        response time under 200ms, and session count under 90% of capacity.
        """
        uptime = (datetime.now() - self.startup_time).total_seconds()
        
        # Gather the raw readings (same order as the metrics were introduced).
        metrics = await self.performance_monitor.get_current_metrics()
        session_count = len(await self.session_manager.get_active_sessions())
        mem_mb = await self._get_memory_usage_mb()
        err_rate = metrics.get('error_rate', 0.0)
        avg_latency = metrics.get('avg_response_time', 0)
        
        # Aggregate verdict from the individual indicators.
        capacity_ok = session_count < self.max_concurrent_sessions * 0.9  # < 90% capacity
        healthy = (self.is_initialized
                   and err_rate < 0.05      # less than 5% error rate
                   and avg_latency < 200    # < 200ms average
                   and capacity_ok)
        
        return PlatformHealth(
            is_healthy=healthy,
            uptime_seconds=uptime,
            active_sessions=session_count,
            loaded_figures=len(self.historical_figures),
            memory_usage_mb=mem_mb,
            avg_response_time_ms=avg_latency,
            error_rate=err_rate,
            last_check=datetime.now()
        )
    
    async def get_system_metrics(self) -> Dict[str, Any]:
        """Assemble a combined metrics report across all subsystems."""
        health = await self.get_platform_health()
        performance_metrics = await self.performance_monitor.get_all_metrics()
        session_metrics = await self.session_manager.get_metrics()
        
        figures = list(self.historical_figures.values())
        # Guard the mean: np.mean of an empty list would be NaN with a warning.
        if figures:
            avg_quality = np.mean([f.avg_response_quality for f in figures])
        else:
            avg_quality = 0.0
        
        report: Dict[str, Any] = {
            'health': asdict(health),
            'performance': performance_metrics,
            'sessions': session_metrics,
        }
        report['figures'] = {
            'loaded': len(figures),
            'total_conversations': sum(f.total_conversations for f in figures),
            'avg_quality': avg_quality,
        }
        report['system'] = {
            'hdc_dimension': self.hdc_dimension,
            'max_concurrent_sessions': self.max_concurrent_sessions,
            'startup_time': self.startup_time.isoformat(),
            'is_initialized': self.is_initialized,
        }
        return report
    
    # Private methods for internal operations
    
    async def _encode_advanced_personality(self, 
                                         traits: PersonalityTraits,
                                         cultural_context: Dict[str, Any],
                                         historical_context: Dict[str, Any],
                                         biographical_data: Dict[str, Any]) -> np.ndarray:
        """Build an enhanced personality vector from traits and context.

        Runs the historical reconstructor, then the conventional personality
        encoder, and bundles the two vectors. When the reconstructor yields
        no vector, the base encoding is bundled with itself as a fallback.
        """
        # Advanced reconstruction from historical evidence.
        reconstructed = await self.historical_reconstructor.reconstruct_personality(
            known_traits=asdict(traits),
            cultural_context=cultural_context,
            historical_context=historical_context,
            biographical_data=biographical_data
        )
        
        # Conventional trait/context encoding.
        base_vector = await self.personality_encoder.encode_complete_personality(
            traits=traits,
            cultural_context=cultural_context,
            historical_context=historical_context
        )
        
        # Merge both signals into the final personality vector.
        return self.hdc_ops.bundling(
            base_vector,
            reconstructed.get('reconstructed_vector', base_vector)
        )
    
    async def _initialize_figure_memory_systems(self, 
                                              figure: HistoricalFigure,
                                              figure_data: Dict[str, Any]) -> None:
        """Seed the figure's memory hierarchy.

        Stores three memory classes, in decreasing importance: biographical
        key events (0.95), structured trait memories (0.9), and cultural
        context entries (0.85).
        """
        pid = figure.personality_id
        
        # Biographical key events carry the highest importance.
        for event in figure_data.get('key_events', []):
            await self.memory_hierarchy.store_memory(
                pid,
                {
                    'content': event,
                    'vector': self.hdc_ops.generate_random_vector('gaussian'),
                    'timestamp': datetime.now(),
                    'type': 'biographical',
                    'importance': 0.95,
                    'source': 'historical_data'
                },
                importance_score=0.95
            )
        
        # Personality traits become structured memories.
        for trait_memory in self.personality_encoder.traits_to_memories(figure.traits):
            await self.memory_hierarchy.store_memory(
                pid,
                trait_memory,
                importance_score=0.9
            )
        
        # Each cultural-context entry is stored as its own memory.
        for context_key, context_value in figure.cultural_context.items():
            await self.memory_hierarchy.store_memory(
                pid,
                {
                    'content': f"{context_key}: {context_value}",
                    'vector': self.hdc_ops.generate_random_vector('gaussian'),
                    'timestamp': datetime.now(),
                    'type': 'cultural_context',
                    'importance': 0.85,
                    'context_type': context_key
                },
                importance_score=0.85
            )
    
    async def _encode_contextual_input(self, 
                                     user_input: str,
                                     figure: HistoricalFigure,
                                     context: Dict[str, Any]) -> Dict[str, Any]:
        """Package user input with figure-specific cultural/temporal context.

        Returns a dict containing the raw input, an HDC input vector, the
        cultural-context entries whose key appears in the (lowercased) input,
        and a temporal-context summary for the figure's era.
        """
        # NOTE(review): a random vector stands in for a real input encoding.
        encoded = self.hdc_ops.generate_random_vector('gaussian')
        
        # Keep only the cultural-context entries mentioned in the input.
        lowered = user_input.lower()
        adaptations = {
            key: value
            for key, value in figure.cultural_context.items()
            if key in lowered
        }
        
        era_context = {
            'historical_period': figure.historical_period,
            'cultural_distance': self._calculate_cultural_distance(figure),
            'anachronism_risk': self._assess_anachronism_risk(user_input, figure)
        }
        
        return {
            'raw_input': user_input,
            'input_vector': encoded,
            'cultural_adaptations': adaptations,
            'temporal_context': era_context,
            'context': context
        }
    
    async def _retrieve_contextual_memories(self,
                                          figure: HistoricalFigure,
                                          input_encoding: Dict[str, Any],
                                          max_memories: int = 10) -> List[Dict[str, Any]]:
        """Return the most relevant memories for the encoded input.

        Over-fetches twice the requested count from the hierarchy, re-ranks
        locally by contextual relevance, and returns the top ``max_memories``.
        """
        candidates = await self.memory_hierarchy.retrieve_relevant_memories(
            figure.personality_id,
            input_encoding['input_vector'],
            max_memories=max_memories * 2  # over-fetch, then re-rank locally
        )
        
        # Stable sort by descending contextual relevance; ties keep the
        # hierarchy's original ordering.
        ranked = sorted(
            candidates,
            key=lambda memory: self._calculate_memory_relevance(memory, input_encoding, figure),
            reverse=True
        )
        return ranked[:max_memories]
    
    async def _store_conversation_memory(self,
                                       figure: HistoricalFigure,
                                       user_input: str,
                                       response: str,
                                       context: Dict[str, Any]) -> None:
        """Persist one conversation turn into the figure's memory hierarchy.

        Stores the user input (importance 0.7) followed by the figure's
        response (importance 0.8), each with its own HDC vector and timestamp.
        """
        for content, memory_type, weight in (
            (user_input, 'user_input', 0.7),
            (response, 'figure_response', 0.8),
        ):
            await self.memory_hierarchy.store_memory(
                figure.personality_id,
                {
                    'content': content,
                    'vector': self.hdc_ops.generate_random_vector('gaussian'),
                    'timestamp': datetime.now(),
                    'type': memory_type,
                    'importance': weight,
                    'context': context
                },
                importance_score=weight
            )
    
    async def _start_background_tasks(self) -> None:
        """Start background monitoring and optimization tasks.

        Strong references are kept in ``self.background_tasks`` so the tasks
        cannot be garbage-collected mid-flight; previously finished tasks were
        never removed, so the set grew stale. Each task now discards itself
        from the set on completion (the pattern recommended by the asyncio
        docs for ``create_task``).
        """
        tasks = [
            asyncio.create_task(self._background_performance_monitor()),
            asyncio.create_task(self._background_cache_cleanup()),
            asyncio.create_task(self._background_metrics_collection()),
            asyncio.create_task(self._background_health_check())
        ]
        
        self.background_tasks.update(tasks)
        for task in tasks:
            # Drop the reference once the task finishes.
            task.add_done_callback(self.background_tasks.discard)
        
        logger.info(f"Started {len(tasks)} background tasks")
    
    async def _background_performance_monitor(self) -> None:
        """Background task for continuous performance monitoring.

        Samples response times, memory usage and session counts roughly every
        30 seconds until shutdown. The waits are shutdown-interruptible so a
        pending sleep (previously a plain ``asyncio.sleep``) cannot delay
        platform shutdown by a full interval.
        """
        while not self.shutdown_event.is_set():
            try:
                # Monitor response times (optional hook on the monitor)
                if hasattr(self.performance_monitor, 'check_response_times'):
                    await self.performance_monitor.check_response_times()
                
                # Monitor memory usage
                memory_usage = await self._get_memory_usage_mb()
                await self.performance_monitor.record_metric("memory_usage_mb", memory_usage)
                
                # Monitor session counts
                active_sessions = len(await self.session_manager.get_active_sessions())
                await self.performance_monitor.record_metric("active_sessions", active_sessions)
                
                # Check every 30 seconds, waking early on shutdown.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=30)
                except asyncio.TimeoutError:
                    pass
                
            except Exception as e:
                logger.error(f"Background performance monitoring error: {e}")
                # Back off longer after an error, still shutdown-interruptible.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=60)
                except asyncio.TimeoutError:
                    pass
    
    async def _background_cache_cleanup(self) -> None:
        """Background task for cache maintenance.

        Evicts expired response-cache entries every 5 minutes. The waits are
        shutdown-interruptible so a pending sleep (previously up to 10 minutes
        of plain ``asyncio.sleep``) cannot delay platform shutdown.
        """
        while not self.shutdown_event.is_set():
            try:
                current_time = datetime.now()
                # Collect first, then delete: never mutate a dict mid-iteration.
                expired_keys = [
                    key for key, (_, timestamp) in self.response_cache.items()
                    if current_time - timestamp > self.cache_ttl
                ]
                for key in expired_keys:
                    del self.response_cache[key]
                
                if expired_keys:
                    logger.debug(f"Cleaned up {len(expired_keys)} expired cache entries")
                
                # Clean every 5 minutes, waking early on shutdown.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=300)
                except asyncio.TimeoutError:
                    pass
                
            except Exception as e:
                logger.error(f"Background cache cleanup error: {e}")
                # Back off longer after an error, still shutdown-interruptible.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=600)
                except asyncio.TimeoutError:
                    pass
    
    async def _background_metrics_collection(self) -> None:
        """Background task for collecting system metrics.

        Persists a full metrics report every minute. The waits are
        shutdown-interruptible so a pending sleep (previously a plain
        ``asyncio.sleep``) cannot delay platform shutdown.
        """
        while not self.shutdown_event.is_set():
            try:
                metrics = await self.get_system_metrics()
                await self.database_manager.store_metrics(metrics)
                
                # Collect every minute, waking early on shutdown.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=60)
                except asyncio.TimeoutError:
                    pass
                
            except Exception as e:
                logger.error(f"Background metrics collection error: {e}")
                # Back off longer after an error, still shutdown-interruptible.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=120)
                except asyncio.TimeoutError:
                    pass
    
    async def _background_health_check(self) -> None:
        """Background task for system health monitoring.

        Checks platform health every 30 seconds and logs a warning when the
        platform is unhealthy. The waits are shutdown-interruptible so a
        pending sleep (previously a plain ``asyncio.sleep``) cannot delay
        platform shutdown.
        """
        while not self.shutdown_event.is_set():
            try:
                health = await self.get_platform_health()
                
                if not health.is_healthy:
                    logger.warning(f"System health check failed: {health}")
                    # Could trigger alerts here
                
                # Check every 30 seconds, waking early on shutdown.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=30)
                except asyncio.TimeoutError:
                    pass
                
            except Exception as e:
                logger.error(f"Background health check error: {e}")
                # Back off longer after an error, still shutdown-interruptible.
                try:
                    await asyncio.wait_for(self.shutdown_event.wait(), timeout=60)
                except asyncio.TimeoutError:
                    pass
    
    async def _load_default_figures(self) -> None:
        """Preload the platform's built-in set of historical figures.

        Failures are logged and skipped so that one bad figure does not
        block the rest of startup.
        """
        for figure_name in ("Napoleon Bonaparte",
                            "William Shakespeare",
                            "Albert Einstein"):
            try:
                await self.load_historical_figure(figure_name)
            except Exception as exc:
                logger.warning(f"Failed to load default figure {figure_name}: {exc}")
            else:
                logger.info(f"Loaded default figure: {figure_name}")
    
    def _get_cached_response(self, cache_key: str) -> Optional[str]:
        """Get cached response if valid."""
        if cache_key in self.response_cache:
            response, timestamp = self.response_cache[cache_key]
            if datetime.now() - timestamp < self.cache_ttl:
                return response
            else:
                del self.response_cache[cache_key]
        return None
    
    def _cache_response(self, cache_key: str, response: str) -> None:
        """Cache response with timestamp."""
        self.response_cache[cache_key] = (response, datetime.now())
        
        # Limit cache size
        if len(self.response_cache) > 1000:
            oldest_key = min(self.response_cache.keys(), 
                           key=lambda k: self.response_cache[k][1])
            del self.response_cache[oldest_key]
    
    def _create_cross_temporal_prompt(self, topic: str, figures: List[HistoricalFigure]) -> str:
        """Build the opening prompt for a cross-temporal round table.

        Frames *topic* for *figures* drawn from different eras, listing
        each figure's historical period in the invitation.
        """
        eras = ', '.join(figure.historical_period for figure in figures)

        return f"""We are gathered here across the boundaries of time to discuss '{topic}'. 
        Though we come from different eras - {eras} - let us share 
        our perspectives on this matter. Each of us brings the wisdom and understanding of 
        our time. What insights can you offer from your unique historical vantage point?"""
    
    def _create_temporal_context(self, 
                               current_figure: HistoricalFigure,
                               all_figures: List[HistoricalFigure],
                               conversation: List[Dict]) -> Dict[str, Any]:
        """Build the temporal-awareness context for one speaker.

        Records which era the current figure speaks from, which other
        eras are present, and the cultural adaptations needed to bridge
        them.
        """
        # Figures other than the current speaker contribute "other" eras.
        peer_periods = [f.historical_period
                        for f in all_figures
                        if f != current_figure]

        context: Dict[str, Any] = {
            'speaking_from_period': current_figure.historical_period,
            'other_periods_present': peer_periods,
            'temporal_awareness': True,
            'conversation_length': len(conversation),
            'cultural_adaptations': self._get_cultural_adaptations(current_figure, all_figures),
        }
        return context
    
    def _create_response_prompt(self, 
                              previous_figure: HistoricalFigure,
                              previous_response: str,
                              remaining_figures: List[HistoricalFigure]) -> str:
        """Prompt the next speaker to react to the previous figure's reply.

        ``remaining_figures`` is kept for signature compatibility; the
        prompt currently references only the previous speaker.
        """
        speaker = previous_figure.name
        era = previous_figure.historical_period
        return f"""{speaker} from the {era} 
        shares this perspective: '{previous_response}'. How do you respond to this from 
        your own historical context and experience?"""
    
    def _calculate_cultural_distance(self, figure: HistoricalFigure) -> float:
        """Return the figure's cultural distance from the present day.

        Distance is the year gap between now and the figure's period,
        normalized by a 2000-year horizon and clamped to [0, 1].

        Args:
            figure: Figure whose ``historical_period`` string is parsed
                for an approximate anchor year.

        Returns:
            Float in [0.0, 1.0]; 0.0 for present- or future-dated periods.
        """
        current_year = datetime.now().year

        # Approximate anchor year parsed from the free-text period string.
        period_year = self._extract_period_year(figure.historical_period)
        year_distance = current_year - period_year

        # Clamp the lower bound too: _extract_period_year can return years
        # up to 2099, which would otherwise yield a negative "distance".
        return min(max(year_distance / 2000.0, 0.0), 1.0)
    
    def _assess_anachronism_risk(self, user_input: str, figure: HistoricalFigure) -> float:
        """Estimate how likely *user_input* is to contain anachronisms.

        Adds 0.2 for each distinct modern term present as a whole word,
        capped at 1.0. ``figure`` is accepted for interface stability
        (e.g. future per-era term lists) but is not consulted yet.

        Returns:
            Risk score in [0.0, 1.0].
        """
        import re  # local import, mirroring _extract_period_year's style

        modern_terms = ['internet', 'computer', 'smartphone', 'AI', 'digital', 'online']

        # Match whole words only: the previous substring search falsely
        # flagged e.g. "rain"/"captain" because they contain "ai".
        # (Trade-off: exact-word matching no longer catches plurals like
        # "computers".)
        tokens = set(re.findall(r"[a-z0-9]+", user_input.lower()))

        risk_score = 0.0
        for term in modern_terms:
            if term.lower() in tokens:
                risk_score += 0.2

        return min(risk_score, 1.0)
    
    def _calculate_memory_relevance(self, 
                                  memory: Dict[str, Any],
                                  input_encoding: Dict[str, Any],
                                  figure: HistoricalFigure) -> float:
        """Score how relevant a stored memory is to the current input.

        Combines HDC cosine similarity with additive boosts for the
        memory's type and its stored importance.
        """
        # Vector similarity between the memory and the encoded input;
        # memories without a vector compare against the zero vector.
        memory_vector = memory.get('vector', np.zeros(self.hdc_dimension))
        base_similarity = self.hdc_ops.cosine_similarity(
            memory_vector,
            input_encoding['input_vector']
        )

        # Memory categories ranked by how strongly they should surface.
        category_boosts = {
            'biographical': 0.2,
            'cultural_context': 0.15,
            'figure_response': 0.1,
            'user_input': 0.05,
        }
        type_boost = category_boosts.get(memory.get('type', ''), 0.0)

        # Importance defaults to a neutral 0.5 when unset.
        importance_boost = 0.1 * memory.get('importance', 0.5)

        return base_similarity + type_boost + importance_boost
    
    def _get_cultural_adaptations(self, 
                                current_figure: HistoricalFigure,
                                all_figures: List[HistoricalFigure]) -> Dict[str, Any]:
        """Describe how this figure's speech should be culturally framed.

        ``all_figures`` is part of the established signature but is not
        yet consulted by the adaptation logic.
        """
        culture = current_figure.cultural_context
        return {
            'language_style': culture.get('language_style', 'formal'),
            'conceptual_framework': culture.get('conceptual_framework', {}),
            'temporal_references': current_figure.historical_period,
        }
    
    def _extract_period_year(self, period: str) -> int:
        """Extract approximate year from historical period string."""
        # Simple pattern matching for common period formats
        import re
        
        # Look for 4-digit years
        year_matches = re.findall(r'\b(1\d{3}|20\d{2})\b', period)
        if year_matches:
            return int(year_matches[0])
        
        # Default mappings for common periods
        period_mappings = {
            'ancient': 0,
            'medieval': 1000,
            'renaissance': 1500,
            'enlightenment': 1700,
            'industrial': 1800,
            'modern': 1900,
            'contemporary': 1950
        }
        
        for period_key, year in period_mappings.items():
            if period_key in period.lower():
                return year
        
        return 1800  # Default fallback
    
    async def _get_memory_usage_mb(self) -> float:
        """Get current memory usage in MB."""
        try:
            import psutil
            process = psutil.Process()
            return process.memory_info().rss / 1024 / 1024
        except ImportError:
            return 0.0
    
    async def _save_system_metrics(self) -> None:
        """Persist a final metrics snapshot to ``system_metrics_final.json``.

        Best-effort: failures are logged rather than raised so shutdown
        can proceed.
        """
        try:
            snapshot = await self.get_system_metrics()
            target = Path('system_metrics_final.json')
            # default=str stringifies non-JSON values (e.g. datetimes)
            # instead of raising during serialization.
            target.write_text(json.dumps(snapshot, indent=2, default=str))
            logger.info(f"Saved final metrics to {target}")
        except Exception as exc:
            logger.error(f"Failed to save final metrics: {exc}")


# Context manager for platform lifecycle
@asynccontextmanager
async def managed_platform(**kwargs):
    """Context manager for platform lifecycle management.

    Constructs a HistoricalSimulationPlatform with *kwargs*, initializes
    it on entry, and guarantees shutdown on exit.

    Yields:
        The initialized platform instance.
    """
    platform = HistoricalSimulationPlatform(**kwargs)
    try:
        await platform.initialize()
        yield platform
    finally:
        # Shutdown runs even if initialize() or the `async with` body
        # raises, so background tasks/sessions are always released.
        await platform.shutdown()