"""
Cross-Temporal Dialogue Generation System.

This module implements the core dialogue generation system for authentic
historical conversations with personality conditioning and real-time validation.
"""

import asyncio
import logging
import re
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union, Any

import numpy as np

from ..personality.models import PersonalityVector
from ..memory.manager import MemoryManager
from .llm_interface import LLMInterface
from .anachronism_detector import AnachronismDetector
from .cultural_adapter import CulturalAdapter

logger = logging.getLogger(__name__)


class DialogueMode(Enum):
    """Dialogue generation modes.

    NOTE(review): defined but not referenced anywhere in this module —
    presumably consumed by callers; confirm before removing.
    """
    HISTORICAL_ACCURACY = "historical_accuracy"  # prioritize period fidelity
    EDUCATIONAL = "educational"                  # optimize for teaching objectives
    ENTERTAINMENT = "entertainment"              # relax accuracy for engagement
    RESEARCH = "research"                        # scholarly / analysis use


@dataclass
class DialogueContext:
    """Context for dialogue generation.

    Carries the historical setting and stylistic constraints that condition
    generation, validation, and anachronism checking.
    """
    # Human-readable era label (e.g. "Renaissance"); echoed into response metadata.
    historical_period: str
    # Identifiers of the conversation participants.
    participants: List[str]
    # Subject of the conversation.
    topic: str
    # Geographic setting of the dialogue.
    location: str
    # Cultural setting descriptor; passed through to LLM conditioning.
    cultural_context: str
    # Era bounds as integer years; negative values read as BCE
    # (the knowledge-boundary code compares against e.g. -3000 for Bronze Age).
    time_period_start: int
    time_period_end: int
    # Requested language register for the LLM.
    language_style: str = "period_appropriate"
    # 0.0 (casual) .. 1.0 (formal) — presumably; TODO confirm scale with consumers.
    formality_level: float = 0.7
    # Learning goals for EDUCATIONAL-style use; empty by default.
    educational_objectives: List[str] = field(default_factory=list)


@dataclass
class GenerationRequest:
    """Request for dialogue generation.

    One request corresponds to one speaker turn in a conversation.
    """
    # Identifier used to fetch the speaker's personality from memory.
    speaker_id: str
    # Historical/cultural setting for this turn.
    context: DialogueContext
    # The prompt/message the speaker is responding to.
    input_message: str
    # Prior turns, forwarded verbatim to the LLM interface.
    conversation_history: List[Dict[str, str]] = field(default_factory=list)
    # Free-form generation constraints; merged with correction constraints on retry.
    constraints: Dict[str, Any] = field(default_factory=dict)
    # Per-request accuracy goal (generator also has its own global target).
    target_accuracy: float = 0.95
    # Maximum response length passed to the LLM (units defined by the LLM interface).
    max_response_length: int = 500
    # LLM sampling temperature; reduced automatically during corrective regeneration.
    temperature: float = 0.7


@dataclass
class GenerationResponse:
    """Response from dialogue generation."""
    # Final (possibly corrected) dialogue text.
    generated_text: str
    # Weighted historical-accuracy score in [0, 1].
    accuracy_score: float
    # Cultural authenticity score from the cultural adapter, in [0, 1].
    cultural_authenticity: float
    # True if anachronisms remained after any corrections were applied.
    anachronism_detected: bool
    # Combined confidence over accuracy, culture, and anachronism checks.
    confidence_score: float
    # Wall-clock generation time in milliseconds.
    generation_time_ms: float
    # Diagnostic details (speaker, period, corrections applied, etc.).
    metadata: Dict[str, Any] = field(default_factory=dict)


class CrossTemporalDialogueGenerator:
    """
    Advanced dialogue generation system for cross-temporal conversations.

    Features:
    - LLM integration with personality conditioning
    - Real-time historical accuracy validation (>95% target)
    - Cultural context adaptation
    - Anachronism prevention and correction
    - Multi-participant conversation management

    Implements personality-conditioned language generation:
    P(output|input) = P_base(output|input) × P_personality(output|P_HDC)
    """

    def __init__(
        self,
        memory_manager: MemoryManager,
        llm_interface: LLMInterface,
        anachronism_detector: AnachronismDetector,
        cultural_adapter: CulturalAdapter,
        target_accuracy: float = 0.95,
        max_workers: int = 4
    ):
        """
        Initialize the dialogue generation system.

        Args:
            memory_manager: Memory system for personality storage
            llm_interface: Interface to language models
            anachronism_detector: System for detecting temporal inconsistencies
            cultural_adapter: System for cultural authenticity
            target_accuracy: Target historical accuracy (default 95%)
            max_workers: Maximum number of worker threads
        """
        self.memory_manager = memory_manager
        self.llm_interface = llm_interface
        self.anachronism_detector = anachronism_detector
        self.cultural_adapter = cultural_adapter
        self.target_accuracy = target_accuracy
        # NOTE(review): no method in this class submits work to this executor;
        # it is only shut down in shutdown(). Confirm whether it is still needed.
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

        # Performance tracking (running counters and averages; see _update_stats)
        self.generation_stats = {
            'total_generations': 0,
            'successful_generations': 0,
            'accuracy_failures': 0,
            'anachronism_detections': 0,
            'average_generation_time': 0.0,
            'average_accuracy_score': 0.0,
            'personality_conditioning_uses': 0,
            'cultural_adaptations': 0
        }

        # Personality cache for faster access.
        # NOTE(review): unbounded — grows with the number of distinct speakers.
        self.personality_cache = {}

        # Knowledge boundary cache, keyed by "start-end" period string.
        self.knowledge_boundaries = {}

        logger.info("CrossTemporalDialogueGenerator initialized with >95% accuracy target")

    async def generate_response(
        self,
        request: GenerationRequest
    ) -> GenerationResponse:
        """
        Generate a historically accurate dialogue response.

        Implements the core generation pipeline with personality conditioning:
        1. Load speaker personality from memory
        2. Apply cultural adaptation
        3. Generate with personality conditioning P(output|input) × P_personality(output|P_HDC)
        4. Validate historical accuracy (>95% target)
        5. Check for anachronisms
        6. Apply corrections if needed

        Args:
            request: Generation request with context and constraints

        Returns:
            Generated response with accuracy metrics

        Raises:
            Re-raises any exception from the underlying subsystems after
            logging it and counting the attempt.
        """
        start_time = time.time()

        try:
            # Load speaker personality from memory (cached after first lookup)
            personality = await self._load_personality(request.speaker_id)

            # Apply cultural adaptation
            adapted_context = await self._adapt_cultural_context(
                request.context, personality
            )

            # Apply knowledge boundary management
            knowledge_boundary = self._get_knowledge_boundary(
                adapted_context.time_period_start, adapted_context.time_period_end
            )

            # Generate initial response using LLM with personality conditioning
            initial_response = await self._generate_with_personality_conditioning(
                request, personality, adapted_context, knowledge_boundary
            )

            # Validate historical accuracy
            accuracy_score = await self._validate_historical_accuracy(
                initial_response, adapted_context
            )

            # Check for anachronisms
            anachronism_result = await self._check_anachronisms(
                initial_response, adapted_context
            )

            # Apply corrections if accuracy is below target or anachronisms detected
            final_response = initial_response
            corrections_applied = False

            if accuracy_score < self.target_accuracy or anachronism_result.detected:
                final_response = await self._apply_corrections(
                    initial_response, request, anachronism_result, adapted_context
                )
                corrections_applied = True

                # Re-validate after corrections (single corrective pass only)
                accuracy_score = await self._validate_historical_accuracy(
                    final_response, adapted_context
                )
                anachronism_result = await self._check_anachronisms(
                    final_response, adapted_context
                )

            # Calculate cultural authenticity
            cultural_score = await self._calculate_cultural_authenticity(
                final_response, adapted_context, personality
            )

            # Calculate confidence score
            confidence = self._calculate_confidence(
                accuracy_score, cultural_score, anachronism_result.confidence
            )

            generation_time = (time.time() - start_time) * 1000

            # Update statistics
            self._update_stats(
                accuracy_score, generation_time,
                anachronism_result.detected, corrections_applied
            )

            return GenerationResponse(
                generated_text=final_response,
                accuracy_score=accuracy_score,
                cultural_authenticity=cultural_score,
                anachronism_detected=anachronism_result.detected,
                confidence_score=confidence,
                generation_time_ms=generation_time,
                metadata={
                    'speaker_id': request.speaker_id,
                    'historical_period': adapted_context.historical_period,
                    # NOTE(review): key says "similarity" but the value is the
                    # vector norm; key kept for consumer compatibility.
                    'personality_similarity': personality.get_vector_norm(),
                    'corrections_applied': corrections_applied,
                    'validation_passes': 2 if corrections_applied else 1,
                    'knowledge_boundary_applied': True,
                    'cultural_context': adapted_context.cultural_context,
                    'target_accuracy_met': accuracy_score >= self.target_accuracy
                }
            )

        except Exception as e:
            logger.error(f"Dialogue generation failed: {e}")
            # Only the total counter is bumped on failure, so success/failure
            # rates in get_performance_metrics() reflect attempts, not passes.
            self.generation_stats['total_generations'] += 1
            raise

    async def _load_personality(self, speaker_id: str) -> PersonalityVector:
        """
        Load personality vector for speaker from memory system.

        Uses HDC-based personality encoding for authentic character representation.
        Falls back to a default PersonalityVector when the speaker is unknown or
        retrieval fails (fallbacks are not cached).
        """
        if speaker_id in self.personality_cache:
            return self.personality_cache[speaker_id]

        try:
            # Retrieve from memory system using HDC operations
            personality_data = await self.memory_manager.retrieve_personality(speaker_id)

            if personality_data:
                personality = PersonalityVector.from_dict(personality_data)
            else:
                # Create default personality if not found
                logger.warning(f"No personality found for {speaker_id}, using default")
                personality = PersonalityVector(person_id=speaker_id)

            # Cache for future use
            self.personality_cache[speaker_id] = personality

            return personality

        except Exception as e:
            logger.error(f"Error loading personality for {speaker_id}: {e}")
            # Return default personality as fallback
            return PersonalityVector(person_id=speaker_id)

    def _get_knowledge_boundary(self, start_period: int, end_period: int) -> Dict[str, Any]:
        """
        Get knowledge boundary constraints for historical time period.

        Manages what knowledge is available to historical figures based on
        their era. Results are cached per (start, end) pair.
        """
        period_key = f"{start_period}-{end_period}"

        if period_key in self.knowledge_boundaries:
            return self.knowledge_boundaries[period_key]

        # Create knowledge boundary based on time period
        boundary = {
            'period_start': start_period,
            'period_end': end_period,
            'available_concepts': self._get_available_concepts(start_period, end_period),
            'unknown_concepts': self._get_unknown_concepts(start_period, end_period),
            'technological_level': self._get_technological_level(start_period, end_period),
            'scientific_knowledge': self._get_scientific_knowledge(start_period, end_period),
            'cultural_awareness': self._get_cultural_awareness(start_period, end_period)
        }

        self.knowledge_boundaries[period_key] = boundary
        return boundary

    def _get_available_concepts(self, start_period: int, end_period: int) -> List[str]:
        """Get concepts that would be known in the time period.

        NOTE(review): gating uses start_period only, while unknown concepts
        are gated on end_period — a period spanning an era boundary may get
        neither the later era's available concepts nor have them marked
        unknown. Confirm intended semantics before changing.
        """
        concepts = ['basic_language', 'human_nature', 'mortality', 'family', 'community']

        # Add concepts based on time period (negative years = BCE)
        if start_period >= -3000:  # Bronze Age
            concepts.extend(['agriculture', 'writing', 'mathematics', 'astronomy'])

        if start_period >= -500:  # Classical Period
            concepts.extend(['philosophy', 'logic', 'geometry', 'politics'])

        if start_period >= 1000:  # Medieval Period
            concepts.extend(['theology', 'scholasticism', 'feudalism'])

        if start_period >= 1400:  # Renaissance
            concepts.extend(['humanism', 'perspective', 'scientific_method'])

        if start_period >= 1650:  # Enlightenment
            concepts.extend(['rationalism', 'natural_rights', 'separation_of_powers'])

        if start_period >= 1800:  # Modern Era
            concepts.extend(['nationalism', 'industrialization', 'evolution'])

        return concepts

    def _get_unknown_concepts(self, start_period: int, end_period: int) -> List[str]:
        """Get concepts that would be unknown in the time period.

        Only end_period matters: a concept is unknown if the period ends
        before the concept's historical emergence.
        """
        unknown = []

        if end_period < 1400:
            unknown.extend(['printing_press', 'perspective_art', 'heliocentrism'])

        if end_period < 1650:
            unknown.extend(['scientific_revolution', 'telescope', 'microscope'])

        if end_period < 1800:
            unknown.extend(['steam_engine', 'electricity', 'vaccination'])

        if end_period < 1900:
            unknown.extend(['germ_theory', 'evolution', 'psychology'])

        if end_period < 1950:
            unknown.extend(['quantum_mechanics', 'relativity', 'dna_structure'])

        return unknown

    def _get_technological_level(self, start_period: int, end_period: int) -> Dict[str, float]:
        """Get technological sophistication level (0-1) for different domains."""
        levels = {}

        # Transportation
        if end_period < 1800:
            levels['transportation'] = 0.2  # Horse, sailing
        elif end_period < 1900:
            levels['transportation'] = 0.5  # Steam, railway
        else:
            levels['transportation'] = 0.8  # Automobile, aviation

        # Communication
        if end_period < 1800:
            levels['communication'] = 0.1  # Letter, messenger
        elif end_period < 1900:
            levels['communication'] = 0.4  # Telegraph
        else:
            levels['communication'] = 0.7  # Telephone, radio

        # Medicine
        if end_period < 1800:
            levels['medicine'] = 0.2  # Herbal, basic surgery
        elif end_period < 1900:
            levels['medicine'] = 0.4  # Antiseptics, anesthesia
        else:
            levels['medicine'] = 0.6  # Antibiotics, vaccines

        return levels

    def _get_scientific_knowledge(self, start_period: int, end_period: int) -> Dict[str, float]:
        """Get scientific knowledge level for different fields."""
        knowledge = {}

        # Physics
        if end_period < 1600:
            knowledge['physics'] = 0.2  # Aristotelian
        elif end_period < 1700:
            knowledge['physics'] = 0.5  # Newtonian
        elif end_period < 1900:
            knowledge['physics'] = 0.7  # Classical mechanics
        else:
            knowledge['physics'] = 0.9  # Modern physics

        # Biology
        if end_period < 1800:
            knowledge['biology'] = 0.2  # Basic observation
        elif end_period < 1900:
            knowledge['biology'] = 0.5  # Classification, evolution
        else:
            knowledge['biology'] = 0.8  # Genetics, molecular

        # Chemistry
        if end_period < 1700:
            knowledge['chemistry'] = 0.1  # Alchemy
        elif end_period < 1800:
            knowledge['chemistry'] = 0.4  # Phlogiston theory
        elif end_period < 1900:
            knowledge['chemistry'] = 0.7  # Atomic theory
        else:
            knowledge['chemistry'] = 0.9  # Modern chemistry

        return knowledge

    def _get_cultural_awareness(self, start_period: int, end_period: int) -> Dict[str, float]:
        """Get cultural and social awareness level."""
        awareness = {}

        # Global awareness
        if end_period < 1500:
            awareness['global_cultures'] = 0.2  # Local/regional
        elif end_period < 1800:
            awareness['global_cultures'] = 0.5  # Colonial contact
        else:
            awareness['global_cultures'] = 0.8  # Global communication

        # Social concepts
        if end_period < 1700:
            awareness['social_equality'] = 0.2  # Hierarchical society
        elif end_period < 1800:
            awareness['social_equality'] = 0.5  # Enlightenment ideas
        else:
            awareness['social_equality'] = 0.8  # Democratic ideals

        return awareness

    async def _adapt_cultural_context(
        self,
        context: DialogueContext,
        personality: PersonalityVector
    ) -> DialogueContext:
        """Adapt dialogue context for cultural authenticity."""
        adapted_context = await self.cultural_adapter.adapt_context(context, personality)
        self.generation_stats['cultural_adaptations'] += 1
        return adapted_context

    async def _generate_with_personality_conditioning(
        self,
        request: GenerationRequest,
        personality: PersonalityVector,
        adapted_context: DialogueContext,
        knowledge_boundary: Dict[str, Any]
    ) -> str:
        """
        Generate response using LLM with personality conditioning.

        Implements: P(output|input) = P_base(output|input) × P_personality(output|P_HDC)

        The personality conditioning integrates:
        1. HDC personality vector representation
        2. Historical knowledge boundaries
        3. Cultural context adaptation
        4. Time-period appropriate language patterns
        """
        # Create comprehensive personality conditioning
        personality_conditioning = self._create_personality_conditioning(
            personality, adapted_context, knowledge_boundary
        )

        # Generate response using LLM interface with conditioning
        response = await self.llm_interface.generate_response(
            prompt=request.input_message,
            context=adapted_context.__dict__,
            personality_conditioning=personality_conditioning,
            conversation_history=request.conversation_history,
            temperature=request.temperature,
            max_length=request.max_response_length
        )

        self.generation_stats['personality_conditioning_uses'] += 1

        return response

    def _create_personality_conditioning(
        self,
        personality: PersonalityVector,
        context: DialogueContext,
        knowledge_boundary: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Create comprehensive personality conditioning parameters for LLM.

        Integrates HDC personality representation with historical constraints.
        Optional personality attributes (cognitive_traits, confidence_score)
        are probed with hasattr so older PersonalityVector variants still work.
        """
        conditioning = {
            # Core personality traits from HDC encoding
            'big_five_traits': personality.big_five_traits.to_dict(),
            'cultural_dimensions': personality.cultural_dimensions.to_dict(),
            'cognitive_traits': personality.cognitive_traits.to_dict() if hasattr(personality, 'cognitive_traits') else {},

            # Historical context
            'historical_period': context.historical_period,
            'time_period_start': context.time_period_start,
            'time_period_end': context.time_period_end,
            'location': context.location,
            'cultural_context': context.cultural_context,

            # Language and communication style
            'formality_level': context.formality_level,
            'language_style': context.language_style,

            # Knowledge boundaries
            'available_concepts': knowledge_boundary.get('available_concepts', []),
            'unknown_concepts': knowledge_boundary.get('unknown_concepts', []),
            'technological_level': knowledge_boundary.get('technological_level', {}),
            'scientific_knowledge': knowledge_boundary.get('scientific_knowledge', {}),
            'cultural_awareness': knowledge_boundary.get('cultural_awareness', {}),

            # Confidence and authenticity thresholds
            'confidence_threshold': personality.confidence_score.overall_confidence if hasattr(personality, 'confidence_score') else 0.8,
            'authenticity_target': 0.9,
            'accuracy_target': self.target_accuracy
        }

        return conditioning

    async def _validate_historical_accuracy(
        self,
        response: str,
        context: DialogueContext
    ) -> float:
        """
        Validate historical accuracy of generated response.

        Uses multiple validation methods to achieve >95% accuracy target:
        1. Factual accuracy validation
        2. Linguistic period appropriateness
        3. Social/cultural context accuracy
        4. Knowledge boundary compliance
        """
        # Run multiple validation methods in parallel
        validations = await asyncio.gather(
            self._validate_factual_accuracy(response, context),
            self._validate_linguistic_accuracy(response, context),
            self._validate_social_context_accuracy(response, context),
            self._validate_knowledge_boundary_compliance(response, context)
        )

        # Weighted combination of validation scores for >95% target
        weights = [0.3, 0.25, 0.25, 0.2]  # Factual gets highest weight
        accuracy_score = sum(w * v for w, v in zip(weights, validations))

        # Ensure score doesn't exceed 1.0
        return min(accuracy_score, 1.0)

    async def _validate_factual_accuracy(
        self,
        response: str,
        context: DialogueContext
    ) -> float:
        """Validate factual accuracy against historical knowledge."""
        # Check for anachronistic facts
        anachronism_result = await self.anachronism_detector.detect_anachronisms(
            response, context
        )

        # Base score reduced by factual errors
        base_score = 0.95

        if anachronism_result.detected:
            # Penalty based on severity of anachronisms
            penalty = sum(match.severity for match in anachronism_result.matches) / 10.0
            base_score -= penalty

        return max(0.0, base_score)

    async def _validate_linguistic_accuracy(
        self,
        response: str,
        context: DialogueContext
    ) -> float:
        """Validate linguistic appropriateness for time period."""
        # Check language patterns, vocabulary, grammar for historical period
        base_score = 0.9

        # Simple validation - check for obviously modern language.
        # Word-boundary matching avoids false positives such as 'like'
        # matching 'likely'/'alike' or 'cool' matching 'coolness'.
        modern_terms = ['okay', 'cool', 'awesome', 'totally', 'like', 'basically']
        modern_count = sum(
            1 for term in modern_terms
            if re.search(rf'\b{re.escape(term)}\b', response, re.IGNORECASE)
        )

        linguistic_penalty = modern_count * 0.1

        return max(0.0, base_score - linguistic_penalty)

    async def _validate_social_context_accuracy(
        self,
        response: str,
        context: DialogueContext
    ) -> float:
        """Validate social and cultural context appropriateness."""
        # Use cultural adapter for authenticity assessment with a throwaway
        # neutral personality, since no speaker personality is in scope here.
        cultural_score = await self.cultural_adapter.calculate_authenticity(
            response, context, PersonalityVector(person_id="temp")
        )

        return cultural_score

    async def _validate_knowledge_boundary_compliance(
        self,
        response: str,
        context: DialogueContext
    ) -> float:
        """Validate compliance with historical knowledge boundaries."""
        knowledge_boundary = self._get_knowledge_boundary(
            context.time_period_start, context.time_period_end
        )

        base_score = 0.9

        # Check for mentions of unknown concepts. Concepts are stored with
        # underscores (e.g. 'dna_structure') which never appear verbatim in
        # prose, so match the space-normalized form with word boundaries
        # (also prevents 'atom' matching inside 'anatomy').
        unknown_concepts = knowledge_boundary.get('unknown_concepts', [])
        violations = sum(
            1 for concept in unknown_concepts
            if re.search(
                rf"\b{re.escape(concept.replace('_', ' '))}\b",
                response,
                re.IGNORECASE,
            )
        )

        boundary_penalty = violations * 0.15

        return max(0.0, base_score - boundary_penalty)

    async def _check_anachronisms(
        self,
        response: str,
        context: DialogueContext
    ):
        """Check for temporal inconsistencies via the anachronism detector."""
        return await self.anachronism_detector.detect_anachronisms(
            response, context
        )

    async def _apply_corrections(
        self,
        response: str,
        request: GenerationRequest,
        anachronism_result,
        context: DialogueContext
    ) -> str:
        """
        Apply corrections to improve accuracy and remove anachronisms.

        Multi-step correction process:
        1. Apply anachronism corrections
        2. Apply cultural corrections
        3. Apply knowledge boundary corrections
        4. Regenerate if necessary
        """
        corrections = []

        # Collect anachronism corrections
        if anachronism_result.detected:
            corrections.extend(anachronism_result.corrections)

        # Apply cultural corrections
        cultural_corrections = await self.cultural_adapter.suggest_corrections(
            response, context
        )
        corrections.extend(cultural_corrections)

        # Apply knowledge boundary corrections
        knowledge_corrections = self._suggest_knowledge_boundary_corrections(
            response, context
        )
        corrections.extend(knowledge_corrections)

        # Apply correction list
        corrected_response = await self._apply_correction_list(
            response, corrections, request
        )

        return corrected_response

    def _suggest_knowledge_boundary_corrections(
        self,
        response: str,
        context: DialogueContext
    ) -> List[Dict[str, str]]:
        """Suggest corrections for knowledge boundary violations."""
        corrections = []

        knowledge_boundary = self._get_knowledge_boundary(
            context.time_period_start, context.time_period_end
        )

        unknown_concepts = knowledge_boundary.get('unknown_concepts', [])

        # Simple replacement suggestions for common violations
        replacements = {
            'computer': 'calculating device',
            'internet': 'communication network',
            'telephone': 'speaking device',
            'automobile': 'horseless carriage',
            'television': 'moving picture box',
            'evolution': 'natural development',
            'bacteria': 'unseen agents of disease',
            'atom': 'indivisible particle'
        }

        for concept in unknown_concepts:
            # Word-boundary, case-insensitive detection; keeps detection
            # consistent with the case-insensitive application step in
            # _apply_correction_list.
            if concept in replacements and re.search(
                rf'\b{re.escape(concept)}\b', response, re.IGNORECASE
            ):
                corrections.append({
                    'type': 'replacement',
                    'original': concept,
                    'replacement': replacements[concept],
                    'explanation': f"'{concept}' unknown in {context.historical_period}",
                    'confidence': 0.9
                })

        return corrections

    async def _apply_correction_list(
        self,
        response: str,
        corrections: List[Dict[str, str]],
        request: GenerationRequest
    ) -> str:
        """Apply a list of corrections to the response."""
        corrected = response

        for correction in corrections:
            if correction['type'] == 'replacement':
                # Case-insensitive replacement: corrections are detected
                # case-insensitively, so 'Computer' must be replaced even
                # though the stored original is lowercase.
                corrected = re.sub(
                    re.escape(correction['original']),
                    correction['replacement'],
                    corrected,
                    flags=re.IGNORECASE,
                )
            elif correction['type'] == 'regeneration':
                # Regenerate specific parts with additional constraints
                corrected = await self._regenerate_with_constraints(
                    corrected, correction.get('constraints', {}), request
                )

        return corrected

    async def _regenerate_with_constraints(
        self,
        partial_response: str,
        constraints: Dict[str, Any],
        request: GenerationRequest
    ) -> str:
        """Regenerate parts of response with additional constraints."""
        # Lower temperature for corrections (floored at 0.5), but never
        # *raise* it above what the caller requested — max(0.5, t - 0.2)
        # alone would bump e.g. t=0.3 up to 0.5.
        corrective_temperature = min(
            request.temperature,
            max(0.5, request.temperature - 0.2),
        )

        # Add constraints to the original request
        enhanced_request = GenerationRequest(
            speaker_id=request.speaker_id,
            context=request.context,
            input_message=request.input_message,
            conversation_history=request.conversation_history,
            constraints={**request.constraints, **constraints},
            target_accuracy=request.target_accuracy,
            max_response_length=request.max_response_length,
            temperature=corrective_temperature
        )

        # Regenerate with constraints
        regenerated = await self._generate_with_personality_conditioning(
            enhanced_request,
            await self._load_personality(request.speaker_id),
            request.context,
            self._get_knowledge_boundary(
                request.context.time_period_start,
                request.context.time_period_end
            )
        )

        return regenerated

    async def _calculate_cultural_authenticity(
        self,
        response: str,
        context: DialogueContext,
        personality: PersonalityVector
    ) -> float:
        """Calculate cultural authenticity score."""
        return await self.cultural_adapter.calculate_authenticity(
            response, context, personality
        )

    def _calculate_confidence(
        self,
        accuracy_score: float,
        cultural_score: float,
        anachronism_confidence: float
    ) -> float:
        """Calculate overall confidence score as a weighted average, capped at 1.0."""
        weights = [0.4, 0.3, 0.3]
        scores = [accuracy_score, cultural_score, anachronism_confidence]

        confidence = sum(w * s for w, s in zip(weights, scores))
        return min(confidence, 1.0)

    def _update_stats(
        self,
        accuracy_score: float,
        generation_time: float,
        anachronism_detected: bool,
        corrections_applied: bool
    ):
        """Update generation statistics with running averages."""
        self.generation_stats['total_generations'] += 1

        if accuracy_score >= self.target_accuracy:
            self.generation_stats['successful_generations'] += 1
        else:
            self.generation_stats['accuracy_failures'] += 1

        if anachronism_detected:
            self.generation_stats['anachronism_detections'] += 1

        # Update running averages: new_avg = (old_avg * (n-1) + value) / n
        total = self.generation_stats['total_generations']
        self.generation_stats['average_generation_time'] = (
            (self.generation_stats['average_generation_time'] * (total - 1) +
             generation_time) / total
        )

        self.generation_stats['average_accuracy_score'] = (
            (self.generation_stats['average_accuracy_score'] * (total - 1) +
             accuracy_score) / total
        )

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Get current performance metrics, including derived rates."""
        total = self.generation_stats['total_generations']

        # Avoid division by zero before any generation has run
        if total == 0:
            return self.generation_stats.copy()

        metrics = self.generation_stats.copy()
        metrics.update({
            'success_rate': self.generation_stats['successful_generations'] / total,
            'accuracy_failure_rate': self.generation_stats['accuracy_failures'] / total,
            'anachronism_rate': self.generation_stats['anachronism_detections'] / total,
            'target_accuracy_met': (
                self.generation_stats['average_accuracy_score'] >= self.target_accuracy
            ),
            'personality_conditioning_rate': self.generation_stats['personality_conditioning_uses'] / total,
            'cultural_adaptation_rate': self.generation_stats['cultural_adaptations'] / total
        })

        return metrics

    def reset_stats(self):
        """Reset performance statistics."""
        self.generation_stats = {
            'total_generations': 0,
            'successful_generations': 0,
            'accuracy_failures': 0,
            'anachronism_detections': 0,
            'average_generation_time': 0.0,
            'average_accuracy_score': 0.0,
            'personality_conditioning_uses': 0,
            'cultural_adaptations': 0
        }
        logger.info("Generation statistics reset")

    async def batch_generate(
        self,
        requests: List[GenerationRequest]
    ) -> List[GenerationResponse]:
        """Generate responses for multiple requests in parallel.

        Exceptions from individual requests are converted to error
        responses rather than propagated, so the result list always has
        one entry per request, in order.
        """
        tasks = [self.generate_response(request) for request in requests]
        responses = await asyncio.gather(*tasks, return_exceptions=True)

        # Handle any exceptions
        results = []
        for i, response in enumerate(responses):
            if isinstance(response, Exception):
                logger.error(f"Batch generation failed for request {i}: {response}")
                # Create error response
                results.append(GenerationResponse(
                    generated_text="[Generation Error]",
                    accuracy_score=0.0,
                    cultural_authenticity=0.0,
                    anachronism_detected=True,
                    confidence_score=0.0,
                    generation_time_ms=0.0,
                    metadata={'error': str(response)}
                ))
            else:
                results.append(response)

        return results

    def shutdown(self):
        """Shutdown the dialogue generator, waiting for executor tasks to finish."""
        self.executor.shutdown(wait=True)
        logger.info("CrossTemporalDialogueGenerator shutdown complete")