"""
Comprehensive Validator

This module provides a multi-dimensional evaluation matrix with weighted
scoring that integrates Historical Accuracy (40%), Personality Consistency
(30%), Technical Reliability (20%), and Safety (10%), along with real-time
validation pipeline integration and comprehensive audit logging and monitoring.

Key Features:
- Multi-dimensional evaluation matrix with weighted scoring
- Historical Accuracy (40%), Personality Consistency (30%), Technical Reliability (20%), Safety (10%)
- Real-time validation pipeline integration
- Comprehensive audit logging and monitoring
- Automated quality assurance workflows
- Performance benchmarking and optimization
"""

import numpy as np
from typing import Dict, List, Tuple, Optional, Any, Callable, Union
from dataclasses import dataclass, field
from enum import Enum
import logging
from datetime import datetime, timedelta
from collections import defaultdict, deque
import json
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
import threading
import uuid

from .historical_accuracy_validator import HistoricalAccuracyValidator, ValidationResult
from .personality_consistency_tester import PersonalityConsistencyTester, ConsistencyResult
from .persona_vector_safety_monitor import PersonaVectorSafetyMonitor, SafetyViolation
from .bias_detection_system import BiasDetectionSystem, BiasDetectionResult

logger = logging.getLogger(__name__)


class ValidationDimension(Enum):
    """Dimensions of comprehensive validation.

    The first four correspond to the weighted scoring matrix (historical
    accuracy, personality consistency, technical reliability, safety);
    BIAS_DETECTION is an additional dimension whose default weight is 0.
    """
    HISTORICAL_ACCURACY = "historical_accuracy"
    PERSONALITY_CONSISTENCY = "personality_consistency" 
    TECHNICAL_RELIABILITY = "technical_reliability"
    SAFETY = "safety"
    BIAS_DETECTION = "bias_detection"


class ValidationStatus(Enum):
    """Lifecycle status of a validation request."""
    PENDING = "pending"          # queued (e.g. real-time placeholder result)
    IN_PROGRESS = "in_progress"  # dimension validators currently running
    COMPLETED = "completed"      # finished successfully
    FAILED = "failed"            # aborted by an exception
    TIMEOUT = "timeout"          # exceeded the configured validation timeout


class QualityLevel(Enum):
    """Discrete quality bands derived from a 0.0-1.0 validation score.

    See DimensionScore.quality_level for the threshold mapping.
    """
    EXCELLENT = "excellent"  # 90-100%
    GOOD = "good"           # 80-89%
    ACCEPTABLE = "acceptable"  # 70-79%
    POOR = "poor"           # 60-69%
    UNACCEPTABLE = "unacceptable"  # <60%


@dataclass
class ValidationWeights:
    """Validation dimension weights for scoring.

    Weights are expected to sum to 1.0; a warning is logged when they do
    not. Call normalize() to rescale them in place so they sum to exactly
    1.0.
    """
    historical_accuracy: float = 0.40
    personality_consistency: float = 0.30
    technical_reliability: float = 0.20
    safety: float = 0.10
    bias_detection: float = 0.0  # Additional weight if needed

    def _total(self) -> float:
        """Return the sum of all dimension weights (shared by the checks below)."""
        return (self.historical_accuracy + self.personality_consistency +
                self.technical_reliability + self.safety + self.bias_detection)

    def __post_init__(self):
        """Warn when weights do not approximately sum to 1.0."""
        total = self._total()
        # Tolerance is deliberately loose (+/-0.05) to allow rounding in
        # hand-written configs; normalize() fixes any drift exactly.
        if not (0.95 <= total <= 1.05):
            logger.warning("Validation weights sum to %.3f instead of 1.0", total)

    def normalize(self):
        """Rescale weights in place so they sum to 1.0 (no-op when total is 0)."""
        total = self._total()
        if total > 0:
            self.historical_accuracy /= total
            self.personality_consistency /= total
            self.technical_reliability /= total
            self.safety /= total
            self.bias_detection /= total


@dataclass
class DimensionScore:
    """Score produced by a single validation dimension."""
    dimension: ValidationDimension
    score: float  # 0.0 to 1.0
    confidence: float  # 0.0 to 1.0
    weight: float
    details: Dict[str, Any]
    validation_time: float
    errors: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)

    @property
    def weighted_score(self) -> float:
        """Raw score scaled by this dimension's weight."""
        return self.weight * self.score

    def quality_level(self) -> QualityLevel:
        """Map the raw score onto its discrete quality band."""
        bands = (
            (0.9, QualityLevel.EXCELLENT),
            (0.8, QualityLevel.GOOD),
            (0.7, QualityLevel.ACCEPTABLE),
            (0.6, QualityLevel.POOR),
        )
        for threshold, level in bands:
            if self.score >= threshold:
                return level
        return QualityLevel.UNACCEPTABLE


@dataclass
class ComprehensiveValidationResult:
    """Aggregated outcome of a multi-dimensional validation run."""
    validation_id: str
    timestamp: datetime
    overall_score: float  # 0.0 to 1.0
    overall_confidence: float  # 0.0 to 1.0
    quality_level: QualityLevel
    
    # Dimension scores
    dimension_scores: Dict[ValidationDimension, DimensionScore]
    
    # Validation metadata
    figure_id: str
    content: str
    context: Dict[str, Any]
    validation_time: float
    status: ValidationStatus
    
    # Issues and recommendations
    critical_issues: List[str]
    warnings: List[str]
    recommendations: List[str]
    
    # Detailed results from each validator
    historical_accuracy_result: Optional[ValidationResult] = None
    personality_consistency_result: Optional[ConsistencyResult] = None
    safety_violations: List[SafetyViolation] = field(default_factory=list)
    bias_detection_results: List[BiasDetectionResult] = field(default_factory=list)
    
    # Technical metrics
    performance_metrics: Dict[str, Any] = field(default_factory=dict)
    
    def passes_quality_threshold(self, threshold: float = 0.8) -> bool:
        """True when the overall score meets or exceeds the threshold."""
        return self.overall_score >= threshold
    
    def has_critical_issues(self) -> bool:
        """True when any critical issue was recorded."""
        return bool(self.critical_issues)
    
    def requires_manual_review(self) -> bool:
        """True when the result should be escalated to a human reviewer."""
        if self.has_critical_issues():
            return True
        if self.overall_confidence < 0.7:
            return True
        return not self.passes_quality_threshold()


@dataclass
class ValidationPipeline:
    """Configuration for validation pipeline."""
    enabled_dimensions: List[ValidationDimension]  # dimensions this pipeline runs
    weights: ValidationWeights                     # per-dimension scoring weights
    quality_threshold: float = 0.8       # minimum acceptable overall score
    timeout_seconds: float = 300.0  # 5 minutes
    parallel_execution: bool = True      # presumably mirrors config['parallel_execution'] — confirm
    real_time_monitoring: bool = True    # presumably mirrors config['real_time_monitoring'] — confirm
    audit_logging: bool = True           # whether audit-log entries are recorded


@dataclass
class AuditLogEntry:
    """Entry in validation audit log."""
    timestamp: datetime
    validation_id: str   # UUID of the validation this event belongs to
    event_type: str      # e.g. 'validation_started', 'validation_completed', 'validation_failed'
    figure_id: str
    dimension: Optional[ValidationDimension]  # None for whole-validation events
    details: Dict[str, Any]
    user_id: Optional[str] = None
    session_id: Optional[str] = None


class ComprehensiveValidator:
    """
    Comprehensive validator integrating all validation dimensions.
    
    This system provides multi-dimensional evaluation with weighted scoring,
    real-time validation pipeline integration, and comprehensive audit logging.
    """
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize comprehensive validator.
        
        Args:
            config: Configuration dictionary with validation parameters.
                Recognized keys: 'historical_accuracy', 'personality_consistency',
                'safety_monitor', 'bias_detection' (per-component sub-configs),
                'weights', 'quality_threshold', 'validation_timeout',
                'max_workers', 'real_time_monitoring'.
        """
        self.config = config or {}
        
        # Initialize validation components (one per validation dimension)
        self.historical_validator = HistoricalAccuracyValidator(
            self.config.get('historical_accuracy', {})
        )
        self.consistency_tester = PersonalityConsistencyTester(
            self.config.get('personality_consistency', {})
        )
        self.safety_monitor = PersonaVectorSafetyMonitor(
            self.config.get('safety_monitor', {})
        )
        self.bias_detector = BiasDetectionSystem(
            self.config.get('bias_detection', {})
        )
        
        # Scoring weights: documented defaults (40/30/20/10), each overridable
        # per-dimension via config['weights'].
        self.default_weights = ValidationWeights()
        if 'weights' in self.config:
            weight_config = self.config['weights']
            self.default_weights = ValidationWeights(
                historical_accuracy=weight_config.get('historical_accuracy', 0.40),
                personality_consistency=weight_config.get('personality_consistency', 0.30),
                technical_reliability=weight_config.get('technical_reliability', 0.20),
                safety=weight_config.get('safety', 0.10),
                bias_detection=weight_config.get('bias_detection', 0.0)
            )
        
        self.quality_threshold = self.config.get('quality_threshold', 0.8)      # minimum passing overall score
        self.validation_timeout = self.config.get('validation_timeout', 300.0)  # seconds
        self.max_workers = self.config.get('max_workers', 4)
        
        # Validation state: in-flight validations plus bounded histories
        self.active_validations: Dict[str, ComprehensiveValidationResult] = {}
        self.validation_history = deque(maxlen=10000)
        self.audit_log = deque(maxlen=50000)
        
        # Performance tracking (aggregates updated after each validation)
        self.performance_stats = {
            'total_validations': 0,
            'successful_validations': 0,
            'failed_validations': 0,
            'average_validation_time': 0.0,
            'dimension_performance': defaultdict(lambda: {'count': 0, 'avg_time': 0.0, 'avg_score': 0.0})
        }
        
        # Thread pool shared by parallel dimension validation and the monitor loop
        self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
        
        # Real-time monitoring: a daemon thread drains monitoring_queue
        self.monitoring_enabled = self.config.get('real_time_monitoring', True)
        self.monitoring_thread: Optional[threading.Thread] = None
        self.monitoring_queue = deque()
        self.monitoring_lock = threading.Lock()
        
        logger.info("ComprehensiveValidator initialized with %d validation dimensions",
                   len(ValidationDimension))
        
        # Start monitoring last so the thread only ever sees fully-initialized state
        if self.monitoring_enabled:
            self._start_monitoring()
    
    def _start_monitoring(self):
        """Launch the background monitoring thread unless it is already running."""
        existing = self.monitoring_thread
        if existing is not None and existing.is_alive():
            return  # already running; nothing to do

        worker = threading.Thread(target=self._monitoring_loop, daemon=True)
        self.monitoring_thread = worker
        worker.start()
        logger.info("Started real-time validation monitoring")
    
    def _monitoring_loop(self):
        """Real-time monitoring loop.

        Drains queued validation requests and dispatches each to the thread
        pool without awaiting completion, sleeping briefly between polls to
        avoid busy-waiting. Runs until self.monitoring_enabled is cleared.
        """
        while self.monitoring_enabled:
            try:
                with self.monitoring_lock:
                    if self.monitoring_queue:
                        validation_request = self.monitoring_queue.popleft()
                    else:
                        validation_request = None
                
                if validation_request:
                    # Fire-and-forget: the monitoring loop must never block on
                    # a validation, so the returned Future is intentionally
                    # discarded (previously bound to an unused local).
                    self.executor.submit(
                        self._execute_validation_request,
                        validation_request
                    )
                
                time.sleep(0.1)  # Short sleep to prevent busy waiting
                
            except Exception as e:
                logger.error("Error in validation monitoring loop: %s", e)
                time.sleep(1.0)  # back off after an unexpected error
    
    def validate_comprehensive(self,
                             content: str,
                             figure_id: str,
                             persona_vector: Optional[np.ndarray] = None,
                             context: Optional[Dict[str, Any]] = None,
                             weights: Optional[ValidationWeights] = None,
                             dimensions: Optional[List[ValidationDimension]] = None,
                             real_time: bool = False) -> ComprehensiveValidationResult:
        """
        Perform comprehensive multi-dimensional validation.
        
        Args:
            content: Content to validate
            figure_id: Historical figure identifier
            persona_vector: Optional persona vector for safety monitoring
            context: Additional context information
            weights: Custom validation weights (defaults to configured weights)
            dimensions: Specific dimensions to validate (defaults to all)
            real_time: If True (and monitoring is enabled), queue the request
                for asynchronous processing and return a pending placeholder
            
        Returns:
            ComprehensiveValidationResult with detailed validation analysis,
            or a pending placeholder when processed asynchronously
        """
        validation_id = str(uuid.uuid4())
        # Timing is measured inside _execute_validation_request so both the
        # synchronous and queued paths report it consistently (an unused
        # start_time local was removed here).
        
        # Fall back to configured defaults where arguments were omitted
        weights = weights or self.default_weights
        dimensions = dimensions or list(ValidationDimension)
        context = context or {}
        
        logger.info("Starting comprehensive validation %s for figure %s",
                   validation_id, figure_id)
        
        # Bundle everything the executor needs into a single request dict
        validation_request = {
            'validation_id': validation_id,
            'content': content,
            'figure_id': figure_id,
            'persona_vector': persona_vector,
            'context': context,
            'weights': weights,
            'dimensions': dimensions,
            'timestamp': datetime.now()
        }
        
        # Audit trail: record that validation started
        self._log_audit_event(
            validation_id, 'validation_started', figure_id,
            details={'dimensions': [d.value for d in dimensions]}
        )
        
        if real_time and self.monitoring_enabled:
            # Queue for real-time processing by the monitoring thread
            with self.monitoring_lock:
                self.monitoring_queue.append(validation_request)
            
            # Return placeholder result - real result will be available later
            return self._create_pending_result(validation_id, figure_id, content, context)
        else:
            # Synchronous processing
            return self._execute_validation_request(validation_request)
    
    def _execute_validation_request(self, request: Dict[str, Any]) -> ComprehensiveValidationResult:
        """Execute validation request.

        Runs the requested dimensions (in parallel or sequentially per
        config), aggregates scores, records audit and performance data, and
        always removes the entry from active_validations on exit. Returns a
        COMPLETED result, or a failure result if anything raises.
        """
        validation_id = request['validation_id']
        content = request['content']
        figure_id = request['figure_id']
        persona_vector = request['persona_vector']
        context = request['context']
        weights = request['weights']
        dimensions = request['dimensions']
        start_time = time.time()
        
        try:
            # Initialize a result shell; scores are filled in below
            result = ComprehensiveValidationResult(
                validation_id=validation_id,
                timestamp=datetime.now(),
                overall_score=0.0,
                overall_confidence=0.0,
                quality_level=QualityLevel.UNACCEPTABLE,
                dimension_scores={},
                figure_id=figure_id,
                content=content,
                context=context,
                validation_time=0.0,
                status=ValidationStatus.IN_PROGRESS,
                critical_issues=[],
                warnings=[],
                recommendations=[]
            )
            
            # Register as active so dimension validators can attach detailed results
            self.active_validations[validation_id] = result
            
            # Execute validation dimensions in parallel (default) or sequentially
            if self.config.get('parallel_execution', True):
                dimension_results = self._validate_dimensions_parallel(
                    dimensions, content, figure_id, persona_vector, context, weights
                )
            else:
                dimension_results = self._validate_dimensions_sequential(
                    dimensions, content, figure_id, persona_vector, context, weights
                )
            
            # Process dimension results
            result.dimension_scores = dimension_results
            
            # Calculate overall scores
            self._calculate_overall_scores(result, weights)
            
            # Analyze results and generate recommendations
            self._analyze_validation_results(result)
            
            # Update performance metrics
            result.validation_time = time.time() - start_time
            result.status = ValidationStatus.COMPLETED
            
            # Log completion
            self._log_audit_event(
                validation_id, 'validation_completed', figure_id,
                details={
                    'overall_score': result.overall_score,
                    'quality_level': result.quality_level.value,
                    'validation_time': result.validation_time
                }
            )
            
            # Update statistics
            self._update_performance_stats(result)
            
            # Store in history
            self.validation_history.append(result)
            
            logger.info("Comprehensive validation %s completed: score=%.3f, quality=%s, time=%.2fs",
                       validation_id, result.overall_score, result.quality_level.value, result.validation_time)
            
            return result
            
        except Exception as e:
            logger.error("Validation %s failed: %s", validation_id, e)
            
            # Create failure result
            result = self._create_failure_result(validation_id, figure_id, content, context, str(e))
            result.validation_time = time.time() - start_time
            
            self._log_audit_event(
                validation_id, 'validation_failed', figure_id,
                details={'error': str(e)}
            )
            
            return result
        
        finally:
            # Clean up active validation (runs on both success and failure paths)
            if validation_id in self.active_validations:
                del self.active_validations[validation_id]
    def _validate_dimensions_parallel(self,
                                    dimensions: List[ValidationDimension],
                                    content: str,
                                    figure_id: str,
                                    persona_vector: Optional[np.ndarray],
                                    context: Dict[str, Any],
                                    weights: ValidationWeights) -> Dict[ValidationDimension, DimensionScore]:
        """Validate dimensions in parallel on the shared thread pool.

        Per-dimension failures are converted into error scores. If the
        overall timeout (self.validation_timeout) expires, as_completed
        raises concurrent.futures.TimeoutError, which propagates to
        _execute_validation_request and is reported as a failed validation.
        """
        futures = {}
        
        # Submit one task per requested dimension
        for dimension in dimensions:
            future = self.executor.submit(
                self._validate_single_dimension,
                dimension, content, figure_id, persona_vector, context, weights
            )
            futures[future] = dimension
        
        # Collect results as they complete
        results = {}
        for future in as_completed(futures, timeout=self.validation_timeout):
            dimension = futures[future]  # hoisted: needed on both paths below
            try:
                results[dimension] = future.result()
            except Exception as e:
                results[dimension] = self._create_error_dimension_score(dimension, str(e))
                logger.error("Error validating %s: %s", dimension.value, e)
        
        return results
    
    def _validate_dimensions_sequential(self,
                                      dimensions: List[ValidationDimension],
                                      content: str,
                                      figure_id: str,
                                      persona_vector: Optional[np.ndarray],
                                      context: Dict[str, Any],
                                      weights: ValidationWeights) -> Dict[ValidationDimension, DimensionScore]:
        """Validate each requested dimension one at a time, in order.

        A failure in one dimension is converted into an error score so the
        remaining dimensions still run.
        """
        scores: Dict[ValidationDimension, DimensionScore] = {}
        
        for dim in dimensions:
            try:
                score = self._validate_single_dimension(
                    dim, content, figure_id, persona_vector, context, weights
                )
            except Exception as exc:
                logger.error("Error validating %s: %s", dim.value, exc)
                score = self._create_error_dimension_score(dim, str(exc))
            scores[dim] = score
        
        return scores
    
    def _validate_single_dimension(self,
                                 dimension: ValidationDimension,
                                 content: str,
                                 figure_id: str,
                                 persona_vector: Optional[np.ndarray],
                                 context: Dict[str, Any],
                                 weights: ValidationWeights) -> DimensionScore:
        """Dispatch validation of a single dimension to its handler.

        Each handler measures its own validation time (an unused start_time
        local was removed here). Any exception from a handler is converted
        into an error dimension score so one failing dimension cannot abort
        the whole validation.
        """
        try:
            if dimension == ValidationDimension.HISTORICAL_ACCURACY:
                return self._validate_historical_accuracy(
                    content, figure_id, context, weights.historical_accuracy
                )
            
            elif dimension == ValidationDimension.PERSONALITY_CONSISTENCY:
                return self._validate_personality_consistency(
                    content, figure_id, persona_vector, context, weights.personality_consistency
                )
            
            elif dimension == ValidationDimension.TECHNICAL_RELIABILITY:
                return self._validate_technical_reliability(
                    content, figure_id, persona_vector, context, weights.technical_reliability
                )
            
            elif dimension == ValidationDimension.SAFETY:
                return self._validate_safety(
                    content, figure_id, persona_vector, context, weights.safety
                )
            
            elif dimension == ValidationDimension.BIAS_DETECTION:
                return self._validate_bias_detection(
                    content, figure_id, context, weights.bias_detection
                )
            
            else:
                raise ValueError(f"Unknown validation dimension: {dimension}")
                
        except Exception as e:
            logger.error("Error in %s validation: %s", dimension.value, e)
            return self._create_error_dimension_score(dimension, str(e))
    
    def _validate_historical_accuracy(self, content: str, figure_id: str,
                                    context: Dict[str, Any], weight: float) -> DimensionScore:
        """Validate historical accuracy dimension.

        Delegates to HistoricalAccuracyValidator and converts its result
        into a DimensionScore; the detailed result is attached to the active
        validation record when one still exists.
        """
        start_time = time.time()
        
        # Extract historical period from context
        historical_period = context.get('historical_period', 'Unknown Period')
        
        # Perform historical accuracy validation
        result = self.historical_validator.validate_historical_accuracy(
            content, figure_id, historical_period
        )
        
        validation_time = time.time() - start_time
        
        # Convert to dimension score
        dimension_score = DimensionScore(
            dimension=ValidationDimension.HISTORICAL_ACCURACY,
            score=result.accuracy_score,
            confidence=result.confidence,
            weight=weight,
            details={
                'validation_type': result.validation_type.value,
                'expert_area': result.expert_area.value,
                'sources_consulted': result.sources_consulted,
                'inconsistencies': result.inconsistencies,
                'recommendations': result.recommendations
            },
            validation_time=validation_time
        )
        
        # Store detailed result, guarding against the validation having been
        # cleaned up already: the previous unguarded indexing could raise
        # KeyError. This mirrors the guarded pattern used by the other
        # dimension validators.
        validation_id = self._get_current_validation_id()
        if validation_id in self.active_validations:
            self.active_validations[validation_id].historical_accuracy_result = result
        
        return dimension_score
    
    def _validate_personality_consistency(self, content: str, figure_id: str,
                                        persona_vector: Optional[np.ndarray],
                                        context: Dict[str, Any], weight: float) -> DimensionScore:
        """Validate personality consistency dimension.

        NOTE(review): `content` and `persona_vector` are not forwarded to the
        tester -- consistency is evaluated from figure_id alone, and the
        persona vector only gates whether the test runs at all. Confirm this
        is intentional.
        """
        start_time = time.time()
        
        if persona_vector is None:
            logger.warning("No persona vector provided for personality consistency validation")
            # Without a vector we skip testing: neutral score, low confidence.
            return DimensionScore(
                dimension=ValidationDimension.PERSONALITY_CONSISTENCY,
                score=0.5,  # Neutral score
                confidence=0.3,  # Low confidence
                weight=weight,
                details={'error': 'No persona vector provided'},
                validation_time=0.01,
                warnings=['No persona vector provided for consistency testing']
            )
        
        # Test personality consistency
        result = self.consistency_tester.test_consistency(figure_id)
        
        validation_time = time.time() - start_time
        
        dimension_score = DimensionScore(
            dimension=ValidationDimension.PERSONALITY_CONSISTENCY,
            score=result.overall_consistency,
            confidence=result.confidence_level,
            weight=weight,
            details={
                'temporal_continuity': result.temporal_continuity,
                'cross_situational_consistency': result.cross_situational_consistency,
                'trait_consistency_scores': {k.value: v for k, v in result.trait_consistency_scores.items()},
                'meets_threshold': result.meets_threshold,
                'recommendations': result.recommendations
            },
            validation_time=validation_time
        )
        
        if not result.meets_threshold:
            dimension_score.warnings.append(f"Consistency below threshold: {result.overall_consistency:.3f}")
        
        # Store detailed result on the active validation (guarded: the entry
        # may already have been cleaned up)
        validation_id = self._get_current_validation_id()
        if validation_id in self.active_validations:
            self.active_validations[validation_id].personality_consistency_result = result
        
        return dimension_score
    
    def _validate_technical_reliability(self, content: str, figure_id: str,
                                      persona_vector: Optional[np.ndarray],
                                      context: Dict[str, Any], weight: float) -> DimensionScore:
        """Validate technical reliability dimension.

        Aggregates sub-metrics (content quality, optional vector quality,
        response consistency, system performance) into their mean.
        """
        started = time.time()
        
        details: Dict[str, Any] = {}
        metric_values: List[float] = []
        
        def record(name: str, value: float) -> None:
            # Track each sub-metric both in the details dict and for the mean.
            details[name] = value
            metric_values.append(value)
        
        # Content quality metrics
        record('content_quality', self._assess_content_quality(content))
        
        # Vector quality metrics (only when a persona vector is available)
        if persona_vector is not None:
            record('vector_quality', self._assess_vector_quality(persona_vector))
        
        # Response consistency
        record('response_consistency', self._assess_response_consistency(content, figure_id))
        
        # System performance
        record('system_performance', self._assess_system_performance(context))
        
        return DimensionScore(
            dimension=ValidationDimension.TECHNICAL_RELIABILITY,
            score=np.mean(metric_values),
            confidence=0.8,  # Technical metrics tend to be reliable
            weight=weight,
            details=details,
            validation_time=time.time() - started
        )
    
    def _validate_safety(self, content: str, figure_id: str,
                        persona_vector: Optional[np.ndarray],
                        context: Dict[str, Any], weight: float) -> DimensionScore:
        """Validate safety dimension.

        Starts from a perfect score (1.0) and multiplies in penalties from
        persona-vector violations, content safety, and response
        appropriateness.
        """
        start_time = time.time()
        
        safety_score = 1.0  # Start with perfect safety score
        confidence = 1.0
        details = {}
        violations = []
        
        # Monitor persona vector safety (if available)
        if persona_vector is not None:
            vector_violations = self.safety_monitor.monitor_persona_vector(
                persona_vector, figure_id, context, real_time=False
            )
            violations.extend(vector_violations)
            
            if vector_violations:
                # Reduce safety score based on violations.
                # NOTE(review): assumes violation.severity lies in [0, 1] --
                # a severity above 1 would drive the score negative; confirm.
                max_severity = max(v.severity for v in vector_violations)
                safety_score *= (1.0 - max_severity)
        
        # Content safety analysis
        content_safety = self._assess_content_safety(content)
        safety_score *= content_safety
        details['content_safety'] = content_safety
        
        # Response appropriateness
        response_appropriateness = self._assess_response_appropriateness(content, figure_id, context)
        safety_score *= response_appropriateness
        details['response_appropriateness'] = response_appropriateness
        
        validation_time = time.time() - start_time
        
        dimension_score = DimensionScore(
            dimension=ValidationDimension.SAFETY,
            score=safety_score,
            confidence=confidence,
            weight=weight,
            details=details,
            validation_time=validation_time
        )
        
        # Add warnings for safety violations
        for violation in violations:
            dimension_score.warnings.append(f"Safety violation: {violation.deviation_type.value}")
        
        # Store safety violations on the active validation (guarded: the
        # entry may already have been cleaned up)
        validation_id = self._get_current_validation_id()
        if validation_id in self.active_validations:
            self.active_validations[validation_id].safety_violations.extend(violations)
        
        return dimension_score
    
    def _validate_bias_detection(self, content: str, figure_id: str,
                               context: Dict[str, Any], weight: float) -> DimensionScore:
        """Validate bias detection dimension.

        Converts detected biases into a score where 1.0 means no bias; the
        most severe detected bias determines the penalty.
        """
        start_time = time.time()
        
        # Detect bias in content
        bias_results = self.bias_detector.detect_bias(content, context)
        
        # Map severity labels to numerical penalties; unknown or missing
        # labels fall back to a mid-range penalty of 0.5.
        severity_mapping = {
            'low': 0.1,
            'moderate': 0.3,
            'high': 0.6,
            'critical': 1.0
        }
        
        # Calculate bias score (1.0 = no bias, 0.0 = maximum bias)
        if bias_results:
            # Take the max over the *numeric* penalties. The previous code
            # took max() over the severity labels, which compares strings
            # alphabetically (ranking 'moderate' above 'critical') and raised
            # ValueError when no result exposed a '.value' attribute.
            max_bias_value = max(
                severity_mapping.get(getattr(result.severity, 'value', None), 0.5)
                for result in bias_results
            )
            bias_score = 1.0 - max_bias_value
        else:
            bias_score = 1.0  # No bias detected
        
        confidence = 0.9 if bias_results else 0.7  # Lower confidence when no bias detected
        
        validation_time = time.time() - start_time
        
        details = {
            'bias_count': len(bias_results),
            'bias_types': [result.bias_type.value for result in bias_results],
            'bias_categories': [result.bias_category.value for result in bias_results]
        }
        
        dimension_score = DimensionScore(
            dimension=ValidationDimension.BIAS_DETECTION,
            score=bias_score,
            confidence=confidence,
            weight=weight,
            details=details,
            validation_time=validation_time
        )
        
        # Add warnings for detected biases
        for bias_result in bias_results:
            dimension_score.warnings.append(f"Bias detected: {bias_result.bias_type.value}")
        
        # Store bias results on the active validation (guarded: the entry may
        # already have been cleaned up)
        validation_id = self._get_current_validation_id()
        if validation_id in self.active_validations:
            self.active_validations[validation_id].bias_detection_results.extend(bias_results)
        
        return dimension_score
    
    # Technical reliability assessment methods
    
    def _assess_content_quality(self, content: str) -> float:
        """Assess content quality metrics."""
        quality_score = 1.0
        
        # Length appropriateness
        if len(content) < 50:
            quality_score *= 0.7  # Too short
        elif len(content) > 5000:
            quality_score *= 0.9  # Very long but acceptable
        
        # Basic language quality
        sentences = content.split('.')
        if sentences:
            avg_sentence_length = np.mean([len(s.split()) for s in sentences if s.strip()])
            
            if avg_sentence_length < 5:
                quality_score *= 0.8  # Very short sentences
            elif avg_sentence_length > 50:
                quality_score *= 0.9  # Very long sentences
        
        return max(0.0, min(1.0, quality_score))
    
    def _assess_vector_quality(self, persona_vector: np.ndarray) -> float:
        """Assess persona vector quality."""
        quality_score = 1.0
        
        # Check vector properties
        vector_norm = np.linalg.norm(persona_vector)
        if vector_norm < 0.1:
            quality_score *= 0.3  # Very small magnitude
        elif vector_norm > 10.0:
            quality_score *= 0.7  # Very large magnitude
        
        # Check for NaN or infinite values
        if np.any(np.isnan(persona_vector)) or np.any(np.isinf(persona_vector)):
            quality_score = 0.0
        
        # Check distribution properties
        vector_std = np.std(persona_vector)
        if vector_std < 0.01:
            quality_score *= 0.5  # Too uniform
        
        return quality_score
    
    def _assess_response_consistency(self, content: str, figure_id: str) -> float:
        """Assess response consistency."""
        # Simplified consistency check - in production would be more sophisticated
        return 0.85  # Default good consistency score
    
    def _assess_system_performance(self, context: Dict[str, Any]) -> float:
        """Assess system performance metrics."""
        performance_score = 0.9  # Default good performance
        
        # Check for performance indicators in context
        if 'response_time' in context:
            response_time = context['response_time']
            if response_time > 10.0:  # More than 10 seconds
                performance_score *= 0.7
            elif response_time > 5.0:  # More than 5 seconds
                performance_score *= 0.9
        
        return performance_score
    
    # Safety assessment methods
    
    def _assess_content_safety(self, content: str) -> float:
        """Assess content safety."""
        # Simple content safety check
        unsafe_keywords = [
            'violence', 'inappropriate', 'harmful', 'dangerous',
            'illegal', 'unethical', 'offensive'
        ]
        
        content_lower = content.lower()
        unsafe_count = sum(1 for keyword in unsafe_keywords if keyword in content_lower)
        
        # Reduce safety score based on unsafe content
        safety_reduction = min(0.8, unsafe_count * 0.1)
        safety_score = 1.0 - safety_reduction
        
        return max(0.0, safety_score)
    
    def _assess_response_appropriateness(self, content: str, figure_id: str, 
                                       context: Dict[str, Any]) -> float:
        """Assess response appropriateness for historical context."""
        # Check for historical appropriateness
        historical_period = context.get('historical_period', '')
        
        # Simple appropriateness check
        appropriateness_score = 0.9
        
        # Check for modern references in historical context
        modern_terms = ['internet', 'computer', 'smartphone', 'television', 'airplane']
        if any(term in content.lower() for term in modern_terms):
            if 'ancient' in historical_period.lower() or 'medieval' in historical_period.lower():
                appropriateness_score *= 0.3  # Major anachronism
        
        return appropriateness_score
    
    # Result processing methods
    
    def _calculate_overall_scores(self, result: ComprehensiveValidationResult, 
                                 weights: ValidationWeights):
        """Calculate overall validation scores."""
        if not result.dimension_scores:
            result.overall_score = 0.0
            result.overall_confidence = 0.0
            result.quality_level = QualityLevel.UNACCEPTABLE
            return
        
        # Calculate weighted overall score
        total_weighted_score = 0.0
        total_weight = 0.0
        confidence_scores = []
        
        for dimension, dimension_score in result.dimension_scores.items():
            total_weighted_score += dimension_score.weighted_score
            total_weight += dimension_score.weight
            confidence_scores.append(dimension_score.confidence)
        
        # Normalize if needed
        if total_weight > 0:
            result.overall_score = total_weighted_score / total_weight
        else:
            result.overall_score = 0.0
        
        # Calculate overall confidence
        result.overall_confidence = np.mean(confidence_scores) if confidence_scores else 0.0
        
        # Determine quality level
        result.quality_level = self._determine_quality_level(result.overall_score)
    
    def _determine_quality_level(self, score: float) -> QualityLevel:
        """Map a numeric score in [0, 1] to a discrete QualityLevel."""
        # Descending cutoffs; first match wins.
        thresholds = (
            (0.9, QualityLevel.EXCELLENT),
            (0.8, QualityLevel.GOOD),
            (0.7, QualityLevel.ACCEPTABLE),
            (0.6, QualityLevel.POOR),
        )
        for cutoff, level in thresholds:
            if score >= cutoff:
                return level
        return QualityLevel.UNACCEPTABLE
    
    def _analyze_validation_results(self, result: ComprehensiveValidationResult):
        """Derive critical issues, warnings, and recommendations for ``result``.

        Scores below 0.6 are critical; scores below 0.8 produce warnings
        plus a dimension-specific recommendation.
        """
        critical_issues: List[str] = []
        warnings: List[str] = []
        recommendations: List[str] = []
        
        # Recommendation text for each low-scoring dimension.
        dimension_advice = {
            ValidationDimension.HISTORICAL_ACCURACY:
                "Review historical facts and cross-reference with authoritative sources",
            ValidationDimension.PERSONALITY_CONSISTENCY:
                "Improve personality consistency across different contexts",
            ValidationDimension.TECHNICAL_RELIABILITY:
                "Address technical reliability issues and system performance",
            ValidationDimension.SAFETY:
                "Review content for safety violations and inappropriate material",
            ValidationDimension.BIAS_DETECTION:
                "Address detected biases and improve fairness",
        }
        
        for dimension, dim_score in result.dimension_scores.items():
            if dim_score.score < 0.6:
                critical_issues.append(
                    f"{dimension.value} score critically low: {dim_score.score:.3f}")
            elif dim_score.score < 0.8:
                warnings.append(
                    f"{dimension.value} score below optimal: {dim_score.score:.3f}")
            
            # Bubble up dimension-level diagnostics.
            warnings.extend(dim_score.warnings)
            critical_issues.extend(dim_score.errors)
        
        # Recommend a fix for every dimension that scored below optimal.
        for dimension, dim_score in result.dimension_scores.items():
            if dim_score.score < 0.8 and dimension in dimension_advice:
                recommendations.append(dimension_advice[dimension])
        
        # Overall recommendations
        if result.overall_score < self.quality_threshold:
            recommendations.append("Overall quality below threshold - comprehensive review required")
        
        if result.overall_confidence < 0.7:
            recommendations.append("Low validation confidence - additional validation recommended")
        
        result.critical_issues = critical_issues
        result.warnings = warnings
        result.recommendations = recommendations
    
    # Utility methods
    
    def _get_current_validation_id(self) -> Optional[str]:
        """Get current validation ID from thread context."""
        # Simplified implementation - in production would use proper thread-local storage
        if self.active_validations:
            return list(self.active_validations.keys())[-1]
        return None
    
    def _create_pending_result(self, validation_id: str, figure_id: str, 
                             content: str, context: Dict[str, Any]) -> ComprehensiveValidationResult:
        """Build a placeholder result for a validation that is still running.

        All scores start at zero with PENDING status; the single warning
        tells consumers the result is not final yet.
        """
        fields = {
            'validation_id': validation_id,
            'timestamp': datetime.now(),
            'overall_score': 0.0,
            'overall_confidence': 0.0,
            'quality_level': QualityLevel.UNACCEPTABLE,
            'dimension_scores': {},
            'figure_id': figure_id,
            'content': content,
            'context': context,
            'validation_time': 0.0,
            'status': ValidationStatus.PENDING,
            'critical_issues': [],
            'warnings': ['Validation in progress'],
            'recommendations': [],
        }
        return ComprehensiveValidationResult(**fields)
    
    def _create_failure_result(self, validation_id: str, figure_id: str,
                             content: str, context: Dict[str, Any], 
                             error: str) -> ComprehensiveValidationResult:
        """Build a FAILED result recording why validation could not finish.

        Scores are zeroed, the error text becomes a critical issue, and a
        retry recommendation is attached.
        """
        fields = {
            'validation_id': validation_id,
            'timestamp': datetime.now(),
            'overall_score': 0.0,
            'overall_confidence': 0.0,
            'quality_level': QualityLevel.UNACCEPTABLE,
            'dimension_scores': {},
            'figure_id': figure_id,
            'content': content,
            'context': context,
            'validation_time': 0.0,
            'status': ValidationStatus.FAILED,
            'critical_issues': [f"Validation failed: {error}"],
            'warnings': [],
            'recommendations': ["Retry validation after addressing technical issues"],
        }
        return ComprehensiveValidationResult(**fields)
    
    def _create_error_dimension_score(self, dimension: ValidationDimension, 
                                    error: str) -> DimensionScore:
        """Build a zero-valued DimensionScore carrying a validation error.

        Used when a dimension validator raises. The zero weight keeps the
        failed dimension out of the weight-normalized overall score.
        """
        # Record the error both in details and in the errors list so it
        # surfaces during result analysis.
        return DimensionScore(
            dimension=dimension,
            score=0.0,
            confidence=0.0,
            weight=0.0,
            details={'error': error},
            validation_time=0.0,
            errors=[error],
        )
    
    def _log_audit_event(self, validation_id: str, event_type: str, figure_id: str,
                        dimension: Optional[ValidationDimension] = None,
                        details: Optional[Dict[str, Any]] = None,
                        user_id: Optional[str] = None):
        """Append a structured entry to the in-memory audit log.

        Arguments mirror AuditLogEntry fields; ``details`` defaults to an
        empty dict when not provided.
        """
        self.audit_log.append(AuditLogEntry(
            timestamp=datetime.now(),
            validation_id=validation_id,
            event_type=event_type,
            figure_id=figure_id,
            dimension=dimension,
            details=details or {},
            user_id=user_id,
        ))
    
    def _update_performance_stats(self, result: ComprehensiveValidationResult):
        """Fold one finished validation into the running performance stats.

        Maintains success/failure counters plus running averages for the
        overall validation time and per-dimension time/score.
        """
        stats = self.performance_stats
        stats['total_validations'] += 1
        
        if result.status == ValidationStatus.COMPLETED:
            stats['successful_validations'] += 1
        else:
            stats['failed_validations'] += 1
        
        # Running average of total validation time (equivalent to a full
        # recompute without storing every sample).
        n = stats['total_validations']
        prev_avg = stats['average_validation_time']
        stats['average_validation_time'] = ((prev_avg * (n - 1)) + result.validation_time) / n
        
        # Per-dimension running averages for time and score.
        for dimension, dim_score in result.dimension_scores.items():
            dim_stats = stats['dimension_performance'][dimension.value]
            dim_stats['count'] += 1
            k = dim_stats['count']
            
            dim_stats['avg_time'] = (
                (dim_stats['avg_time'] * (k - 1)) + dim_score.validation_time) / k
            dim_stats['avg_score'] = (
                (dim_stats['avg_score'] * (k - 1)) + dim_score.score) / k
    
    # Public interface methods
    
    def get_validation_result(self, validation_id: str) -> Optional[ComprehensiveValidationResult]:
        """Look up a validation result by ID.

        Active (in-flight) validations take precedence over history.

        Returns:
            The matching result, or None when the ID is unknown.
        """
        active = self.active_validations.get(validation_id)
        if active is not None:
            return active
        
        # Fall back to a linear scan of the history buffer.
        return next(
            (r for r in self.validation_history if r.validation_id == validation_id),
            None,
        )
    
    def get_validation_history(self, figure_id: Optional[str] = None,
                             limit: Optional[int] = None) -> List[ComprehensiveValidationResult]:
        """Return validation history, newest first.

        Args:
            figure_id: When given, keep only results for that figure.
            limit: When given, truncate to the newest ``limit`` entries.
        """
        if figure_id:
            history = [r for r in self.validation_history if r.figure_id == figure_id]
        else:
            history = list(self.validation_history)
        
        # Newest first.
        history.sort(key=lambda r: r.timestamp, reverse=True)
        
        return history[:limit] if limit else history
    
    def get_performance_statistics(self) -> Dict[str, Any]:
        """Return a snapshot of validation performance statistics.

        Adds derived metrics (success/failure rates) and current system
        status to a shallow copy of the internal counters.

        Returns:
            Dict with counters, rates, and monitoring status. The rate
            keys are always present (0.0 when no validations have run)
            so consumers get a stable schema without key checks.
        """
        stats = self.performance_stats.copy()  # shallow copy; nested dicts stay shared
        
        # Derived metrics - previously omitted entirely when no
        # validations had run, forcing callers to guard against KeyError.
        total = stats['total_validations']
        if total > 0:
            stats['success_rate'] = stats['successful_validations'] / total
            stats['failure_rate'] = stats['failed_validations'] / total
        else:
            stats['success_rate'] = 0.0
            stats['failure_rate'] = 0.0
        
        # Add current system status
        stats['active_validations'] = len(self.active_validations)
        stats['monitoring_enabled'] = self.monitoring_enabled
        stats['queue_size'] = len(self.monitoring_queue)
        
        return stats
    
    def get_audit_log(self, validation_id: Optional[str] = None,
                     event_type: Optional[str] = None,
                     limit: Optional[int] = None) -> List[AuditLogEntry]:
        """Return audit log entries, newest first, with optional filters."""
        def keep(entry) -> bool:
            # Apply only the filters the caller supplied.
            if validation_id and entry.validation_id != validation_id:
                return False
            if event_type and entry.event_type != event_type:
                return False
            return True
        
        selected = [e for e in self.audit_log if keep(e)]
        
        # Newest first.
        selected.sort(key=lambda e: e.timestamp, reverse=True)
        
        return selected[:limit] if limit else selected
    
    def export_validation_report(self, validation_ids: Optional[List[str]] = None,
                               figure_id: Optional[str] = None) -> Dict[str, Any]:
        """Assemble a comprehensive validation report.

        Args:
            validation_ids: Explicit IDs to report on; unknown IDs are skipped.
            figure_id: When no IDs are given, report on this figure's
                history (or the full history when None).

        Returns:
            Report dict, or ``{'error': ...}`` when nothing matches.
        """
        # Resolve the result set: explicit IDs win over history lookup.
        if validation_ids:
            looked_up = (self.get_validation_result(vid) for vid in validation_ids)
            results = [r for r in looked_up if r is not None]
        else:
            results = self.get_validation_history(figure_id)
        
        if not results:
            return {'error': 'No validation results found'}
        
        return {
            'report_timestamp': datetime.now(),
            'validation_count': len(results),
            'figure_id': figure_id,
            'validation_results': results,
            'summary_statistics': self._calculate_report_statistics(results),
            'performance_statistics': self.get_performance_statistics(),
            'recommendations': self._generate_report_recommendations(results),
            'system_configuration': {
                'quality_threshold': self.quality_threshold,
                'validation_timeout': self.validation_timeout,
                'default_weights': self.default_weights
            }
        }
    
    def _calculate_report_statistics(self, results: List[ComprehensiveValidationResult]) -> Dict[str, Any]:
        """Calculate statistics for validation report."""
        if not results:
            return {}
        
        overall_scores = [r.overall_score for r in results]
        
        stats = {
            'average_overall_score': np.mean(overall_scores),
            'median_overall_score': np.median(overall_scores),
            'min_overall_score': np.min(overall_scores),
            'max_overall_score': np.max(overall_scores),
            'std_overall_score': np.std(overall_scores),
            'quality_distribution': {},
            'dimension_averages': {}
        }
        
        # Quality level distribution
        quality_counts = Counter(r.quality_level for r in results)
        stats['quality_distribution'] = {q.value: count for q, count in quality_counts.items()}
        
        # Dimension averages
        for dimension in ValidationDimension:
            dimension_scores = []
            for result in results:
                if dimension in result.dimension_scores:
                    dimension_scores.append(result.dimension_scores[dimension].score)
            
            if dimension_scores:
                stats['dimension_averages'][dimension.value] = np.mean(dimension_scores)
        
        return stats
    
    def _generate_report_recommendations(self, results: List[ComprehensiveValidationResult]) -> List[str]:
        """Generate recommendations based on validation results."""
        recommendations = set()
        
        # Analyze common issues
        dimension_issues = defaultdict(int)
        
        for result in results:
            for dimension, dimension_score in result.dimension_scores.items():
                if dimension_score.score < 0.8:
                    dimension_issues[dimension] += 1
        
        # Generate recommendations for common issues
        total_results = len(results)
        
        for dimension, issue_count in dimension_issues.items():
            if issue_count / total_results > 0.3:  # More than 30% of validations
                if dimension == ValidationDimension.HISTORICAL_ACCURACY:
                    recommendations.add("Implement systematic historical fact-checking process")
                elif dimension == ValidationDimension.PERSONALITY_CONSISTENCY:
                    recommendations.add("Enhance personality consistency training and validation")
                elif dimension == ValidationDimension.TECHNICAL_RELIABILITY:
                    recommendations.add("Address systemic technical reliability issues")
                elif dimension == ValidationDimension.SAFETY:
                    recommendations.add("Strengthen safety monitoring and content filtering")
                elif dimension == ValidationDimension.BIAS_DETECTION:
                    recommendations.add("Implement comprehensive bias mitigation strategies")
        
        return list(recommendations)
    
    def shutdown(self):
        """Shutdown comprehensive validator.

        Stops background monitoring, waits (bounded) for the monitoring
        thread, drains the executor, and shuts down any component
        validators that expose a shutdown() hook.
        """
        # Signal the monitoring loop to stop before joining its thread.
        self.monitoring_enabled = False
        
        if self.monitoring_thread and self.monitoring_thread.is_alive():
            # Bounded join so shutdown cannot hang on a stuck monitor loop.
            self.monitoring_thread.join(timeout=5.0)
        
        # Wait for in-flight validation tasks to finish.
        self.executor.shutdown(wait=True)
        
        # Shutdown component validators
        # Not every component validator implements shutdown(); probe first.
        if hasattr(self.safety_monitor, 'shutdown'):
            self.safety_monitor.shutdown()
        if hasattr(self.bias_detector, 'shutdown'):
            self.bias_detector.shutdown()
        
        logger.info("ComprehensiveValidator shutdown completed")