"""
Personality Consistency Tester

This module provides cross-situational consistency testing across historical contexts,
temporal continuity verification using vector similarity, and behavioral pattern
stability validation over time with >85% consistency requirement.

Key Features:
- Cross-situational consistency testing across diverse historical contexts
- Temporal continuity verification: Continuity(t1, t2) = cosine(v_persona(t1), v_persona(t2))
- >85% consistency requirement validation with detailed reporting
- Behavioral pattern stability analysis over time
- Multi-dimensional personality trait tracking
- Deviation detection and correction mechanisms
"""

import numpy as np
from typing import Dict, List, Tuple, Optional, Any, Set
from dataclasses import dataclass, field
from enum import Enum
import logging
from datetime import datetime, timedelta
from collections import defaultdict, deque
import json
from scipy.spatial.distance import cosine
from scipy.stats import pearsonr, spearmanr
import warnings

logger = logging.getLogger(__name__)


class PersonalityDimension(Enum):
    """Personality dimensions for consistency testing.

    The first five members are the Big Five (OCEAN) traits; the remaining
    three are additional interpersonal dimensions tracked by the tester.
    Each value string is used as the trait's label in reports.
    """
    OPENNESS = "openness"
    CONSCIENTIOUSNESS = "conscientiousness"
    EXTRAVERSION = "extraversion"
    AGREEABLENESS = "agreeableness"
    NEUROTICISM = "neuroticism"
    DOMINANCE = "dominance"
    WARMTH = "warmth"
    COMPETENCE = "competence"


class SituationalContext(Enum):
    """Types of situational contexts for testing.

    Each member names a kind of situation in which a historical figure's
    personality can be observed; cross-situational consistency is measured
    by comparing personality snapshots across these contexts.
    """
    MILITARY_LEADERSHIP = "military_leadership"
    POLITICAL_NEGOTIATION = "political_negotiation"
    PERSONAL_RELATIONSHIPS = "personal_relationships"
    PUBLIC_SPEAKING = "public_speaking"
    CRISIS_MANAGEMENT = "crisis_management"
    DIPLOMATIC_RELATIONS = "diplomatic_relations"
    CULTURAL_INTERACTIONS = "cultural_interactions"
    INTELLECTUAL_DISCOURSE = "intellectual_discourse"


@dataclass
class PersonalitySnapshot:
    """Snapshot of personality at a specific time/context.

    Captures one observation of a figure's personality: the raw persona
    embedding used for cosine-similarity continuity checks, per-trait scalar
    scores, and free-form behavioral indicators.
    """
    timestamp: datetime  # when the observation was made
    context: SituationalContext  # situational context of the observation
    personality_vector: Optional[np.ndarray]  # 1-D persona embedding; may be None when unavailable
    trait_scores: Dict[PersonalityDimension, float]  # per-trait scores, expected in [0, 1]
    behavioral_indicators: Dict[str, float]  # named behavioral metrics
    confidence_score: float  # confidence in this snapshot's measurements
    situation_description: str  # human-readable description of the situation
    metadata: Dict[str, Any] = field(default_factory=dict)  # optional extra information
    
    def __post_init__(self):
        """Validate personality snapshot data.

        Raises:
            ValueError: If a personality vector is present but not 1-dimensional.
        """
        # The vector is optional; when present it must be rank-1 so that
        # cosine-similarity comparisons downstream are well defined.
        if self.personality_vector is not None:
            if len(self.personality_vector.shape) != 1:
                raise ValueError("Personality vector must be 1-dimensional")
        
        # Out-of-range trait scores are tolerated (logged, not rejected) so a
        # single bad score does not discard an otherwise useful snapshot.
        for trait, score in self.trait_scores.items():
            if not 0.0 <= score <= 1.0:
                logger.warning("Trait score %s = %.3f outside valid range [0,1]", trait, score)


@dataclass 
class ConsistencyResult:
    """Result of personality consistency analysis.

    Aggregates the individual consistency axes (temporal, cross-situational,
    per-trait, behavioral) into an overall score and records the diagnostics
    and recommendations produced by the analysis.
    """
    overall_consistency: float  # weighted combination of all axes, 0.0 to 1.0
    temporal_continuity: float  # consecutive-snapshot vector similarity, 0.0 to 1.0
    cross_situational_consistency: float  # agreement across contexts, 0.0 to 1.0
    trait_consistency_scores: Dict[PersonalityDimension, float]  # per-trait consistency, 0.0 to 1.0
    inconsistent_periods: List[Tuple[datetime, datetime, float]]  # (start, end, consistency) of flagged windows
    behavioral_deviations: List[Dict[str, Any]]  # per-snapshot deviations from the figure's baseline
    confidence_level: float  # confidence in this result, 0.0 to 1.0
    meets_threshold: bool  # True when overall_consistency >= configured threshold
    recommendations: List[str]  # human-readable improvement suggestions
    detailed_analysis: Dict[str, Any]  # supporting statistics and breakdowns
    timestamp: datetime = field(default_factory=datetime.now)  # when the result was produced


@dataclass
class DeviationAlert:
    """Alert for personality consistency deviation.

    Raised (as data, not an exception) when a consistency test falls below
    the configured threshold, describing what deviated and how badly.
    """
    timestamp: datetime  # when the alert was generated
    deviation_type: str  # category of the deviation
    severity: float  # 0.0 to 1.0
    affected_dimensions: List[PersonalityDimension]  # traits implicated in the deviation
    description: str  # human-readable summary
    context_info: Dict[str, Any]  # supporting details for the alert
    recommended_actions: List[str]  # suggested remediation steps


class PersonalityConsistencyTester:
    """
    Personality Consistency Tester for historical figure simulation.
    
    This system ensures >85% consistency across situations and time through:
    - Cross-situational consistency testing
    - Temporal continuity verification using cosine similarity
    - Behavioral pattern stability analysis
    - Multi-dimensional trait tracking
    """
    
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the personality consistency tester.

        Args:
            config: Configuration dictionary with testing parameters
                (consistency_threshold, temporal_window_days, max_snapshots,
                vector_dimension); missing keys fall back to defaults.
        """
        cfg = config or {}
        self.config = cfg

        # Tunable testing parameters with documented defaults.
        self.consistency_threshold = cfg.get('consistency_threshold', 0.85)
        self.temporal_window_days = cfg.get('temporal_window_days', 30)
        self.max_snapshots = cfg.get('max_snapshots', 1000)
        self.vector_dimension = cfg.get('vector_dimension', 10000)

        # Per-figure history of personality snapshots.
        self.personality_snapshots: Dict[str, List[PersonalitySnapshot]] = defaultdict(list)

        # Per-figure consistency results and raised deviation alerts.
        self.consistency_history: Dict[str, List[ConsistencyResult]] = defaultdict(list)
        self.deviation_alerts: Dict[str, List[DeviationAlert]] = defaultdict(list)

        # Named analysis windows covering different time scales.
        self.analysis_windows = {
            'short_term': timedelta(days=7),
            'medium_term': timedelta(days=30),
            'long_term': timedelta(days=365)
        }

        # Baseline trait values learned from each figure's early snapshots.
        self.trait_baselines: Dict[str, Dict[PersonalityDimension, float]] = {}

        # Aggregate testing statistics.
        self.test_stats = {
            'total_tests': 0,
            'consistent_results': 0,
            'inconsistent_results': 0,
            'average_consistency': 0.0,
            'test_times': []
        }

        logger.info("PersonalityConsistencyTester initialized with threshold %.2f",
                   self.consistency_threshold)
    
    def add_personality_snapshot(self, figure_id: str, snapshot: PersonalitySnapshot):
        """
        Add a personality snapshot for consistency tracking.

        Args:
            figure_id: Unique identifier for historical figure
            snapshot: Personality snapshot to add
        """
        history = self.personality_snapshots[figure_id]

        # Keep the history bounded: evict the oldest entry when full.
        if len(history) >= self.max_snapshots:
            history.pop(0)

        history.append(snapshot)

        # The figure's first ten snapshots seed its trait baselines.
        if len(history) <= 10:
            self._update_trait_baselines(figure_id, snapshot)

        logger.debug("Added personality snapshot for %s in context %s",
                    figure_id, snapshot.context.value)
    
    def test_consistency(self, figure_id: str, 
                        time_window: Optional[timedelta] = None) -> ConsistencyResult:
        """
        Test personality consistency for a historical figure.

        Runs the full consistency battery (temporal continuity,
        cross-situational consistency, per-trait consistency, behavioral
        stability), combines the scores into a weighted overall value, records
        the result, and raises a deviation alert when the threshold is missed.

        Args:
            figure_id: Unique identifier for historical figure
            time_window: Time window for analysis (default: all available data)

        Returns:
            ConsistencyResult with detailed consistency analysis

        Raises:
            ValueError: If no snapshots have been recorded for ``figure_id``.
        """
        start_time = datetime.now()
        
        if figure_id not in self.personality_snapshots:
            raise ValueError(f"No personality snapshots found for figure {figure_id}")
        
        snapshots = self.personality_snapshots[figure_id]
        
        # Filter snapshots by time window if specified
        if time_window:
            cutoff_time = datetime.now() - time_window
            snapshots = [s for s in snapshots if s.timestamp >= cutoff_time]
        
        # Snapshots are stored in insertion order, which need not be
        # chronological; sort once here so first/last-based computations
        # (time span, confidence) are correct regardless of arrival order.
        snapshots = sorted(snapshots, key=lambda s: s.timestamp)
        
        if len(snapshots) < 2:
            logger.warning("Insufficient snapshots for consistency testing: %d", len(snapshots))
            return self._create_insufficient_data_result()
        
        logger.info("Testing consistency for %s with %d snapshots", figure_id, len(snapshots))
        
        # Perform multi-dimensional consistency analysis
        temporal_continuity = self._test_temporal_continuity(snapshots)
        cross_situational = self._test_cross_situational_consistency(snapshots)
        trait_consistency = self._test_trait_consistency(snapshots, figure_id)
        behavioral_stability = self._test_behavioral_stability(snapshots)
        
        # Identify inconsistent periods and deviations
        inconsistent_periods = self._identify_inconsistent_periods(snapshots)
        behavioral_deviations = self._identify_behavioral_deviations(snapshots, figure_id)
        
        # Calculate overall consistency (weighted average of the four axes)
        weights = {
            'temporal': 0.3,
            'cross_situational': 0.3,
            'trait': 0.25,
            'behavioral': 0.15
        }
        
        overall_consistency = (
            temporal_continuity * weights['temporal'] +
            cross_situational * weights['cross_situational'] +
            np.mean(list(trait_consistency.values())) * weights['trait'] +
            behavioral_stability * weights['behavioral']
        )
        
        # Calculate confidence based on data quantity and quality
        confidence = self._calculate_confidence(snapshots)
        
        # Check if meets threshold
        meets_threshold = overall_consistency >= self.consistency_threshold
        
        # Generate recommendations
        recommendations = self._generate_recommendations(
            overall_consistency, temporal_continuity, cross_situational,
            trait_consistency, behavioral_deviations
        )
        
        # Create detailed analysis (snapshots are chronologically sorted here,
        # so first/last give the true time span)
        detailed_analysis = {
            'snapshot_count': len(snapshots),
            'time_span_days': (snapshots[-1].timestamp - snapshots[0].timestamp).days,
            'contexts_tested': list(set(s.context for s in snapshots)),
            'temporal_analysis': self._analyze_temporal_patterns(snapshots),
            'situational_analysis': self._analyze_situational_patterns(snapshots),
            'trait_stability_analysis': self._analyze_trait_stability(snapshots, figure_id)
        }
        
        result = ConsistencyResult(
            overall_consistency=overall_consistency,
            temporal_continuity=temporal_continuity,
            cross_situational_consistency=cross_situational,
            trait_consistency_scores=trait_consistency,
            inconsistent_periods=inconsistent_periods,
            behavioral_deviations=behavioral_deviations,
            confidence_level=confidence,
            meets_threshold=meets_threshold,
            recommendations=recommendations,
            detailed_analysis=detailed_analysis
        )
        
        # Store result and update statistics
        self.consistency_history[figure_id].append(result)
        test_time = (datetime.now() - start_time).total_seconds()
        self._update_test_statistics(result, test_time)
        
        # Generate deviation alerts if necessary
        if not meets_threshold:
            alert = self._generate_deviation_alert(figure_id, result)
            self.deviation_alerts[figure_id].append(alert)
        
        logger.info("Consistency test completed: overall=%.3f, temporal=%.3f, cross-situational=%.3f",
                   overall_consistency, temporal_continuity, cross_situational)
        
        return result
    
    def _test_temporal_continuity(self, snapshots: List[PersonalitySnapshot]) -> float:
        """
        Test temporal continuity using vector similarity.
        Formula: Continuity(t1, t2) = cosine(v_persona(t1), v_persona(t2))
        """
        if len(snapshots) < 2:
            return 1.0
        
        # Sort snapshots by timestamp
        sorted_snapshots = sorted(snapshots, key=lambda x: x.timestamp)
        
        continuity_scores = []
        
        # Calculate pairwise continuity for consecutive snapshots
        for i in range(len(sorted_snapshots) - 1):
            current = sorted_snapshots[i]
            next_snapshot = sorted_snapshots[i + 1]
            
            if current.personality_vector is not None and next_snapshot.personality_vector is not None:
                # Calculate cosine similarity (1 - cosine distance)
                similarity = 1 - cosine(current.personality_vector, next_snapshot.personality_vector)
                continuity_scores.append(similarity)
                
                logger.debug("Temporal continuity between %s and %s: %.3f",
                            current.timestamp, next_snapshot.timestamp, similarity)
        
        if not continuity_scores:
            logger.warning("No personality vectors available for temporal continuity calculation")
            return 0.5
        
        # Calculate weighted average (recent pairs weighted more heavily)
        weights = np.exp(np.linspace(0, 1, len(continuity_scores)))  # Exponential weighting
        weighted_continuity = np.average(continuity_scores, weights=weights)
        
        return max(0.0, min(1.0, weighted_continuity))
    
    def _test_cross_situational_consistency(self, snapshots: List[PersonalitySnapshot]) -> float:
        """Test consistency across different situational contexts."""
        # Group snapshots by context
        context_groups = defaultdict(list)
        for snapshot in snapshots:
            context_groups[snapshot.context].append(snapshot)
        
        if len(context_groups) < 2:
            logger.warning("Insufficient situational contexts for cross-situational testing")
            return 0.8  # Default high score if only one context
        
        # Calculate intra-context consistency for each context
        intra_context_consistencies = {}
        for context, context_snapshots in context_groups.items():
            if len(context_snapshots) >= 2:
                consistency = self._calculate_intra_context_consistency(context_snapshots)
                intra_context_consistencies[context] = consistency
        
        # Calculate inter-context consistency (between different contexts)
        inter_context_consistency = self._calculate_inter_context_consistency(context_groups)
        
        # Combine intra and inter context scores
        if intra_context_consistencies:
            avg_intra_consistency = np.mean(list(intra_context_consistencies.values()))
        else:
            avg_intra_consistency = 0.5
        
        # Weighted combination
        cross_situational_score = 0.6 * avg_intra_consistency + 0.4 * inter_context_consistency
        
        return max(0.0, min(1.0, cross_situational_score))
    
    def _calculate_intra_context_consistency(self, snapshots: List[PersonalitySnapshot]) -> float:
        """Calculate consistency within the same situational context."""
        if len(snapshots) < 2:
            return 1.0
        
        # Calculate pairwise similarities within context
        similarities = []
        
        for i in range(len(snapshots)):
            for j in range(i + 1, len(snapshots)):
                s1, s2 = snapshots[i], snapshots[j]
                
                if s1.personality_vector is not None and s2.personality_vector is not None:
                    similarity = 1 - cosine(s1.personality_vector, s2.personality_vector)
                    similarities.append(similarity)
        
        if not similarities:
            return 0.5
        
        return np.mean(similarities)
    
    def _calculate_inter_context_consistency(self, context_groups: Dict) -> float:
        """Calculate consistency between different situational contexts."""
        contexts = list(context_groups.keys())
        
        if len(contexts) < 2:
            return 1.0
        
        inter_similarities = []
        
        # Compare representative snapshots from different contexts
        for i in range(len(contexts)):
            for j in range(i + 1, len(contexts)):
                context1_snapshots = context_groups[contexts[i]]
                context2_snapshots = context_groups[contexts[j]]
                
                # Use most recent snapshot from each context as representative
                if context1_snapshots and context2_snapshots:
                    rep1 = max(context1_snapshots, key=lambda x: x.timestamp)
                    rep2 = max(context2_snapshots, key=lambda x: x.timestamp)
                    
                    if (rep1.personality_vector is not None and 
                        rep2.personality_vector is not None):
                        similarity = 1 - cosine(rep1.personality_vector, rep2.personality_vector)
                        inter_similarities.append(similarity)
        
        if not inter_similarities:
            return 0.5
        
        return np.mean(inter_similarities)
    
    def _test_trait_consistency(self, snapshots: List[PersonalitySnapshot], 
                              figure_id: str) -> Dict[PersonalityDimension, float]:
        """Test consistency for individual personality traits."""
        results: Dict[PersonalityDimension, float] = {}
        baseline_map = self.trait_baselines.get(figure_id, {})
        
        for dimension in PersonalityDimension:
            observed = [s.trait_scores[dimension] for s in snapshots
                        if dimension in s.trait_scores]
            
            if len(observed) < 2:
                results[dimension] = 1.0  # Perfect consistency if insufficient data
                continue
            
            std = np.std(observed)
            mean = np.mean(observed)
            
            # Consistency is the complement of the coefficient of variation;
            # a zero (or non-positive) mean falls back to the raw std.
            if mean > 0:
                score = max(0.0, 1.0 - std / mean)
            else:
                score = 1.0 - std
            
            # Penalize drift away from the figure's established baseline.
            if dimension in baseline_map:
                drift = abs(mean - baseline_map[dimension])
                score *= (1.0 - min(0.5, drift))
            
            results[dimension] = max(0.0, min(1.0, score))
            
            logger.debug("Trait %s consistency: %.3f (std=%.3f, mean=%.3f)", 
                        dimension.value, score, std, mean)
        
        return results
    
    def _test_behavioral_stability(self, snapshots: List[PersonalitySnapshot]) -> float:
        """Test stability of behavioral indicators over time."""
        if len(snapshots) < 2:
            return 1.0
        
        # Extract all behavioral indicators
        all_indicators = set()
        for snapshot in snapshots:
            all_indicators.update(snapshot.behavioral_indicators.keys())
        
        if not all_indicators:
            return 0.5  # Neutral score if no behavioral indicators
        
        indicator_stabilities = []
        
        for indicator in all_indicators:
            values = []
            for snapshot in snapshots:
                if indicator in snapshot.behavioral_indicators:
                    values.append(snapshot.behavioral_indicators[indicator])
            
            if len(values) >= 2:
                # Calculate stability as inverse of coefficient of variation
                values_std = np.std(values)
                values_mean = np.mean(values)
                
                if values_mean != 0:
                    stability = max(0.0, 1.0 - (values_std / abs(values_mean)))
                else:
                    stability = 1.0 - values_std
                
                indicator_stabilities.append(stability)
        
        if not indicator_stabilities:
            return 0.5
        
        return np.mean(indicator_stabilities)
    
    def _identify_inconsistent_periods(self, snapshots: List[PersonalitySnapshot]) -> List[Tuple[datetime, datetime, float]]:
        """Identify periods of personality inconsistency."""
        if len(snapshots) < 3:
            return []
        
        sorted_snapshots = sorted(snapshots, key=lambda x: x.timestamp)
        inconsistent_periods = []
        
        # Use sliding window to detect inconsistent periods
        window_size = min(5, len(sorted_snapshots) // 2)
        
        for i in range(len(sorted_snapshots) - window_size + 1):
            window_snapshots = sorted_snapshots[i:i + window_size]
            
            # Calculate consistency within window
            window_consistency = self._calculate_window_consistency(window_snapshots)
            
            if window_consistency < self.consistency_threshold * 0.8:  # 80% of threshold
                start_time = window_snapshots[0].timestamp
                end_time = window_snapshots[-1].timestamp
                inconsistent_periods.append((start_time, end_time, window_consistency))
        
        return inconsistent_periods
    
    def _calculate_window_consistency(self, snapshots: List[PersonalitySnapshot]) -> float:
        """Calculate consistency within a window of snapshots."""
        if len(snapshots) < 2:
            return 1.0
        
        similarities = []
        
        for i in range(len(snapshots)):
            for j in range(i + 1, len(snapshots)):
                s1, s2 = snapshots[i], snapshots[j]
                
                if s1.personality_vector is not None and s2.personality_vector is not None:
                    similarity = 1 - cosine(s1.personality_vector, s2.personality_vector)
                    similarities.append(similarity)
        
        if not similarities:
            return 0.5
        
        return np.mean(similarities)
    
    def _identify_behavioral_deviations(self, snapshots: List[PersonalitySnapshot],
                                      figure_id: str) -> List[Dict[str, Any]]:
        """Identify significant behavioral deviations from baseline."""
        deviations = []
        
        if figure_id not in self.trait_baselines:
            return deviations
        
        baselines = self.trait_baselines[figure_id]
        
        for snapshot in snapshots:
            snapshot_deviations = {}
            
            for trait, baseline in baselines.items():
                if trait in snapshot.trait_scores:
                    current_score = snapshot.trait_scores[trait]
                    deviation = abs(current_score - baseline)
                    
                    # Significant deviation threshold (20% of scale)
                    if deviation > 0.2:
                        snapshot_deviations[trait.value] = {
                            'baseline': baseline,
                            'current': current_score,
                            'deviation': deviation,
                            'severity': min(1.0, deviation / 0.5)  # Scale to [0,1]
                        }
            
            if snapshot_deviations:
                deviations.append({
                    'timestamp': snapshot.timestamp,
                    'context': snapshot.context.value,
                    'deviations': snapshot_deviations,
                    'overall_severity': np.mean([d['severity'] for d in snapshot_deviations.values()])
                })
        
        return deviations
    
    def _update_trait_baselines(self, figure_id: str, snapshot: PersonalitySnapshot):
        """Update trait baselines using early snapshots."""
        if figure_id not in self.trait_baselines:
            self.trait_baselines[figure_id] = {}
        
        baselines = self.trait_baselines[figure_id]
        
        for trait, score in snapshot.trait_scores.items():
            if trait in baselines:
                # Update with exponential moving average
                alpha = 0.2  # Learning rate
                baselines[trait] = alpha * score + (1 - alpha) * baselines[trait]
            else:
                baselines[trait] = score
    
    def _calculate_confidence(self, snapshots: List[PersonalitySnapshot]) -> float:
        """
        Calculate confidence level based on data quantity and quality.

        Confidence is a weighted blend of five factors: snapshot count,
        situational-context diversity, covered time span, the snapshots' own
        confidence scores, and the fraction of snapshots carrying a vector.

        Args:
            snapshots: Snapshots to assess (any order).

        Returns:
            Confidence level in [0.0, 1.0]; 0.0 for an empty list.
        """
        if not snapshots:
            return 0.0
        
        # Data quantity: saturates at 20 snapshots.
        data_quantity_score = min(1.0, len(snapshots) / 20)
        
        # Context diversity relative to all known situational contexts.
        unique_contexts = len(set(s.context for s in snapshots))
        context_diversity_score = min(1.0, unique_contexts / len(SituationalContext))
        
        # Time span coverage: saturates at one year. Use min/max timestamps so
        # the result does not depend on snapshot insertion order (the previous
        # first-vs-last computation assumed a chronologically sorted list).
        if len(snapshots) > 1:
            timestamps = [s.timestamp for s in snapshots]
            time_span_days = (max(timestamps) - min(timestamps)).days
            time_coverage_score = min(1.0, time_span_days / 365)  # Ideal: 1 year
        else:
            time_coverage_score = 0.1
        
        # Average snapshot confidence
        avg_snapshot_confidence = np.mean([s.confidence_score for s in snapshots])
        
        # Fraction of snapshots that carry a personality vector.
        vector_availability = sum(1 for s in snapshots if s.personality_vector is not None) / len(snapshots)
        
        # Weighted combination
        confidence = (
            0.3 * data_quantity_score +
            0.2 * context_diversity_score +
            0.2 * time_coverage_score +
            0.2 * avg_snapshot_confidence +
            0.1 * vector_availability
        )
        
        return max(0.0, min(1.0, confidence))
    
    def _generate_recommendations(self, overall_consistency: float,
                                temporal_continuity: float,
                                cross_situational: float,
                                trait_consistency: Dict[PersonalityDimension, float],
                                behavioral_deviations: List[Dict[str, Any]]) -> List[str]:
        """Generate recommendations for improving consistency."""
        recommendations = []
        
        if overall_consistency < self.consistency_threshold:
            recommendations.append(f"Overall consistency ({overall_consistency:.3f}) below threshold ({self.consistency_threshold:.3f})")
        
        if temporal_continuity < 0.8:
            recommendations.append("Improve temporal continuity - personality changes too rapidly over time")
        
        if cross_situational < 0.8:
            recommendations.append("Enhance cross-situational consistency - personality varies too much across contexts")
        
        # Identify problematic traits
        inconsistent_traits = [trait.value for trait, score in trait_consistency.items() if score < 0.8]
        if inconsistent_traits:
            recommendations.append(f"Address inconsistent traits: {', '.join(inconsistent_traits)}")
        
        if behavioral_deviations:
            high_severity_deviations = [d for d in behavioral_deviations if d['overall_severity'] > 0.7]
            if high_severity_deviations:
                recommendations.append(f"Address {len(high_severity_deviations)} high-severity behavioral deviations")
        
        # Specific improvement suggestions
        if temporal_continuity < cross_situational:
            recommendations.append("Focus on maintaining personality stability over time")
        else:
            recommendations.append("Focus on maintaining consistent personality across different situations")
        
        return recommendations
    
    def _analyze_temporal_patterns(self, snapshots: List[PersonalitySnapshot]) -> Dict[str, Any]:
        """Analyze temporal patterns in personality data."""
        ordered = sorted(snapshots, key=lambda s: s.timestamp)
        
        analysis = {
            'time_span_days': 0,
            'trend_analysis': {},
            'stability_periods': [],
            'change_points': []
        }
        
        if len(ordered) < 2:
            return analysis
        
        analysis['time_span_days'] = (ordered[-1].timestamp - ordered[0].timestamp).days
        
        # Correlate each trait's scores against time to estimate its trend.
        for dimension in PersonalityDimension:
            values = []
            times = []
            
            for snap in ordered:
                if dimension in snap.trait_scores:
                    values.append(snap.trait_scores[dimension])
                    times.append(snap.timestamp.timestamp())
            
            if len(values) < 3:
                continue  # Too few points for a meaningful correlation.
            
            # Pearson correlation of score vs. time; suppress the warnings
            # scipy emits for constant (flat) series.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                correlation, p_value = pearsonr(times, values)
            
            if correlation > 0.1:
                direction = 'increasing'
            elif correlation < -0.1:
                direction = 'decreasing'
            else:
                direction = 'stable'
            
            analysis['trend_analysis'][dimension.value] = {
                'correlation': correlation,
                'p_value': p_value,
                'trend_direction': direction
            }
        
        return analysis
    
    def _analyze_situational_patterns(self, snapshots: List[PersonalitySnapshot]) -> Dict[str, Any]:
        """Analyze situational patterns in personality expression."""
        context_analysis = defaultdict(list)
        
        for snapshot in snapshots:
            context_analysis[snapshot.context.value].append({
                'timestamp': snapshot.timestamp,
                'trait_scores': snapshot.trait_scores,
                'confidence': snapshot.confidence_score
            })
        
        # Calculate statistics for each context
        context_stats = {}
        for context, context_data in context_analysis.items():
            if len(context_data) >= 2:
                # Calculate mean trait scores for this context
                trait_means = defaultdict(list)
                for data in context_data:
                    for trait, score in data['trait_scores'].items():
                        trait_means[trait.value].append(score)
                
                context_stats[context] = {
                    'count': len(context_data),
                    'avg_confidence': np.mean([d['confidence'] for d in context_data]),
                    'trait_means': {trait: np.mean(scores) for trait, scores in trait_means.items()},
                    'trait_stds': {trait: np.std(scores) for trait, scores in trait_means.items()}
                }
        
        return {
            'contexts_tested': list(context_analysis.keys()),
            'context_statistics': context_stats,
            'most_consistent_context': max(context_stats.keys(), 
                                         key=lambda k: context_stats[k]['avg_confidence']) if context_stats else None
        }
    
    def _analyze_trait_stability(self, snapshots: List[PersonalitySnapshot], 
                               figure_id: str) -> Dict[str, Any]:
        """Analyze trait stability patterns."""
        report = {}
        baselines = self.trait_baselines.get(figure_id, {})
        
        for dimension in PersonalityDimension:
            observed = [s.trait_scores[dimension] for s in snapshots
                        if dimension in s.trait_scores]
            
            if len(observed) < 2:
                continue  # Need at least two samples to assess stability.
            
            spread = np.std(observed)
            center = np.mean(observed)
            
            entry = {
                'mean': center,
                'std': spread,
                'range': max(observed) - min(observed),
                # Stability degrades linearly; a std of 0.5 counts as fully unstable.
                'stability_score': max(0.0, 1.0 - (spread / 0.5)),
                'sample_count': len(observed)
            }
            
            # Report drift from the learned baseline when one exists.
            if dimension in baselines:
                entry['baseline_deviation'] = abs(center - baselines[dimension])
            
            report[dimension.value] = entry
        
        return report
    
    def _create_insufficient_data_result(self) -> ConsistencyResult:
        """Build a neutral, low-confidence result for when too few
        personality snapshots exist to run a real consistency test."""
        neutral = 0.5  # midpoint score: no evidence either way
        return ConsistencyResult(
            overall_consistency=neutral,
            temporal_continuity=neutral,
            cross_situational_consistency=neutral,
            trait_consistency_scores=dict.fromkeys(PersonalityDimension, neutral),
            inconsistent_periods=[],
            behavioral_deviations=[],
            confidence_level=0.1,
            meets_threshold=False,
            recommendations=["Insufficient data for consistency testing - need at least 2 snapshots"],
            detailed_analysis={'insufficient_data': True}
        )
    
    def _update_test_statistics(self, result: ConsistencyResult, test_time: float):
        """Update testing performance statistics."""
        self.test_stats['total_tests'] += 1
        self.test_stats['test_times'].append(test_time)
        
        if result.meets_threshold:
            self.test_stats['consistent_results'] += 1
        else:
            self.test_stats['inconsistent_results'] += 1
        
        # Update rolling average
        alpha = 0.1  # Learning rate for exponential moving average
        if self.test_stats['average_consistency'] == 0.0:
            self.test_stats['average_consistency'] = result.overall_consistency
        else:
            self.test_stats['average_consistency'] = (
                alpha * result.overall_consistency + 
                (1 - alpha) * self.test_stats['average_consistency']
            )
    
    def _generate_deviation_alert(self, figure_id: str, result: ConsistencyResult) -> DeviationAlert:
        """Build a DeviationAlert describing a failed consistency test.

        Severity scales inversely with the overall consistency score, and
        the alert carries every trait whose score fell below the threshold.
        """
        threshold = self.consistency_threshold
        
        # Traits whose individual scores failed the configured threshold.
        below_threshold = []
        for trait, score in result.trait_consistency_scores.items():
            if score < threshold:
                below_threshold.append(trait)
        
        message = f"Consistency test failed: {result.overall_consistency:.3f} < {threshold:.3f}"
        
        # Extend (a copy of) the result's own recommendations with
        # remediation steps specific to deviation handling.
        actions = list(result.recommendations)
        actions.extend([
            "Review recent personality snapshots for anomalies",
            "Consider adjusting personality model parameters",
        ])
        
        return DeviationAlert(
            timestamp=datetime.now(),
            deviation_type="consistency_failure",
            severity=1.0 - result.overall_consistency,
            affected_dimensions=below_threshold,
            description=message,
            context_info={
                'figure_id': figure_id,
                'failed_consistency': result.overall_consistency,
                'threshold': threshold
            },
            recommended_actions=actions
        )
    
    # Public methods for system integration
    
    def get_consistency_history(self, figure_id: str, 
                              limit: Optional[int] = None) -> List[ConsistencyResult]:
        """Get consistency test history for a figure."""
        history = self.consistency_history.get(figure_id, [])
        
        if limit:
            return history[-limit:]
        
        return history
    
    def get_deviation_alerts(self, figure_id: str,
                           severity_threshold: float = 0.5) -> List[DeviationAlert]:
        """Get deviation alerts above severity threshold."""
        alerts = self.deviation_alerts.get(figure_id, [])
        
        return [alert for alert in alerts if alert.severity >= severity_threshold]
    
    def get_test_statistics(self) -> Dict[str, Any]:
        """Return a copy of the testing statistics with derived metrics.

        Adds ``consistency_rate`` once at least one test has run, plus
        ``average_test_time`` and ``total_test_time`` when timings exist.
        The stored statistics themselves are not modified.
        """
        stats = dict(self.test_stats)
        
        total = stats['total_tests']
        if total > 0:
            stats['consistency_rate'] = stats['consistent_results'] / total
        
        times = stats['test_times']
        if times:
            stats['average_test_time'] = np.mean(times)
            stats['total_test_time'] = np.sum(times)
        
        return stats
    
    def clear_figure_data(self, figure_id: str):
        """Remove every stored record for a figure.

        Drops the figure's snapshots, consistency history, deviation alerts
        and trait baselines; missing entries are ignored.
        """
        stores = (
            self.personality_snapshots,
            self.consistency_history,
            self.deviation_alerts,
            self.trait_baselines,
        )
        for store in stores:
            # pop with a default is a no-op when the figure is absent.
            store.pop(figure_id, None)
        
        logger.info("Cleared all data for figure %s", figure_id)
    
    def export_consistency_report(self, figure_id: str) -> Dict[str, Any]:
        """Export comprehensive consistency report for a figure.

        Args:
            figure_id: Identifier of the historical figure.

        Returns:
            Dict bundling snapshot count, latest/full consistency history,
            deviation alerts, trait baselines and summary statistics.

        Raises:
            ValueError: If no snapshots exist for ``figure_id``.
        """
        if figure_id not in self.personality_snapshots:
            raise ValueError(f"No data available for figure {figure_id}")
        
        # Use .get with defaults (matching trait_baselines below) so a figure
        # that has snapshots but no completed tests or alerts yet does not
        # raise KeyError on direct indexing.
        history = self.consistency_history.get(figure_id, [])
        alerts = self.deviation_alerts.get(figure_id, [])
        latest_result = history[-1] if history else None
        
        report = {
            'figure_id': figure_id,
            'report_timestamp': datetime.now(),
            'snapshot_count': len(self.personality_snapshots[figure_id]),
            'latest_consistency_result': latest_result,
            'consistency_history': history,
            'deviation_alerts': alerts,
            'trait_baselines': self.trait_baselines.get(figure_id, {}),
            'summary_statistics': self._calculate_figure_statistics(figure_id)
        }
        
        return report
    
    def _calculate_figure_statistics(self, figure_id: str) -> Dict[str, Any]:
        """Calculate summary statistics for a figure."""
        snapshots = self.personality_snapshots[figure_id]
        history = self.consistency_history[figure_id]
        
        if not snapshots:
            return {}
        
        stats = {
            'data_collection_period': {
                'start': min(s.timestamp for s in snapshots),
                'end': max(s.timestamp for s in snapshots),
                'duration_days': (max(s.timestamp for s in snapshots) - 
                                min(s.timestamp for s in snapshots)).days
            },
            'contexts_represented': list(set(s.context.value for s in snapshots)),
            'average_snapshot_confidence': np.mean([s.confidence_score for s in snapshots])
        }
        
        if history:
            stats['consistency_trend'] = {
                'latest_score': history[-1].overall_consistency,
                'average_score': np.mean([r.overall_consistency for r in history]),
                'best_score': max(r.overall_consistency for r in history),
                'worst_score': min(r.overall_consistency for r in history)
            }
        
        return stats