"""
Bayesian Confidence Calculation for Historical Data Reconstruction

This module implements Bayesian confidence intervals and uncertainty quantification
for historical personality reconstruction. It provides evidence credibility scoring,
consistency evaluation, and uncertainty measures for speculative elements.

Mathematical Framework:
P(P|E) = P(E|P) · P(P) / P(E)  (Bayes' Theorem)

Where:
- P: Personality hypothesis
- E: Evidence/observations
- P(P|E): Posterior probability (what we want)
- P(E|P): Likelihood (evidence given personality)
- P(P): Prior probability
- P(E): Evidence probability
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Union, Any
from dataclasses import dataclass, field
from enum import Enum
import logging
from scipy.stats import beta, norm, chi2
from scipy.special import logsumexp
import warnings

logger = logging.getLogger(__name__)


class EvidenceType(Enum):
    """Types of historical evidence.

    Members serve as keys into
    EvidenceCredibilityScorer.evidence_type_credibility, which assigns each
    type a base credibility weight (personal letters highest at 0.9,
    indirect inference lowest at 0.3).
    """
    BIOGRAPHICAL_TEXT = "biographical_text"
    PERSONAL_LETTERS = "personal_letters"
    CONTEMPORARY_ACCOUNTS = "contemporary_accounts"
    BEHAVIORAL_RECORDS = "behavioral_records"
    CREATIVE_WORKS = "creative_works"
    HISTORICAL_DOCUMENTS = "historical_documents"
    ARCHAEOLOGICAL_ARTIFACTS = "archaeological_artifacts"
    INDIRECT_INFERENCE = "indirect_inference"


@dataclass
class EvidenceSource:
    """Represents a single source of historical evidence.

    Scalar scores are expected in [0, 1] unless noted otherwise; they are
    combined into one credibility figure by
    EvidenceCredibilityScorer.score_evidence_credibility.
    """
    source_id: str  # Unique identifier for this source
    evidence_type: EvidenceType  # Category of evidence (letters, biography, ...)
    credibility_score: float  # [0, 1] - how reliable is this source
    temporal_distance: float  # Years from event to recording
    cultural_bias_score: float  # [0, 1] - 0=unbiased, 1=highly biased
    completeness_score: float  # [0, 1] - how complete is the information
    consistency_with_other_sources: float  # [0, 1] - internal consistency
    personality_relevance: Dict[str, float]  # trait name -> relevance weight
    content_summary: str = ""  # Optional free-text summary of the content
    uncertainty_factors: List[str] = field(default_factory=list)  # Known caveats


@dataclass
class BayesianEstimate:
    """Bayesian posterior estimate for a single personality trait.

    Produced by BayesianConfidenceCalculator via a conjugate normal-normal
    update of a Normal(prior_mean, prior_std) prior against weighted
    observations.
    """
    posterior_mean: float  # Posterior mean of the trait value
    posterior_std: float  # Posterior standard deviation
    credible_interval_95: Tuple[float, float]  # (lower, upper), clipped to [0, 1]
    credible_interval_68: Tuple[float, float]  # (lower, upper), clipped to [0, 1]
    prior_mean: float  # Prior mean used in the update
    prior_std: float  # Prior standard deviation used in the update
    likelihood_strength: float  # Likelihood precision (inverse variance)
    evidence_count: int  # Number of observations supporting the estimate
    epistemic_uncertainty: float  # Model uncertainty
    aleatoric_uncertainty: float  # Data uncertainty

@dataclass
class ConfidenceResult:
    """Complete confidence assessment result across all requested traits.

    Returned by BayesianConfidenceCalculator.calculate_bayesian_confidence.
    """
    trait_estimates: Dict[str, BayesianEstimate]  # trait name -> posterior estimate
    overall_confidence_score: float  # Mean per-trait confidence [0, 1]
    evidence_quality_score: float  # Mean evidence credibility [0, 1]
    temporal_consistency_score: float  # Consistency of evidence dating [0, 1]
    cross_validation_score: float  # Leave-one-out prediction accuracy [0, 1]
    # NOTE(review): the two maps below are populated with *nested* dicts by
    # calculate_bayesian_confidence; annotations widened to match actual use.
    uncertainty_breakdown: Dict[str, Dict[str, float]]  # trait -> uncertainty components
    speculative_elements: List[str]  # Traits flagged "insufficient evidence"
    confidence_factors: Dict[str, Dict[str, float]]  # trait -> confidence diagnostics

class EvidenceCredibilityScorer:
    """
    Scores the credibility and reliability of historical evidence sources.

    Evidence is evaluated on four axes: the intrinsic reliability of its
    type, temporal distance between event and recording, cultural bias, and
    agreement with other sources.
    """

    def __init__(self):
        """Initialize evidence credibility scorer with default weightings."""
        # Base credibility by evidence type; first-person material ranks
        # highest, indirect inference lowest.
        self.evidence_type_credibility = {
            EvidenceType.PERSONAL_LETTERS: 0.9,  # Highest credibility
            EvidenceType.BIOGRAPHICAL_TEXT: 0.7,
            EvidenceType.CONTEMPORARY_ACCOUNTS: 0.8,
            EvidenceType.BEHAVIORAL_RECORDS: 0.85,
            EvidenceType.CREATIVE_WORKS: 0.6,
            EvidenceType.HISTORICAL_DOCUMENTS: 0.75,
            EvidenceType.ARCHAEOLOGICAL_ARTIFACTS: 0.5,
            EvidenceType.INDIRECT_INFERENCE: 0.3  # Lowest credibility
        }

        # Exponential credibility decay per decade between event and recording.
        self.temporal_decay_rate = 0.1
        # Weights for the bias penalty / consistency bonus terms below.
        self.bias_penalty_weight = 0.3
        self.consistency_bonus_weight = 0.2

    def score_evidence_credibility(self, evidence: "EvidenceSource") -> float:
        """
        Compute overall credibility score for an evidence source.

        Combines the base credibility of the evidence type with exponential
        temporal decay, a completeness factor, a cultural-bias penalty and a
        cross-source consistency bonus, then scales by the source's own
        credibility_score.

        Args:
            evidence: Evidence source to evaluate

        Returns:
            Credibility score [0, 1]
        """
        # Base credibility from evidence type (0.5 for unknown types).
        base_credibility = self.evidence_type_credibility.get(
            evidence.evidence_type, 0.5
        )

        # Temporal decay factor: exp(-rate * decades elapsed).
        decades_elapsed = evidence.temporal_distance / 10.0
        temporal_factor = np.exp(-self.temporal_decay_rate * decades_elapsed)

        # Cultural bias penalty and cross-source consistency bonus.
        bias_penalty = self.bias_penalty_weight * evidence.cultural_bias_score
        consistency_bonus = (self.consistency_bonus_weight *
                             evidence.consistency_with_other_sources)

        # Multiplicative core with an additive consistency bonus.
        credibility = (base_credibility * temporal_factor *
                       evidence.completeness_score * (1 - bias_penalty) +
                       consistency_bonus)

        # Scale by the source's own reliability rating.
        final_credibility = credibility * evidence.credibility_score

        return float(np.clip(final_credibility, 0, 1))

    def compute_evidence_weights(self,
                                 evidence_sources: List["EvidenceSource"],
                                 trait_name: str) -> np.ndarray:
        """
        Compute normalized evidence weights for a specific personality trait.

        Each source's weight is its overall credibility multiplied by its
        relevance to the trait (default relevance 0.1 when unspecified).

        Args:
            evidence_sources: List of evidence sources
            trait_name: Name of the personality trait

        Returns:
            Array of weights summing to 1 (empty array for empty input)
        """
        if not evidence_sources:
            # Guard: avoids a 0/0 uniform-fallback division for empty input.
            return np.array([])

        weights = np.array([
            self.score_evidence_credibility(evidence) *
            evidence.personality_relevance.get(trait_name, 0.1)
            for evidence in evidence_sources
        ])

        total = np.sum(weights)
        if total > 0:
            return weights / total
        # All-zero weights: fall back to uniform weighting.
        return np.ones(len(weights)) / len(weights)

    def assess_evidence_consistency(self,
                                    evidence_sources: List["EvidenceSource"],
                                    trait_observations: List[float]) -> float:
        """
        Assess consistency across multiple evidence sources.

        Consistency is exp(-2 * weighted variance) of the observations, with
        more credible sources weighted more heavily.

        Args:
            evidence_sources: List of evidence sources
            trait_observations: Observed trait values from each source

        Returns:
            Consistency score [0, 1] (1.0 for fewer than two observations)
        """
        if len(trait_observations) < 2:
            return 1.0

        weights = np.array([self.score_evidence_credibility(e)
                            for e in evidence_sources])
        total = np.sum(weights)
        if len(weights) != len(trait_observations) or total <= 0:
            # BUG FIX: all-zero (or length-mismatched) credibility weights
            # previously reached np.average unnormalized and raised
            # ZeroDivisionError; fall back to uniform weights instead.
            weights = np.ones(len(trait_observations)) / len(trait_observations)
        else:
            weights = weights / total

        observations = np.asarray(trait_observations, dtype=float)
        weighted_mean = np.average(observations, weights=weights)
        weighted_variance = np.average((observations - weighted_mean) ** 2,
                                       weights=weights)

        # Exponential decay maps variance to a (0, 1] consistency score.
        consistency = np.exp(-2 * weighted_variance)
        return float(np.clip(consistency, 0, 1))


class UncertaintyQuantifier:
    """
    Quantifies different types of uncertainty in personality reconstruction.

    Separates epistemic uncertainty (uncertainty about the model) from
    aleatoric uncertainty (uncertainty inherent in the data) and blends the
    two into a single confidence figure.
    """

    def __init__(self):
        """Initialize uncertainty quantifier with default blending weights."""
        self.epistemic_weight = 0.6  # Contribution of model uncertainty
        self.aleatoric_weight = 0.4  # Contribution of data uncertainty

    def quantify_uncertainty(self,
                             trait_estimates: List[float],
                             evidence_sources: List["EvidenceSource"],
                             model_predictions: Optional[List[float]] = None) -> Dict[str, float]:
        """
        Quantify the uncertainty components for a set of trait estimates.

        Args:
            trait_estimates: Estimated trait values
            evidence_sources: Supporting evidence sources
            model_predictions: Optional model predictions for comparison

        Returns:
            Dictionary with 'epistemic_uncertainty', 'aleatoric_uncertainty',
            'total_uncertainty' and 'confidence' entries
        """
        epistemic = self._compute_epistemic_uncertainty(trait_estimates,
                                                        model_predictions)
        aleatoric = self._compute_aleatoric_uncertainty(trait_estimates,
                                                        evidence_sources)

        # Weighted blend of the two components.
        combined = (self.epistemic_weight * epistemic +
                    self.aleatoric_weight * aleatoric)

        return {
            'epistemic_uncertainty': epistemic,
            'aleatoric_uncertainty': aleatoric,
            'total_uncertainty': combined,
            'confidence': max(0.0, 1.0 - combined),
        }

    def _compute_epistemic_uncertainty(self,
                                       trait_estimates: List[float],
                                       model_predictions: Optional[List[float]]) -> float:
        """Compute epistemic (model) uncertainty."""
        usable_predictions = (model_predictions is not None and
                              len(model_predictions) == len(trait_estimates))

        if not usable_predictions:
            # Without predictions, fall back to the spread of the estimates.
            if len(trait_estimates) < 2:
                return 0.3  # Default moderate uncertainty
            spread = np.var(trait_estimates)
            return float(min(1.0, 2 * np.sqrt(spread)))

        # Root-mean-square deviation between estimates and predictions,
        # scaled and capped at 1.
        deltas = np.array(trait_estimates) - np.array(model_predictions)
        rmsd = np.sqrt(np.mean(deltas ** 2))
        return float(min(1.0, 2 * rmsd))

    def _compute_aleatoric_uncertainty(self,
                                       trait_estimates: List[float],
                                       evidence_sources: List["EvidenceSource"]) -> float:
        """Compute aleatoric (data) uncertainty."""
        if not evidence_sources:
            return 1.0  # No evidence at all: maximal data uncertainty

        # Mean credibility across all supporting sources.
        scorer = EvidenceCredibilityScorer()
        mean_credibility = np.mean([scorer.score_evidence_credibility(src)
                                    for src in evidence_sources])

        # Low credibility and sparse evidence both raise uncertainty; five
        # sources is treated as a sufficient evidence count.
        from_credibility = 1.0 - mean_credibility
        from_sparsity = max(0.0, 1.0 - len(evidence_sources) / 5.0)

        blended = 0.7 * from_credibility + 0.3 * from_sparsity
        return float(np.clip(blended, 0, 1))


class BayesianConfidenceCalculator:
    """
    Main Bayesian confidence calculator for personality reconstruction.

    Implements conjugate normal-normal Bayesian inference to compute
    posterior distributions and credible intervals for personality traits
    based on historical evidence.
    """

    # Two-sided z-score for a 68% interval: norm.ppf(0.84) ~= 0.9945.
    # BUG FIX: previously 0.68 * sigma was used, which covers only ~50% of a
    # normal distribution's mass rather than the advertised 68%.
    _Z_68 = 0.9945

    def __init__(self,
                 prior_mean: float = 0.5,
                 prior_std: float = 0.2,
                 min_evidence_threshold: int = 1):
        """
        Initialize Bayesian confidence calculator.

        Args:
            prior_mean: Prior mean for personality traits
            prior_std: Prior standard deviation
            min_evidence_threshold: Minimum observation count below which a
                trait is estimated from the prior alone and flagged speculative
        """
        self.prior_mean = prior_mean
        self.prior_std = prior_std
        self.min_evidence_threshold = min_evidence_threshold

        self.credibility_scorer = EvidenceCredibilityScorer()
        self.uncertainty_quantifier = UncertaintyQuantifier()

    def calculate_bayesian_confidence(self,
                                      trait_observations: Dict[str, List[float]],
                                      evidence_sources: Dict[str, List["EvidenceSource"]],
                                      trait_names: List[str]) -> "ConfidenceResult":
        """
        Calculate Bayesian confidence intervals for personality traits.

        Traits with fewer observations than min_evidence_threshold fall back
        to the prior and are recorded in speculative_elements.

        Args:
            trait_observations: Dictionary mapping trait_name -> observations
            evidence_sources: Dictionary mapping trait_name -> evidence sources
            trait_names: List of all personality trait names

        Returns:
            ConfidenceResult with Bayesian estimates and confidence measures
        """
        trait_estimates = {}
        uncertainty_breakdown = {}
        speculative_elements = []
        confidence_factors = {}
        overall_scores = []

        for trait_name in trait_names:
            observations = trait_observations.get(trait_name, [])
            sources = evidence_sources.get(trait_name, [])

            if len(observations) < self.min_evidence_threshold:
                # Insufficient evidence - mark as speculative and fall back
                # to the prior distribution.
                speculative_elements.append(f"{trait_name}: insufficient evidence")
                estimate = self._prior_only_estimate()
            else:
                estimate = self._compute_bayesian_estimate(
                    observations, sources, trait_name
                )

            trait_estimates[trait_name] = estimate

            # Per-trait uncertainty components (epistemic/aleatoric/total).
            uncertainty = self.uncertainty_quantifier.quantify_uncertainty(
                observations, sources
            )
            uncertainty_breakdown[trait_name] = uncertainty

            # Diagnostic factors behind the confidence number.
            confidence_factors[trait_name] = {
                'evidence_count': len(observations),
                'evidence_quality': np.mean([
                    self.credibility_scorer.score_evidence_credibility(s)
                    for s in sources
                ]) if sources else 0.0,
                'posterior_precision': 1.0 / (estimate.posterior_std ** 2),
                'consistency': self.credibility_scorer.assess_evidence_consistency(
                    sources, observations
                ) if len(observations) > 1 else 1.0
            }

            overall_scores.append(uncertainty['confidence'])

        overall_confidence = float(np.mean(overall_scores)) if overall_scores else 0.0

        # Mean credibility across all traits that have any sources.
        evidence_quality = float(np.mean([
            np.mean([self.credibility_scorer.score_evidence_credibility(s)
                     for s in sources])
            for sources in evidence_sources.values() if sources
        ])) if any(evidence_sources.values()) else 0.0

        temporal_consistency = self._compute_temporal_consistency(evidence_sources)
        cross_validation_score = self._compute_cross_validation_score(
            trait_observations, evidence_sources
        )

        return ConfidenceResult(
            trait_estimates=trait_estimates,
            overall_confidence_score=overall_confidence,
            evidence_quality_score=evidence_quality,
            temporal_consistency_score=temporal_consistency,
            cross_validation_score=cross_validation_score,
            uncertainty_breakdown=uncertainty_breakdown,
            speculative_elements=speculative_elements,
            confidence_factors=confidence_factors
        )

    def _prior_only_estimate(self) -> "BayesianEstimate":
        """Build an estimate equal to the prior (insufficient-evidence case)."""
        return BayesianEstimate(
            posterior_mean=self.prior_mean,
            posterior_std=self.prior_std,
            credible_interval_95=(
                max(0, self.prior_mean - 1.96 * self.prior_std),
                min(1, self.prior_mean + 1.96 * self.prior_std)
            ),
            credible_interval_68=(
                max(0, self.prior_mean - self._Z_68 * self.prior_std),
                min(1, self.prior_mean + self._Z_68 * self.prior_std)
            ),
            prior_mean=self.prior_mean,
            prior_std=self.prior_std,
            likelihood_strength=0.0,
            evidence_count=0,
            epistemic_uncertainty=0.8,  # High: no data constrains the model
            aleatoric_uncertainty=0.9   # High: essentially no evidence
        )

    def _compute_bayesian_estimate(self,
                                   observations: List[float],
                                   sources: List["EvidenceSource"],
                                   trait_name: str) -> "BayesianEstimate":
        """
        Compute the conjugate normal-normal posterior for one trait.

        Args:
            observations: Observed trait values (must be non-empty)
            sources: Evidence sources backing each observation
            trait_name: Name of the trait being estimated

        Returns:
            BayesianEstimate with posterior parameters and credible intervals

        Raises:
            ValueError: If no observations are provided
        """
        if not observations:
            raise ValueError("No observations provided")

        obs = np.asarray(observations, dtype=float)

        # Credibility-based weights; fall back to uniform weighting when the
        # sources do not line up with the observations or carry no weight
        # (previously a length mismatch caused a broadcast error).
        weights = self.credibility_scorer.compute_evidence_weights(sources, trait_name)
        if len(weights) != len(obs) or np.sum(weights) <= 0:
            weights = np.ones(len(obs)) / len(obs)

        effective_mean = float(np.sum(obs * weights) / np.sum(weights))

        # Kish effective sample size: (sum w)^2 / sum(w^2).  For normalized
        # weights this ranges from 1 (one dominant source) to n (uniform).
        # BUG FIX: the previous code used sum(w^2) directly, which is the
        # reciprocal -- effective n *shrank* as more evidence was added.
        sum_sq = float(np.sum(weights ** 2))
        if sum_sq > 0:
            effective_n = (float(np.sum(weights)) ** 2) / sum_sq
        else:
            effective_n = float(len(obs))

        # Likelihood precision (inverse variance of the evidence).
        if len(obs) > 1:
            weighted_var = np.average((obs - effective_mean) ** 2, weights=weights)
            likelihood_precision = effective_n / max(weighted_var, 1e-6)
        else:
            likelihood_precision = 1.0  # Default precision for one observation

        # Conjugate normal-normal update: precisions add; means combine
        # precision-weighted.
        prior_precision = 1.0 / (self.prior_std ** 2)
        posterior_precision = prior_precision + likelihood_precision
        posterior_mean = (
            (prior_precision * self.prior_mean +
             likelihood_precision * effective_mean) / posterior_precision
        )
        posterior_std = 1.0 / np.sqrt(posterior_precision)

        # Credible intervals, clipped to the valid trait range [0, 1].
        ci_95 = (
            max(0, posterior_mean - 1.96 * posterior_std),
            min(1, posterior_mean + 1.96 * posterior_std)
        )
        ci_68 = (
            max(0, posterior_mean - self._Z_68 * posterior_std),
            min(1, posterior_mean + self._Z_68 * posterior_std)
        )

        epistemic_uncertainty = self.uncertainty_quantifier._compute_epistemic_uncertainty(
            observations, None
        )
        aleatoric_uncertainty = self.uncertainty_quantifier._compute_aleatoric_uncertainty(
            observations, sources
        )

        return BayesianEstimate(
            posterior_mean=float(posterior_mean),
            posterior_std=float(posterior_std),
            credible_interval_95=ci_95,
            credible_interval_68=ci_68,
            prior_mean=self.prior_mean,
            prior_std=self.prior_std,
            likelihood_strength=float(likelihood_precision),
            evidence_count=len(observations),
            epistemic_uncertainty=epistemic_uncertainty,
            aleatoric_uncertainty=aleatoric_uncertainty
        )

    def _compute_temporal_consistency(self,
                                      evidence_sources: Dict[str, List["EvidenceSource"]]) -> float:
        """
        Compute temporal consistency across all evidence sources.

        Lower variance in recording distances (normalized by the squared
        maximum distance, making the score scale-invariant) yields a higher
        consistency score.
        """
        if not any(evidence_sources.values()):
            return 0.0

        temporal_distances = [
            source.temporal_distance
            for sources in evidence_sources.values()
            for source in sources
        ]

        if not temporal_distances:
            return 0.0

        temporal_variance = np.var(temporal_distances)
        max_distance = max(temporal_distances)

        normalized_variance = (temporal_variance / (max_distance ** 2)
                               if max_distance > 0 else 0)

        # Exponential decay maps normalized variance to a (0, 1] score.
        consistency = np.exp(-2 * normalized_variance)
        return float(np.clip(consistency, 0, 1))

    def _compute_cross_validation_score(self,
                                        trait_observations: Dict[str, List[float]],
                                        evidence_sources: Dict[str, List["EvidenceSource"]]) -> float:
        """
        Compute a leave-one-out cross-validation score across traits.

        For each trait with at least two observations (and a matching number
        of sources), each observation is predicted from a posterior fitted on
        the remaining ones; the mean absolute error is mapped onto [0, 1].

        Returns:
            Mean accuracy across eligible traits, 0.5 when none are eligible,
            0.0 when there are no observations at all.
        """
        if not trait_observations or not any(trait_observations.values()):
            return 0.0

        cv_scores = []

        for trait_name, observations in trait_observations.items():
            if len(observations) < 2:
                continue

            sources = evidence_sources.get(trait_name, [])
            if len(sources) != len(observations):
                continue

            fold_errors = []
            for i in range(len(observations)):
                # Hold out the i-th observation/source pair.
                train_obs = [obs for j, obs in enumerate(observations) if j != i]
                train_sources = [src for j, src in enumerate(sources) if j != i]

                if not train_obs:
                    continue

                train_estimate = self._compute_bayesian_estimate(
                    train_obs, train_sources, trait_name
                )
                # The posterior mean is the point prediction for the held-out
                # observation.
                fold_errors.append(abs(train_estimate.posterior_mean - observations[i]))

            if fold_errors:
                # Map mean absolute error (0.5 or more -> 0) to accuracy.
                accuracy = max(0.0, 1.0 - 2 * float(np.mean(fold_errors)))
                cv_scores.append(accuracy)

        if not cv_scores:
            return 0.5  # No eligible trait: default moderate score

        return float(np.mean(cv_scores))

    def compute_confidence_intervals(self,
                                     posterior_mean: float,
                                     posterior_std: float,
                                     confidence_levels: Optional[List[float]] = None) -> Dict[float, Tuple[float, float]]:
        """
        Compute central credible intervals for several confidence levels.

        Args:
            posterior_mean: Posterior mean
            posterior_std: Posterior standard deviation
            confidence_levels: Confidence levels to compute; defaults to
                [0.68, 0.95, 0.99].  (BUG FIX: was a mutable default list.)

        Returns:
            Dictionary mapping confidence_level -> (lower_bound, upper_bound),
            clipped to [0, 1]
        """
        if confidence_levels is None:
            confidence_levels = [0.68, 0.95, 0.99]

        intervals = {}
        for level in confidence_levels:
            # Two-sided z-score for this coverage level.
            z_score = norm.ppf(1 - (1 - level) / 2)
            margin_of_error = z_score * posterior_std
            intervals[level] = (
                float(max(0, posterior_mean - margin_of_error)),
                float(min(1, posterior_mean + margin_of_error))
            )

        return intervals