"""
Historical Personality Reconstructor

Main reconstruction engine that integrates all reconstruction techniques:
- Matrix completion for low-rank structure recovery
- Compressed sensing for sparse signal reconstruction  
- Temporal evolution modeling for personality changes
- Bayesian confidence calculation with uncertainty quantification

This module provides conflict resolution, dynamic memory updates, and
evidence weighting for comprehensive historical personality reconstruction.
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Union, Any
from dataclasses import dataclass, field
import logging
from datetime import datetime
import json

from .matrix_completion import MatrixCompletion, CompletionResult, create_historical_personality_matrix
from .compressed_sensing import CompressedSensing, SparseRecoveryResult
from .temporal_evolution import TemporalEvolutionModel, PersonalitySnapshot, LifeEvent, EvolutionResult
from .confidence_calculator import BayesianConfidenceCalculator, ConfidenceResult, EvidenceSource, EvidenceType
from ..personality.models import PersonalityVector, BigFiveTraits, ConfidenceScore

logger = logging.getLogger(__name__)


@dataclass
class ReconstructionConfig:
    """Configuration for historical personality reconstruction.

    Collects the tunable parameters for every component algorithm used by
    HistoricalReconstructor (matrix completion, compressed sensing,
    temporal evolution, Bayesian confidence) plus the integration and
    dynamic-memory-update settings.
    """
    # Matrix completion parameters
    matrix_nuclear_norm_weight: float = 1.0  # weight of the nuclear-norm (low-rank) term
    matrix_sparse_weight: float = 0.1  # weight of the sparse-component term
    matrix_completion_accuracy_target: float = 0.8  # target accuracy; not referenced in this module's visible code
    
    # Compressed sensing parameters  
    cs_sparsity_penalty: float = 0.1  # sparsity penalty passed to CompressedSensing
    cs_cultural_preservation_weight: float = 0.05  # cultural-context preservation weight
    cs_dictionary_type: str = 'personality'  # sparse dictionary selector for CompressedSensing
    
    # Temporal evolution parameters
    temporal_maturation_rate: float = 0.01  # maturation rate passed to TemporalEvolutionModel
    temporal_baseline_stability: float = 0.95  # baseline trait stability
    temporal_coherence_weight: float = 0.1  # temporal-coherence regularization weight
    
    # Bayesian confidence parameters
    bayesian_prior_mean: float = 0.5  # prior mean for trait values
    bayesian_prior_std: float = 0.2  # prior standard deviation
    min_evidence_threshold: int = 1  # minimum evidence count per trait
    
    # Integration parameters
    integration_method: str = 'weighted_ensemble'  # 'weighted_ensemble', 'hierarchical', 'sequential'
    conflict_resolution_strategy: str = 'evidence_weighted'  # 'evidence_weighted', 'confidence_weighted', 'majority_vote'
    uncertainty_propagation: bool = True  # not referenced in this module's visible code
    
    # Memory update parameters
    memory_decay_rate: float = 0.05  # exponential decay rate for old evidence influence
    evidence_update_threshold: float = 0.1  # minimum evidence impact that triggers an update
    max_iterations: int = 100  # iteration cap; not referenced in this module's visible code


@dataclass
class ReconstructionResult:
    """Complete results from historical personality reconstruction.

    Bundles the integrated personality vector with the per-method
    intermediate results and the quality metrics produced by
    HistoricalReconstructor.reconstruct_historical_personality.
    """
    # Final integrated personality profile.
    reconstructed_personality: PersonalityVector
    # Matrix-completion output, keyed by individual id.
    completion_results: Dict[str, CompletionResult]
    # Compressed-sensing output, keyed by individual id.
    compressed_sensing_results: Dict[str, SparseRecoveryResult]
    # None when no life events were supplied (temporal modeling is skipped).
    temporal_evolution_result: Optional[EvolutionResult]
    # Bayesian confidence output over all traits.
    confidence_result: ConfidenceResult
    
    # Integration metrics
    reconstruction_accuracy: float  # accuracy on observed traits, in [0, 1]
    consistency_score: float  # mean agreement score across methods
    conflict_resolution_log: List[str]  # human-readable descriptions of method disagreements
    evidence_utilization: Dict[str, float]  # evidence type -> mean credibility
    
    # Metadata
    reconstruction_timestamp: float  # POSIX timestamp when the result was assembled
    computation_time: float  # wall-clock seconds spent reconstructing
    algorithm_versions: Dict[str, str]  # component name -> version string


class ConflictResolver:
    """
    Arbitrates between personality estimates produced by different
    reconstruction algorithms.

    When the algorithms disagree on a trait, the resolver combines their
    estimates according to the configured strategy (evidence weighting,
    confidence weighting, or a discretized majority vote) and records any
    significant disagreement.
    """
    
    def __init__(self, strategy: str = 'evidence_weighted'):
        """
        Initialize conflict resolver.
        
        Args:
            strategy: Conflict resolution strategy ('evidence_weighted',
                'confidence_weighted', 'majority_vote'; anything else falls
                back to a simple average).
        """
        self.strategy = strategy
    
    def resolve_trait_conflicts(self,
                              trait_estimates: Dict[str, Dict[str, float]],
                              confidence_scores: Dict[str, Dict[str, float]],
                              evidence_weights: Dict[str, float]) -> Tuple[Dict[str, float], List[str]]:
        """
        Resolve conflicts in trait estimates from different methods.
        
        Args:
            trait_estimates: Method -> trait -> estimate
            confidence_scores: Method -> trait -> confidence
            evidence_weights: Method -> overall evidence weight
            
        Returns:
            Tuple of (resolved_estimates, conflict_log). Resolved values are
            clipped to [0, 1]; the log lists traits where estimates spread
            more than 0.2 apart.
        """
        method_names = list(trait_estimates.keys())
        if not method_names:
            return {}, []
        
        # Union of every trait any method estimated.
        traits_seen = set()
        for per_method in trait_estimates.values():
            traits_seen.update(per_method.keys())
        
        resolved: Dict[str, float] = {}
        log: List[str] = []
        
        for trait in traits_seen:
            # Methods that produced an estimate for this trait, in input order.
            contributing = [m for m in method_names if trait in trait_estimates[m]]
            if not contributing:
                continue
            
            values = [trait_estimates[m][trait] for m in contributing]
            # Missing confidences default to a neutral 0.5.
            confidences = [confidence_scores.get(m, {}).get(trait, 0.5) for m in contributing]
            
            # Record significant disagreement (spread above 0.2).
            if len(values) > 1 and max(values) - min(values) > 0.2:
                log.append(
                    f"Conflict in {trait}: {dict(zip(contributing, values))}"
                )
            
            # Combine according to the configured strategy.
            if self.strategy == 'evidence_weighted':
                combined = self._evidence_weighted_resolution(
                    values, contributing, evidence_weights
                )
            elif self.strategy == 'confidence_weighted':
                combined = self._confidence_weighted_resolution(values, confidences)
            elif self.strategy == 'majority_vote':
                combined = self._majority_vote_resolution(values)
            else:
                combined = np.mean(values)  # fallback: unweighted average
            
            resolved[trait] = float(np.clip(combined, 0, 1))
        
        return resolved, log
    
    def _evidence_weighted_resolution(self,
                                    values: List[float],
                                    methods: List[str],
                                    evidence_weights: Dict[str, float]) -> float:
        """Average the estimates, weighting by each method's evidence weight."""
        weights = [evidence_weights.get(m, 1.0) for m in methods]
        total = sum(weights)
        if total > 0:
            return sum(v * w for v, w in zip(values, weights)) / total
        # Degenerate weights: fall back to the plain mean.
        return np.mean(values)
    
    def _confidence_weighted_resolution(self,
                                      values: List[float],
                                      confidences: List[float]) -> float:
        """Average the estimates, weighting by per-trait confidence."""
        total = sum(confidences)
        if total > 0:
            return sum(v * c for v, c in zip(values, confidences)) / total
        return np.mean(values)
    
    def _majority_vote_resolution(self, values: List[float]) -> float:
        """Discretize estimates into 0.1-wide bins and return the midpoint of the most popular bin."""
        edges = np.linspace(0, 1, 11)  # 0.0, 0.1, ..., 1.0
        bin_idx = np.clip(np.digitize(values, edges) - 1, 0, len(edges) - 2)
        
        # Ties resolve to the lowest-index bin (argmax picks the first maximum).
        winner = np.argmax(np.bincount(bin_idx, minlength=len(edges) - 1))
        return (edges[winner] + edges[winner + 1]) / 2


class DynamicMemoryUpdater:
    """
    Tracks evidence arrival over time and decides when a stored personality
    reconstruction should be refreshed.

    Each incoming evidence batch is scored for impact; only batches whose
    average credibility clears the configured threshold are flagged for
    reprocessing. Older evidence loses influence exponentially over time.
    """
    
    def __init__(self, decay_rate: float = 0.05, update_threshold: float = 0.1):
        """
        Initialize dynamic memory updater.
        
        Args:
            decay_rate: Exponential rate at which old evidence loses influence
            update_threshold: Minimum evidence impact that triggers an update
        """
        self.decay_rate = decay_rate
        self.update_threshold = update_threshold
        # Chronological record of evaluated evidence batches.
        self.evidence_history: List[Dict] = []
        # Prior reconstructions (populated externally; reserved for re-runs).
        self.reconstruction_history: List[ReconstructionResult] = []
    
    def update_with_new_evidence(self,
                               current_reconstruction: ReconstructionResult,
                               new_evidence: List[EvidenceSource],
                               reconstructor: 'HistoricalReconstructor') -> Optional[ReconstructionResult]:
        """
        Evaluate new evidence against the current reconstruction.
        
        Args:
            current_reconstruction: Current personality reconstruction
            new_evidence: New evidence sources
            reconstructor: Reconstructor instance for re-computation
            
        Returns:
            Updated reconstruction if significant change, None otherwise.
            NOTE: the current implementation only logs significant evidence
            for later manual reprocessing and therefore always returns None.
        """
        if not new_evidence:
            return None
        
        # Gate the (expensive) re-reconstruction on evidence impact.
        impact = self._evaluate_evidence_impact(new_evidence)
        if impact < self.update_threshold:
            logger.info(f"Evidence impact {impact:.3f} below threshold {self.update_threshold}")
            return None
        
        logger.info(f"Significant new evidence detected (impact: {impact:.3f}), updating reconstruction")
        
        # Decay the influence of existing evidence; a full re-run would
        # combine these weights with the new evidence. For now the update
        # is only recorded for manual reprocessing.
        decayed = self._apply_temporal_decay()
        
        self.evidence_history.append({
            'timestamp': datetime.now().timestamp(),
            'new_evidence_count': len(new_evidence),
            'evidence_impact': impact,
            'action': 'logged_for_reprocessing'
        })
        
        return None  # Indicates manual reprocessing needed
    
    def _evaluate_evidence_impact(self, new_evidence: List[EvidenceSource]) -> float:
        """Return the mean credibility score of the new evidence batch (0.0 when empty)."""
        if not new_evidence:
            return 0.0
        
        # Heuristic: average credibility across the batch.
        from .confidence_calculator import EvidenceCredibilityScorer
        scorer = EvidenceCredibilityScorer()
        
        running_total = 0.0
        for source in new_evidence:
            running_total += scorer.score_evidence_credibility(source)
        
        return float(running_total / len(new_evidence))
    
    def _apply_temporal_decay(self) -> Dict[str, float]:
        """Compute an exponential decay weight for every recorded evidence batch."""
        now = datetime.now().timestamp()
        return {
            f'evidence_{index}': np.exp(-self.decay_rate * (now - entry['timestamp']))
            for index, entry in enumerate(self.evidence_history)
        }


class HistoricalReconstructor:
    """
    Main historical personality reconstruction engine.
    
    Integrates matrix completion, compressed sensing, temporal evolution,
    and Bayesian confidence calculation to reconstruct complete personality
    profiles from sparse historical data, then reconciles the per-method
    estimates through the ConflictResolver.
    """
    
    def __init__(self, config: Optional[ReconstructionConfig] = None):
        """
        Initialize historical reconstructor.
        
        Args:
            config: Configuration for reconstruction algorithms; defaults
                from ReconstructionConfig are used when omitted.
        """
        self.config = config or ReconstructionConfig()
        
        # Component algorithms, each parameterized from the shared config.
        self.matrix_completer = MatrixCompletion(
            nuclear_norm_weight=self.config.matrix_nuclear_norm_weight,
            sparse_weight=self.config.matrix_sparse_weight
        )
        
        self.compressed_sensing = CompressedSensing(
            sparsity_penalty=self.config.cs_sparsity_penalty,
            cultural_preservation_weight=self.config.cs_cultural_preservation_weight,
            dictionary_type=self.config.cs_dictionary_type
        )
        
        self.temporal_evolution = TemporalEvolutionModel(
            maturation_rate=self.config.temporal_maturation_rate,
            baseline_stability=self.config.temporal_baseline_stability,
            coherence_weight=self.config.temporal_coherence_weight
        )
        
        self.bayesian_calculator = BayesianConfidenceCalculator(
            prior_mean=self.config.bayesian_prior_mean,
            prior_std=self.config.bayesian_prior_std,
            min_evidence_threshold=self.config.min_evidence_threshold
        )
        
        # Integration components: conflict arbitration and evidence-driven
        # memory updates.
        self.conflict_resolver = ConflictResolver(self.config.conflict_resolution_strategy)
        self.memory_updater = DynamicMemoryUpdater(
            decay_rate=self.config.memory_decay_rate,
            update_threshold=self.config.evidence_update_threshold
        )
    
    def reconstruct_historical_personality(self,
                                         individual_id: str,
                                         sparse_personality_data: Dict[str, float],
                                         evidence_sources: List[EvidenceSource],
                                         life_events: Optional[List[LifeEvent]] = None,
                                         cultural_context: Optional[Dict] = None,
                                         trait_names: Optional[List[str]] = None) -> ReconstructionResult:
        """
        Reconstruct complete historical personality from sparse data.
        
        Runs up to four methods — matrix completion, compressed sensing,
        temporal evolution (only when life events are supplied), and
        Bayesian confidence estimation — then integrates their trait
        estimates via conflict resolution.
        
        Args:
            individual_id: Identifier for the historical individual
            sparse_personality_data: Observed personality trait values
                (trait name -> value); traits missing here are inferred
            evidence_sources: Historical evidence sources
            life_events: Known life events for temporal modeling
            cultural_context: Cultural context information
            trait_names: Names of personality traits (default: Big Five)
            
        Returns:
            ReconstructionResult with complete personality reconstruction
        
        Raises:
            Exception: any failure from a component algorithm is logged
                and re-raised.
        """
        start_time = datetime.now()
        
        # Default trait names (Big Five)
        if trait_names is None:
            trait_names = ['openness', 'conscientiousness', 'extraversion', 
                          'agreeableness', 'neuroticism']
        
        logger.info(f"Starting personality reconstruction for {individual_id}")
        logger.info(f"Observed traits: {len(sparse_personality_data)}/{len(trait_names)}")
        logger.info(f"Evidence sources: {len(evidence_sources)}")
        
        # Dense trait vector with NaN marking unobserved traits, plus a
        # boolean mask recording which entries were actually observed.
        trait_vector = np.array([sparse_personality_data.get(trait, np.nan) 
                                for trait in trait_names])
        observation_mask = ~np.isnan(trait_vector)
        trait_vector[np.isnan(trait_vector)] = 0  # Replace NaN with 0 for algorithms
        
        completion_results = {}
        cs_results = {}
        temporal_result = None
        confidence_result = None
        conflict_log = []
        
        try:
            # Method 1: Matrix Completion. The completer works on
            # multi-individual matrices; for one individual we build a
            # minimal single-row matrix.
            personality_matrix = np.array([trait_vector])
            mask_matrix = np.array([observation_mask])
            
            if np.sum(observation_mask) > 0:
                completion_result = self.matrix_completer.complete_personality_matrix(
                    personality_data=personality_matrix,
                    observation_mask=mask_matrix,
                    individual_ids=[individual_id],
                    trait_names=trait_names
                )
                completion_results[individual_id] = completion_result
                logger.info(f"Matrix completion: {completion_result.completion_accuracy:.1%} accuracy")
            
            # Method 2: Compressed Sensing sparse recovery of missing traits.
            if np.sum(observation_mask) > 0:
                cs_result = self.compressed_sensing.recovery_engine.recover_missing_traits(
                    observed_traits=trait_vector,
                    observation_mask=observation_mask,
                    cultural_context=cultural_context
                )
                cs_results[individual_id] = cs_result
                logger.info(f"Compressed sensing: {cs_result.sparsity_level:.1%} sparsity")
            
            # Method 3: Temporal Evolution (only when life events are known).
            if life_events:
                # Seed the timeline with a snapshot dated just before the
                # earliest life event; observed traits get higher confidence
                # (0.8) than imputed ones (0.3).
                initial_snapshot = PersonalitySnapshot(
                    timestamp=min(event.timestamp for event in life_events) - 1,
                    trait_values=trait_vector,
                    confidence_scores=np.where(observation_mask, 0.8, 0.3)
                )
                
                temporal_result = self.temporal_evolution.reconstruct_temporal_personality(
                    personality_snapshots=[initial_snapshot],
                    life_events=life_events,
                    trait_names=trait_names
                )
                logger.info(f"Temporal evolution: {temporal_result.temporal_consistency_score:.2f} consistency")
            
            # Method 4: Bayesian Confidence Calculation over observed traits,
            # assigning evidence sources to traits by relevance (> 0.1).
            trait_observations = {}
            evidence_by_trait = {}
            
            for i, trait in enumerate(trait_names):
                if observation_mask[i]:
                    trait_observations[trait] = [trait_vector[i]]
                    trait_evidence = [e for e in evidence_sources 
                                    if e.personality_relevance.get(trait, 0) > 0.1]
                    evidence_by_trait[trait] = trait_evidence
                else:
                    trait_observations[trait] = []
                    evidence_by_trait[trait] = []
            
            confidence_result = self.bayesian_calculator.calculate_bayesian_confidence(
                trait_observations=trait_observations,
                evidence_sources=evidence_by_trait,
                trait_names=trait_names
            )
            logger.info(f"Bayesian confidence: {confidence_result.overall_confidence_score:.2f}")
            
            # Integration: combine results from the different methods.
            # Bug fix: the resolver's conflict log was previously discarded
            # inside the helper, leaving conflict_resolution_log always
            # empty; it is now captured here.
            integrated_personality, conflict_log = self._integrate_reconstruction_results(
                completion_results=completion_results,
                cs_results=cs_results,
                temporal_result=temporal_result,
                confidence_result=confidence_result,
                trait_names=trait_names,
                individual_id=individual_id
            )
            
            # Create final personality vector
            final_personality_vector = self._create_personality_vector(
                integrated_personality=integrated_personality,
                confidence_result=confidence_result,
                individual_id=individual_id,
                trait_names=trait_names,
                evidence_sources=evidence_sources
            )
            
            # Compute final metrics
            reconstruction_accuracy = self._compute_reconstruction_accuracy(
                observed_traits=sparse_personality_data,
                reconstructed_traits=integrated_personality,
                trait_names=trait_names
            )
            
            consistency_score = self._compute_consistency_score(
                completion_results, cs_results, temporal_result, confidence_result
            )
            
            evidence_utilization = self._compute_evidence_utilization(evidence_sources)
            
        except Exception as e:
            logger.error(f"Reconstruction failed: {e}")
            raise
        
        computation_time = (datetime.now() - start_time).total_seconds()
        
        result = ReconstructionResult(
            reconstructed_personality=final_personality_vector,
            completion_results=completion_results,
            compressed_sensing_results=cs_results,
            temporal_evolution_result=temporal_result,
            confidence_result=confidence_result,
            reconstruction_accuracy=reconstruction_accuracy,
            consistency_score=consistency_score,
            conflict_resolution_log=conflict_log,
            evidence_utilization=evidence_utilization,
            reconstruction_timestamp=datetime.now().timestamp(),
            computation_time=computation_time,
            algorithm_versions={
                'matrix_completion': '1.0',
                'compressed_sensing': '1.0', 
                'temporal_evolution': '1.0',
                'bayesian_confidence': '1.0'
            }
        )
        
        logger.info(f"Reconstruction completed in {computation_time:.2f}s")
        logger.info(f"Final accuracy: {reconstruction_accuracy:.1%}")
        
        return result
    
    def _integrate_reconstruction_results(self,
                                        completion_results: Dict[str, CompletionResult],
                                        cs_results: Dict[str, SparseRecoveryResult],
                                        temporal_result: Optional[EvolutionResult],
                                        confidence_result: Optional[ConfidenceResult],
                                        trait_names: List[str],
                                        individual_id: str) -> Tuple[Dict[str, float], List[str]]:
        """
        Integrate trait estimates from the different reconstruction methods.
        
        Collects per-trait estimates, per-trait confidences and one overall
        evidence weight from each method that produced output, then lets
        the ConflictResolver arbitrate disagreements.
        
        Returns:
            Tuple of (integrated trait estimates, conflict log entries).
        """
        
        # Method -> trait -> estimate / confidence; method -> overall weight.
        trait_estimates = {}
        confidence_scores = {}
        evidence_weights = {}
        
        # Matrix completion results (row 0 is this individual's row).
        if individual_id in completion_results:
            completion_result = completion_results[individual_id]
            trait_estimates['matrix_completion'] = {
                trait: completion_result.completed_matrix[0, i] 
                for i, trait in enumerate(trait_names)
            }
            confidence_scores['matrix_completion'] = {
                trait: completion_result.confidence_scores[0, i]
                for i, trait in enumerate(trait_names)
            }
            evidence_weights['matrix_completion'] = completion_result.completion_accuracy
        
        # Compressed sensing results.
        if individual_id in cs_results:
            cs_result = cs_results[individual_id]
            trait_estimates['compressed_sensing'] = {
                trait: cs_result.recovered_signal[i]
                for i, trait in enumerate(trait_names)
            }
            confidence_scores['compressed_sensing'] = {
                trait: cs_result.recovery_confidence[i]
                for i, trait in enumerate(trait_names)
            }
            # Clamp at zero so a reconstruction error above 1.0 cannot turn
            # into a negative weight in the weighted ensemble.
            evidence_weights['compressed_sensing'] = max(0.0, 1.0 - cs_result.reconstruction_error)
        
        # Temporal evolution results: use the latest snapshot on the timeline.
        if temporal_result and temporal_result.personality_timeline:
            latest_snapshot = temporal_result.personality_timeline[-1]
            trait_estimates['temporal_evolution'] = {
                trait: latest_snapshot.trait_values[i]
                for i, trait in enumerate(trait_names)
            }
            confidence_scores['temporal_evolution'] = {
                trait: latest_snapshot.confidence_scores[i]
                for i, trait in enumerate(trait_names)
            }
            evidence_weights['temporal_evolution'] = temporal_result.temporal_consistency_score
        
        # Bayesian confidence results (posterior means and precisions).
        if confidence_result:
            trait_estimates['bayesian'] = {}
            confidence_scores['bayesian'] = {}
            for trait in trait_names:
                if trait in confidence_result.trait_estimates:
                    estimate = confidence_result.trait_estimates[trait]
                    trait_estimates['bayesian'][trait] = estimate.posterior_mean
                    # Convert posterior std to confidence (higher precision = higher confidence)
                    confidence_scores['bayesian'][trait] = 1.0 / (1.0 + estimate.posterior_std)
            evidence_weights['bayesian'] = confidence_result.overall_confidence_score
        
        # Resolve conflicts and integrate; return the log so the caller can
        # surface it in the final ReconstructionResult.
        integrated_traits, conflict_log = self.conflict_resolver.resolve_trait_conflicts(
            trait_estimates=trait_estimates,
            confidence_scores=confidence_scores,
            evidence_weights=evidence_weights
        )
        
        return integrated_traits, conflict_log
    
    def _create_personality_vector(self,
                                 integrated_personality: Dict[str, float],
                                 confidence_result: Optional[ConfidenceResult],
                                 individual_id: str,
                                 trait_names: List[str],
                                 evidence_sources: List[EvidenceSource]) -> PersonalityVector:
        """
        Build the final PersonalityVector from integrated trait estimates.
        
        Unestimated Big Five traits default to the neutral value 0.5.
        Evidence source ids are partitioned into text/behavioral/artifact
        source lists by their evidence type.
        """
        
        # Create Big Five traits
        big_five_traits = BigFiveTraits(
            openness=integrated_personality.get('openness', 0.5),
            conscientiousness=integrated_personality.get('conscientiousness', 0.5),
            extraversion=integrated_personality.get('extraversion', 0.5),
            agreeableness=integrated_personality.get('agreeableness', 0.5),
            neuroticism=integrated_personality.get('neuroticism', 0.5)
        )
        
        # Confidence score from the Bayesian result when available;
        # otherwise fall back to a default-constructed score.
        if confidence_result:
            confidence_score = ConfidenceScore(
                overall_confidence=confidence_result.overall_confidence_score,
                data_quality=confidence_result.evidence_quality_score,
                source_reliability=confidence_result.evidence_quality_score,
                temporal_stability=confidence_result.temporal_consistency_score,
                cross_validation_score=confidence_result.cross_validation_score
            )
        else:
            confidence_score = ConfidenceScore()
        
        # Single timestamp so created/last-updated are identical at creation.
        now = datetime.now().timestamp()
        personality_vector = PersonalityVector(
            big_five_traits=big_five_traits,
            confidence_score=confidence_score,
            person_id=individual_id,
            created_timestamp=now,
            last_updated=now,
            text_sources=[e.source_id for e in evidence_sources 
                         if e.evidence_type in [EvidenceType.BIOGRAPHICAL_TEXT, EvidenceType.PERSONAL_LETTERS]],
            behavioral_sources=[e.source_id for e in evidence_sources
                              if e.evidence_type == EvidenceType.BEHAVIORAL_RECORDS],
            artifact_sources=[e.source_id for e in evidence_sources
                            if e.evidence_type == EvidenceType.ARCHAEOLOGICAL_ARTIFACTS]
        )
        
        return personality_vector
    
    def _compute_reconstruction_accuracy(self,
                                       observed_traits: Dict[str, float],
                                       reconstructed_traits: Dict[str, float],
                                       trait_names: List[str]) -> float:
        """
        Compute reconstruction accuracy on the observed traits.
        
        Accuracy = max(0, 1 - 2 * mean absolute error), i.e. an MAE of 0.5
        or worse scores 0. Returns 0.0 when no trait is both observed and
        reconstructed.
        """
        if not observed_traits:
            return 0.0
        
        errors = []
        for trait in trait_names:
            if trait in observed_traits and trait in reconstructed_traits:
                error = abs(observed_traits[trait] - reconstructed_traits[trait])
                errors.append(error)
        
        if not errors:
            return 0.0
        
        # Convert mean absolute error to accuracy, scaled to [0, 1].
        mean_error = np.mean(errors)
        accuracy = max(0.0, 1.0 - 2 * mean_error)
        return float(accuracy)
    
    def _compute_consistency_score(self,
                                 completion_results: Dict,
                                 cs_results: Dict,
                                 temporal_result: Optional[EvolutionResult],
                                 confidence_result: Optional[ConfidenceResult]) -> float:
        """
        Compute a consistency score as the mean of each method's own
        quality/consistency metric; 0.0 when no method produced output.
        """
        consistency_scores = []
        
        # Add completion accuracy
        for result in completion_results.values():
            consistency_scores.append(result.completion_accuracy)
        
        # Add compressed sensing cultural preservation
        for result in cs_results.values():
            consistency_scores.append(result.cultural_preservation_score)
        
        # Add temporal consistency
        if temporal_result:
            consistency_scores.append(temporal_result.temporal_consistency_score)
        
        # Add confidence temporal consistency
        if confidence_result:
            consistency_scores.append(confidence_result.temporal_consistency_score)
        
        return float(np.mean(consistency_scores)) if consistency_scores else 0.0
    
    def _compute_evidence_utilization(self, evidence_sources: List[EvidenceSource]) -> Dict[str, float]:
        """
        Compute per-evidence-type utilization as the mean credibility score
        of the sources of that type. Returns {} when there is no evidence.
        """
        if not evidence_sources:
            return {}
        
        # Count sources and accumulate credibility per evidence type.
        type_counts = {}
        total_credibility_by_type = {}
        
        for evidence in evidence_sources:
            evidence_type = evidence.evidence_type.value
            type_counts[evidence_type] = type_counts.get(evidence_type, 0) + 1
            total_credibility_by_type[evidence_type] = (
                total_credibility_by_type.get(evidence_type, 0) + evidence.credibility_score
            )
        
        # Utilization = average credibility of that evidence type.
        utilization = {}
        for evidence_type, count in type_counts.items():
            utilization[evidence_type] = total_credibility_by_type[evidence_type] / count
        
        return utilization
    
    def batch_reconstruct_personalities(self,
                                      individual_data: Dict[str, Dict],
                                      trait_names: Optional[List[str]] = None) -> Dict[str, ReconstructionResult]:
        """
        Reconstruct personalities for multiple historical individuals.
        
        Individuals whose reconstruction raises are logged and skipped, so
        one failure does not abort the batch.
        
        Args:
            individual_data: Dict mapping individual_id -> individual_data
                (keys: 'personality_data', 'evidence_sources', 'life_events',
                'cultural_context')
            trait_names: Names of personality traits
            
        Returns:
            Dict mapping individual_id -> ReconstructionResult for the
            successful reconstructions only.
        """
        results = {}
        
        logger.info(f"Starting batch reconstruction for {len(individual_data)} individuals")
        
        for individual_id, data in individual_data.items():
            try:
                result = self.reconstruct_historical_personality(
                    individual_id=individual_id,
                    sparse_personality_data=data.get('personality_data', {}),
                    evidence_sources=data.get('evidence_sources', []),
                    life_events=data.get('life_events', []),
                    cultural_context=data.get('cultural_context'),
                    trait_names=trait_names
                )
                results[individual_id] = result
                logger.info(f"Completed reconstruction for {individual_id}: "
                           f"{result.reconstruction_accuracy:.1%} accuracy")
            except Exception as e:
                logger.error(f"Failed to reconstruct {individual_id}: {e}")
                continue
        
        logger.info(f"Batch reconstruction completed: {len(results)}/{len(individual_data)} successful")
        return results