"""
PersonalityEncoder system for transforming historical figure data into HDC vectors.

This module implements the core personality encoding system that transforms
multi-modal historical data into hyperdimensional computing (HDC) vectors
representing personality traits and cultural dimensions.
"""

import numpy as np
import logging
from typing import Dict, List, Optional, Tuple, Union, Any
from dataclasses import dataclass, field
import hashlib
import time
from scipy.special import softmax
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

from ..hdc.core import HDCOperations as CoreHDCOperations
from .models import (
    PersonalityVector, BigFiveTraits, CulturalDimensions, 
    ConfidenceScore, TraitType, CulturalDimension
)
from .traits import (
    TraitValidator, TraitNormalizer, TraitRelationshipModeler,
    BigFiveTraitDefinitions, CulturalDimensionDefinitions
)

# Configure module-level logging.
# NOTE(review): calling basicConfig() at import time configures the root logger
# as a side effect; library modules conventionally leave handler configuration
# to the application — confirm this is intentional.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


@dataclass
class HDCConfig:
    """Configuration parameters for HDC encoding.

    Attributes cover vector geometry (dimensions), operation strengths,
    noise injection, normalization, and the vector representation style.
    """
    vector_dimension: int = 10000
    seed_dimension: int = 512  # Dimension for seed vectors
    binding_strength: float = 1.0
    bundling_threshold: float = 0.1
    noise_level: float = 0.01
    normalization_method: str = "l2"  # "l2", "max", "none"
    use_bipolar: bool = True  # Use bipolar {-1, 1} or binary {0, 1}

    def validate(self) -> bool:
        """Return True when every parameter lies in its allowed range."""
        return (
            self.vector_dimension > 0
            and self.seed_dimension > 0
            and 0.0 <= self.binding_strength <= 2.0
            and 0.0 <= self.noise_level <= 0.1
            and self.normalization_method in ("l2", "max", "none")
        )




class PersonalityEncoder:
    """
    Core PersonalityEncoder class for transforming personality data into HDC vectors.
    
    This encoder implements multi-modal data fusion, Big Five personality traits
    encoding, cultural dimension encoding, and confidence scoring with uncertainty
    quantification.
    """
    
    def __init__(self, config: Optional[HDCConfig] = None):
        """
        Initialize the PersonalityEncoder.

        Args:
            config: HDC configuration parameters; defaults to HDCConfig()

        Raises:
            ValueError: if the supplied configuration fails validation
        """
        cfg = config or HDCConfig()
        if not cfg.validate():
            raise ValueError("Invalid HDC configuration")
        self.config = cfg

        # Shared HDC primitive operations (fixed seed for reproducibility).
        self.hdc_ops = CoreHDCOperations(dimension=cfg.vector_dimension, seed=42)
        self.scaler = StandardScaler()

        # Trait processing helpers.
        self.trait_validator = TraitValidator(self.hdc_ops)
        self.trait_normalizer = TraitNormalizer()
        self.relationship_modeler = TraitRelationshipModeler(
            self.hdc_ops, cfg.vector_dimension
        )

        # Deterministic seed vectors for traits, cultural dimensions and value levels.
        self._initialize_seed_vectors()

        # Category-vector cache plus default weights for multi-modal fusion.
        self._vector_cache = {}
        self._modality_weights = {
            'biographical': 0.25,
            'writings': 0.35,
            'behaviors': 0.30,
            'artifacts': 0.10,
        }

        logger.info(f"Initialized PersonalityEncoder with {cfg.vector_dimension}D vectors")
    
    def _normalize_vector(self, vector: np.ndarray) -> np.ndarray:
        """Normalize vector using specified method."""
        if self.config.normalization_method == "l2":
            norm = np.linalg.norm(vector)
            return vector / norm if norm > 0 else vector
        elif self.config.normalization_method == "max":
            max_val = np.max(np.abs(vector))
            return vector / max_val if max_val > 0 else vector
        elif self.config.normalization_method == "none":
            return vector
        else:
            return vector
    
    def encode_multi_modal_data(self, multi_modal_data: Dict[str, Any]) -> Dict[str, np.ndarray]:
        """
        Encode multi-modal historical data into HDC vectors.
        
        Args:
            multi_modal_data: Dictionary containing different data modalities:
                - 'biographical': Biographical text and life events
                - 'writings': Written works, speeches, letters
                - 'behaviors': Documented behaviors and actions
                - 'artifacts': Cultural artifacts and creations
                
        Returns:
            Dictionary mapping modality names to HDC vectors
        """
        modality_vectors = {}
        
        for modality, data in multi_modal_data.items():
            if data is None or not data:
                continue
                
            try:
                if modality == 'biographical':
                    vector = self._encode_biographical_data(data)
                elif modality == 'writings':
                    vector = self._encode_textual_data(data)
                elif modality == 'behaviors':
                    vector = self._encode_behavioral_data(data)
                elif modality == 'artifacts':
                    vector = self._encode_artifact_data(data)
                else:
                    logger.warning(f"Unknown modality: {modality}")
                    continue
                
                modality_vectors[modality] = vector
                logger.debug(f"Encoded {modality} data into {len(vector)}D vector")
                
            except Exception as e:
                logger.error(f"Failed to encode {modality} data: {e}")
                continue
        
        return modality_vectors
    
    def _encode_biographical_data(self, bio_data: Dict[str, Any]) -> np.ndarray:
        """Encode biographical data into HDC vector."""
        # Create vector components for different biographical aspects
        components = []
        
        # Life events and milestones
        if 'life_events' in bio_data:
            events_vector = self._encode_life_events(bio_data['life_events'])
            components.append(events_vector)
        
        # Social background and relationships
        if 'social_context' in bio_data:
            social_vector = self._encode_social_context(bio_data['social_context'])
            components.append(social_vector)
        
        # Educational background
        if 'education' in bio_data:
            education_vector = self._encode_education(bio_data['education'])
            components.append(education_vector)
        
        # Bundle all biographical components
        if components:
            return self.hdc_ops.bundling(*components)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_textual_data(self, text_data: List[Dict[str, Any]]) -> np.ndarray:
        """Encode textual works into HDC vector."""
        text_vectors = []
        
        for text_item in text_data:
            # Extract text features
            content = text_item.get('content', '')
            text_type = text_item.get('type', 'general')  # letter, speech, book, etc.
            
            # Simple text encoding based on content characteristics
            text_vector = self._encode_text_content(content, text_type)
            text_vectors.append(text_vector)
        
        if text_vectors:
            return self.hdc_ops.bundling(*text_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_behavioral_data(self, behavior_data: List[Dict[str, Any]]) -> np.ndarray:
        """Encode behavioral patterns into HDC vector."""
        behavior_vectors = []
        
        for behavior in behavior_data:
            behavior_type = behavior.get('type', 'general')
            context = behavior.get('context', '')
            frequency = behavior.get('frequency', 1)
            
            # Encode behavior based on type and context
            behavior_vector = self._encode_single_behavior(behavior_type, context, frequency)
            behavior_vectors.append(behavior_vector)
        
        if behavior_vectors:
            return self.hdc_ops.bundling(*behavior_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_artifact_data(self, artifact_data: List[Dict[str, Any]]) -> np.ndarray:
        """Encode cultural artifacts into HDC vector."""
        artifact_vectors = []
        
        for artifact in artifact_data:
            artifact_type = artifact.get('type', 'general')  # art, music, invention, etc.
            style = artifact.get('style', '')
            cultural_impact = artifact.get('cultural_impact', 0.5)
            
            # Encode artifact characteristics
            artifact_vector = self._encode_single_artifact(artifact_type, style, cultural_impact)
            artifact_vectors.append(artifact_vector)
        
        if artifact_vectors:
            return self.hdc_ops.bundling(*artifact_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_life_events(self, events: List[str]) -> np.ndarray:
        """Encode life events into HDC vector."""
        # Create event type vectors based on event categories
        event_categories = {
            'birth': ['born', 'birth'],
            'education': ['school', 'university', 'studied', 'learned'],
            'career': ['work', 'job', 'career', 'profession'],
            'achievement': ['won', 'achieved', 'succeeded', 'invented'],
            'conflict': ['war', 'battle', 'fought', 'conflict'],
            'leadership': ['led', 'ruled', 'governed', 'commanded'],
            'personal': ['married', 'family', 'relationship', 'personal']
        }
        
        event_vectors = []
        for event in events:
            event_lower = event.lower()
            for category, keywords in event_categories.items():
                if any(keyword in event_lower for keyword in keywords):
                    # Get or create category vector
                    category_vector = self._get_or_create_category_vector(f"event_{category}")
                    event_vectors.append(category_vector)
                    break
        
        if event_vectors:
            return self.hdc_ops.bundling(*event_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_social_context(self, social_data: Dict[str, Any]) -> np.ndarray:
        """Encode social context information."""
        social_vectors = []
        
        # Social class
        if 'social_class' in social_data:
            class_vector = self._get_or_create_category_vector(f"class_{social_data['social_class']}")
            social_vectors.append(class_vector)
        
        # Cultural background
        if 'culture' in social_data:
            culture_vector = self._get_or_create_category_vector(f"culture_{social_data['culture']}")
            social_vectors.append(culture_vector)
        
        # Geographic origin
        if 'region' in social_data:
            region_vector = self._get_or_create_category_vector(f"region_{social_data['region']}")
            social_vectors.append(region_vector)
        
        if social_vectors:
            return self.hdc_ops.bundling(*social_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_education(self, education_data: Dict[str, Any]) -> np.ndarray:
        """Encode educational background."""
        edu_vectors = []
        
        # Education level
        level = education_data.get('level', 'basic')
        level_vector = self._get_or_create_category_vector(f"edu_level_{level}")
        edu_vectors.append(level_vector)
        
        # Fields of study
        fields = education_data.get('fields', [])
        for field in fields:
            field_vector = self._get_or_create_category_vector(f"field_{field}")
            edu_vectors.append(field_vector)
        
        if edu_vectors:
            return self.hdc_ops.bundling(*edu_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_text_content(self, content: str, text_type: str) -> np.ndarray:
        """Encode text content characteristics."""
        # Simple content analysis - in practice, this would use NLP
        content_features = {
            'emotional_tone': self._analyze_emotional_tone(content),
            'complexity': len(content.split()) / 100.0,  # Normalized word count
            'formality': self._analyze_formality(content, text_type)
        }
        
        feature_vectors = []
        for feature, value in content_features.items():
            feature_vector = self._encode_scalar_feature(feature, value)
            feature_vectors.append(feature_vector)
        
        if feature_vectors:
            return self.hdc_ops.bundling(*feature_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')
    
    def _encode_single_behavior(self, behavior_type: str, context: str, frequency: float) -> np.ndarray:
        """Encode a single behavioral pattern."""
        # Get behavior type vector
        type_vector = self._get_or_create_category_vector(f"behavior_{behavior_type}")
        
        # Encode frequency
        freq_vector = self._encode_scalar_feature('frequency', min(frequency, 1.0))
        
        # Bind behavior with frequency
        return self.hdc_ops.binding(type_vector, freq_vector)
    
    def _encode_single_artifact(self, artifact_type: str, style: str, impact: float) -> np.ndarray:
        """Encode a single cultural artifact."""
        # Get artifact type vector
        type_vector = self._get_or_create_category_vector(f"artifact_{artifact_type}")
        
        # Encode style if provided
        style_vector = self._get_or_create_category_vector(f"style_{style}") if style else None
        
        # Encode cultural impact
        impact_vector = self._encode_scalar_feature('cultural_impact', impact)
        
        # Combine components
        vectors = [type_vector, impact_vector]
        if style_vector is not None:
            vectors.append(style_vector)
        
        return self.hdc_ops.bundling(*vectors)
    
    def _get_or_create_category_vector(self, category: str) -> np.ndarray:
        """Get or create a vector for a specific category."""
        if category in self._vector_cache:
            return self._vector_cache[category]
        
        # Create deterministic vector based on category name
        seed = hash(category) % (2**31)
        np.random.seed(seed)
        vector = self.hdc_ops.generate_random_vector('bipolar')
        self._vector_cache[category] = vector
        
        return vector
    
    def _encode_scalar_feature(self, feature_name: str, value: float) -> np.ndarray:
        """Encode a scalar feature value."""
        value = np.clip(value, 0.0, 1.0)
        
        # Use the existing trait value encoding method
        return self._encode_trait_value(value)
    
    def _analyze_emotional_tone(self, content: str) -> float:
        """Simple emotional tone analysis (placeholder for NLP analysis)."""
        positive_words = ['good', 'great', 'excellent', 'wonderful', 'love', 'happy', 'joy']
        negative_words = ['bad', 'terrible', 'awful', 'hate', 'sad', 'angry', 'fear']
        
        content_lower = content.lower()
        positive_count = sum(1 for word in positive_words if word in content_lower)
        negative_count = sum(1 for word in negative_words if word in content_lower)
        
        if positive_count + negative_count == 0:
            return 0.5  # Neutral
        
        return positive_count / (positive_count + negative_count)
    
    def _analyze_formality(self, content: str, text_type: str) -> float:
        """Simple formality analysis."""
        formal_types = ['official', 'academic', 'legal', 'diplomatic']
        informal_types = ['personal', 'diary', 'letter', 'casual']
        
        if text_type in formal_types:
            return 0.8
        elif text_type in informal_types:
            return 0.3
        else:
            return 0.5  # Default neutral formality
    
    def fuse_modality_vectors(self, modality_vectors: Dict[str, np.ndarray], 
                            custom_weights: Optional[Dict[str, float]] = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Fuse multi-modal vectors using the formula:
        v_historical = α·v_documented + β·v_inferred + γ·v_contextual
        
        Args:
            modality_vectors: Dictionary of modality vectors
            custom_weights: Optional custom weights for modalities
            
        Returns:
            Tuple of (v_documented, v_inferred, v_contextual) vectors
        """
        weights = custom_weights or self._modality_weights
        
        # Documented evidence (high reliability)
        documented_modalities = ['biographical', 'writings']
        documented_vectors = [modality_vectors[mod] for mod in documented_modalities 
                            if mod in modality_vectors]
        
        v_documented = self.hdc_ops.bundling(*documented_vectors) if documented_vectors else \
                      self.hdc_ops.generate_random_vector('bipolar')
        
        # Inferred patterns (medium reliability)
        inferred_modalities = ['behaviors']
        inferred_vectors = [modality_vectors[mod] for mod in inferred_modalities 
                          if mod in modality_vectors]
        
        v_inferred = self.hdc_ops.bundling(*inferred_vectors) if inferred_vectors else \
                    self.hdc_ops.generate_random_vector('bipolar')
        
        # Contextual evidence (lower reliability)
        contextual_modalities = ['artifacts']
        contextual_vectors = [modality_vectors[mod] for mod in contextual_modalities 
                            if mod in modality_vectors]
        
        v_contextual = self.hdc_ops.bundling(*contextual_vectors) if contextual_vectors else \
                      self.hdc_ops.generate_random_vector('bipolar')
        
        return v_documented, v_inferred, v_contextual
    
    def model_temporal_evolution(self, personality_snapshots: List[Tuple[float, Dict[str, np.ndarray]]]) -> np.ndarray:
        """
        Model temporal evolution of personality over time.
        
        Args:
            personality_snapshots: List of (timestamp, trait_vectors) tuples
            
        Returns:
            HDC vector representing temporal evolution pattern
        """
        if len(personality_snapshots) < 2:
            return self.hdc_ops.generate_random_vector('bipolar')
        
        # Compute temporal changes
        change_vectors = []
        
        for i in range(1, len(personality_snapshots)):
            prev_time, prev_vectors = personality_snapshots[i-1]
            curr_time, curr_vectors = personality_snapshots[i]
            
            # Compute change for each trait
            trait_changes = []
            for trait in prev_vectors:
                if trait in curr_vectors:
                    # Unbind to get change vector
                    change = self.hdc_ops.unbind(curr_vectors[trait], prev_vectors[trait])
                    trait_changes.append(change)
            
            if trait_changes:
                period_change = self.hdc_ops.bundling(*trait_changes)
                change_vectors.append(period_change)
        
        if change_vectors:
            return self.hdc_ops.bundling(*change_vectors)
        else:
            return self.hdc_ops.generate_random_vector('bipolar')

    def _initialize_seed_vectors(self):
        """Create deterministic seed vectors for Big Five traits, cultural
        dimensions, and the 11 discrete value levels used by
        ``_encode_trait_value``.

        Bug fix: seeds are now derived from SHA-256 of each name.  Python's
        built-in ``hash()`` is salted per process (PYTHONHASHSEED), so the
        previous ``hash(name)`` seeds were *not* reproducible across runs,
        contrary to the original comments.
        """
        def stable_seed(name: str) -> int:
            # 31-bit seed that is identical in every interpreter session.
            digest = hashlib.sha256(name.encode('utf-8')).digest()
            return int.from_bytes(digest[:4], 'big') % (2**31)

        def seeded_vector(name: str) -> np.ndarray:
            # NOTE(review): assumes hdc_ops draws from the global NumPy RNG — confirm.
            np.random.seed(stable_seed(name))
            return self.hdc_ops.generate_random_vector('bipolar')

        # Big Five trait seed vectors
        self.trait_seeds = {t.value: seeded_vector(t.value) for t in TraitType}

        # Cultural dimension seed vectors
        self.cultural_seeds = {d.value: seeded_vector(d.value) for d in CulturalDimension}

        # Value encoding vectors for intensity levels 0.0, 0.1, ..., 1.0
        self.value_seeds = {}
        for level in np.linspace(0.0, 1.0, 11):
            level_key = f"level_{level:.1f}"
            self.value_seeds[level_key] = seeded_vector(level_key)
    
    def _encode_trait_value(self, value: float) -> np.ndarray:
        """
        Encode a trait value (0.0-1.0) into HDC vector.

        The unit interval is discretized into 11 level seed vectors (one per
        0.1 step, created in ``_initialize_seed_vectors``).  A value falling
        between two levels is linearly interpolated between their seed vectors
        and re-thresholded back into a bipolar representation.

        Args:
            value: Trait value between 0.0 and 1.0 (clipped into range first)

        Returns:
            HDC vector representing the trait value
        """
        value = np.clip(value, 0.0, 1.0)

        # Find the two closest levels for interpolation
        levels = np.linspace(0.0, 1.0, 11)
        idx = np.searchsorted(levels, value)

        if idx == 0:
            # After clipping this means value <= 0.0, i.e. the lowest level;
            # return a copy so callers cannot mutate the cached seed.
            level_key = f"level_{levels[0]:.1f}"
            return self.value_seeds[level_key].copy()
        elif idx >= len(levels):
            # value above the highest level — defensive; clipping makes this
            # branch effectively unreachable in practice.
            level_key = f"level_{levels[-1]:.1f}"
            return self.value_seeds[level_key].copy()
        else:
            # Interpolate between the two bracketing levels
            lower_level = levels[idx - 1]
            upper_level = levels[idx]
            
            # Linear interpolation weight (0 → all lower, 1 → all upper)
            alpha = (value - lower_level) / (upper_level - lower_level)
            
            lower_key = f"level_{lower_level:.1f}"
            upper_key = f"level_{upper_level:.1f}"
            
            lower_vector = self.value_seeds[lower_key]
            upper_vector = self.value_seeds[upper_key]
            
            # Weighted combination done manually because the core bundling
            # operation does not support weights.
            interpolated = lower_vector * (1.0 - alpha) + upper_vector * alpha
            # Re-threshold so the result remains a valid bipolar vector with
            # the seeds' original dtype.
            interpolated = np.where(interpolated >= 0, 1, -1).astype(lower_vector.dtype)
            
            return interpolated
    
    def encode_big_five_traits(self, traits: BigFiveTraits) -> Dict[str, np.ndarray]:
        """
        Encode Big Five personality traits into HDC vectors.

        Each trait's identity seed is bound with the vector encoding its
        value; optional Gaussian noise plus bipolar re-thresholding adds
        robustness, and the configured normalization is applied last.

        Args:
            traits: Big Five traits structure

        Returns:
            Dictionary mapping trait names to float32 HDC vectors

        Raises:
            ValueError: if the traits structure fails validation
        """
        if not traits.validate():
            raise ValueError("Invalid Big Five traits")

        trait_values = {
            TraitType.OPENNESS.value: traits.openness,
            TraitType.CONSCIENTIOUSNESS.value: traits.conscientiousness,
            TraitType.EXTRAVERSION.value: traits.extraversion,
            TraitType.AGREEABLENESS.value: traits.agreeableness,
            TraitType.NEUROTICISM.value: traits.neuroticism,
        }

        encoded_traits = {}
        for name, value in trait_values.items():
            # Bind the trait's identity seed with its value encoding.
            bound = self.hdc_ops.binding(self.trait_seeds[name], self._encode_trait_value(value))

            # Optional noise injection for robustness.
            if self.config.noise_level > 0:
                bound = bound + np.random.normal(0, self.config.noise_level, bound.shape)
                if self.config.use_bipolar:
                    # Restore the bipolar {-1, 1} representation after noise.
                    bound = np.where(bound >= 0, 1, -1)

            encoded_traits[name] = self._normalize_vector(bound).astype(np.float32)

        return encoded_traits
    
    def encode_cultural_dimensions(self, dimensions: CulturalDimensions) -> Dict[str, np.ndarray]:
        """
        Encode cultural dimensions into HDC vectors using Hofstede's framework.

        Mirrors ``encode_big_five_traits``: each dimension's identity seed is
        bound with the vector encoding its value, optionally perturbed with
        noise and re-thresholded, then normalized.

        Args:
            dimensions: Cultural dimensions structure

        Returns:
            Dictionary mapping dimension names to float32 HDC vectors

        Raises:
            ValueError: if the dimensions structure fails validation
        """
        if not dimensions.validate():
            raise ValueError("Invalid cultural dimensions")

        dimension_values = {
            CulturalDimension.POWER_DISTANCE.value: dimensions.power_distance,
            CulturalDimension.INDIVIDUALISM.value: dimensions.individualism,
            CulturalDimension.MASCULINITY.value: dimensions.masculinity,
            CulturalDimension.UNCERTAINTY_AVOIDANCE.value: dimensions.uncertainty_avoidance,
            CulturalDimension.LONG_TERM_ORIENTATION.value: dimensions.long_term_orientation,
            CulturalDimension.INDULGENCE.value: dimensions.indulgence,
        }

        encoded_dimensions = {}
        for name, value in dimension_values.items():
            # Bind the dimension's identity seed with its value encoding.
            bound = self.hdc_ops.binding(self.cultural_seeds[name], self._encode_trait_value(value))

            # Optional noise injection for robustness.
            if self.config.noise_level > 0:
                bound = bound + np.random.normal(0, self.config.noise_level, bound.shape)
                if self.config.use_bipolar:
                    # Restore the bipolar {-1, 1} representation after noise.
                    bound = np.where(bound >= 0, 1, -1)

            encoded_dimensions[name] = self._normalize_vector(bound).astype(np.float32)

        return encoded_dimensions
    
    def compute_confidence_scores(self, input_data: Dict[str, Any],
                                trait_vectors: Dict[str, np.ndarray],
                                cultural_vectors: Dict[str, np.ndarray]) -> ConfidenceScore:
        """
        Build a ConfidenceScore with uncertainty quantification.

        Combines data-quality, source-reliability, (simulated)
        cross-validation and temporal-stability assessments with epistemic
        and aleatoric uncertainty estimates, then lets the score object
        derive its overall confidence.

        Args:
            input_data: Raw input data for analysis
            trait_vectors: Encoded trait vectors
            cultural_vectors: Encoded cultural vectors

        Returns:
            ConfidenceScore with uncertainty quantification
        """
        score = ConfidenceScore()

        score.data_quality = self._assess_data_quality(input_data)
        score.source_reliability = self._assess_source_reliability(input_data)

        # Simulated cross-validation based on internal vector consistency.
        score.cross_validation_score = self._compute_cross_validation_score(
            trait_vectors, cultural_vectors
        )
        score.temporal_stability = self._assess_temporal_stability(input_data)

        # Model uncertainty vs. data uncertainty.
        score.epistemic_uncertainty = self._compute_epistemic_uncertainty(
            trait_vectors, cultural_vectors
        )
        score.aleatoric_uncertainty = self._compute_aleatoric_uncertainty(input_data)

        # Let the score object derive its aggregate confidence value.
        score.compute_overall_confidence()

        return score
    
    def _assess_data_quality(self, input_data: Dict[str, Any]) -> float:
        """Assess the quality of input data."""
        quality_score = 0.5  # Default neutral score
        
        # Check data completeness
        completeness = 0.0
        total_fields = 0
        filled_fields = 0
        
        for key, value in input_data.items():
            total_fields += 1
            if value is not None and str(value).strip():
                filled_fields += 1
        
        if total_fields > 0:
            completeness = filled_fields / total_fields
        
        # Check data consistency (e.g., text length, numeric ranges)
        consistency = 0.8  # Default high consistency
        
        # Combine metrics
        quality_score = 0.6 * completeness + 0.4 * consistency
        
        return np.clip(quality_score, 0.0, 1.0)
    
    def _assess_source_reliability(self, input_data: Dict[str, Any]) -> float:
        """Assess the reliability of data sources."""
        reliability_score = 0.7  # Default good reliability
        
        # Check source types and credibility
        source_types = input_data.get('source_types', [])
        if 'academic' in source_types:
            reliability_score += 0.2
        if 'historical_document' in source_types:
            reliability_score += 0.1
        if 'secondary_source' in source_types:
            reliability_score -= 0.1
        
        return np.clip(reliability_score, 0.0, 1.0)
    
    def _compute_cross_validation_score(self, trait_vectors: Dict[str, np.ndarray],
                                      cultural_vectors: Dict[str, np.ndarray]) -> float:
        """Compute cross-validation performance score."""
        # Simulate cross-validation by checking vector consistency
        all_vectors = list(trait_vectors.values()) + list(cultural_vectors.values())
        
        if len(all_vectors) < 2:
            return 0.5
        
        # Compute pairwise similarities
        similarities = []
        for i in range(len(all_vectors)):
            for j in range(i + 1, len(all_vectors)):
                sim = abs(self.hdc_ops.cosine_similarity(all_vectors[i], all_vectors[j]))
                similarities.append(sim)
        
        # High internal consistency suggests good encoding
        mean_similarity = np.mean(similarities) if similarities else 0.5
        
        # Transform to cross-validation score (higher internal consistency = higher CV score)
        cv_score = min(0.95, 0.5 + mean_similarity * 0.3)
        
        return cv_score
    
    def _assess_temporal_stability(self, input_data: Dict[str, Any]) -> float:
        """Assess temporal stability of personality traits."""
        # For now, return a default score
        # In practice, this would analyze historical data consistency
        return 0.8
    
    def _compute_epistemic_uncertainty(self, trait_vectors: Dict[str, np.ndarray],
                                     cultural_vectors: Dict[str, np.ndarray]) -> float:
        """Compute epistemic (model) uncertainty."""
        # Simulate uncertainty based on vector variance
        all_vectors = list(trait_vectors.values()) + list(cultural_vectors.values())
        
        if not all_vectors:
            return 0.5
        
        # Compute variance across vector elements
        vector_matrix = np.stack(all_vectors)
        variance = np.var(vector_matrix, axis=0)
        mean_variance = np.mean(variance)
        
        # Normalize to [0, 1] range
        uncertainty = min(1.0, mean_variance / 2.0)  # Adjust scaling as needed
        
        return uncertainty
    
    def _compute_aleatoric_uncertainty(self, input_data: Dict[str, Any]) -> float:
        """Compute aleatoric (data) uncertainty."""
        # Simulate based on data noise and inconsistencies
        data_noise = 0.1  # Baseline noise level
        
        # Increase uncertainty for incomplete or conflicting data
        completeness = self._assess_data_quality(input_data)
        noise_factor = 1.0 - completeness
        
        uncertainty = data_noise + noise_factor * 0.2
        
        return np.clip(uncertainty, 0.0, 1.0)
    
    def create_composite_vector(self, trait_vectors: Dict[str, np.ndarray],
                               cultural_vectors: Dict[str, np.ndarray],
                               trait_weights: Optional[Dict[str, float]] = None,
                               cultural_weights: Optional[Dict[str, float]] = None) -> np.ndarray:
        """
        Create composite personality vector using formula:
        V_persona = w1 × V_trait1 + w2 × V_trait2 + ... + wn × V_traitn
        
        Args:
            trait_vectors: Dictionary of encoded trait vectors
            cultural_vectors: Dictionary of encoded cultural vectors  
            trait_weights: Optional weights for trait vectors
            cultural_weights: Optional weights for cultural vectors
            
        Returns:
            Composite personality vector
        """
        if not trait_vectors and not cultural_vectors:
            raise ValueError("No vectors to combine")
        
        # Default equal weights
        if trait_weights is None:
            trait_weights = {name: 1.0 for name in trait_vectors.keys()}
        
        if cultural_weights is None:
            cultural_weights = {name: 0.5 for name in cultural_vectors.keys()}  # Lower weight for cultural
        
        # Collect all vectors and weights
        all_vectors = []
        all_weights = []
        
        for name, vector in trait_vectors.items():
            all_vectors.append(vector)
            all_weights.append(trait_weights.get(name, 1.0))
        
        for name, vector in cultural_vectors.items():
            all_vectors.append(vector)
            all_weights.append(cultural_weights.get(name, 0.5))
        
        # Create composite using bundling operation - core HDC ops don't support weights
        if all_weights:
            # Manual weighted combination since core bundling doesn't support weights
            composite = np.zeros_like(all_vectors[0], dtype=float)
            for vector, weight in zip(all_vectors, all_weights):
                composite += vector * weight
            # Normalize the sum
            composite = self._normalize_vector(composite)
        else:
            composite = self.hdc_ops.bundling(*all_vectors)
        
        # Final normalization
        composite = self._normalize_vector(composite)
        
        return composite.astype(np.float32)
    
    def encode_personality_vector(self, 
                                big_five: BigFiveTraits,
                                cultural: CulturalDimensions,
                                input_data: Dict[str, Any],
                                person_id: str = "",
                                multi_modal_data: Optional[Dict[str, Any]] = None) -> PersonalityVector:
        """
        Main encoding function that creates a complete PersonalityVector with multi-modal fusion.
        
        Pipeline:
          1. Validate traits and cultural dimensions (issues are logged as
             warnings, not raised — encoding proceeds best-effort).
          2. Encode each trait and cultural dimension into HDC vectors.
          3. If multi-modal data is supplied, fuse it via
             v_historical = α·v_documented + β·v_inferred + γ·v_contextual
             and blend 70/30 with the base composite.
          4. Blend 80/20 with a trait-relationship vector.
          5. Compute confidence scores and assemble the PersonalityVector.
        
        Args:
            big_five: Big Five personality traits
            cultural: Cultural dimensions
            input_data: Raw input data for confidence assessment
            person_id: Identifier for the person
            multi_modal_data: Optional multi-modal data for enhanced encoding
                (keys 'writings'/'behaviors'/'artifacts' are copied onto the
                result as data sources)
            
        Returns:
            Complete PersonalityVector with HDC encodings
        
        Raises:
            ValueError: If the assembled PersonalityVector fails validation.
        """
        # Validate input traits and cultural dimensions; failures only warn.
        traits_valid, trait_issues = self.trait_validator.validate_big_five_traits(big_five)
        cultural_valid, cultural_issues = self.trait_validator.validate_cultural_dimensions(cultural)
        
        if not traits_valid:
            logger.warning(f"Trait validation issues for {person_id}: {trait_issues}")
        if not cultural_valid:
            logger.warning(f"Cultural validation issues for {person_id}: {cultural_issues}")
        
        # Encode trait and cultural vectors
        trait_vectors = self.encode_big_five_traits(big_five)
        cultural_vectors = self.encode_cultural_dimensions(cultural)
        
        # Process multi-modal data if provided
        if multi_modal_data:
            modality_vectors = self.encode_multi_modal_data(multi_modal_data)
            v_documented, v_inferred, v_contextual = self.fuse_modality_vectors(modality_vectors)
            
            # Implement the formula: v_historical = α·v_documented + β·v_inferred + γ·v_contextual
            # Heuristic reliability weights: documented evidence counts most.
            alpha, beta, gamma = 0.5, 0.3, 0.2  # Weights for reliability levels
            
            # Create enhanced composite using multi-modal fusion
            base_composite = self.create_composite_vector(trait_vectors, cultural_vectors)
            # Pre-scale then bundle, since core bundling doesn't support weights.
            v_documented_weighted = v_documented * alpha
            v_inferred_weighted = v_inferred * beta  
            v_contextual_weighted = v_contextual * gamma
            multi_modal_composite = self.hdc_ops.bundling(
                v_documented_weighted, v_inferred_weighted, v_contextual_weighted
            )
            
            # Combine base personality with multi-modal evidence (70/30 split —
            # heuristic; the base trait encoding dominates).
            base_weighted = base_composite * 0.7
            modal_weighted = multi_modal_composite * 0.3
            composite = self.hdc_ops.bundling(base_weighted, modal_weighted)
        else:
            # Standard composite without multi-modal data
            composite = self.create_composite_vector(trait_vectors, cultural_vectors)
        
        # Add trait relationship modeling
        relationship_vector = self.relationship_modeler.model_personality_profile_relationships(trait_vectors)
        
        # Combine with relationship information (80/20 heuristic split).
        composite_weighted = composite * 0.8
        relationship_weighted = relationship_vector * 0.2
        final_composite = self.hdc_ops.bundling(composite_weighted, relationship_weighted)
        
        # Compute enhanced confidence scores
        confidence = self.compute_confidence_scores(input_data, trait_vectors, cultural_vectors)
        
        # Add consistency assessment: the CV score is capped by (never raised
        # above) the trait-consistency score, then overall confidence is redone.
        consistency_score = self.relationship_modeler.analyze_trait_consistency(trait_vectors)
        confidence.cross_validation_score = min(confidence.cross_validation_score, consistency_score)
        confidence.compute_overall_confidence()
        
        # Create PersonalityVector
        personality_vector = PersonalityVector(
            big_five_traits=big_five,
            cultural_dimensions=cultural,
            confidence_score=confidence,
            trait_vectors=trait_vectors,
            cultural_vectors=cultural_vectors,
            composite_vector=final_composite,
            person_id=person_id,
            vector_dimension=self.config.vector_dimension,
            encoding_version="2.0",  # Updated version with multi-modal support
            created_timestamp=time.time(),
            last_updated=time.time()
        )
        
        # Add data sources if multi-modal data was provided
        if multi_modal_data:
            personality_vector.text_sources = multi_modal_data.get('writings', [])
            personality_vector.behavioral_sources = multi_modal_data.get('behaviors', [])
            personality_vector.artifact_sources = multi_modal_data.get('artifacts', [])
        
        # Validate the result
        if not personality_vector.validate():
            raise ValueError("Created invalid PersonalityVector")
        
        logger.info(f"Successfully encoded personality vector for {person_id} (consistency: {consistency_score:.3f})")
        
        return personality_vector
    
    def reconstruct_traits(self, personality_vector: PersonalityVector) -> Tuple[BigFiveTraits, float]:
        """
        Reconstruct Big Five traits from HDC vectors for validation.
        
        For each encoded trait vector, every candidate trait level in [0, 1]
        (101 steps) is re-encoded via binding with the trait seed and compared
        by cosine similarity; the best-matching level is the reconstructed value.
        
        Args:
            personality_vector: PersonalityVector to reconstruct from
            
        Returns:
            Tuple of (reconstructed_traits, reconstruction_accuracy), where
            accuracy is 1 - mean absolute error vs. the original traits,
            floored at 0.0.
        
        Raises:
            ValueError: If the personality vector has no trait vectors.
        """
        if not personality_vector.trait_vectors:
            raise ValueError("No trait vectors to reconstruct from")
        
        # The candidate levels and their value encodings are the same for every
        # trait, so compute them once instead of once per trait per level.
        # NOTE(review): assumes _encode_trait_value is deterministic for a given
        # level (true for seeded HDC encoders) — confirm against its definition.
        levels = np.linspace(0.0, 1.0, 101)  # Test with higher resolution
        value_vectors = [self._encode_trait_value(level) for level in levels]
        
        reconstructed_values = {}
        
        for trait_name, encoded_vector in personality_vector.trait_vectors.items():
            trait_seed = self.trait_seeds[trait_name]  # invariant per trait; hoisted out of the level loop
            
            best_similarity = -1.0
            best_value = 0.5
            
            for level, value_vector in zip(levels, value_vectors):
                # Reconstruct what the vector should look like for this level
                expected_vector = self.hdc_ops.binding(trait_seed, value_vector)
                
                # Compute similarity against the stored trait encoding
                similarity = self.hdc_ops.cosine_similarity(encoded_vector, expected_vector)
                
                if similarity > best_similarity:
                    best_similarity = similarity
                    best_value = level
            
            reconstructed_values[trait_name] = best_value
        
        # Create reconstructed traits, defaulting missing traits to the 0.5 midpoint
        reconstructed_traits = BigFiveTraits(
            openness=reconstructed_values.get(TraitType.OPENNESS.value, 0.5),
            conscientiousness=reconstructed_values.get(TraitType.CONSCIENTIOUSNESS.value, 0.5),
            extraversion=reconstructed_values.get(TraitType.EXTRAVERSION.value, 0.5),
            agreeableness=reconstructed_values.get(TraitType.AGREEABLENESS.value, 0.5),
            neuroticism=reconstructed_values.get(TraitType.NEUROTICISM.value, 0.5)
        )
        
        # Compute reconstruction accuracy
        original_traits = personality_vector.big_five_traits
        trait_diffs = [
            abs(original_traits.openness - reconstructed_traits.openness),
            abs(original_traits.conscientiousness - reconstructed_traits.conscientiousness),
            abs(original_traits.extraversion - reconstructed_traits.extraversion),
            abs(original_traits.agreeableness - reconstructed_traits.agreeableness),
            abs(original_traits.neuroticism - reconstructed_traits.neuroticism)
        ]
        
        # Accuracy as 1 - mean absolute error, clamped to be non-negative
        mean_error = np.mean(trait_diffs)
        accuracy = 1.0 - mean_error
        
        return reconstructed_traits, max(0.0, accuracy)