"""
Personality data models for HDC-based personality encoding.

This module defines the core data structures for representing personality vectors,
Big Five traits, cultural dimensions, and confidence scoring in the HDC framework.
"""

import json
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union, Any

import numpy as np


class TraitType(Enum):
    """Identifiers for the five OCEAN personality traits.

    Each member's value matches the corresponding ``BigFiveTraits``
    attribute name, so traits can be looked up via ``trait.value``.
    """

    OPENNESS = "openness"
    CONSCIENTIOUSNESS = "conscientiousness"
    EXTRAVERSION = "extraversion"
    AGREEABLENESS = "agreeableness"
    NEUROTICISM = "neuroticism"


class CulturalDimension(Enum):
    """Identifiers for Hofstede's six cultural dimensions.

    Each member's value matches the corresponding ``CulturalDimensions``
    attribute name.
    """

    POWER_DISTANCE = "power_distance"
    INDIVIDUALISM = "individualism"
    MASCULINITY = "masculinity"
    UNCERTAINTY_AVOIDANCE = "uncertainty_avoidance"
    LONG_TERM_ORIENTATION = "long_term_orientation"
    INDULGENCE = "indulgence"


@dataclass
class BigFiveTraits:
    """
    Big Five personality traits structure.

    Each trait is represented as a score between 0.0 and 1.0,
    along with confidence intervals and HDC vector representation.
    """
    # Trait scores, each expected in [0.0, 1.0]; 0.5 is the neutral default.
    openness: float = 0.5
    conscientiousness: float = 0.5
    extraversion: float = 0.5
    agreeableness: float = 0.5
    neuroticism: float = 0.5

    # Confidence intervals for each trait (lower, upper bounds)
    openness_ci: Tuple[float, float] = (0.0, 1.0)
    conscientiousness_ci: Tuple[float, float] = (0.0, 1.0)
    extraversion_ci: Tuple[float, float] = (0.0, 1.0)
    agreeableness_ci: Tuple[float, float] = (0.0, 1.0)
    neuroticism_ci: Tuple[float, float] = (0.0, 1.0)

    # Canonical trait attribute names, shared by validate()/to_dict()/from_dict().
    # (Plain class attribute, intentionally unannotated so the dataclass
    # machinery does not treat it as a field.)
    _TRAIT_FIELDS = ('openness', 'conscientiousness', 'extraversion',
                     'agreeableness', 'neuroticism')

    def validate(self) -> bool:
        """Return True if every trait score lies within [0.0, 1.0]."""
        return all(0.0 <= getattr(self, name) <= 1.0
                   for name in self._TRAIT_FIELDS)

    def to_dict(self) -> Dict[str, Union[float, Tuple[float, float]]]:
        """Convert to dictionary representation (scores plus '<trait>_ci' intervals)."""
        result: Dict[str, Union[float, Tuple[float, float]]] = {
            name: getattr(self, name) for name in self._TRAIT_FIELDS
        }
        for name in self._TRAIT_FIELDS:
            result[name + '_ci'] = getattr(self, name + '_ci')
        return result

    @classmethod
    def from_dict(cls, data: Dict) -> 'BigFiveTraits':
        """
        Create an instance from a dictionary.

        Confidence-interval values are coerced back to tuples because a
        JSON round-trip (json.dumps/json.loads) turns tuples into lists.
        Unknown keys are ignored so extra metadata in a serialized payload
        does not raise TypeError.
        """
        known = set(cls._TRAIT_FIELDS) | {name + '_ci' for name in cls._TRAIT_FIELDS}
        kwargs = {}
        for key, value in data.items():
            if key not in known:
                continue  # tolerate extra metadata rather than failing
            if key.endswith('_ci'):
                value = tuple(value)  # restore tuple type after JSON decoding
            kwargs[key] = value
        return cls(**kwargs)


@dataclass
class CulturalDimensions:
    """
    Hofstede's cultural dimensions structure.

    Each dimension is scored between 0.0 and 1.0 with confidence intervals.
    """
    power_distance: float = 0.5
    individualism: float = 0.5
    masculinity: float = 0.5
    uncertainty_avoidance: float = 0.5
    long_term_orientation: float = 0.5
    indulgence: float = 0.5

    # Confidence intervals for each dimension (lower, upper bounds)
    power_distance_ci: Tuple[float, float] = (0.0, 1.0)
    individualism_ci: Tuple[float, float] = (0.0, 1.0)
    masculinity_ci: Tuple[float, float] = (0.0, 1.0)
    uncertainty_avoidance_ci: Tuple[float, float] = (0.0, 1.0)
    long_term_orientation_ci: Tuple[float, float] = (0.0, 1.0)
    indulgence_ci: Tuple[float, float] = (0.0, 1.0)

    # Canonical dimension attribute names, shared by validate()/to_dict()/from_dict().
    # (Plain class attribute, intentionally unannotated so the dataclass
    # machinery does not treat it as a field.)
    _DIMENSION_FIELDS = ('power_distance', 'individualism', 'masculinity',
                         'uncertainty_avoidance', 'long_term_orientation',
                         'indulgence')

    def validate(self) -> bool:
        """Return True if every dimension score lies within [0.0, 1.0]."""
        return all(0.0 <= getattr(self, name) <= 1.0
                   for name in self._DIMENSION_FIELDS)

    def to_dict(self) -> Dict[str, Union[float, Tuple[float, float]]]:
        """Convert to dictionary representation (scores plus '<dimension>_ci' intervals)."""
        result: Dict[str, Union[float, Tuple[float, float]]] = {
            name: getattr(self, name) for name in self._DIMENSION_FIELDS
        }
        for name in self._DIMENSION_FIELDS:
            result[name + '_ci'] = getattr(self, name + '_ci')
        return result

    @classmethod
    def from_dict(cls, data: Dict) -> 'CulturalDimensions':
        """
        Create an instance from a dictionary.

        Confidence-interval values are coerced back to tuples because a
        JSON round-trip (json.dumps/json.loads) turns tuples into lists.
        Unknown keys are ignored so extra metadata in a serialized payload
        does not raise TypeError.
        """
        known = set(cls._DIMENSION_FIELDS) | {name + '_ci'
                                              for name in cls._DIMENSION_FIELDS}
        kwargs = {}
        for key, value in data.items():
            if key not in known:
                continue  # tolerate extra metadata rather than failing
            if key.endswith('_ci'):
                value = tuple(value)  # restore tuple type after JSON decoding
            kwargs[key] = value
        return cls(**kwargs)


@dataclass
class ConfidenceScore:
    """
    Confidence scoring system with uncertainty quantification.

    All component scores live in [0.0, 1.0]. ``overall_confidence`` can be
    set directly or derived from the components via
    ``compute_overall_confidence``.
    """
    overall_confidence: float = 0.5  # Overall confidence score [0, 1]
    data_quality: float = 0.5        # Quality of input data [0, 1]
    source_reliability: float = 0.5   # Reliability of data sources [0, 1]
    temporal_stability: float = 0.5   # Stability across time periods [0, 1]
    cross_validation_score: float = 0.5  # Cross-validation performance [0, 1]

    # Uncertainty measures
    epistemic_uncertainty: float = 0.0  # Model uncertainty
    aleatoric_uncertainty: float = 0.0  # Data uncertainty

    def compute_overall_confidence(self) -> float:
        """
        Compute overall confidence as weighted average of components.

        The last component rewards low combined uncertainty. The result is
        clamped to [0, 1], stored on ``overall_confidence`` as a builtin
        ``float`` (np.clip would store an np.float64, which breaks
        json.dumps on any structure embedding this score), and returned.
        """
        weights = [0.3, 0.2, 0.2, 0.15, 0.15]  # Configurable weights
        scores = [self.data_quality, self.source_reliability,
                  self.temporal_stability, self.cross_validation_score,
                  1.0 - (self.epistemic_uncertainty + self.aleatoric_uncertainty) / 2]

        weighted = sum(w * s for w, s in zip(weights, scores))
        # Clamp without NumPy so the stored value stays a plain float.
        self.overall_confidence = max(0.0, min(1.0, weighted))
        return self.overall_confidence

    def validate(self) -> bool:
        """Validate confidence scores are within valid ranges."""
        scores = [self.overall_confidence, self.data_quality, self.source_reliability,
                 self.temporal_stability, self.cross_validation_score,
                 self.epistemic_uncertainty, self.aleatoric_uncertainty]
        return all(0.0 <= score <= 1.0 for score in scores)


@dataclass
class TemporalEvolution:
    """
    Tracking temporal evolution of personality traits.
    """
    timestamps: List[float] = field(default_factory=list)  # Unix timestamps
    trait_snapshots: List[BigFiveTraits] = field(default_factory=list)
    cultural_snapshots: List[CulturalDimensions] = field(default_factory=list)
    confidence_snapshots: List[ConfidenceScore] = field(default_factory=list)

    def add_snapshot(self, timestamp: float, traits: BigFiveTraits,
                    cultural: CulturalDimensions, confidence: ConfidenceScore):
        """Record a new snapshot of traits, culture, and confidence at *timestamp*."""
        for series, item in ((self.timestamps, timestamp),
                             (self.trait_snapshots, traits),
                             (self.cultural_snapshots, cultural),
                             (self.confidence_snapshots, confidence)):
            series.append(item)

    def get_latest_snapshot(self) -> Optional[Tuple[float, BigFiveTraits,
                                                  CulturalDimensions, ConfidenceScore]]:
        """Return the most recent (timestamp, traits, cultural, confidence), or None."""
        if not self.timestamps:
            return None

        return (self.timestamps[-1],
                self.trait_snapshots[-1],
                self.cultural_snapshots[-1],
                self.confidence_snapshots[-1])

    def compute_stability(self, trait_type: TraitType, window_size: int = 5) -> float:
        """
        Compute temporal stability of one trait over the last *window_size*
        snapshots, as 1 minus the coefficient of variation (floored at 0).
        """
        if len(self.trait_snapshots) < 2:
            return 0.0

        # TraitType values match BigFiveTraits attribute names, so a single
        # getattr lookup replaces per-trait branching.
        recent_values = [getattr(snapshot, trait_type.value)
                         for snapshot in self.trait_snapshots[-window_size:]]

        if len(recent_values) < 2:
            return 0.0

        mean_val = np.mean(recent_values)
        std_val = np.std(recent_values)

        # A zero mean makes the coefficient of variation undefined; treat a
        # perfectly constant zero series as fully stable, otherwise unstable.
        if mean_val == 0:
            return 1.0 if std_val == 0 else 0.0

        return max(0.0, 1.0 - std_val / mean_val)


@dataclass
class PersonalityVector:
    """
    Core personality vector representation using HDC encoding.

    This class encapsulates all personality data including Big Five traits,
    cultural dimensions, confidence scoring, and temporal evolution tracking.
    """
    # Core personality data
    big_five_traits: BigFiveTraits = field(default_factory=BigFiveTraits)
    cultural_dimensions: CulturalDimensions = field(default_factory=CulturalDimensions)
    confidence_score: ConfidenceScore = field(default_factory=ConfidenceScore)

    # HDC vector representations; each array must have shape (vector_dimension,)
    trait_vectors: Dict[str, np.ndarray] = field(default_factory=dict)
    cultural_vectors: Dict[str, np.ndarray] = field(default_factory=dict)
    composite_vector: Optional[np.ndarray] = None

    # Temporal evolution
    temporal_evolution: TemporalEvolution = field(default_factory=TemporalEvolution)

    # Metadata
    person_id: str = ""
    vector_dimension: int = 10000  # Default HDC dimension
    encoding_version: str = "1.0"
    created_timestamp: float = 0.0  # Unix timestamp
    last_updated: float = 0.0       # Unix timestamp

    # Multi-modal data sources
    text_sources: List[str] = field(default_factory=list)
    behavioral_sources: List[str] = field(default_factory=list)
    artifact_sources: List[str] = field(default_factory=list)

    def validate(self) -> bool:
        """
        Validate the entire personality vector for consistency and correctness.

        Checks each component structure's own validate() and that every HDC
        vector has shape (vector_dimension,).
        """
        # Validate component structures
        if not self.big_five_traits.validate():
            return False

        if not self.cultural_dimensions.validate():
            return False

        if not self.confidence_score.validate():
            return False

        # Validate HDC vectors have correct dimensions
        for vector in self.trait_vectors.values():
            if vector.shape != (self.vector_dimension,):
                return False

        for vector in self.cultural_vectors.values():
            if vector.shape != (self.vector_dimension,):
                return False

        if self.composite_vector is not None:
            if self.composite_vector.shape != (self.vector_dimension,):
                return False

        return True

    def get_vector_norm(self) -> float:
        """Get the L2 norm of the composite vector (0.0 if no vector is set)."""
        if self.composite_vector is None:
            return 0.0
        return float(np.linalg.norm(self.composite_vector))

    def compute_similarity(self, other: 'PersonalityVector') -> float:
        """
        Compute cosine similarity with another personality vector.

        Returns 0.0 when either composite vector is missing or zero-length,
        otherwise a value clipped to [-1, 1].
        """
        if self.composite_vector is None or other.composite_vector is None:
            return 0.0

        # Normalize vectors
        norm_self = np.linalg.norm(self.composite_vector)
        norm_other = np.linalg.norm(other.composite_vector)

        if norm_self == 0 or norm_other == 0:
            return 0.0

        # Compute cosine similarity; clip guards against float round-off
        # pushing the ratio slightly outside [-1, 1].
        similarity = np.dot(self.composite_vector, other.composite_vector) / (norm_self * norm_other)
        return float(np.clip(similarity, -1.0, 1.0))

    def to_dict(self) -> Dict[str, Any]:
        """Export to dictionary (excluding numpy arrays)."""
        return {
            'big_five_traits': self.big_five_traits.to_dict(),
            'cultural_dimensions': self.cultural_dimensions.to_dict(),
            'person_id': self.person_id,
            'vector_dimension': self.vector_dimension,
            'encoding_version': self.encoding_version,
            'created_timestamp': self.created_timestamp,
            'last_updated': self.last_updated,
            'text_sources': self.text_sources,
            'behavioral_sources': self.behavioral_sources,
            'artifact_sources': self.artifact_sources,
            'vector_norm': self.get_vector_norm(),
            'confidence_score': {
                'overall_confidence': self.confidence_score.overall_confidence,
                'data_quality': self.confidence_score.data_quality,
                'source_reliability': self.confidence_score.source_reliability,
                'temporal_stability': self.confidence_score.temporal_stability,
                'cross_validation_score': self.confidence_score.cross_validation_score,
                'epistemic_uncertainty': self.confidence_score.epistemic_uncertainty,
                'aleatoric_uncertainty': self.confidence_score.aleatoric_uncertainty
            }
        }

    def to_json(self) -> str:
        """Export to JSON string (excluding numpy arrays)."""
        return json.dumps(self.to_dict(), indent=2)

    @staticmethod
    def _restore_ci_tuples(section: Dict[str, Any]) -> Dict[str, Any]:
        """Coerce '*_ci' entries back to tuples (JSON decodes tuples as lists)."""
        return {key: tuple(value) if key.endswith('_ci') else value
                for key, value in section.items()}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'PersonalityVector':
        """
        Create PersonalityVector instance from dictionary.

        Confidence-interval entries are restored to tuples so that a
        to_dict()/from_dict() round-trip through JSON reproduces the
        original field types.  Cultural confidence intervals are preserved
        (previously they were silently discarded, unlike the big-five ones).
        """
        # Create component objects from their dictionaries
        big_five = BigFiveTraits.from_dict(
            cls._restore_ci_tuples(data.get('big_five_traits', {})))

        cultural = CulturalDimensions(
            **cls._restore_ci_tuples(data.get('cultural_dimensions', {})))

        confidence = ConfidenceScore(**data.get('confidence_score', {}))

        # Create PersonalityVector instance
        return cls(
            big_five_traits=big_five,
            cultural_dimensions=cultural,
            confidence_score=confidence,
            person_id=data.get('person_id', ''),
            vector_dimension=data.get('vector_dimension', 10000),
            encoding_version=data.get('encoding_version', '1.0'),
            created_timestamp=data.get('created_timestamp', 0.0),
            last_updated=data.get('last_updated', 0.0),
            text_sources=data.get('text_sources', []),
            behavioral_sources=data.get('behavioral_sources', []),
            artifact_sources=data.get('artifact_sources', [])
        )

    def save_vectors(self, filepath: str) -> None:
        """
        Save HDC vectors to a compressed numpy .npz file.

        Note: np.savez_compressed appends '.npz' to *filepath* if it does
        not already end with it.
        """
        vector_data = {
            'trait_vectors': self.trait_vectors,
            'cultural_vectors': self.cultural_vectors,
            'composite_vector': self.composite_vector,
            'metadata': {
                'person_id': self.person_id,
                'vector_dimension': self.vector_dimension,
                'encoding_version': self.encoding_version
            }
        }
        np.savez_compressed(filepath, **vector_data)

    def load_vectors(self, filepath: str) -> None:
        """
        Load HDC vectors previously written by save_vectors.

        allow_pickle is required because the dict-valued entries are stored
        as object arrays — only load files from trusted sources.
        """
        # Context manager ensures the underlying .npz file handle is closed
        # (a bare np.load keeps it open for the NpzFile's lifetime).
        with np.load(filepath, allow_pickle=True) as data:
            self.trait_vectors = data['trait_vectors'].item()
            self.cultural_vectors = data['cultural_vectors'].item()
            self.composite_vector = data['composite_vector']
            metadata = data['metadata'].item()

        self.person_id = metadata['person_id']
        self.vector_dimension = metadata['vector_dimension']
        self.encoding_version = metadata['encoding_version']

# Utility functions for working with personality models

def create_default_personality_vector(person_id: str, 
                                    vector_dimension: int = 10000) -> PersonalityVector:
    """
    Create a default personality vector with neutral traits.

    Args:
        person_id: Identifier for the person this vector describes.
        vector_dimension: Dimensionality of the HDC hypervectors.

    Returns:
        A PersonalityVector with all traits at their neutral defaults and
        both timestamps set to the same current Unix time.
    """
    # Read the clock once so created_timestamp == last_updated exactly
    # (the original evaluated "now" twice and the values could differ).
    now = time.time()
    return PersonalityVector(
        person_id=person_id,
        vector_dimension=vector_dimension,
        created_timestamp=now,
        last_updated=now
    )


def merge_personality_vectors(vectors: List[PersonalityVector], 
                            weights: Optional[List[float]] = None) -> PersonalityVector:
    """
    Merge multiple personality vectors using weighted averaging.

    Args:
        vectors: List of personality vectors to merge
        weights: Optional weights for each vector (default: equal weights).
            Weights are normalized to sum to 1 before merging.

    Returns:
        Merged personality vector

    Raises:
        ValueError: If *vectors* is empty, the number of weights does not
            match the number of vectors, or the weights sum to zero
            (normalization would divide by zero).
    """
    if not vectors:
        raise ValueError("Cannot merge empty list of vectors")

    if weights is None:
        weights = [1.0 / len(vectors)] * len(vectors)

    if len(weights) != len(vectors):
        raise ValueError("Number of weights must match number of vectors")

    # Normalize weights; guard against a zero sum before dividing.
    total_weight = sum(weights)
    if total_weight == 0:
        raise ValueError("Weights must not sum to zero")
    weights = [w / total_weight for w in weights]

    # Create merged vector
    merged = PersonalityVector(
        person_id=f"merged_{len(vectors)}_vectors",
        vector_dimension=vectors[0].vector_dimension
    )

    def _weighted_average(attr_names, component):
        """Weighted average of each named attribute of component(vector)."""
        averaged = {name: 0.0 for name in attr_names}
        for vector, weight in zip(vectors, weights):
            source = component(vector)
            for name in attr_names:
                averaged[name] += getattr(source, name) * weight
        return averaged

    # Merge Big Five traits
    trait_names = ['openness', 'conscientiousness', 'extraversion',
                   'agreeableness', 'neuroticism']
    merged.big_five_traits = BigFiveTraits(
        **_weighted_average(trait_names, lambda v: v.big_five_traits))

    # Merge cultural dimensions
    cultural_names = ['power_distance', 'individualism', 'masculinity',
                      'uncertainty_avoidance', 'long_term_orientation', 'indulgence']
    merged.cultural_dimensions = CulturalDimensions(
        **_weighted_average(cultural_names, lambda v: v.cultural_dimensions))

    # Average confidence scores
    confidence_names = ['overall_confidence', 'data_quality', 'source_reliability',
                        'temporal_stability', 'cross_validation_score',
                        'epistemic_uncertainty', 'aleatoric_uncertainty']
    merged.confidence_score = ConfidenceScore(
        **_weighted_average(confidence_names, lambda v: v.confidence_score))

    # Weighted sum of composite HDC vectors, only when every input has one.
    if all(vector.composite_vector is not None for vector in vectors):
        merged.composite_vector = np.zeros(vectors[0].vector_dimension)
        for vector, weight in zip(vectors, weights):
            merged.composite_vector += vector.composite_vector * weight

    return merged