"""
Compressed Sensing for Historical Personality Trait Recovery

This module implements compressed sensing techniques adapted for recovering missing
personality traits from historical data. It leverages sparsity assumptions and
non-coherent sampling to reconstruct missing information while preserving
cultural context.

Key Features:
- Sparse signal recovery using L1 optimization
- Historical data pattern adaptation
- Cultural context preservation
- Non-coherent sampling for robust reconstruction
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Union
from scipy.optimize import minimize
from scipy.sparse import csr_matrix
from scipy.linalg import svd, pinv
import warnings
from dataclasses import dataclass
import logging

logger = logging.getLogger(__name__)


@dataclass
class SparseRecoveryResult:
    """Results from compressed sensing recovery."""
    # Full trait vector after recovery; observed entries are copied through
    # from the input, missing entries are filled by the sparse reconstruction.
    recovered_signal: np.ndarray
    # Coefficient vector of the recovered signal in the transform dictionary.
    sparse_coefficients: np.ndarray
    # Squared-error measure on the observed entries (sum of squared
    # differences divided by the number of observed entries).
    reconstruction_error: float
    # Fraction of dictionary coefficients with magnitude above 1e-6
    # (so lower values mean a sparser representation).
    sparsity_level: float
    # Maximum absolute off-diagonal entry of the normalized Gram matrix of
    # the sensing matrix; lower coherence favors reliable recovery.
    coherence_measure: float
    # Score in (0, 1]; 1.0 when no cultural context was supplied.
    cultural_preservation_score: float
    # Per-trait confidence values clipped to [0.1, 1.0]; observed traits get 1.0.
    recovery_confidence: np.ndarray


class HistoricalSparseRecovery:
    """
    Compressed sensing recovery adapted for historical personality data.

    Recovers missing personality traits by assuming the underlying trait
    vector has a sparse representation in a transform domain (DCT, Haar-like
    wavelet, personality-factor, or identity dictionary). Recovery solves an
    L1-penalized basis pursuit problem with optional cultural-context and
    historical-pattern side penalties.
    """

    def __init__(self,
                 sparsity_penalty: float = 0.1,
                 cultural_preservation_weight: float = 0.05,
                 max_iterations: int = 1000,
                 convergence_tolerance: float = 1e-6,
                 dictionary_type: str = 'dct'):
        """
        Initialize historical sparse recovery.

        Args:
            sparsity_penalty: Weight for the L1 sparsity penalty
            cultural_preservation_weight: Weight for cultural context preservation
            max_iterations: Maximum optimization iterations
            convergence_tolerance: Convergence criterion; also used as the
                data-fidelity epsilon in the basis pursuit constraint
            dictionary_type: Transform dictionary ('dct', 'wavelet',
                'personality', 'identity')
        """
        self.sparsity_penalty = sparsity_penalty
        self.cultural_preservation_weight = cultural_preservation_weight
        self.max_iterations = max_iterations
        self.convergence_tolerance = convergence_tolerance
        self.dictionary_type = dictionary_type
        # Cached transform dictionary; rebuilt lazily when the trait count changes.
        self._dictionary: Optional[np.ndarray] = None

    def recover_missing_traits(self,
                              observed_traits: np.ndarray,
                              observation_mask: np.ndarray,
                              cultural_context: Optional[Dict] = None,
                              historical_patterns: Optional[np.ndarray] = None) -> 'SparseRecoveryResult':
        """
        Recover missing personality traits using compressed sensing.

        Args:
            observed_traits: Vector of personality traits; entries where the
                mask is False are ignored and reconstructed
            observation_mask: Boolean (or 0/1) mask indicating observed traits
            cultural_context: Cultural context information for preservation
            historical_patterns: Prior historical patterns for guidance

        Returns:
            SparseRecoveryResult with recovered traits and diagnostics
        """
        n_traits = len(observed_traits)
        # Coerce to bool so an integer 0/1 mask selects rows by position
        # (boolean indexing) instead of being treated as fancy indices.
        mask = np.asarray(observation_mask, dtype=bool)

        # Build (and cache) the transform dictionary for this trait count.
        if self._dictionary is None or self._dictionary.shape[0] != n_traits:
            self._dictionary = self._build_dictionary(n_traits)

        # Sensing model y = A x, where A is the dictionary restricted to the
        # observed rows (equivalent to diag(mask) @ dictionary with the
        # all-zero unobserved rows dropped, without the O(n^2) diag product).
        A = self._dictionary[mask, :]
        y = observed_traits[mask]

        # Solve: min ||x||_1 (+ side penalties) s.t. ||Ax - y||_2 <= epsilon
        sparse_coefficients = self._solve_basis_pursuit(A, y, cultural_context, historical_patterns)

        # Raw reconstruction from the sparse code.
        reconstruction = self._dictionary @ sparse_coefficients

        # BUG FIX: measure fidelity on the raw reconstruction BEFORE the
        # observed entries are clamped back; computing it afterwards (as the
        # previous version did) always yields exactly 0.0.
        reconstruction_error = self._compute_reconstruction_error(
            observed_traits, reconstruction, mask
        )

        # Observed entries are trusted data: preserve them exactly.
        recovered_signal = reconstruction.copy()
        recovered_signal[mask] = observed_traits[mask]

        # Diagnostics (guard the degenerate zero-length case).
        n_coeffs = len(sparse_coefficients)
        sparsity_level = (
            np.sum(np.abs(sparse_coefficients) > 1e-6) / n_coeffs if n_coeffs else 0.0
        )

        coherence_measure = self._compute_coherence(A)

        cultural_preservation_score = self._compute_cultural_preservation(
            recovered_signal, cultural_context
        )

        recovery_confidence = self._compute_recovery_confidence(
            recovered_signal, mask, coherence_measure, sparsity_level
        )

        logger.info(f"Sparse recovery completed: {sparsity_level:.1%} sparsity, "
                   f"{reconstruction_error:.4f} error")

        return SparseRecoveryResult(
            recovered_signal=recovered_signal,
            sparse_coefficients=sparse_coefficients,
            reconstruction_error=reconstruction_error,
            sparsity_level=sparsity_level,
            coherence_measure=coherence_measure,
            cultural_preservation_score=cultural_preservation_score,
            recovery_confidence=recovery_confidence
        )

    def _build_dictionary(self, n_traits: int) -> np.ndarray:
        """
        Build the (n_traits, n_traits) transform dictionary whose columns
        are the atoms used for sparse representation.
        """
        if self.dictionary_type == 'dct':
            # Orthonormal DCT-II basis, vectorized:
            # entry (n, k) = c_k * cos(pi * k * (2n + 1) / (2N)).
            rows = np.arange(n_traits)
            cols = np.arange(n_traits)
            dictionary = np.sqrt(2 / n_traits) * np.cos(
                np.pi * np.outer(2 * rows + 1, cols) / (2 * n_traits)
            )
            # The DC atom (k = 0) carries its own normalization.
            dictionary[:, 0] = 1 / np.sqrt(n_traits)
            return dictionary

        elif self.dictionary_type == 'wavelet':
            # Identity basis with the first n_traits // 2 columns replaced by
            # Haar-style difference atoms over adjacent trait pairs.
            dictionary = np.eye(n_traits)
            haar_pair = np.array([1.0, -1.0]) / np.sqrt(2)
            for i in range(n_traits // 2):
                basis = np.zeros(n_traits)
                basis[2 * i:2 * i + 2] = haar_pair
                # (previous always-true `if i < n_traits` guard removed)
                dictionary[:, i] = basis
            return dictionary

        elif self.dictionary_type == 'personality':
            # Personality-specific dictionary based on trait correlations.
            return self._build_personality_dictionary(n_traits)

        else:  # identity
            return np.eye(n_traits)

    def _build_personality_dictionary(self, n_traits: int) -> np.ndarray:
        """
        Build a dictionary reflecting known personality factor structure.

        Assumes the first five traits are the Big Five in O, C, E, A, N
        order (TODO confirm this ordering against callers).
        """
        dictionary = np.eye(n_traits)

        if n_traits >= 5:
            # General factor: all traits load equally.
            dictionary[:, 0] = np.ones(n_traits) / np.sqrt(n_traits)

            # Stability factor: C and A load positively, N negatively.
            stability_factor = np.zeros(n_traits)
            stability_factor[1] = 1   # Conscientiousness
            stability_factor[3] = 1   # Agreeableness
            stability_factor[4] = -1  # Neuroticism (reversed)
            dictionary[:, 1] = stability_factor / np.linalg.norm(stability_factor)

            # Plasticity factor: O and E load positively.
            plasticity_factor = np.zeros(n_traits)
            plasticity_factor[0] = 1  # Openness
            plasticity_factor[2] = 1  # Extraversion
            dictionary[:, 2] = plasticity_factor / np.linalg.norm(plasticity_factor)

        # Orthonormalize via SVD so the atoms form a proper basis.
        U, _, _ = svd(dictionary)
        return U

    def _solve_basis_pursuit(self,
                            A: np.ndarray,
                            y: np.ndarray,
                            cultural_context: Optional[Dict],
                            historical_patterns: Optional[np.ndarray]) -> np.ndarray:
        """
        Solve the basis pursuit problem with cultural preservation.

            minimize  ||x||_1 + lam_cultural * cultural_loss(Dx)
                               + 0.01 * ||Dx - patterns||_2^2
            subject to ||Ax - y||_2 <= epsilon

        where D is the transform dictionary and epsilon is
        self.convergence_tolerance.

        Returns:
            Coefficient vector; falls back to the least-squares
            initialization when the solver fails or does not converge.
        """
        n_coeffs = A.shape[1]

        def objective(x):
            # L1 sparsity penalty (non-smooth; SLSQP handles it only
            # approximately, which is accepted here).
            l1_penalty = self.sparsity_penalty * np.sum(np.abs(x))

            # Cultural preservation on the reconstructed signal.
            cultural_penalty = 0.0
            if cultural_context is not None:
                reconstructed = self._dictionary @ x
                cultural_penalty = self.cultural_preservation_weight * self._compute_cultural_loss(
                    reconstructed, cultural_context
                )

            # Quadratic pull toward prior historical patterns.
            pattern_penalty = 0.0
            if historical_patterns is not None:
                reconstructed = self._dictionary @ x
                pattern_penalty = 0.01 * np.sum((reconstructed - historical_patterns) ** 2)

            return l1_penalty + cultural_penalty + pattern_penalty

        def constraint(x):
            # Data fidelity: non-negative exactly when ||Ax - y||_2 <= epsilon.
            residual = A @ x - y
            return self.convergence_tolerance - np.linalg.norm(residual)

        # Least-squares initialization (zeros when nothing is observed,
        # since pinv of an empty matrix is not meaningful).
        x0 = np.zeros(n_coeffs)
        if y.size > 0:
            x0 = pinv(A) @ y

        constraints = {'type': 'ineq', 'fun': constraint}

        try:
            result = minimize(
                objective, x0,
                method='SLSQP',
                constraints=constraints,
                options={'maxiter': self.max_iterations, 'ftol': self.convergence_tolerance}
            )

            if result.success:
                return result.x
            logger.warning("Optimization did not converge, using least squares solution")
            return x0

        except Exception as e:
            # Best-effort: fall back to least squares rather than aborting
            # the whole recovery.
            logger.warning(f"Optimization failed: {e}, using least squares solution")
            return x0

    def _compute_reconstruction_error(self,
                                    observed: np.ndarray,
                                    recovered: np.ndarray,
                                    mask: np.ndarray) -> float:
        """
        Return the sum of squared differences over observed entries divided
        by the number of observed entries (0.0 when nothing is observed).
        """
        n_observed = np.sum(mask)
        if n_observed == 0:
            return 0.0

        error = np.sum((observed[mask] - recovered[mask]) ** 2)
        return error / n_observed

    def _compute_coherence(self, A: np.ndarray) -> float:
        """
        Return the mutual coherence of the sensing matrix: the maximum
        absolute off-diagonal entry of the Gram matrix of its normalized
        columns (1.0 for a degenerate empty matrix).
        """
        if A.shape[0] == 0 or A.shape[1] == 0:
            return 1.0

        # Normalize columns; epsilon avoids division by zero for zero atoms.
        A_normalized = A / (np.linalg.norm(A, axis=0, keepdims=True) + 1e-10)

        gram_matrix = np.abs(A_normalized.T @ A_normalized)

        # Self-correlations do not count toward coherence.
        np.fill_diagonal(gram_matrix, 0)

        coherence = np.max(gram_matrix) if gram_matrix.size > 0 else 0.0
        return float(coherence)

    def _compute_cultural_loss(self, reconstructed: np.ndarray,
                              cultural_context: Dict) -> float:
        """
        Penalty for deviating from the cultural context (lower is better).

        Supports an 'expected_cultural_profile' vector (squared deviation)
        and per-trait 'cultural_constraints' of the form
        {trait_idx: {'type': 'high'|'low', 'threshold': float}} penalizing
        one-sided threshold violations.
        """
        if cultural_context is None:
            return 0.0

        loss = 0.0

        # Squared deviation from an expected cultural profile, when its
        # length matches the trait vector.
        if 'expected_cultural_profile' in cultural_context:
            expected = np.array(cultural_context['expected_cultural_profile'])
            if len(expected) == len(reconstructed):
                loss += np.sum((reconstructed - expected) ** 2)

        # One-sided hinge penalties for trait-level constraints.
        if 'cultural_constraints' in cultural_context:
            constraints = cultural_context['cultural_constraints']
            for trait_idx, constraint in constraints.items():
                # Skip constraints referencing traits outside the vector.
                if not 0 <= trait_idx < len(reconstructed):
                    continue
                if constraint['type'] == 'high':
                    loss += max(0, constraint['threshold'] - reconstructed[trait_idx]) ** 2
                elif constraint['type'] == 'low':
                    loss += max(0, reconstructed[trait_idx] - constraint['threshold']) ** 2

        return loss

    def _compute_cultural_preservation(self,
                                     recovered_signal: np.ndarray,
                                     cultural_context: Optional[Dict]) -> float:
        """
        Return a cultural preservation score in (0, 1] (higher is better);
        1.0 when no cultural context was supplied.
        """
        if cultural_context is None:
            return 1.0

        preservation_score = 1.0

        # Exponential decay with the MSE against the expected profile.
        if 'expected_cultural_profile' in cultural_context:
            expected = np.array(cultural_context['expected_cultural_profile'])
            if len(expected) == len(recovered_signal):
                mse = np.mean((recovered_signal - expected) ** 2)
                preservation_score *= np.exp(-mse)

        return min(1.0, preservation_score)

    def _compute_recovery_confidence(self,
                                   recovered_signal: np.ndarray,
                                   observation_mask: np.ndarray,
                                   coherence: float,
                                   sparsity: float) -> np.ndarray:
        """
        Return per-trait confidence values.

        Observed traits get full confidence; unobserved traits are scaled
        down by (1 - coherence) and by the sparsity level, then the whole
        vector is clipped to [0.1, 1.0].
        """
        n_traits = len(recovered_signal)
        confidence = np.ones(n_traits)

        # Unobserved traits: lower coherence and higher sparsity_level are
        # treated as favorable by this heuristic.
        confidence[~observation_mask] *= (1 - coherence)
        confidence[~observation_mask] *= sparsity

        # Minimum confidence floor of 10%.
        return np.clip(confidence, 0.1, 1.0)


class CompressedSensing:
    """
    Main compressed sensing interface for historical personality reconstruction.

    This class provides a high-level interface for applying compressed sensing
    to historical personality data with multiple individuals and time periods.
    """

    def __init__(self,
                 sparsity_penalty: float = 0.1,
                 cultural_preservation_weight: float = 0.05,
                 temporal_consistency_weight: float = 0.02,
                 dictionary_type: str = 'personality'):
        """
        Initialize compressed sensing for historical data.

        Args:
            sparsity_penalty: Weight for L1 sparsity penalty
            cultural_preservation_weight: Weight for cultural preservation
            temporal_consistency_weight: Weight for temporal consistency.
                NOTE(review): currently stored but not forwarded to the
                recovery engine, which uses a fixed pattern weight.
            dictionary_type: Type of sparsity dictionary to use
        """
        self.sparsity_penalty = sparsity_penalty
        self.cultural_preservation_weight = cultural_preservation_weight
        self.temporal_consistency_weight = temporal_consistency_weight
        self.dictionary_type = dictionary_type

        # Per-individual recovery is delegated to the sparse recovery engine.
        self.recovery_engine = HistoricalSparseRecovery(
            sparsity_penalty=sparsity_penalty,
            cultural_preservation_weight=cultural_preservation_weight,
            dictionary_type=dictionary_type
        )

    def reconstruct_historical_personalities(self,
                                           personality_data: Dict[str, np.ndarray],
                                           observation_masks: Dict[str, np.ndarray],
                                           cultural_contexts: Optional[Dict[str, Dict]] = None,
                                           temporal_relationships: Optional[Dict] = None) -> 'Dict[str, SparseRecoveryResult]':
        """
        Reconstruct personalities for multiple historical individuals.

        Args:
            personality_data: Dictionary mapping individual_id -> trait_vector
            observation_masks: Dictionary mapping individual_id -> observation_mask
                (every id in personality_data must also appear here)
            cultural_contexts: Optional cultural context for each individual
            temporal_relationships: Optional temporal relationships between individuals

        Returns:
            Dictionary mapping individual_id -> SparseRecoveryResult
        """
        results = {}

        # Period-level priors used as soft guidance during recovery.
        historical_patterns = self._compute_historical_patterns(
            personality_data, observation_masks, temporal_relationships
        )

        for individual_id, traits in personality_data.items():
            logger.info(f"Reconstructing personality for {individual_id}")

            cultural_context = cultural_contexts.get(individual_id) if cultural_contexts else None
            individual_patterns = historical_patterns.get(individual_id)

            result = self.recovery_engine.recover_missing_traits(
                observed_traits=traits,
                observation_mask=observation_masks[individual_id],
                cultural_context=cultural_context,
                historical_patterns=individual_patterns
            )

            results[individual_id] = result

        logger.info(f"Completed reconstruction for {len(results)} individuals")
        return results

    def _compute_historical_patterns(self,
                                   personality_data: Dict[str, np.ndarray],
                                   observation_masks: Dict[str, np.ndarray],
                                   temporal_relationships: Optional[Dict]) -> Dict[str, np.ndarray]:
        """
        Compute per-individual prior patterns for temporal consistency.

        Individuals are grouped by their 'time_period' entry; each group's
        pattern is the per-trait mean over the group's observed traits, with
        still-missing traits filled by the period's global average.

        Returns:
            Mapping individual_id -> pattern vector; empty when no temporal
            relationships are supplied.
        """
        patterns = {}

        if temporal_relationships is None:
            return patterns

        # Collect observed trait vectors per time period (unobserved -> NaN).
        time_period_averages = {}
        for individual_id, traits in personality_data.items():
            if individual_id in temporal_relationships:
                time_period = temporal_relationships[individual_id].get('time_period')
                if time_period:
                    if time_period not in time_period_averages:
                        time_period_averages[time_period] = []

                    mask = observation_masks[individual_id]
                    # Cast to float so NaN assignment works for int inputs.
                    observed_traits = np.array(traits, dtype=float)
                    observed_traits[~mask] = np.nan
                    time_period_averages[time_period].append(observed_traits)

        # Average within each time period, ignoring NaNs.
        for time_period, trait_lists in time_period_averages.items():
            if trait_lists:
                trait_matrix = np.stack(trait_lists)
                with warnings.catch_warnings():
                    # nanmean warns on all-NaN slices; those are handled below.
                    warnings.simplefilter('ignore', category=RuntimeWarning)
                    time_average = np.nanmean(trait_matrix, axis=0)
                    global_avg = np.nanmean(trait_matrix)

                # BUG FIX: when nothing at all was observed in the period the
                # global average is NaN; fall back to a neutral 0.0 instead of
                # propagating NaN into the recovery.
                if np.isnan(global_avg):
                    global_avg = 0.0
                time_average[np.isnan(time_average)] = global_avg

                # BUG FIX: give each individual an independent copy; the old
                # code aliased one array across all individuals in a period.
                for individual_id, relationship in temporal_relationships.items():
                    if relationship.get('time_period') == time_period:
                        patterns[individual_id] = time_average.copy()

        return patterns

    def evaluate_recovery_quality(self,
                                true_personalities: Dict[str, np.ndarray],
                                recovered_personalities: 'Dict[str, SparseRecoveryResult]',
                                test_masks: Dict[str, np.ndarray]) -> Dict[str, Dict[str, float]]:
        """
        Evaluate compressed sensing recovery quality.

        Args:
            true_personalities: Ground truth personality data
            recovered_personalities: Recovered personality data
            test_masks: Boolean masks selecting held-out entries per individual

        Returns:
            Dictionary with evaluation metrics per individual; individuals
            without recovery results, masks, or test entries are skipped.
        """
        evaluation_results = {}

        for individual_id in true_personalities:
            if individual_id not in recovered_personalities or individual_id not in test_masks:
                continue

            true_traits = true_personalities[individual_id]
            recovered_traits = recovered_personalities[individual_id].recovered_signal
            test_mask = test_masks[individual_id]

            if np.sum(test_mask) == 0:
                continue

            true_test = true_traits[test_mask]
            recovered_test = recovered_traits[test_mask]

            rmse = np.sqrt(np.mean((true_test - recovered_test) ** 2))
            mae = np.mean(np.abs(true_test - recovered_test))

            # Relative error with a +0.1 floor to avoid division by zero.
            relative_errors = np.abs(true_test - recovered_test) / (np.abs(true_test) + 0.1)
            mean_relative_error = np.mean(relative_errors)

            # Accuracy: fraction of predictions within 20% relative error.
            accuracy = np.mean(relative_errors < 0.2)

            # BUG FIX: np.corrcoef yields NaN for constant vectors; report
            # 0.0 instead of propagating NaN into the metrics.
            correlation = 0.0
            if len(true_test) > 1:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore', category=RuntimeWarning)
                    correlation = np.corrcoef(true_test, recovered_test)[0, 1]
                if not np.isfinite(correlation):
                    correlation = 0.0

            evaluation_results[individual_id] = {
                'rmse': float(rmse),
                'mae': float(mae),
                'relative_error': float(mean_relative_error),
                'accuracy': float(accuracy),
                'correlation': float(correlation),
                'test_entries': int(np.sum(test_mask)),
                'sparsity_level': recovered_personalities[individual_id].sparsity_level,
                'cultural_preservation': recovered_personalities[individual_id].cultural_preservation_score
            }

        return evaluation_results