"""
SearcHD Engine: Memory-centric hyperdimensional computing with stochastic training.

This module implements the SearcHD framework for efficient personality storage and retrieval:
- Binary hypervector training: h_class ∈ {-1, +1}^D
- In-memory associative search: class = argmax_i hamming_similarity(q, hi)
- Competitive learning mechanisms for memory optimization
- Target performance: 178.7× training speed improvement
"""

import numpy as np
import time
from typing import Dict, List, Optional, Tuple, Union
from dataclasses import dataclass, field
from concurrent.futures import ThreadPoolExecutor
import logging
from collections import defaultdict

logger = logging.getLogger(__name__)


@dataclass
class SearcHDConfig:
    """Configuration for SearcHD engine."""
    dimension: int = 10000  # Hypervector dimensionality D for all class prototypes
    num_classes: int = 50  # Support for >50 personality types
    learning_rate: float = 0.1  # Attraction strength applied to the target class per sample
    competitive_factor: float = 0.05  # Inhibition strength applied to similar competing classes
    memory_consolidation_threshold: int = 100  # Samples processed per train() call before consolidation runs
    stochastic_sampling_rate: float = 0.8  # Fraction of samples used when a dataset exceeds 1000 samples
    max_workers: int = 8  # NOTE(review): not referenced anywhere in this module — confirm intended use


@dataclass
class TrainingSample:
    """Training sample for SearcHD."""
    data: np.ndarray  # Raw feature vector; encoded into a binary hypervector during training
    class_id: int  # Target class index; must lie in [0, num_classes)
    weight: float = 1.0  # Scales both attraction and inhibition strength for this sample
    timestamp: float = 0.0  # Creation time supplied by the caller; not read by this module
    metadata: Dict = field(default_factory=dict)  # Free-form caller-defined annotations


@dataclass
class SearchResult:
    """Result from SearcHD associative search."""
    class_id: int  # Matched class index
    confidence: float  # Hamming similarity scaled by the class's memory strength
    hamming_similarity: float  # Raw Hamming similarity in [0, 1]
    search_time_ms: float  # Wall-clock duration of the whole search call (same for every result)
    activated_memories: int  # Memories activated for this match (one per class in this engine)
    metadata: Dict = field(default_factory=dict)  # Includes rank, memory_strength, training_samples


class SearcHDEngine:
    """
    SearcHD: Memory-centric hyperdimensional computing engine.
    
    Features:
    - Binary hypervector training for personality classes
    - In-memory associative search with Hamming similarity
    - Competitive learning with memory optimization
    - Stochastic training for improved convergence
    - Support for >50 concurrent personality types
    """
    
    def __init__(self, config: SearcHDConfig = None):
        """
        Initialize SearcHD engine.

        Args:
            config: SearcHD configuration parameters; defaults are used when None.
        """
        self.config = config if config is not None else SearcHDConfig()

        # Class prototypes: one binary hypervector h_class ∈ {-1, +1}^D per class.
        self.class_hypervectors = self._initialize_class_hypervectors()

        # Per-class bookkeeping, used later for confidence weighting and consolidation.
        self.class_stats = {
            class_id: {
                'training_samples': 0,
                'last_update': 0.0,
                'confidence_history': [],
                'memory_strength': 1.0
            }
            for class_id in range(self.config.num_classes)
        }

        # Shared RNG for stochastic sampling (fixed seed for reproducibility).
        self.rng = np.random.RandomState(42)

        # Competitive-learning state: pairwise competition strengths and a
        # counter of how often each class won a search.
        class_count = self.config.num_classes
        self.competition_matrix = np.zeros((class_count, class_count))
        self.class_activation_counts = np.zeros(class_count)

        # Aggregate performance counters; times are exponential moving averages.
        self.training_stats = {
            'total_training_samples': 0,
            'total_search_queries': 0,
            'average_training_time_ms': 0.0,
            'average_search_time_ms': 0.0,
            'convergence_iterations': 0,
            'memory_consolidations': 0
        }

        logger.info(f"SearcHD engine initialized with {self.config.num_classes} classes, "
                   f"dimension {self.config.dimension}")
    
    def _initialize_class_hypervectors(self) -> np.ndarray:
        """Initialize binary hypervectors for all classes."""
        # Generate random binary hypervectors {-1, +1}^D
        rng = np.random.RandomState(42)  # Reproducible initialization
        hypervectors = rng.choice([-1, 1], size=(self.config.num_classes, self.config.dimension))
        
        # Ensure orthogonality between classes for better separation
        self._orthogonalize_hypervectors(hypervectors)
        
        return hypervectors.astype(np.int8)  # Memory efficient storage
    
    def _orthogonalize_hypervectors(self, hypervectors: np.ndarray):
        """Apply orthogonalization to improve class separation."""
        rng = np.random.RandomState(42)  # Local RNG for orthogonalization
        for i in range(len(hypervectors)):
            for j in range(i + 1, len(hypervectors)):
                # Measure similarity
                similarity = np.dot(hypervectors[i], hypervectors[j]) / self.config.dimension
                
                # If too similar, adjust to increase separation
                if abs(similarity) > 0.1:  # Threshold for orthogonality
                    # Flip random bits in one vector to reduce similarity
                    flip_count = int(abs(similarity) * self.config.dimension * 0.5)
                    flip_indices = rng.choice(
                        self.config.dimension, 
                        size=flip_count, 
                        replace=False
                    )
                    hypervectors[j][flip_indices] *= -1
    
    def train(self, samples: List[TrainingSample]) -> Dict:
        """
        Train SearcHD with binary hypervector training.

        Args:
            samples: List of training samples

        Returns:
            Training statistics and performance metrics; on failure a dict
            with a single 'error' key.
        """
        start_time = time.time()
        
        try:
            training_metrics = {
                'samples_processed': 0,
                'convergence_achieved': False,
                'final_accuracy': 0.0,
                'training_speed_improvement': 0.0
            }
            
            # Stochastic sampling for improved training efficiency.
            # Sample *indices* rather than the sample objects (np.random.choice
            # on a list of dataclasses builds a fragile object array), and
            # always work on a copy so the caller's list is never mutated by
            # the per-epoch shuffle below.
            if len(samples) > 1000:  # Use sampling for large datasets
                sample_size = int(len(samples) * self.config.stochastic_sampling_rate)
                chosen_indices = self.rng.choice(len(samples), size=sample_size, replace=False)
                selected_samples = [samples[i] for i in chosen_indices]
            else:
                selected_samples = list(samples)
            
            # Batch training with competitive learning
            for epoch in range(10):  # Maximum epochs
                epoch_start = time.time()
                correct_predictions = 0
                total_predictions = 0
                
                # Shuffle samples for stochastic training
                self.rng.shuffle(selected_samples)
                
                for sample in selected_samples:
                    # Validate sample
                    if not self._validate_sample(sample):
                        continue
                    
                    # Convert data to binary hypervector space
                    sample_hypervector = self._encode_to_hypervector(sample.data)
                    
                    # Competitive learning update
                    self._competitive_update(sample_hypervector, sample.class_id, sample.weight)
                    
                    # Test current accuracy
                    predicted_class = self._predict_class(sample_hypervector)
                    if predicted_class == sample.class_id:
                        correct_predictions += 1
                    total_predictions += 1
                    
                    training_metrics['samples_processed'] += 1
                
                # Calculate epoch accuracy
                epoch_accuracy = correct_predictions / max(total_predictions, 1)
                epoch_time = (time.time() - epoch_start) * 1000
                
                # Record progress every epoch so final_accuracy reflects the
                # last epoch even when the 0.95 convergence bar is never met
                # (previously it stayed 0.0 unless training converged), and
                # keep the engine-wide convergence_iterations counter current.
                training_metrics['final_accuracy'] = epoch_accuracy
                self.training_stats['convergence_iterations'] = epoch + 1
                
                logger.debug(f"Epoch {epoch}: accuracy={epoch_accuracy:.3f}, "
                           f"time={epoch_time:.2f}ms")
                
                # Check convergence
                if epoch_accuracy > 0.95:
                    training_metrics['convergence_achieved'] = True
                    break
            
            # Update statistics
            training_time = (time.time() - start_time) * 1000
            self._update_training_stats(training_time, len(selected_samples))
            
            # Calculate training speed improvement (target: 178.7×)
            baseline_time = len(selected_samples) * 10  # Assumed baseline: 10ms per sample
            speed_improvement = baseline_time / max(training_time, 1.0)
            training_metrics['training_speed_improvement'] = speed_improvement
            
            # Memory consolidation if needed
            if training_metrics['samples_processed'] >= self.config.memory_consolidation_threshold:
                self._perform_memory_consolidation()
            
            logger.info(f"Training completed in {training_time:.2f}ms, "
                       f"accuracy={training_metrics['final_accuracy']:.3f}, "
                       f"speed improvement={speed_improvement:.1f}×")
            
            return training_metrics
            
        except Exception as e:
            logger.error(f"SearcHD training failed: {e}")
            return {'error': str(e)}
    
    def _validate_sample(self, sample: TrainingSample) -> bool:
        """Validate training sample."""
        if sample.class_id < 0 or sample.class_id >= self.config.num_classes:
            logger.warning(f"Invalid class_id: {sample.class_id}")
            return False
        
        if len(sample.data) == 0:
            logger.warning("Empty sample data")
            return False
        
        return True
    
    def _encode_to_hypervector(self, data: np.ndarray) -> np.ndarray:
        """
        Encode input data to hyperdimensional space.
        
        Args:
            data: Input data vector
            
        Returns:
            Binary hypervector in {-1, +1}^D space
        """
        # Simple encoding: hash-based projection
        if len(data) == self.config.dimension:
            # Data already in hyperdimensional space
            return np.where(data > 0, 1, -1).astype(np.int8)
        
        # Project to hyperdimensional space using random projection
        hypervector = np.zeros(self.config.dimension, dtype=np.int8)
        
        # Use hash of data to determine hypervector pattern
        data_hash = hash(data.tobytes())
        self.rng.seed(data_hash % (2**31))  # Reproducible encoding
        
        # Generate hypervector based on data features
        for i, value in enumerate(data):
            # Map each data dimension to multiple hyperdimensions
            start_idx = (i * 97) % self.config.dimension  # Prime number for distribution
            end_idx = ((i + 1) * 97) % self.config.dimension
            
            if start_idx > end_idx:
                start_idx, end_idx = end_idx, start_idx
            
            # Set hypervector values based on data value
            if value > 0:
                hypervector[start_idx:end_idx] = 1
            else:
                hypervector[start_idx:end_idx] = -1
        
        return hypervector
    
    def _competitive_update(self, sample_hypervector: np.ndarray, class_id: int, weight: float):
        """
        Perform competitive learning update on class hypervectors.
        
        Args:
            sample_hypervector: Encoded sample hypervector
            class_id: Target class ID
            weight: Sample weight for learning
        """
        # Update target class (attraction)
        similarity = np.dot(self.class_hypervectors[class_id], sample_hypervector)
        learning_strength = self.config.learning_rate * weight
        
        # Hebbian-like update rule
        update = learning_strength * sample_hypervector
        self.class_hypervectors[class_id] = np.clip(
            self.class_hypervectors[class_id] + update,
            -1, 1
        ).astype(np.int8)
        
        # Competitive inhibition for other classes
        competitive_strength = self.config.competitive_factor * weight
        
        for other_class in range(self.config.num_classes):
            if other_class != class_id:
                # Calculate competition based on similarity
                other_similarity = np.dot(self.class_hypervectors[other_class], sample_hypervector)
                
                if other_similarity > 0:  # Only inhibit if similar
                    inhibition = competitive_strength * sample_hypervector
                    self.class_hypervectors[other_class] = np.clip(
                        self.class_hypervectors[other_class] - inhibition,
                        -1, 1
                    ).astype(np.int8)
                    
                    # Update competition matrix
                    self.competition_matrix[class_id][other_class] += competitive_strength
        
        # Update class statistics
        self.class_stats[class_id]['training_samples'] += 1
        self.class_stats[class_id]['last_update'] = time.time()
    
    def search(self, query: np.ndarray, top_k: int = 5) -> List[SearchResult]:
        """
        Perform in-memory associative search.

        Args:
            query: Query vector for search
            top_k: Number of top results to return

        Returns:
            List of search results ranked by similarity
        """
        start_time = time.time()

        try:
            # Project the query into the binary hypervector space.
            query_hypervector = self._encode_to_hypervector(query)

            # Score every class: class = argmax_i hamming_similarity(q, hi),
            # with confidence additionally weighted by memory strength.
            scored = []
            for candidate in range(self.config.num_classes):
                similarity = self._hamming_similarity(
                    query_hypervector,
                    self.class_hypervectors[candidate]
                )
                strength = self.class_stats[candidate]['memory_strength']
                scored.append((candidate, similarity * strength, similarity))

            # Rank best-first; the stable sort keeps class order on ties.
            ranked = sorted(scored, key=lambda entry: entry[1], reverse=True)

            search_time = (time.time() - start_time) * 1000

            results = [
                SearchResult(
                    class_id=candidate,
                    confidence=confidence,
                    hamming_similarity=similarity,
                    search_time_ms=search_time,
                    activated_memories=1,  # Single memory per class
                    metadata={
                        'rank': rank + 1,
                        'memory_strength': self.class_stats[candidate]['memory_strength'],
                        'training_samples': self.class_stats[candidate]['training_samples']
                    }
                )
                for rank, (candidate, confidence, similarity) in enumerate(ranked[:top_k])
            ]

            # Update statistics
            self.training_stats['total_search_queries'] += 1
            self._update_search_stats(search_time)

            # Credit the winning class's activation count.
            if results:
                self.class_activation_counts[results[0].class_id] += 1

            logger.debug(f"Search completed in {search_time:.2f}ms, "
                        f"top result: class {results[0].class_id if results else 'none'}")

            return results

        except Exception as e:
            logger.error(f"SearcHD search failed: {e}")
            return []
    
    def _hamming_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Calculate Hamming similarity between binary hypervectors."""
        if len(vec1) != len(vec2):
            return 0.0
        
        # For binary vectors in {-1, +1}, Hamming similarity = (D + dot_product) / (2 * D)
        dot_product = np.dot(vec1, vec2)
        hamming_sim = (self.config.dimension + dot_product) / (2.0 * self.config.dimension)
        
        return float(hamming_sim)
    
    def _predict_class(self, hypervector: np.ndarray) -> int:
        """Predict class for a hypervector."""
        similarities = [
            self._hamming_similarity(hypervector, self.class_hypervectors[i])
            for i in range(self.config.num_classes)
        ]
        return int(np.argmax(similarities))
    
    def _perform_memory_consolidation(self):
        """Refresh memory strengths and separate heavily competing classes."""
        logger.info("Starting memory consolidation")

        try:
            # Refresh each class's memory strength from recency and frequency.
            for class_id in range(self.config.num_classes):
                stats = self.class_stats[class_id]

                # Linear decay over 24h since the last update, floored at 0.1
                # so a memory never disappears entirely.
                recency = max(0.1, 1.0 - (time.time() - stats['last_update']) / 86400)
                # Up to a 2× boost for heavily trained classes.
                frequency = min(2.0, 1.0 + stats['training_samples'] / 1000)

                stats['memory_strength'] = recency * frequency

            # Actively separate class pairs that compete too often.
            high_competition = 10
            for winner in range(self.config.num_classes):
                for rival in range(self.config.num_classes):
                    if winner != rival and self.competition_matrix[winner][rival] > high_competition:
                        self._increase_class_separation(winner, rival)

            self.training_stats['memory_consolidations'] += 1
            logger.info("Memory consolidation completed")

        except Exception as e:
            logger.error(f"Memory consolidation failed: {e}")
    
    def _increase_class_separation(self, class_a: int, class_b: int):
        """Increase separation between two competing classes."""
        # Find dimensions where classes are similar and make them different
        similarity_mask = self.class_hypervectors[class_a] == self.class_hypervectors[class_b]
        similar_dims = np.where(similarity_mask)[0]
        
        if len(similar_dims) > 10:  # Only modify if many similar dimensions
            # Randomly flip some similar dimensions in one class
            flip_count = min(len(similar_dims) // 4, 50)  # Flip up to 25% or 50 dimensions
            flip_indices = self.rng.choice(similar_dims, size=flip_count, replace=False)
            
            self.class_hypervectors[class_b][flip_indices] *= -1
    
    def _update_training_stats(self, training_time: float, num_samples: int):
        """Update training performance statistics."""
        self.training_stats['total_training_samples'] += num_samples
        
        # Update average training time
        current_avg = self.training_stats['average_training_time_ms']
        if current_avg == 0:
            self.training_stats['average_training_time_ms'] = training_time
        else:
            # Exponential moving average
            self.training_stats['average_training_time_ms'] = (
                0.9 * current_avg + 0.1 * training_time
            )
    
    def _update_search_stats(self, search_time: float):
        """Update search performance statistics."""
        # Update average search time
        current_avg = self.training_stats['average_search_time_ms']
        if current_avg == 0:
            self.training_stats['average_search_time_ms'] = search_time
        else:
            # Exponential moving average
            self.training_stats['average_search_time_ms'] = (
                0.9 * current_avg + 0.1 * search_time
            )
    
    def add_new_class(self, class_name: str) -> int:
        """
        Dynamically add a new personality class.

        Args:
            class_name: Name of the new class

        Returns:
            New class ID
        """
        new_class_id = self.config.num_classes

        # Append a fresh random prototype row to the hypervector matrix.
        prototype = self.rng.choice([-1, 1], size=self.config.dimension).astype(np.int8)
        self.class_hypervectors = np.vstack([self.class_hypervectors, prototype.reshape(1, -1)])

        # Fresh bookkeeping entry; dynamically added classes also record their name.
        self.class_stats[new_class_id] = {
            'training_samples': 0,
            'last_update': time.time(),
            'confidence_history': [],
            'memory_strength': 1.0,
            'class_name': class_name
        }

        # Grow the competition matrix by one row and column, preserving history.
        grown = np.zeros((new_class_id + 1, new_class_id + 1))
        grown[:new_class_id, :new_class_id] = self.competition_matrix
        self.competition_matrix = grown

        self.class_activation_counts = np.append(self.class_activation_counts, 0)

        # Only now advertise the larger class count.
        self.config.num_classes = new_class_id + 1

        logger.info(f"Added new class '{class_name}' with ID {new_class_id}")
        return new_class_id
    
    def get_class_hypervector(self, class_id: int) -> Optional[np.ndarray]:
        """Get hypervector for a specific class."""
        if 0 <= class_id < self.config.num_classes:
            return self.class_hypervectors[class_id].copy()
        return None
    
    def get_stats(self) -> Dict:
        """Get comprehensive SearcHD statistics."""
        # Calculate overall performance metrics
        total_samples = self.training_stats['total_training_samples']
        total_queries = self.training_stats['total_search_queries']
        avg_training_time = self.training_stats['average_training_time_ms']
        avg_search_time = self.training_stats['average_search_time_ms']
        
        # Calculate memory efficiency
        active_classes = sum(1 for stats in self.class_stats.values() if stats['training_samples'] > 0)
        memory_efficiency = active_classes / self.config.num_classes if self.config.num_classes > 0 else 0
        
        return {
            'training_stats': self.training_stats.copy(),
            'num_classes': self.config.num_classes,
            'active_classes': active_classes,
            'memory_efficiency': memory_efficiency,
            'dimension': self.config.dimension,
            'total_samples_trained': total_samples,
            'total_search_queries': total_queries,
            'average_training_time_ms': avg_training_time,
            'average_search_time_ms': avg_search_time,
            'memory_consolidations': self.training_stats['memory_consolidations'],
            'class_activation_distribution': self.class_activation_counts.tolist(),
            'estimated_memory_usage_mb': self._estimate_memory_usage()
        }
    
    def _estimate_memory_usage(self) -> float:
        """Estimate memory usage in megabytes."""
        hypervector_size = self.class_hypervectors.nbytes
        competition_matrix_size = self.competition_matrix.nbytes
        activation_counts_size = self.class_activation_counts.nbytes
        
        # Add overhead for other data structures
        overhead = len(self.class_stats) * 1000  # Rough estimate
        
        total_bytes = hypervector_size + competition_matrix_size + activation_counts_size + overhead
        return total_bytes / (1024 * 1024)
    
    def save_model(self, filepath: str):
        """Save SearcHD model to file.

        Persists the full engine state into numpy's compressed .npz container.
        Note that np.savez_compressed appends a '.npz' suffix to `filepath`
        when it is missing.
        """
        payload = {
            'class_hypervectors': self.class_hypervectors,
            'class_stats': self.class_stats,
            'competition_matrix': self.competition_matrix,
            'class_activation_counts': self.class_activation_counts,
            'config': self.config,
            'training_stats': self.training_stats
        }
        np.savez_compressed(filepath, **payload)
        logger.info(f"SearcHD model saved to {filepath}")
    
    def load_model(self, filepath: str):
        """Load SearcHD model from file.

        Accepts either the exact path or the path save_model actually wrote:
        np.savez_compressed appends '.npz' when the suffix is missing, so a
        round-trip with the same bare path used to raise FileNotFoundError.

        Args:
            filepath: Path passed to save_model (with or without '.npz').

        Raises:
            FileNotFoundError: If neither `filepath` nor `filepath + '.npz'` exists.
        """
        try:
            data = np.load(filepath, allow_pickle=True)
        except FileNotFoundError:
            if filepath.endswith('.npz'):
                raise
            data = np.load(filepath + '.npz', allow_pickle=True)

        # Context manager closes the underlying archive once arrays are read
        # (np.load on an .npz keeps the file handle open otherwise).
        with data:
            self.class_hypervectors = data['class_hypervectors']
            self.class_stats = data['class_stats'].item()
            self.competition_matrix = data['competition_matrix']
            self.class_activation_counts = data['class_activation_counts']
            self.config = data['config'].item()
            self.training_stats = data['training_stats'].item()

        logger.info(f"SearcHD model loaded from {filepath}")