"""
SDM: Complete Sparse Distributed Memory implementation with HDC integration.

This module provides the main SDMEngine class that integrates with HDC:
- High-dimensional address space using binary vectors
- Hamming distance-based activation: activate(x, hi) = 1 when dH(x, hi) ≤ H
- Storage operations: Ci ← Ci + Σ Aij · transform(wj)
- Retrieval operations: z = threshold(Σ Ai · Ci)
- Theoretical capacity of 5% of storage locations
"""

import numpy as np
import time
from typing import Dict, List, Optional, Tuple, Union, Callable
from dataclasses import dataclass, field
from concurrent.futures import ThreadPoolExecutor
import logging
from collections import defaultdict
import threading

# Import the base engine
from .sdm_engine import SDMEngine as BaseSDMEngine

logger = logging.getLogger(__name__)


@dataclass
class SDMConfig:
    """Tunable parameters for the HDC-integrated SDM engine."""
    # Length of the binary address vectors (the high-dimensional space).
    dimension: int = 10000
    # Number of hard storage locations in the memory.
    num_locations: int = 1000000
    # Hamming-distance radius H for location activation.
    activation_radius: int = 451
    # Length of the stored data vectors.
    data_dimension: int = 1000
    # Initial retrieval threshold factor (also the adaptive starting point).
    threshold_factor: float = 0.5
    # Worker count handed to the base engine's thread pool.
    max_workers: int = 8
    # Enables noise correction and pattern-association detection.
    hdc_integration: bool = True
    # Target fraction of locations in use (theoretical 5% capacity).
    storage_efficiency_target: float = 0.05
    # Fraction of a vector's length treated as tolerable noise.
    noise_tolerance: float = 0.1
    # Enables threshold adaptation from storage/retrieval performance.
    dynamic_threshold: bool = True


@dataclass
class SDMPattern:
    """A single pattern held in SDM together with its bookkeeping metadata."""
    # Binary address vector used to locate the pattern.
    address: np.ndarray
    # Data vector stored at that address.
    data: np.ndarray
    # Creation/storage time (seconds since the epoch).
    timestamp: float
    # How many times the pattern has been retrieved.
    access_count: int = 0
    # Confidence score attached to the pattern.
    confidence: float = 1.0
    # Free-form category label.
    pattern_type: str = "generic"
    # Arbitrary extra attributes; fresh dict per instance.
    metadata: Dict = field(default_factory=dict)


class SDMEngine:
    """
    Enhanced Sparse Distributed Memory engine with HDC integration.

    Wraps a base SDM engine and layers on:
    - High-dimensional binary address space
    - Hamming distance-based activation with theoretical 5% efficiency
    - HDC-compatible pattern storage with per-pattern metadata
    - Dynamic threshold adaptation driven by recent success rate / latency
    - Content-addressable retrieval with a 100ms latency target
    - Lightweight positional noise correction on stored/queried vectors
    """

    def __init__(self, config: Optional[SDMConfig] = None):
        """
        Initialize enhanced SDM engine.

        Args:
            config: SDM configuration parameters; defaults to SDMConfig().
        """
        self.config = config or SDMConfig()

        # Underlying SDM that performs the actual store/retrieve work.
        self.base_engine = BaseSDMEngine(
            dimension=self.config.dimension,
            num_locations=self.config.num_locations,
            activation_radius=self.config.activation_radius,
            data_dimension=self.config.data_dimension,
            threshold_factor=self.config.threshold_factor,
            max_workers=self.config.max_workers
        )

        # HDC integration components.
        self.hdc_patterns: Dict[str, SDMPattern] = {}  # pattern_id -> SDMPattern
        self.pattern_associations = defaultdict(list)  # pattern_id -> list of associated ids

        # Dynamic threshold management.
        self.threshold_history: List[float] = []
        self.performance_metrics: List[Dict] = []
        self.adaptive_threshold = self.config.threshold_factor

        # Storage efficiency tracking against the theoretical 5% capacity.
        self.storage_efficiency = 0.0
        self.target_efficiency = self.config.storage_efficiency_target

        # Access-frequency tracking used for hot-pattern promotion and eviction.
        self.access_patterns = defaultdict(int)
        self.hot_patterns = set()

        # Reentrant lock guarding all pattern bookkeeping.
        self.access_lock = threading.RLock()

        # Counters surfaced via get_enhanced_stats().
        self.enhanced_stats = {
            'hdc_patterns_stored': 0,
            'adaptive_threshold_changes': 0,
            'noise_corrections': 0,
            'efficiency_optimizations': 0,
            'hot_pattern_promotions': 0
        }

        # Lazy %-formatting so the message is only built if INFO is enabled.
        logger.info(
            "Enhanced SDM engine initialized with HDC integration, "
            "target efficiency %.1f%%", self.target_efficiency * 100
        )

    def store_pattern(
        self,
        pattern: SDMPattern,
        pattern_id: Optional[str] = None
    ) -> bool:
        """
        Store an HDC pattern with enhanced features.

        Args:
            pattern: SDMPattern to store. Its ``timestamp`` is overwritten
                with the storage time on success.
            pattern_id: Unique identifier for the pattern; auto-generated
                from the current pattern count when omitted.

        Returns:
            True if storage was successful.
        """
        start_time = time.time()

        try:
            with self.access_lock:
                # Generate pattern ID if not provided.
                if pattern_id is None:
                    pattern_id = f"pattern_{len(self.hdc_patterns)}"

                # Apply noise correction if HDC integration is enabled.
                if self.config.hdc_integration:
                    corrected_address = self._apply_noise_correction(pattern.address)
                    corrected_data = self._apply_noise_correction(pattern.data)
                else:
                    corrected_address = pattern.address
                    corrected_data = pattern.data

                # Store in base engine.
                success = self.base_engine.store(corrected_address, corrected_data)

                if success:
                    # Record storage time and pattern metadata.
                    pattern.timestamp = time.time()
                    self.hdc_patterns[pattern_id] = pattern

                    # Update efficiency tracking (may evict cold patterns).
                    self._update_storage_efficiency()

                    # Check for pattern associations.
                    if self.config.hdc_integration:
                        self._detect_pattern_associations(pattern_id, pattern)

                    # Adaptive threshold adjustment.
                    if self.config.dynamic_threshold:
                        self._adapt_threshold_post_storage(success, time.time() - start_time)

                    self.enhanced_stats['hdc_patterns_stored'] += 1

                    storage_time = (time.time() - start_time) * 1000
                    logger.debug("Pattern %s stored in %.2fms", pattern_id, storage_time)

                return success

        except Exception as e:
            logger.error("Pattern storage failed: %s", e)
            return False

    def retrieve_pattern(
        self,
        address: np.ndarray,
        pattern_id: Optional[str] = None,
        similarity_threshold: float = 0.8
    ) -> Optional[SDMPattern]:
        """
        Retrieve an HDC pattern with enhanced features.

        Args:
            address: Address vector for retrieval.
            pattern_id: Specific pattern ID for direct lookup; when present
                and known, content-addressable search is skipped entirely.
            similarity_threshold: Minimum combined similarity for a match.

        Returns:
            Retrieved SDMPattern or None if not found.
        """
        start_time = time.time()

        try:
            with self.access_lock:
                # Fast path: direct lookup when the ID is known.
                if pattern_id and pattern_id in self.hdc_patterns:
                    pattern = self.hdc_patterns[pattern_id]
                    pattern.access_count += 1

                    # Update access bookkeeping.
                    self.access_patterns[pattern_id] += 1
                    self._update_hot_patterns(pattern_id)

                    retrieval_time = (time.time() - start_time) * 1000
                    logger.debug("Direct pattern %s retrieved in %.2fms",
                                 pattern_id, retrieval_time)
                    return pattern

                # Apply noise correction to the query address.
                corrected_address = self._apply_noise_correction(address)

                # Retrieve from base engine.
                retrieved_data = self.base_engine.retrieve(corrected_address)

                if retrieved_data is None:
                    return None

                # Find best matching stored pattern for the retrieved data.
                best_match = self._find_best_pattern_match(
                    corrected_address,
                    retrieved_data,
                    similarity_threshold
                )

                if best_match:
                    matched_id, matched_pattern = best_match
                    matched_pattern.access_count += 1
                    self.access_patterns[matched_id] += 1
                    self._update_hot_patterns(matched_id)

                retrieval_time = (time.time() - start_time) * 1000

                # Ensure <100ms retrieval requirement.
                if retrieval_time > 100:
                    logger.warning("Retrieval time %.2fms exceeds 100ms target",
                                   retrieval_time)

                # Adaptive threshold adjustment.
                if self.config.dynamic_threshold:
                    self._adapt_threshold_post_retrieval(
                        best_match is not None,
                        retrieval_time
                    )

                logger.debug("Pattern retrieved in %.2fms", retrieval_time)
                return best_match[1] if best_match else None

        except Exception as e:
            logger.error("Pattern retrieval failed: %s", e)
            return None

    def _apply_noise_correction(self, vector: np.ndarray) -> np.ndarray:
        """
        Apply positional majority-vote smoothing to a binary vector.

        Each interior element is replaced by the majority of its 3-element
        window (previous, self, next). Corrections are applied in place on
        the copy, so earlier fixes influence later windows. Returns the
        input unchanged when HDC integration or noise tolerance is off.
        """
        if not self.config.hdc_integration or self.config.noise_tolerance <= 0:
            return vector

        corrected = vector.copy()

        # Needs at least 3 elements to form a window; endpoints are untouched.
        if len(corrected) >= 3:
            for i in range(1, len(corrected) - 1):
                neighbors = corrected[i - 1:i + 2]
                if len(set(neighbors)) > 1:  # Only act where there is variation.
                    # Majority over the 3-element window (includes corrected[i]).
                    majority_value = 1 if np.sum(neighbors) > 1.5 else 0

                    # Only correct if the current value is in the minority.
                    if corrected[i] != majority_value:
                        corrected[i] = majority_value
                        self.enhanced_stats['noise_corrections'] += 1

        return corrected

    def _update_storage_efficiency(self):
        """Update storage efficiency metrics and trigger rebalancing."""
        # Current efficiency = stored patterns / theoretical (5%) capacity.
        total_patterns = len(self.hdc_patterns)
        theoretical_capacity = int(self.config.num_locations * self.target_efficiency)

        self.storage_efficiency = total_patterns / max(theoretical_capacity, 1)

        # Trigger optimization if efficiency drifts far from target.
        if self.storage_efficiency > 1.2:  # 20% over target: evict cold patterns.
            self._optimize_storage()
        elif self.storage_efficiency < 0.5:  # 50% under target: promote patterns.
            self._promote_patterns()

    def _optimize_storage(self):
        """Evict the least-accessed patterns when over the efficiency target."""
        try:
            # Rank patterns by access count, coldest first.
            pattern_access_scores = [
                (pattern_id, self.access_patterns.get(pattern_id, 0))
                for pattern_id in self.hdc_patterns.keys()
            ]
            pattern_access_scores.sort(key=lambda x: x[1])

            # Remove the coldest 10% of patterns (at least one).
            remove_count = max(1, len(pattern_access_scores) // 10)

            for i in range(remove_count):
                pattern_id, _ = pattern_access_scores[i]
                if pattern_id in self.hdc_patterns:
                    del self.hdc_patterns[pattern_id]
                    if pattern_id in self.access_patterns:
                        del self.access_patterns[pattern_id]
                    self.hot_patterns.discard(pattern_id)

            self.enhanced_stats['efficiency_optimizations'] += 1
            logger.debug("Removed %d patterns for efficiency optimization", remove_count)

        except Exception as e:
            logger.error("Storage optimization failed: %s", e)

    def _promote_patterns(self):
        """Promote patterns to improve efficiency (placeholder hook)."""
        # This could involve pre-loading commonly used patterns
        # or adjusting storage parameters.
        logger.debug("Pattern promotion triggered")

    def _detect_pattern_associations(self, pattern_id: str, pattern: SDMPattern):
        """Detect and record bidirectional associations between similar patterns."""
        try:
            # Find similar patterns based on address similarity.
            for existing_id, existing_pattern in self.hdc_patterns.items():
                if existing_id == pattern_id:
                    continue

                # Calculate address similarity.
                addr_similarity = self._calculate_vector_similarity(
                    pattern.address,
                    existing_pattern.address
                )

                # If similar enough, create the association in both directions.
                # Membership checks prevent duplicate entries on repeated stores.
                if addr_similarity > 0.7:
                    if existing_id not in self.pattern_associations[pattern_id]:
                        self.pattern_associations[pattern_id].append(existing_id)
                    if pattern_id not in self.pattern_associations[existing_id]:
                        self.pattern_associations[existing_id].append(pattern_id)

        except Exception as e:
            logger.error("Pattern association detection failed: %s", e)

    def _calculate_vector_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Return Hamming similarity in [0, 1]; 0.0 on length mismatch."""
        if len(vec1) != len(vec2):
            return 0.0

        # Hamming similarity for binary vectors: 1 - fraction of differing bits.
        return 1.0 - (np.sum(vec1 != vec2) / len(vec1))

    def _find_best_pattern_match(
        self,
        address: np.ndarray,
        data: np.ndarray,
        threshold: float
    ) -> Optional[Tuple[str, SDMPattern]]:
        """
        Find the stored pattern best matching a retrieved (address, data) pair.

        Returns the (pattern_id, pattern) with the highest combined similarity
        at or above ``threshold``, or None when nothing qualifies.
        """
        best_match = None
        best_similarity = 0.0

        for pattern_id, pattern in self.hdc_patterns.items():
            # Compare against both the stored address and stored data.
            addr_sim = self._calculate_vector_similarity(address, pattern.address)
            data_sim = self._calculate_vector_similarity(data, pattern.data)

            # Combined score weights address agreement over data agreement.
            combined_sim = 0.6 * addr_sim + 0.4 * data_sim

            if combined_sim > best_similarity and combined_sim >= threshold:
                best_similarity = combined_sim
                best_match = (pattern_id, pattern)

        return best_match

    def _update_hot_patterns(self, pattern_id: str):
        """Promote a pattern to the hot set once it crosses the access threshold."""
        access_count = self.access_patterns[pattern_id]
        hot_threshold = 10  # Accesses required for hot-pattern promotion.

        if access_count >= hot_threshold and pattern_id not in self.hot_patterns:
            self.hot_patterns.add(pattern_id)
            self.enhanced_stats['hot_pattern_promotions'] += 1
            logger.debug("Pattern %s promoted to hot pattern", pattern_id)

    def _adapt_threshold_post_storage(self, success: bool, storage_time: float):
        """
        Adapt the retrieval threshold from recent storage performance.

        Args:
            success: Whether the base-engine store succeeded.
            storage_time: Elapsed time in seconds (converted to ms internally).
        """
        try:
            # Record performance.
            self.threshold_history.append(self.adaptive_threshold)
            self.performance_metrics.append({
                'success': success,
                'time': storage_time * 1000,  # Convert to ms.
                'operation': 'storage'
            })

            # Adapt threshold once enough history has accumulated.
            if len(self.performance_metrics) >= 10:
                recent_metrics = self.performance_metrics[-10:]
                success_rate = sum(1 for m in recent_metrics if m['success']) / len(recent_metrics)
                avg_time = np.mean([m['time'] for m in recent_metrics])

                # Loosen on low success; tighten when fast and reliable.
                if success_rate < 0.8:  # Low success rate.
                    self.adaptive_threshold = max(0.1, self.adaptive_threshold - 0.05)
                    self.enhanced_stats['adaptive_threshold_changes'] += 1
                elif success_rate > 0.95 and avg_time < 50:  # High success, fast.
                    self.adaptive_threshold = min(0.9, self.adaptive_threshold + 0.05)
                    self.enhanced_stats['adaptive_threshold_changes'] += 1

                # Propagate to the base engine.
                self.base_engine.threshold_factor = self.adaptive_threshold

            # Limit history size.
            if len(self.threshold_history) > 100:
                self.threshold_history = self.threshold_history[-50:]
                self.performance_metrics = self.performance_metrics[-50:]

        except Exception as e:
            logger.error("Threshold adaptation failed: %s", e)

    def _adapt_threshold_post_retrieval(self, success: bool, retrieval_time: float):
        """
        Adapt the retrieval threshold from retrieval performance.

        Args:
            success: Whether a matching pattern was found.
            retrieval_time: Elapsed retrieval time in milliseconds.
        """
        self.performance_metrics.append({
            'success': success,
            'time': retrieval_time,
            'operation': 'retrieval'
        })

        # Bound history here too (fix: previously only the storage path
        # trimmed, so retrieval-only workloads grew without limit).
        if len(self.performance_metrics) > 100:
            self.performance_metrics = self.performance_metrics[-50:]

        # Enforce the retrieval-time constraint.
        if retrieval_time > 100:  # 100ms target.
            # Lower the threshold to improve speed.
            self.adaptive_threshold = max(0.1, self.adaptive_threshold - 0.02)
            self.base_engine.threshold_factor = self.adaptive_threshold
            self.enhanced_stats['adaptive_threshold_changes'] += 1

    def get_pattern_associations(self, pattern_id: str) -> List[str]:
        """Return associated pattern IDs (a copy, so callers can't mutate state)."""
        return list(self.pattern_associations.get(pattern_id, []))

    def get_hot_patterns(self) -> List[str]:
        """Get list of hot (frequently accessed) patterns."""
        return list(self.hot_patterns)

    def clear_patterns(self):
        """Clear all stored patterns and reset enhanced bookkeeping."""
        self.hdc_patterns.clear()
        self.pattern_associations.clear()
        self.access_patterns.clear()
        self.hot_patterns.clear()
        self.threshold_history.clear()
        self.performance_metrics.clear()
        self.base_engine.clear_memory()
        logger.info("All patterns cleared")

    def get_enhanced_stats(self) -> Dict:
        """Get comprehensive statistics combining base-engine and HDC metrics."""
        base_stats = self.base_engine.get_stats()

        return {
            **base_stats,
            **self.enhanced_stats,
            'hdc_integration_enabled': self.config.hdc_integration,
            'stored_patterns': len(self.hdc_patterns),
            'hot_patterns': len(self.hot_patterns),
            'pattern_associations': len(self.pattern_associations),
            'storage_efficiency': self.storage_efficiency,
            'target_efficiency': self.target_efficiency,
            'adaptive_threshold': self.adaptive_threshold,
            'config': {
                'dimension': self.config.dimension,
                'num_locations': self.config.num_locations,
                'activation_radius': self.config.activation_radius,
                'data_dimension': self.config.data_dimension,
                'noise_tolerance': self.config.noise_tolerance,
                'dynamic_threshold': self.config.dynamic_threshold
            },
            'access_patterns_count': len(self.access_patterns),
            'total_pattern_accesses': sum(self.access_patterns.values()),
            'memory_usage_mb': self._estimate_enhanced_memory_usage()
        }

    def _estimate_enhanced_memory_usage(self) -> float:
        """Estimate memory usage (MB) including HDC bookkeeping overhead."""
        base_usage = self.base_engine._estimate_memory_usage()

        # Rough per-item byte estimates for the HDC-side structures.
        pattern_overhead = len(self.hdc_patterns) * 500
        association_overhead = sum(len(assocs) for assocs in self.pattern_associations.values()) * 50
        access_pattern_overhead = len(self.access_patterns) * 100

        total_overhead = pattern_overhead + association_overhead + access_pattern_overhead
        enhanced_usage = total_overhead / (1024 * 1024)  # Convert to MB.

        return base_usage + enhanced_usage

    def save_enhanced_state(self, filepath: str):
        """
        Save enhanced SDM state including HDC components.

        Writes two files: ``{filepath}_base.npz`` (base engine) and
        ``{filepath}_enhanced.npz`` (pickled HDC bookkeeping).
        """
        # Save base engine state.
        self.base_engine.save_state(f"{filepath}_base.npz")

        # Save enhanced components (stored as pickled object arrays).
        enhanced_state = {
            'hdc_patterns': self.hdc_patterns,
            'pattern_associations': dict(self.pattern_associations),
            'access_patterns': dict(self.access_patterns),
            'hot_patterns': list(self.hot_patterns),
            'threshold_history': self.threshold_history,
            'performance_metrics': self.performance_metrics,
            'adaptive_threshold': self.adaptive_threshold,
            'storage_efficiency': self.storage_efficiency,
            'enhanced_stats': self.enhanced_stats,
            'config': self.config
        }

        np.savez_compressed(f"{filepath}_enhanced.npz", **enhanced_state)
        logger.info("Enhanced SDM state saved to %s", filepath)

    def load_enhanced_state(self, filepath: str):
        """
        Load enhanced SDM state including HDC components.

        Expects the file pair written by save_enhanced_state(). Uses
        allow_pickle=True because the enhanced state holds Python objects.
        """
        # Load base engine state.
        self.base_engine.load_state(f"{filepath}_base.npz")

        # Load enhanced components.
        data = np.load(f"{filepath}_enhanced.npz", allow_pickle=True)

        # .item() unwraps the 0-d object arrays produced by np.savez.
        self.hdc_patterns = data['hdc_patterns'].item()
        self.pattern_associations = defaultdict(list, data['pattern_associations'].item())
        self.access_patterns = defaultdict(int, data['access_patterns'].item())
        self.hot_patterns = set(data['hot_patterns'])
        self.threshold_history = data['threshold_history'].tolist()
        self.performance_metrics = data['performance_metrics'].tolist()
        self.adaptive_threshold = float(data['adaptive_threshold'])
        self.storage_efficiency = float(data['storage_efficiency'])
        self.enhanced_stats = data['enhanced_stats'].item()
        self.config = data['config'].item()

        logger.info("Enhanced SDM state loaded from %s", filepath)

    # Delegate base methods to maintain compatibility.
    def store(self, address: np.ndarray, data: np.ndarray) -> bool:
        """Store data using base engine (for compatibility)."""
        return self.base_engine.store(address, data)

    def retrieve(self, address: np.ndarray) -> Optional[np.ndarray]:
        """Retrieve data using base engine (for compatibility)."""
        return self.base_engine.retrieve(address)

    def batch_store(self, addresses: List[np.ndarray], data_list: List[np.ndarray]) -> List[bool]:
        """Batch store using base engine."""
        return self.base_engine.batch_store(addresses, data_list)

    def batch_retrieve(self, addresses: List[np.ndarray]) -> List[Optional[np.ndarray]]:
        """Batch retrieve using base engine."""
        return self.base_engine.batch_retrieve(addresses)