"""
Associative Retrieval: Advanced retrieval system for SDM.

This module implements:
- Similarity-based pattern matching
- Threshold-based activation control
- Parallel memory trace activation
- Noise-robust retrieval algorithms
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Union, Callable
import time
import logging
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, as_completed
from scipy.spatial.distance import hamming, cosine
from sklearn.metrics.pairwise import pairwise_distances
from .sdm_engine import SDMEngine
from .hierarchy import MemoryHierarchy, MemoryLayer

logger = logging.getLogger(__name__)


@dataclass
class RetrievalResult:
    """Container for retrieval results with metadata.

    One instance describes a single hit from one memory layer (or a
    single error placeholder when retrieval fails entirely).
    """
    # Retrieved content; None when retrieval failed or nothing matched.
    data: Optional[np.ndarray]
    # Similarity-derived confidence score used for ranking (higher is better).
    confidence: float
    # Number of memory locations activated while servicing the query.
    activation_count: int
    # Wall-clock time spent on this retrieval, in milliseconds.
    retrieval_time_ms: float
    # Per-metric similarity scores, keyed by metric name (e.g. 'hamming').
    similarity_scores: Optional[Dict] = None
    # Memory layer the result came from; None for error placeholders.
    layer: Optional[MemoryLayer] = None
    # Human-readable failure description; None on success.
    error_message: Optional[str] = None


@dataclass
class AssociativeQuery:
    """Query specification for associative retrieval.

    Bundles the probe address together with filtering and execution
    preferences consumed by :meth:`AssociativeRetrieval.retrieve`.
    """
    # Address vector to match against stored patterns.
    address: np.ndarray
    # Minimum confidence a result must reach to be returned.
    similarity_threshold: float = 0.7
    # Maximum number of ranked results to return.
    max_results: int = 10
    # Expected fraction of noisy bits tolerated in the address.
    noise_tolerance: float = 0.3
    # Layers to search, in order; None means the caller's default layer set.
    layer_preference: Optional[List[MemoryLayer]] = None
    # Whether to fan retrieval out across layers in parallel.
    use_parallel: bool = True
    # Arbitrary caller-supplied metadata. Annotation fixed to Optional[Dict]:
    # the default is None, so the previous `Dict = None` mistyped the field.
    metadata: Optional[Dict] = None

class AssociativeRetrieval:
    """
    Advanced associative retrieval system for SDM.

    Features:
    - Multiple similarity metrics (Hamming, cosine, Jaccard)
    - Noise-robust retrieval with error correction
    - Parallel memory trace activation
    - Dynamic threshold adaptation
    - Multi-layer associative search

    All queries flow through :meth:`retrieve`; the fuzzy/batch entry points
    are conveniences layered on top of it.
    """
    
    def __init__(
        self,
        hierarchy: MemoryHierarchy,
        default_similarity_metric: str = 'hamming',
        parallel_workers: int = 8,
        noise_compensation: bool = True,
        adaptive_thresholds: bool = True
    ) -> None:
        """
        Initialize associative retrieval system.
        
        Args:
            hierarchy: Memory hierarchy to search
            default_similarity_metric: Default similarity metric ('hamming', 'cosine', 'jaccard')
            parallel_workers: Number of parallel workers for retrieval
            noise_compensation: Enable noise compensation algorithms
            adaptive_thresholds: Enable adaptive threshold adjustment
        """
        self.hierarchy = hierarchy
        self.default_similarity_metric = default_similarity_metric
        self.parallel_workers = parallel_workers
        self.noise_compensation = noise_compensation
        self.adaptive_thresholds = adaptive_thresholds
        
        # Similarity metric functions, keyed by the names accepted in
        # queries; unknown names fall back to 'hamming' (see
        # _calculate_similarity).
        self.similarity_metrics = {
            'hamming': self._hamming_similarity,
            'cosine': self._cosine_similarity,
            'jaccard': self._jaccard_similarity,
            'hybrid': self._hybrid_similarity
        }
        
        # Performance tracking.
        # NOTE(review): 'noise_corrections' is never incremented anywhere in
        # this module — confirm whether another component updates it.
        self.retrieval_stats = {
            'total_queries': 0,
            'successful_retrievals': 0,
            'average_retrieval_time_ms': 0.0,
            'noise_corrections': 0,
            'threshold_adaptations': 0
        }
        
        # Adaptive threshold history (bounded to the last 100 entries by
        # _adapt_thresholds).
        self.threshold_history: List[float] = []
        self.performance_history: List[float] = []
        
        logger.info(f"Associative retrieval initialized with {parallel_workers} workers, "
                   f"metric: {default_similarity_metric}")
    
    def retrieve(self, query: AssociativeQuery) -> List[RetrievalResult]:
        """
        Perform associative retrieval based on query.
        
        Args:
            query: Associative query specification
            
        Returns:
            List of retrieval results ranked by relevance (confidence,
            descending), truncated to ``query.max_results``. On internal
            failure, a single-element list containing an error placeholder
            (``data=None``, ``error_message`` set) is returned instead of
            raising.
        """
        start_time = time.time()
        
        try:
            self.retrieval_stats['total_queries'] += 1
            
            # Prepare query address (apply noise compensation if enabled)
            processed_address = self._preprocess_query(query.address)
            
            # Determine layers to search; callers may narrow this via
            # layer_preference, otherwise all three layers are searched.
            search_layers = query.layer_preference or [
                MemoryLayer.PERSONALITY, 
                MemoryLayer.PATTERN, 
                MemoryLayer.EVENT
            ]
            
            # Perform retrieval across layers
            if query.use_parallel:
                results = self._parallel_retrieval(processed_address, search_layers, query)
            else:
                results = self._sequential_retrieval(processed_address, search_layers, query)
            
            # Filter and rank results
            filtered_results = self._filter_and_rank_results(results, query)
            
            # Update performance statistics
            retrieval_time = (time.time() - start_time) * 1000
            self._update_retrieval_stats(retrieval_time, len(filtered_results) > 0)
            
            # Adapt thresholds if enabled
            if self.adaptive_thresholds:
                self._adapt_thresholds(query, filtered_results, retrieval_time)
            
            logger.debug(f"Associative retrieval completed in {retrieval_time:.2f}ms, "
                        f"found {len(filtered_results)} results")
            
            return filtered_results
            
        except Exception as e:
            # Deliberate catch-all: retrieval is best-effort, so errors are
            # surfaced as an error-placeholder result rather than raised.
            logger.error(f"Associative retrieval failed: {e}")
            return [RetrievalResult(
                data=None,
                confidence=0.0,
                activation_count=0,
                retrieval_time_ms=(time.time() - start_time) * 1000,
                error_message=str(e)
            )]
    
    def _preprocess_query(self, address: np.ndarray) -> np.ndarray:
        """Preprocess query address for noise compensation.

        Returns a copy of ``address``; the caller's array is never mutated.
        With noise compensation enabled, applies a simple 3-wide majority
        filter: any bit that disagrees with both neighbors is treated as a
        likely noise flip and replaced by the rounded neighborhood mean.

        NOTE(review): the scan reads from the array it is modifying, so a
        correction at position i-1 influences the comparison at position i —
        confirm this cascading behavior is intended.
        """
        if not self.noise_compensation:
            return address.copy()
        
        # Apply noise reduction techniques
        processed = address.copy()
        
        # Simple noise reduction: median filtering for binary data
        # (In practice, this would be more sophisticated)
        if len(processed) >= 3:
            for i in range(1, len(processed) - 1):
                neighbors = processed[i-1:i+2]
                # If middle bit is different from both neighbors, it might be noise
                if processed[i] != processed[i-1] and processed[i] != processed[i+1]:
                    # Choose the more common value
                    processed[i] = int(np.round(np.mean(neighbors)))
        
        return processed
    
    def _parallel_retrieval(
        self, 
        address: np.ndarray, 
        search_layers: List[MemoryLayer],
        query: AssociativeQuery
    ) -> List[RetrievalResult]:
        """Perform parallel retrieval across multiple layers.

        One task per layer is submitted to a thread pool; results are
        gathered in completion order, so the returned list is unordered
        (ranking happens later in _filter_and_rank_results). A failing
        layer is logged and skipped rather than failing the whole query.
        """
        results = []
        
        with ThreadPoolExecutor(max_workers=self.parallel_workers) as executor:
            # Submit retrieval tasks for each layer
            future_to_layer = {}
            for layer in search_layers:
                future = executor.submit(self._retrieve_from_layer, address, layer, query)
                future_to_layer[future] = layer
            
            # Collect results as they complete
            for future in as_completed(future_to_layer):
                layer = future_to_layer[future]
                try:
                    layer_results = future.result()
                    results.extend(layer_results)
                except Exception as e:
                    logger.error(f"Parallel retrieval from {layer.value} failed: {e}")
        
        return results
    
    def _sequential_retrieval(
        self,
        address: np.ndarray,
        search_layers: List[MemoryLayer], 
        query: AssociativeQuery
    ) -> List[RetrievalResult]:
        """Perform sequential retrieval across layers.

        Same contract as _parallel_retrieval, but layers are visited one at
        a time in the given order; per-layer failures are logged and skipped.
        """
        results = []
        
        for layer in search_layers:
            try:
                layer_results = self._retrieve_from_layer(address, layer, query)
                results.extend(layer_results)
            except Exception as e:
                logger.error(f"Sequential retrieval from {layer.value} failed: {e}")
        
        return results
    
    def _retrieve_from_layer(
        self, 
        address: np.ndarray, 
        layer: MemoryLayer,
        query: AssociativeQuery
    ) -> List[RetrievalResult]:
        """Retrieve from a specific memory layer.

        Returns a list with zero results (miss or error) or exactly one
        result. Confidence is the similarity between the *projected* query
        address and the retrieved data, under the instance's default metric.

        NOTE(review): this reaches into hierarchy/engine privates
        (_project_address, _find_activated_locations) — consider exposing a
        public API on those classes instead.
        """
        start_time = time.time()
        
        try:
            # Project address to layer dimensions
            projected_address = self.hierarchy._project_address(address, layer)
            
            # Retrieve from layer
            retrieved_data = self.hierarchy.retrieve_from_layer(layer, projected_address)
            
            if retrieved_data is None:
                return []
            
            # Calculate similarity and confidence
            similarity_score = self._calculate_similarity(
                projected_address, 
                retrieved_data, 
                self.default_similarity_metric
            )
            
            # Count activated locations (approximation)
            layer_engine = self.hierarchy.layers[layer]
            activated_locations = layer_engine._find_activated_locations(projected_address)
            activation_count = len(activated_locations)
            
            retrieval_time = (time.time() - start_time) * 1000
            
            result = RetrievalResult(
                data=retrieved_data,
                confidence=similarity_score,
                activation_count=activation_count,
                retrieval_time_ms=retrieval_time,
                layer=layer,
                similarity_scores={self.default_similarity_metric: similarity_score}
            )
            
            return [result]
            
        except Exception as e:
            # Best-effort: a failing layer yields no results for that layer.
            logger.error(f"Layer retrieval from {layer.value} failed: {e}")
            return []
    
    def _calculate_similarity(
        self, 
        query_address: np.ndarray, 
        retrieved_data: np.ndarray,
        metric: str
    ) -> float:
        """Calculate similarity between query and retrieved data.

        Unknown metric names silently fall back to 'hamming'.
        """
        if metric not in self.similarity_metrics:
            metric = 'hamming'
        
        return self.similarity_metrics[metric](query_address, retrieved_data)
    
    def _hamming_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Calculate Hamming similarity (1 - normalized Hamming distance).

        Equivalently, the fraction of positions where the vectors agree.
        Length-mismatched inputs score 0.0 rather than raising.
        """
        if len(vec1) != len(vec2):
            return 0.0
        
        hamming_dist = np.sum(vec1 != vec2) / len(vec1)
        return 1.0 - hamming_dist
    
    def _cosine_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Calculate cosine similarity between vectors.

        Zero vectors (either side) score 0.0; any computation error is
        logged and also maps to 0.0.
        """
        try:
            # Convert to float for cosine calculation
            v1 = vec1.astype(float)
            v2 = vec2.astype(float)
            
            # Handle zero vectors
            norm1 = np.linalg.norm(v1)
            norm2 = np.linalg.norm(v2)
            
            if norm1 == 0 or norm2 == 0:
                return 0.0
            
            return np.dot(v1, v2) / (norm1 * norm2)
            
        except Exception as e:
            logger.warning(f"Cosine similarity calculation failed: {e}")
            return 0.0
    
    def _jaccard_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Calculate Jaccard similarity for binary vectors.

        |intersection| / |union| of the set bits. Two all-zero vectors are
        defined as identical (1.0); length mismatch scores 0.0.
        """
        if len(vec1) != len(vec2):
            return 0.0
        
        intersection = np.sum(np.logical_and(vec1, vec2))
        union = np.sum(np.logical_or(vec1, vec2))
        
        if union == 0:
            return 1.0  # Both vectors are all zeros
        
        return intersection / union
    
    def _hybrid_similarity(self, vec1: np.ndarray, vec2: np.ndarray) -> float:
        """Calculate hybrid similarity combining multiple metrics.

        Fixed weights: 0.5 Hamming + 0.3 cosine + 0.2 Jaccard.
        """
        hamming_sim = self._hamming_similarity(vec1, vec2)
        cosine_sim = self._cosine_similarity(vec1, vec2)
        jaccard_sim = self._jaccard_similarity(vec1, vec2)
        
        # Weighted combination
        return 0.5 * hamming_sim + 0.3 * cosine_sim + 0.2 * jaccard_sim
    
    def _filter_and_rank_results(
        self, 
        results: List[RetrievalResult], 
        query: AssociativeQuery
    ) -> List[RetrievalResult]:
        """Filter and rank results based on query criteria.

        Drops empty results and those below the query's similarity
        threshold, then returns the top ``query.max_results`` by
        confidence (descending).
        """
        # Filter by confidence threshold
        filtered = [
            result for result in results 
            if result.data is not None and result.confidence >= query.similarity_threshold
        ]
        
        # Sort by confidence (descending)
        filtered.sort(key=lambda x: x.confidence, reverse=True)
        
        # Limit to max results
        return filtered[:query.max_results]
    
    def _update_retrieval_stats(self, retrieval_time: float, success: bool) -> None:
        """Update retrieval performance statistics.

        Maintains a running mean of retrieval time. Assumes
        'total_queries' was already incremented for this query (retrieve()
        does so before calling), so the divisor is never zero.
        """
        if success:
            self.retrieval_stats['successful_retrievals'] += 1
        
        # Update average retrieval time
        total_queries = self.retrieval_stats['total_queries']
        current_avg = self.retrieval_stats['average_retrieval_time_ms']
        new_avg = ((current_avg * (total_queries - 1)) + retrieval_time) / total_queries
        self.retrieval_stats['average_retrieval_time_ms'] = new_avg
    
    def _adapt_thresholds(
        self, 
        query: AssociativeQuery, 
        results: List[RetrievalResult],
        retrieval_time: float
    ) -> None:
        """Adapt retrieval thresholds based on performance.

        Records the query threshold and a success-per-ms performance metric
        (history capped at 100 entries), then compares the mean of the last
        5 samples against the preceding 5.

        NOTE(review): when called from retrieve(), ``results`` has already
        been filtered to data-bearing entries, so success_rate is 1.0 for
        any non-empty input. Also, a detected decline only increments the
        'threshold_adaptations' counter and logs — no threshold is actually
        changed here. Confirm whether the adjustment itself is implemented
        elsewhere or still pending.
        """
        try:
            # Record current performance
            success_rate = len([r for r in results if r.data is not None]) / len(results) if results else 0
            performance_metric = success_rate * (100.0 / max(retrieval_time, 1.0))  # Success per ms
            
            self.threshold_history.append(query.similarity_threshold)
            self.performance_history.append(performance_metric)
            
            # Keep only recent history
            if len(self.threshold_history) > 100:
                self.threshold_history = self.threshold_history[-100:]
                self.performance_history = self.performance_history[-100:]
            
            # Simple adaptation: if performance is declining, adjust threshold
            if len(self.performance_history) >= 10:
                recent_performance = np.mean(self.performance_history[-5:])
                older_performance = np.mean(self.performance_history[-10:-5])
                
                if recent_performance < older_performance * 0.9:  # 10% decline
                    # Performance declining - adapt threshold
                    self.retrieval_stats['threshold_adaptations'] += 1
                    logger.debug("Adaptive threshold adjustment triggered")
            
        except Exception as e:
            # Adaptation is advisory; never let it break a retrieval.
            logger.warning(f"Threshold adaptation failed: {e}")
    
    def fuzzy_retrieve(
        self, 
        partial_address: np.ndarray, 
        noise_level: float = 0.2,
        layer: MemoryLayer = MemoryLayer.PATTERN
    ) -> List[RetrievalResult]:
        """
        Perform fuzzy retrieval with partial or noisy address.

        Probes the target layer with the original address plus randomly
        bit-flipped variants, then deduplicates and ranks the combined hits.
        The per-candidate similarity threshold is relaxed as noise grows:
        max(0.5, 1 - noise_level).
        
        Args:
            partial_address: Incomplete or noisy address
            noise_level: Expected noise level (0-1)
            layer: Memory layer to search
            
        Returns:
            List of fuzzy retrieval results (top 10 unique hits by
            confidence); empty on failure.
        """
        try:
            # Generate multiple candidate addresses by flipping bits
            candidate_addresses = self._generate_candidate_addresses(
                partial_address, 
                noise_level
            )
            
            results = []
            for candidate_addr in candidate_addresses:
                query = AssociativeQuery(
                    address=candidate_addr,
                    similarity_threshold=max(0.5, 1.0 - noise_level),
                    layer_preference=[layer],
                    use_parallel=False
                )
                
                candidate_results = self.retrieve(query)
                results.extend(candidate_results)
            
            # Remove duplicates and rank
            unique_results = self._deduplicate_results(results)
            return unique_results[:10]  # Top 10 results
            
        except Exception as e:
            logger.error(f"Fuzzy retrieval failed: {e}")
            return []
    
    def _generate_candidate_addresses(
        self, 
        base_address: np.ndarray, 
        noise_level: float
    ) -> List[np.ndarray]:
        """Generate candidate addresses by introducing controlled variations.

        Returns the unmodified base address first, followed by up to 20
        random variants, each with floor(len * noise_level) bits flipped.
        Assumes a binary 0/1 address (flip is computed as 1 - bit) — TODO
        confirm against the hierarchy's address encoding. Uses the global
        numpy RNG, so output is nondeterministic unless it is seeded.
        """
        candidates = [base_address.copy()]
        
        num_bits_to_flip = int(len(base_address) * noise_level)
        
        # Generate variations by flipping different combinations of bits
        for _ in range(min(20, 2**min(num_bits_to_flip, 10))):  # Limit combinations
            candidate = base_address.copy()
            flip_indices = np.random.choice(
                len(base_address), 
                size=num_bits_to_flip, 
                replace=False
            )
            candidate[flip_indices] = 1 - candidate[flip_indices]
            candidates.append(candidate)
        
        return candidates
    
    def _deduplicate_results(self, results: List[RetrievalResult]) -> List[RetrievalResult]:
        """Remove duplicate results based on data content.

        Identity is the hash of the raw data bytes; results with data=None
        are dropped. Output is sorted by confidence, descending.

        NOTE(review): tobytes() ignores shape/dtype, so arrays with equal
        bytes but different shapes would collapse to one result — likely
        irrelevant here, but worth confirming.
        """
        if not results:
            return []
        
        unique_results = []
        seen_hashes = set()
        
        for result in results:
            if result.data is None:
                continue
            
            data_hash = hash(result.data.tobytes())
            if data_hash not in seen_hashes:
                seen_hashes.add(data_hash)
                unique_results.append(result)
        
        # Sort by confidence
        unique_results.sort(key=lambda x: x.confidence, reverse=True)
        return unique_results
    
    def batch_retrieve(
        self, 
        queries: List[AssociativeQuery]
    ) -> List[List[RetrievalResult]]:
        """Perform batch retrieval for multiple queries.

        Queries run concurrently on a thread pool; the output list is in
        the same order as the input queries. Note each retrieve() call may
        itself spawn a nested pool if query.use_parallel is True.
        """
        with ThreadPoolExecutor(max_workers=self.parallel_workers) as executor:
            futures = [executor.submit(self.retrieve, query) for query in queries]
            results = [future.result() for future in futures]
        
        return results
    
    def get_retrieval_stats(self) -> Dict:
        """Get comprehensive retrieval statistics.

        Returns the raw counters plus a derived success rate (guarded
        against division by zero) and the current configuration flags.
        """
        success_rate = (
            self.retrieval_stats['successful_retrievals'] / 
            max(self.retrieval_stats['total_queries'], 1)
        )
        
        return {
            **self.retrieval_stats,
            'success_rate': success_rate,
            'default_similarity_metric': self.default_similarity_metric,
            'noise_compensation_enabled': self.noise_compensation,
            'adaptive_thresholds_enabled': self.adaptive_thresholds,
            'parallel_workers': self.parallel_workers
        }
    
    def reset_stats(self) -> None:
        """Reset all retrieval statistics and adaptation history."""
        self.retrieval_stats = {
            'total_queries': 0,
            'successful_retrievals': 0,
            'average_retrieval_time_ms': 0.0,
            'noise_corrections': 0,
            'threshold_adaptations': 0
        }
        self.threshold_history.clear()
        self.performance_history.clear()
        logger.info("Retrieval statistics reset")