"""
Core HDC (Hyperdimensional Computing) Operations

This module implements the fundamental operations of hyperdimensional computing:
- Binding: circular convolution operation for combining vectors
- Bundling: element-wise addition for creating superpositions  
- Permutation: vector rearrangement for creating variations
- Similarity: cosine similarity computation
- Random vector generation with quasi-orthogonality
"""

import numpy as np
from typing import List, Optional, Tuple, Union
import warnings
from scipy.fft import fft, ifft
import logging

# Optional numba import for performance acceleration
try:
    from numba import jit, njit
    NUMBA_AVAILABLE = True
except ImportError:
    NUMBA_AVAILABLE = False

    # Fallback no-op decorators when numba is absent.  They must support
    # BOTH call styles used in this module:
    #   @njit                 -> njit(func) receives the function directly
    #   @jit(nopython=True)   -> jit(...) returns a decorator
    # The previous fallbacks only handled the parenthesized form, so a bare
    # @njit replaced the function with the inner decorator closure and any
    # later call raised TypeError.
    def jit(*args, **kwargs):
        # Bare usage: single positional callable, no keyword arguments.
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]

        def decorator(func):
            return func
        return decorator

    def njit(*args, **kwargs):
        # njit is semantically jit(nopython=True); as a no-op they coincide.
        return jit(*args, **kwargs)

# Configure logging
# NOTE(review): basicConfig at import time mutates global logging state,
# which is unusual for a library module; kept for backward compatibility.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class HDCOperations:
    """
    A comprehensive class for Hyperdimensional Computing operations.
    
    This class provides optimized implementations of all core HDC operations
    with proper error handling and mathematical validation.
    """
    
    def __init__(self, dimension: int = 10000, seed: Optional[int] = None):
        """
        Initialize HDC operations with specified dimension.
        
        Args:
            dimension: Vector dimension (typically 5000-15000)
            seed: Random seed for reproducibility (seeds the global NumPy RNG)
        """
        if dimension < 1000:
            warnings.warn(f"Dimension {dimension} is very small for HDC operations. Recommended: 5000+")
        elif dimension > 20000:
            warnings.warn(f"Dimension {dimension} is very large and may impact performance.")
            
        self.dimension = dimension
        if seed is not None:
            np.random.seed(seed)
            
        logger.info(f"HDC Operations initialized with dimension {dimension}")
    
    def binding(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
        """
        Binding operation using circular convolution: c = a ⊗ b
        
        The binding operation combines two vectors in a way that produces
        a third vector that is dissimilar to both inputs but can be used
        to retrieve either input given the other.
        
        Args:
            a: First input vector
            b: Second input vector
            
        Returns:
            Normalized result of circular convolution
            
        Raises:
            ValueError: If vectors have different dimensions or wrong shape
            RuntimeError: If the FFT-based convolution fails
        """
        a, b = self._validate_vectors(a, b)
        
        # Use FFT for efficient circular convolution: IFFT(FFT(a) * FFT(b))
        try:
            result = np.real(ifft(fft(a) * fft(b)))
            return self._normalize_vector(result)
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"Binding operation failed: {e}") from e
    
    def bundling(self, *vectors: np.ndarray) -> np.ndarray:
        """
        Bundling operation using element-wise addition: s = a ⊕ b ⊕ c
        
        The bundling operation creates a superposition vector that is
        similar to all input vectors. It represents a set or collection.
        
        Args:
            *vectors: Variable number of input vectors
            
        Returns:
            Normalized sum of all input vectors
            
        Raises:
            ValueError: If no vectors provided or dimension mismatch
        """
        if not vectors:
            raise ValueError("At least one vector must be provided for bundling")
            
        # Validate (and convert) EVERY input, not just the first one.  The
        # previous implementation accessed `.shape` on raw inputs, so passing
        # a plain list raised AttributeError instead of the documented
        # ValueError.  _validate_single_vector also enforces the shared
        # dimension, so a mismatched vector still raises ValueError.
        validated = [self._validate_single_vector(vec) for vec in vectors]
        
        # Element-wise addition followed by normalization
        result = np.sum(validated, axis=0)
        return self._normalize_vector(result)
    
    def permutation(self, vector: np.ndarray, permutation_id: Optional[int] = None) -> np.ndarray:
        """
        Permutation operation using vector rearrangement: p = ρ(a)
        
        The permutation operation rearranges vector elements to create
        a new vector that is uncorrelated with the original but maintains
        the same statistical properties.
        
        Args:
            vector: Input vector to permute
            permutation_id: Optional specific permutation pattern (0-based)
            
        Returns:
            Permuted vector
        """
        vector = self._validate_single_vector(vector)
        
        if permutation_id is None:
            # Random permutation drawn from the global RNG
            indices = np.random.permutation(len(vector))
        else:
            # Deterministic permutation based on ID.  A local RandomState is
            # used so the global NumPy RNG is NOT reseeded: the previous
            # np.random.seed(permutation_id) call made every subsequent
            # global random draw deterministic as a hidden side effect.
            # RandomState(seed).permutation produces the same indices the
            # old seed-then-permute sequence did, so results are unchanged.
            rng = np.random.RandomState(permutation_id)
            indices = rng.permutation(len(vector))
            
        return vector[indices]
    
    def cosine_similarity(self, a: np.ndarray, b: np.ndarray) -> float:
        """
        Compute cosine similarity between two vectors.
        
        Cosine similarity measures the cosine of the angle between vectors,
        providing a metric of similarity that is independent of vector magnitude.
        
        Args:
            a: First vector
            b: Second vector
            
        Returns:
            Cosine similarity value between -1 and 1 (0.0 for zero vectors)
        """
        a, b = self._validate_vectors(a, b)
        
        # Handle zero vectors: similarity is undefined, return 0.0 by convention
        norm_a = np.linalg.norm(a)
        norm_b = np.linalg.norm(b)
        
        if norm_a == 0 or norm_b == 0:
            return 0.0
            
        # Cast so the return value is a Python float as documented,
        # not a np.float64 scalar.
        return float(np.dot(a, b) / (norm_a * norm_b))
    
    def generate_random_vector(self, distribution: str = 'bipolar') -> np.ndarray:
        """
        Generate a random vector with specified distribution.
        
        Args:
            distribution: Type of distribution ('bipolar', 'gaussian', 'uniform')
            
        Returns:
            Random vector of specified dimension
            
        Raises:
            ValueError: If the distribution name is unknown
        """
        if distribution == 'bipolar':
            # Random bipolar vector {-1, +1}
            return np.random.choice([-1, 1], size=self.dimension)
        elif distribution == 'gaussian':
            # Standard normal (mean 0, std 1)
            return np.random.normal(0, 1, self.dimension)
        elif distribution == 'uniform':
            # Uniform distribution on [-1, 1)
            return np.random.uniform(-1, 1, self.dimension)
        else:
            raise ValueError(f"Unknown distribution: {distribution}")
    
    def generate_quasi_orthogonal_vectors(self, count: int, 
                                        max_similarity: float = 0.1) -> List[np.ndarray]:
        """
        Generate a set of quasi-orthogonal random vectors.
        
        These vectors have low pairwise similarity, making them suitable
        as basis vectors for encoding different concepts.
        
        Args:
            count: Number of vectors to generate
            max_similarity: Maximum allowed pairwise absolute similarity
            
        Returns:
            List of quasi-orthogonal unit vectors (may be shorter than
            `count` if the rejection sampling budget is exhausted; a
            warning is emitted in that case)
            
        Raises:
            ValueError: If count is not positive
        """
        if count <= 0:
            raise ValueError("Count must be positive")
            
        vectors = []
        max_attempts = count * 10  # Prevent infinite loops
        attempts = 0
        
        # Rejection sampling: keep candidates whose absolute similarity to
        # every accepted vector stays below the threshold.
        while len(vectors) < count and attempts < max_attempts:
            candidate = self.generate_random_vector('bipolar')
            candidate = self._normalize_vector(candidate)
            
            # Check orthogonality with existing vectors
            is_orthogonal = True
            for existing in vectors:
                similarity = abs(self.cosine_similarity(candidate, existing))
                if similarity > max_similarity:
                    is_orthogonal = False
                    break
            
            if is_orthogonal:
                vectors.append(candidate)
            
            attempts += 1
        
        if len(vectors) < count:
            warnings.warn(f"Only generated {len(vectors)} out of {count} requested vectors")
        
        logger.info(f"Generated {len(vectors)} quasi-orthogonal vectors")
        return vectors
    
    def unbind(self, bound_vector: np.ndarray, key_vector: np.ndarray) -> np.ndarray:
        """
        Unbind operation to retrieve original vector: a' = c ⊗ b^(-1)
        
        For circular convolution, the inverse is achieved by using
        the complex conjugate in frequency domain.
        
        Args:
            bound_vector: Result of previous binding operation
            key_vector: Key vector used in original binding
            
        Returns:
            Approximation of original bound vector (normalized)
            
        Raises:
            ValueError: If vectors have different dimensions or wrong shape
            RuntimeError: If the FFT-based inversion fails
        """
        bound_vector, key_vector = self._validate_vectors(bound_vector, key_vector)
        
        # Inverse binding using conjugate in frequency domain
        try:
            key_fft = fft(key_vector)
            bound_fft = fft(bound_vector)
            
            # Regularized inverse: conj(K) / max(|K|^2, eps) avoids division
            # by (near-)zero frequency components.
            key_fft_conj = np.conj(key_fft)
            norm_squared = np.abs(key_fft) ** 2
            safe_inverse = key_fft_conj / np.maximum(norm_squared, 1e-10)
            
            result = np.real(ifft(bound_fft * safe_inverse))
            return self._normalize_vector(result)
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(f"Unbinding operation failed: {e}") from e
    
    def cleanup(self, noisy_vector: np.ndarray, 
                reference_vectors: List[np.ndarray],
                threshold: float = 0.5) -> Optional[np.ndarray]:
        """
        Clean up a noisy vector by finding the best matching reference vector.
        
        Args:
            noisy_vector: Vector to clean up
            reference_vectors: List of clean reference vectors
            threshold: Minimum similarity threshold for cleanup
            
        Returns:
            Best matching reference vector, or None if the list is empty
            or no reference reaches the threshold
        """
        noisy_vector = self._validate_single_vector(noisy_vector)
        
        if not reference_vectors:
            return None
        
        # Linear scan for the highest-similarity reference.
        best_similarity = -1.0
        best_vector = None
        
        for ref_vec in reference_vectors:
            similarity = self.cosine_similarity(noisy_vector, ref_vec)
            if similarity > best_similarity:
                best_similarity = similarity
                best_vector = ref_vec
        
        return best_vector if best_similarity >= threshold else None
    
    def _validate_vectors(self, a: np.ndarray, b: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Validate that two vectors have compatible dimensions."""
        a = self._validate_single_vector(a)
        b = self._validate_single_vector(b)
        
        if a.shape != b.shape:
            raise ValueError(f"Vector dimensions don't match: {a.shape} vs {b.shape}")
        
        return a, b
    
    def _validate_single_vector(self, vector: np.ndarray) -> np.ndarray:
        """Validate a single vector: coerce to ndarray, require 1-D of self.dimension."""
        if not isinstance(vector, np.ndarray):
            vector = np.array(vector)
        
        if vector.ndim != 1:
            raise ValueError(f"Vector must be 1-dimensional, got shape {vector.shape}")
        
        if len(vector) != self.dimension:
            raise ValueError(f"Vector dimension {len(vector)} doesn't match expected {self.dimension}")
        
        return vector
    
    def _normalize_vector(self, vector: np.ndarray) -> np.ndarray:
        """Normalize vector to unit length (zero vectors are returned unchanged)."""
        norm = np.linalg.norm(vector)
        if norm == 0:
            return vector
        return vector / norm

# Convenience functions for direct use
def binding(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Bind two vectors via circular convolution (module-level helper)."""
    return HDCOperations(dimension=len(a)).binding(a, b)


def bundling(*vectors: np.ndarray) -> np.ndarray:
    """Bundle (superpose) the given vectors (module-level helper)."""
    if len(vectors) == 0:
        raise ValueError("At least one vector required")
    return HDCOperations(dimension=len(vectors[0])).bundling(*vectors)


def permutation(vector: np.ndarray, permutation_id: Optional[int] = None) -> np.ndarray:
    """Permute a vector's elements (module-level helper)."""
    return HDCOperations(dimension=len(vector)).permutation(vector, permutation_id)


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Compute cosine similarity between two vectors (module-level helper)."""
    return HDCOperations(dimension=len(a)).cosine_similarity(a, b)


def generate_random_vector(dimension: int, distribution: str = 'bipolar') -> np.ndarray:
    """Generate one random vector of the given dimension (module-level helper)."""
    return HDCOperations(dimension=dimension).generate_random_vector(distribution)


def generate_quasi_orthogonal_vectors(dimension: int, count: int, 
                                    max_similarity: float = 0.1) -> List[np.ndarray]:
    """Generate `count` quasi-orthogonal vectors (module-level helper)."""
    ops = HDCOperations(dimension=dimension)
    result = ops.generate_quasi_orthogonal_vectors(count, max_similarity)
    return result


# Optimized functions using Numba for performance-critical operations
@njit
def _fast_cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Numba-compiled cosine similarity between two 1-D vectors.

    Returns 0.0 when either vector has zero magnitude.
    """
    magnitude_a = np.sqrt(np.sum(a * a))
    magnitude_b = np.sqrt(np.sum(b * b))

    if magnitude_a == 0 or magnitude_b == 0:
        return 0.0

    return np.dot(a, b) / (magnitude_a * magnitude_b)


@njit
def _fast_bundling(vectors: np.ndarray) -> np.ndarray:
    """Numba-compiled bundling: sum the rows of a 2-D stack and L2-normalize.

    A zero-magnitude sum is returned unnormalized.
    """
    summed = np.sum(vectors, axis=0)
    magnitude = np.sqrt(np.sum(summed * summed))
    if magnitude > 0:
        return summed / magnitude
    return summed