"""
Memory Hierarchy: Hierarchical memory organization for personality storage.

This module implements a three-layer memory hierarchy:
- Event memory layer: Specific historical events using iconic SDM
- Pattern memory layer: Behavioral patterns via dynamic SDM
- Personality memory layer: Core traits using SearcHD framework
- Content-addressable storage with <100ms retrieval requirement
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Any, Union
from enum import Enum
import logging
from dataclasses import dataclass
from .sdm_engine import SDMEngine
from .searchhd import SearcHDEngine, SearcHDConfig, TrainingSample

logger = logging.getLogger(__name__)


class MemoryLayer(Enum):
    """Identifies one of the three layers of the memory hierarchy."""

    # Episodic layer: specific historical events (iconic SDM).
    EVENT = "event"
    # Associative layer: recurring behavioral patterns (dynamic SDM).
    PATTERN = "pattern"
    # Stable layer: core personality traits (SearcHD framework).
    PERSONALITY = "personality"


@dataclass
class MemoryItem:
    """Represents an item stored in memory.

    Tracks the address/data pair written to a layer together with
    bookkeeping used for consolidation (access counts, confidence).
    """

    layer: MemoryLayer                          # hierarchy layer that owns this item
    address: np.ndarray                         # address vector used for storage/retrieval
    data: np.ndarray                            # payload vector stored at the address
    timestamp: float                            # event time (0.0 for timeless patterns)
    access_count: int = 0                       # how often the item has been accessed
    confidence: float = 1.0                     # confidence in the stored content (0-1)
    # Free-form context; a lying `Dict = None` annotation fixed to Optional.
    # Normalized to an empty dict in __post_init__ so callers can always
    # treat it as a mapping.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Replace the None sentinel with a fresh dict per instance (avoids
        # the shared-mutable-default pitfall).
        if self.metadata is None:
            self.metadata = {}


class MemoryHierarchy:
    """
    Advanced hierarchical memory organization with three specialized layers.

    Each layer has different characteristics:
    - Event Layer: High-dimensional, episodic, time-sensitive (Iconic SDM)
    - Pattern Layer: Medium-dimensional, frequently accessed, associative (Dynamic SDM)
    - Personality Layer: Core traits, stable, highly connected (SearcHD Framework)

    Features:
    - Content-addressable storage with <100ms retrieval requirement
    - Support for >50 concurrent historical figure personalities
    - Efficient storage and retrieval operations
    - Cross-layer memory consolidation
    """

    def __init__(
        self,
        event_config: Optional[Dict] = None,
        pattern_config: Optional[Dict] = None,
        personality_config: Optional[Dict] = None,
        searchhd_config: Optional[SearcHDConfig] = None
    ):
        """
        Initialize hierarchical memory system.

        Args:
            event_config: Configuration for event memory layer
            pattern_config: Configuration for pattern memory layer
            personality_config: Configuration for personality memory layer
            searchhd_config: Configuration for SearcHD personality engine
        """
        # Default configurations for each layer
        self.event_config = event_config or {
            'dimension': 10000,
            'num_locations': 1000000,
            'activation_radius': 451,
            'data_dimension': 2000,
            'threshold_factor': 0.4
        }

        self.pattern_config = pattern_config or {
            'dimension': 8000,
            'num_locations': 500000,
            'activation_radius': 360,
            'data_dimension': 1000,
            'threshold_factor': 0.5
        }

        self.personality_config = personality_config or {
            'dimension': 5000,
            'num_locations': 100000,
            'activation_radius': 225,
            'data_dimension': 500,
            'threshold_factor': 0.6
        }

        # SearcHD configuration for personality layer
        self.searchhd_config = searchhd_config or SearcHDConfig(
            dimension=10000,
            num_classes=60,  # Support for >50 personality types + buffer
            learning_rate=0.1,
            competitive_factor=0.05,
            memory_consolidation_threshold=100,
            stochastic_sampling_rate=0.8,
            max_workers=8
        )

        # Initialize SDM engines for event and pattern layers
        self.event_layer = SDMEngine(**self.event_config)
        self.pattern_layer = SDMEngine(**self.pattern_config)

        # Initialize SearcHD engine for personality layer
        self.personality_layer = SearcHDEngine(self.searchhd_config)

        # Layer mappings
        self.layers = {
            MemoryLayer.EVENT: self.event_layer,
            MemoryLayer.PATTERN: self.pattern_layer,
            MemoryLayer.PERSONALITY: self.personality_layer
        }

        # Cross-layer associations
        self.cross_layer_links = {}

        # Memory consolidation parameters
        self.consolidation_threshold = 10  # Access count for pattern formation
        self.personality_stability_threshold = 50  # For core trait formation

        logger.info("Memory hierarchy initialized with 3 layers")

    def store_event(
        self,
        address: np.ndarray,
        data: np.ndarray,
        timestamp: float,
        context: Optional[Dict] = None
    ) -> bool:
        """
        Store an event in the event memory layer.

        Args:
            address: Event address vector
            data: Event data vector
            timestamp: Time of event occurrence
            context: Additional contextual information

        Returns:
            True if storage was successful
        """
        try:
            # Validate dimensions against the event layer's configuration.
            if len(address) != self.event_config['dimension']:
                raise ValueError(
                    "Event address dimension mismatch: "
                    f"expected {self.event_config['dimension']}, got {len(address)}"
                )
            if len(data) != self.event_config['data_dimension']:
                raise ValueError(
                    "Event data dimension mismatch: "
                    f"expected {self.event_config['data_dimension']}, got {len(data)}"
                )

            # Store in event layer
            success = self.event_layer.store(address, data)

            if success:
                # Create memory item for tracking
                item = MemoryItem(
                    layer=MemoryLayer.EVENT,
                    address=address.copy(),
                    data=data.copy(),
                    timestamp=timestamp,
                    metadata=context or {}
                )

                # Check for pattern formation
                self._check_pattern_formation(item)

                # Lazy %-formatting: only rendered if DEBUG is enabled.
                logger.debug("Event stored at timestamp %s", timestamp)

            return success

        except Exception as e:
            logger.error("Event storage failed: %s", e)
            return False

    def store_pattern(
        self,
        address: np.ndarray,
        data: np.ndarray,
        frequency: int = 1,
        associations: Optional[List[np.ndarray]] = None
    ) -> bool:
        """
        Store a behavioral pattern in the pattern memory layer.

        Args:
            address: Pattern address vector
            data: Pattern data vector
            frequency: How often this pattern occurs
            associations: Related pattern addresses

        Returns:
            True if storage was successful
        """
        try:
            # Validate dimensions against the pattern layer's configuration.
            if len(address) != self.pattern_config['dimension']:
                raise ValueError(
                    "Pattern address dimension mismatch: "
                    f"expected {self.pattern_config['dimension']}, got {len(address)}"
                )
            if len(data) != self.pattern_config['data_dimension']:
                raise ValueError(
                    "Pattern data dimension mismatch: "
                    f"expected {self.pattern_config['data_dimension']}, got {len(data)}"
                )

            # Store in pattern layer
            success = self.pattern_layer.store(address, data)

            if success:
                # Create memory item
                item = MemoryItem(
                    layer=MemoryLayer.PATTERN,
                    address=address.copy(),
                    data=data.copy(),
                    timestamp=0.0,  # Patterns are timeless
                    access_count=frequency,
                    metadata={'associations': associations or []}
                )

                # Check for personality trait formation
                self._check_personality_formation(item)

                logger.debug("Pattern stored with frequency %s", frequency)

            return success

        except Exception as e:
            logger.error("Pattern storage failed: %s", e)
            return False

    def store_personality_trait(
        self,
        trait_data: np.ndarray,
        personality_id: int,
        trait_name: Optional[str] = None,
        stability: float = 1.0,
        training_samples: Optional[List[TrainingSample]] = None
    ) -> bool:
        """
        Store a core personality trait using SearcHD framework.

        Args:
            trait_data: Trait data vector (will be encoded to hyperdimensional space)
            personality_id: Personality class ID (0 to num_classes-1)
            trait_name: Human-readable name for the trait
            stability: Stability measure of the trait (0-1)
            training_samples: Optional training samples for the personality

        Returns:
            True if storage was successful
        """
        try:
            # Validate personality ID
            if personality_id < 0 or personality_id >= self.searchhd_config.num_classes:
                raise ValueError(f"Personality ID {personality_id} out of range")

            if not training_samples:
                # No explicit samples: wrap the trait vector as a one-sample
                # training set (stability acts as the sample weight).
                training_samples = [TrainingSample(
                    data=trait_data,
                    class_id=personality_id,
                    weight=stability,
                    timestamp=0.0,
                    metadata={'trait_name': trait_name}
                )]

            # Single training path for both cases (previously duplicated).
            training_metrics = self.personality_layer.train(training_samples)
            success = 'error' not in training_metrics

            if success:
                logger.debug(
                    "Personality trait '%s' for ID %s stored with %s samples",
                    trait_name, personality_id,
                    training_metrics.get('samples_processed', 0)
                )

            return success

        except Exception as e:
            logger.error("Personality trait storage failed: %s", e)
            return False

    def retrieve_from_layer(
        self,
        layer: MemoryLayer,
        address: np.ndarray
    ) -> Optional[Union[np.ndarray, List]]:
        """
        Retrieve data from a specific memory layer.

        Args:
            layer: Memory layer to retrieve from
            address: Address vector for retrieval

        Returns:
            Retrieved data vector, or a list of search results for the
            personality layer, or None when nothing is found / on failure.
        """
        try:
            engine = self.layers[layer]

            if layer == MemoryLayer.PERSONALITY:
                # Use SearcHD search for personality layer
                search_results = engine.search(address, top_k=5)
                if search_results:
                    logger.debug("Retrieved %s personality results", len(search_results))
                    return search_results
                return None
            else:
                # Use standard SDM retrieval for event and pattern layers
                result = engine.retrieve(address)
                if result is not None:
                    logger.debug("Retrieved data from %s layer", layer.value)
                return result

        except Exception as e:
            logger.error("Retrieval from %s layer failed: %s", layer.value, e)
            return None

    def hierarchical_retrieve(
        self,
        address: np.ndarray,
        layer_priority: Optional[List[MemoryLayer]] = None
    ) -> Dict[MemoryLayer, Optional[Union[np.ndarray, List]]]:
        """
        Retrieve data from multiple layers in priority order.

        Args:
            address: Address vector (will be projected to each layer's dimension)
            layer_priority: Order of layers to search

        Returns:
            Dictionary mapping layers to retrieved data (a search-result list
            for the personality layer, an array for the SDM layers).
        """
        if layer_priority is None:
            # Default: most stable knowledge first.
            layer_priority = [MemoryLayer.PERSONALITY, MemoryLayer.PATTERN, MemoryLayer.EVENT]

        results = {}

        for layer in layer_priority:
            try:
                # Project address to layer's dimension
                projected_address = self._project_address(address, layer)

                # Retrieve from layer
                data = self.retrieve_from_layer(layer, projected_address)
                results[layer] = data

            except Exception as e:
                logger.error("Hierarchical retrieval from %s failed: %s", layer.value, e)
                results[layer] = None

        return results

    def _project_address(self, address: np.ndarray, target_layer: MemoryLayer) -> np.ndarray:
        """
        Project an address vector to the target layer's input dimension.

        Args:
            address: Original address vector
            target_layer: Target memory layer

        Returns:
            Projected address vector
        """
        # NOTE: the personality layer is backed by the SearcHD engine, so its
        # input dimension comes from searchhd_config — not personality_config,
        # whose default dimension (5000) differs from the engine's (10000).
        # Using personality_config here produced addresses the engine could
        # not search.
        layer_dimensions = {
            MemoryLayer.EVENT: self.event_config['dimension'],
            MemoryLayer.PATTERN: self.pattern_config['dimension'],
            MemoryLayer.PERSONALITY: self.searchhd_config.dimension
        }

        target_dim = layer_dimensions[target_layer]
        source_dim = len(address)

        if source_dim == target_dim:
            return address.copy()
        if source_dim > target_dim:
            # Downsample by taking the first target_dim elements.
            return address[:target_dim].copy()

        # Upsample by tiling the source vector, padding with a prefix.
        repeats = target_dim // source_dim
        remainder = target_dim % source_dim

        projected = np.tile(address, repeats)
        if remainder > 0:
            projected = np.concatenate([projected, address[:remainder]])

        return projected

    def _check_pattern_formation(self, event_item: MemoryItem):
        """
        Check if an event should form a pattern based on repetition.

        Args:
            event_item: Event memory item to check
        """
        # This is a simplified pattern formation check.
        # In a full implementation, this would analyze similar events
        # and promote them to patterns when they occur frequently.

        # For now, we'll just log the check.
        logger.debug("Pattern formation check for event")

    def _check_personality_formation(self, pattern_item: MemoryItem):
        """
        Check if a pattern should form a personality trait.

        Args:
            pattern_item: Pattern memory item to check
        """
        # This is a simplified personality trait formation check.
        # In a full implementation, this would analyze stable patterns
        # and promote them to core personality traits.

        logger.debug("Personality trait formation check for pattern")

    def consolidate_memories(self):
        """
        Perform memory consolidation across layers.

        This process:
        1. Identifies frequently accessed events for pattern promotion
        2. Identifies stable patterns for personality trait formation
        3. Updates cross-layer associations
        """
        logger.info("Starting memory consolidation")

        # Get statistics from each layer
        event_stats = self.event_layer.get_stats()
        pattern_stats = self.pattern_layer.get_stats()
        personality_stats = self.personality_layer.get_stats()

        logger.info(
            "Consolidation complete. Events: %s, Patterns: %s, Personalities: %s",
            event_stats.get('storage_count', 0),
            pattern_stats.get('storage_count', 0),
            personality_stats.get('num_classes', 0)
        )

    def add_personality_type(self, personality_name: str) -> int:
        """
        Dynamically add a new personality type.

        Args:
            personality_name: Name of the new personality type

        Returns:
            New personality ID, or -1 on failure.
        """
        try:
            personality_id = self.personality_layer.add_new_class(personality_name)
            logger.info("Added personality type '%s' with ID %s", personality_name, personality_id)
            return personality_id
        except Exception as e:
            logger.error("Failed to add personality type: %s", e)
            return -1

    def get_hierarchy_stats(self) -> Dict:
        """Get comprehensive statistics for the memory hierarchy."""
        event_stats = self.event_layer.get_stats()
        pattern_stats = self.pattern_layer.get_stats()
        personality_stats = self.personality_layer.get_stats()

        return {
            'event_layer': event_stats,
            'pattern_layer': pattern_stats,
            'personality_layer': personality_stats,
            'cross_layer_links': len(self.cross_layer_links),
            'total_memory_mb': (
                event_stats.get('memory_usage_mb', 0) +
                pattern_stats.get('memory_usage_mb', 0) +
                personality_stats.get('estimated_memory_usage_mb', 0)
            ),
            'total_personalities': personality_stats.get('num_classes', 0),
            'active_personalities': personality_stats.get('active_classes', 0),
            'training_speed_improvement': personality_stats.get('training_stats', {}).get('average_training_time_ms', 0),
            'search_performance_ms': personality_stats.get('average_search_time_ms', 0),
            'memory_consolidations': personality_stats.get('memory_consolidations', 0)
        }

    def clear_all_layers(self):
        """Clear all memory layers."""
        self.event_layer.clear_memory()
        self.pattern_layer.clear_memory()
        self.personality_layer.clear_memory()
        self.cross_layer_links.clear()
        logger.info("All memory layers cleared")

    def save_hierarchy(self, filepath_prefix: str):
        """Save all layers to files with given prefix."""
        self.event_layer.save_state(f"{filepath_prefix}_event.npz")
        self.pattern_layer.save_state(f"{filepath_prefix}_pattern.npz")
        self.personality_layer.save_state(f"{filepath_prefix}_personality.npz")
        logger.info("Memory hierarchy saved with prefix %s", filepath_prefix)

    def load_hierarchy(self, filepath_prefix: str):
        """Load all layers from files with given prefix."""
        self.event_layer.load_state(f"{filepath_prefix}_event.npz")
        self.pattern_layer.load_state(f"{filepath_prefix}_pattern.npz")
        self.personality_layer.load_state(f"{filepath_prefix}_personality.npz")
        logger.info("Memory hierarchy loaded with prefix %s", filepath_prefix)

    async def initialize(self):
        """Initialize the memory hierarchy asynchronously."""
        # No async components yet; kept for interface compatibility.
        logger.info("Memory hierarchy initialized asynchronously")

    async def cleanup(self):
        """Clean up memory hierarchy resources."""
        self.clear_all_layers()
        logger.info("Memory hierarchy cleanup completed")