"""
Memory Manager: Advanced memory management system for SDM.

This module implements:
- Dynamic address allocation optimization
- Memory capacity management (targeting 5% storage efficiency)
- Distributed storage across multiple locations
- Redundancy and error recovery mechanisms
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Set, Union
import threading
import time
import logging
from dataclasses import dataclass, field
from collections import defaultdict
from .sdm_engine import SDMEngine
from .hierarchy import MemoryHierarchy, MemoryLayer
from .searchhd import SearcHDEngine, SearcHDConfig, TrainingSample

logger = logging.getLogger(__name__)


@dataclass
class MemoryLocation:
    """Represents a physical memory location with metadata.

    Tracks per-location occupancy (``stored_items`` relative to
    ``max_capacity`` drives ``current_load``), health counters, and the set
    of other locations holding replicas of data stored here. Instances are
    created and mutated by MemoryManager; they hold no behavior themselves.
    """
    location_id: int  # index of this location within MemoryManager.physical_locations
    address_space_region: Tuple[int, int]  # (start, end) indices
    current_load: float = 0.0  # stored_items / max_capacity, recomputed on every (de)allocation
    max_capacity: int = 1000  # maximum number of items this location may hold
    stored_items: int = 0  # number of items currently stored here
    last_access_time: float = 0.0  # epoch seconds; used by garbage collection staleness check
    error_count: int = 0  # cumulative error/failure count, penalized during location selection
    is_active: bool = True  # set False when the location is marked as failed
    replicas: Set[int] = field(default_factory=set)  # location_ids replicating this location's data


@dataclass
class AllocationRequest:
    """Request for memory allocation.

    Bundles the address hypervector with sizing and redundancy hints
    consumed by ``MemoryManager.allocate_memory``.
    """
    address: np.ndarray  # address hypervector; its byte hash selects a storage region
    data_size: int  # size of the payload to store (not used by the visible allocation path)
    priority: float = 1.0  # allocation priority hint
    redundancy_level: int = 2  # number of replica locations to allocate
    layer: MemoryLayer = MemoryLayer.EVENT  # target memory hierarchy layer
    metadata: Dict = field(default_factory=dict)  # arbitrary caller-supplied context


class MemoryManager:
    """
    Advanced memory management system for SDM operations.

    Features:
    - Dynamic address allocation optimization
    - Load balancing across memory locations
    - Redundant storage for fault tolerance
    - Capacity management targeting 5% efficiency
    - Garbage collection and memory defragmentation
    """

    def __init__(
        self,
        hdc_ops=None,  # HDCOperations instance
        num_physical_locations: int = 1000,
        target_storage_efficiency: float = 0.05,
        max_redundancy_level: int = 3,
        load_balance_threshold: float = 0.8,
        hierarchy: Optional["MemoryHierarchy"] = None
    ):
        """
        Initialize memory manager.

        Args:
            hdc_ops: HDC operations instance. Optional so that callers which
                configure the manager purely via keyword arguments (e.g.
                AdvancedMemorySystem) can construct one without it.
            num_physical_locations: Number of physical storage locations
            target_storage_efficiency: Target storage efficiency (default 5%)
            max_redundancy_level: Maximum redundancy level for critical data
            load_balance_threshold: Threshold for triggering load balancing
            hierarchy: Optional pre-built MemoryHierarchy to share with this
                manager; a private one is created when omitted.
        """
        self.hdc_ops = hdc_ops
        # Reuse a caller-supplied hierarchy so owner and manager operate on
        # the same memory layers; otherwise build a dedicated one.
        self.hierarchy = hierarchy if hierarchy is not None else MemoryHierarchy()
        self.num_physical_locations = num_physical_locations
        self.target_storage_efficiency = target_storage_efficiency
        self.max_redundancy_level = max_redundancy_level
        self.load_balance_threshold = load_balance_threshold

        # Initialize physical memory locations
        self.physical_locations = self._initialize_physical_locations()

        # Address allocation tracking
        self.address_allocations = defaultdict(list)  # address hash -> list of location_ids
        self.location_loads = np.zeros(num_physical_locations)

        # Performance monitoring counters
        self.allocation_stats = {
            'total_allocations': 0,
            'failed_allocations': 0,
            'load_balancing_events': 0,
            'garbage_collections': 0,
            'error_recoveries': 0
        }

        # Thread safety: re-entrant locks so helpers may be invoked while a
        # public method already holds the lock.
        self.allocation_lock = threading.RLock()
        self.load_balance_lock = threading.RLock()

        # Background tasks
        self.background_tasks_active = True
        self._start_background_tasks()

        logger.info(f"Memory manager initialized with {num_physical_locations} locations, "
                   f"target efficiency {target_storage_efficiency*100:.1f}%")

    def _initialize_physical_locations(self) -> Dict[int, "MemoryLocation"]:
        """Initialize physical memory locations with balanced address space regions.

        The simplified 2**16 address space is split into contiguous,
        non-overlapping regions, one per location; the last location absorbs
        any remainder so the full space is always covered.
        """
        locations = {}

        # Divide address space among locations
        total_address_space = 2**16  # Simplified address space size
        region_size = total_address_space // self.num_physical_locations

        for i in range(self.num_physical_locations):
            start_addr = i * region_size
            # The final region is extended to the end of the address space so
            # integer division truncation never leaves hashes unassigned.
            end_addr = (i + 1) * region_size - 1 if i < self.num_physical_locations - 1 else total_address_space - 1

            locations[i] = MemoryLocation(
                location_id=i,
                address_space_region=(start_addr, end_addr),
                max_capacity=1000,  # Items per location
                is_active=True
            )

        return locations

    def allocate_memory(self, request: "AllocationRequest") -> Optional[List[int]]:
        """
        Allocate memory for a storage request.

        Args:
            request: Memory allocation request

        Returns:
            List of allocated location IDs, or None if allocation fails
        """
        with self.allocation_lock:
            try:
                # Find candidate locations based on address
                candidates = self._find_candidate_locations(request.address)

                # Select optimal locations considering load balancing
                selected_locations = self._select_optimal_locations(
                    candidates,
                    request.redundancy_level,
                    request.priority
                )

                if not selected_locations:
                    self.allocation_stats['failed_allocations'] += 1
                    logger.warning("Memory allocation failed - no suitable locations")
                    return None

                # Reserve capacity in selected locations
                for location_id in selected_locations:
                    self.physical_locations[location_id].stored_items += 1
                    self.physical_locations[location_id].current_load = (
                        self.physical_locations[location_id].stored_items /
                        self.physical_locations[location_id].max_capacity
                    )

                # Update allocation tracking keyed by the address's byte hash.
                address_key = hash(request.address.tobytes())
                self.address_allocations[address_key] = selected_locations

                # Update statistics
                self.allocation_stats['total_allocations'] += 1

                # Check if load balancing is needed
                self._check_load_balancing()

                logger.debug(f"Allocated memory at locations {selected_locations}")
                return selected_locations

            except Exception as e:
                logger.error(f"Memory allocation failed: {e}")
                self.allocation_stats['failed_allocations'] += 1
                return None

    def _find_candidate_locations(self, address: np.ndarray) -> List[int]:
        """Find candidate locations based on address proximity.

        The address vector is hashed into the simplified 2**16 address space;
        active locations whose region contains the hash are preferred. If
        none match (e.g. the owning locations are inactive), the 10 active
        locations whose region midpoints are closest are returned instead.
        """
        # Hash address to determine preferred region
        address_hash = hash(address.tobytes()) % (2**16)

        candidates = []
        for location_id, location in self.physical_locations.items():
            if not location.is_active:
                continue

            start, end = location.address_space_region
            if start <= address_hash <= end:
                candidates.append(location_id)

        # If no direct match, find nearby regions
        if not candidates:
            # Rank active locations by distance from region midpoint.
            distances = []
            for location_id, location in self.physical_locations.items():
                if not location.is_active:
                    continue

                start, end = location.address_space_region
                mid_point = (start + end) // 2
                distance = abs(address_hash - mid_point)
                distances.append((distance, location_id))

            # Sort by distance and take closest ones
            distances.sort()
            candidates = [loc_id for _, loc_id in distances[:10]]

        return candidates

    def _select_optimal_locations(
        self,
        candidates: List[int],
        redundancy_level: int,
        priority: float
    ) -> List[int]:
        """Select optimal locations considering load balancing and redundancy.

        Candidates are scored by free capacity minus load and error
        penalties; the top ``redundancy_level`` non-full locations are
        chosen and cross-linked as replicas of each other.

        Args:
            candidates: Candidate location IDs (from _find_candidate_locations).
            redundancy_level: Desired number of replica locations; clamped to
                the number of available candidates.
            priority: Allocation priority hint (currently not used in scoring).

        Returns:
            Selected location IDs, best-scoring first; may be shorter than
            ``redundancy_level`` if candidates are at capacity.
        """
        if len(candidates) < redundancy_level:
            logger.warning(f"Not enough candidates ({len(candidates)}) for redundancy level {redundancy_level}")
            redundancy_level = len(candidates)

        # Sort candidates by load and error count
        candidate_scores = []
        for location_id in candidates:
            location = self.physical_locations[location_id]

            # Calculate score based on load, errors, and capacity
            load_penalty = location.current_load * 2.0
            error_penalty = location.error_count * 0.1
            capacity_bonus = (location.max_capacity - location.stored_items) / location.max_capacity

            score = capacity_bonus - load_penalty - error_penalty
            candidate_scores.append((score, location_id))

        # Sort by score (higher is better)
        candidate_scores.sort(reverse=True)

        # Select top candidates up to redundancy level
        selected = []
        for score, location_id in candidate_scores[:redundancy_level]:
            location = self.physical_locations[location_id]

            # Check capacity constraints
            if location.stored_items >= location.max_capacity:
                continue

            selected.append(location_id)

            # Set up replica relationships (bidirectional links between every
            # previously selected location and the new one).
            for other_id in selected[:-1]:
                location.replicas.add(other_id)
                self.physical_locations[other_id].replicas.add(location_id)

        return selected

    def deallocate_memory(self, address: np.ndarray) -> bool:
        """
        Deallocate memory for the given address.

        Args:
            address: Address vector to deallocate

        Returns:
            True if deallocation was successful
        """
        with self.allocation_lock:
            try:
                address_key = hash(address.tobytes())

                if address_key not in self.address_allocations:
                    logger.warning("Attempted to deallocate non-allocated address")
                    return False

                location_ids = self.address_allocations[address_key]

                # Free capacity in locations
                for location_id in location_ids:
                    location = self.physical_locations[location_id]
                    location.stored_items = max(0, location.stored_items - 1)
                    location.current_load = (
                        location.stored_items / location.max_capacity
                    )

                    # Remove replica relationships (we mutate the peers' sets,
                    # not the set being iterated, so iteration is safe).
                    for replica_id in location.replicas:
                        if replica_id in self.physical_locations:
                            self.physical_locations[replica_id].replicas.discard(location_id)
                    location.replicas.clear()

                # Remove from allocation tracking
                del self.address_allocations[address_key]

                logger.debug(f"Deallocated memory from locations {location_ids}")
                return True

            except Exception as e:
                logger.error(f"Memory deallocation failed: {e}")
                return False

    def _check_load_balancing(self):
        """Check if load balancing is needed and trigger if necessary."""
        # default=0.0 guards the degenerate case of zero active locations
        # (e.g. after repeated handle_location_failure calls), where max()
        # over an empty sequence would raise ValueError.
        max_load = max(
            (loc.current_load for loc in self.physical_locations.values() if loc.is_active),
            default=0.0
        )

        if max_load > self.load_balance_threshold:
            self._trigger_load_balancing()

    def _trigger_load_balancing(self):
        """Trigger load balancing across memory locations.

        Moves one item at a time from the most loaded locations (above the
        threshold) toward the least loaded ones (below half the threshold).
        """
        with self.load_balance_lock:
            try:
                logger.info("Starting load balancing")

                # Find heavily loaded locations
                overloaded = [
                    (loc.current_load, loc_id)
                    for loc_id, loc in self.physical_locations.items()
                    if loc.is_active and loc.current_load > self.load_balance_threshold
                ]
                overloaded.sort(reverse=True)

                # Find lightly loaded locations
                underloaded = [
                    (loc.current_load, loc_id)
                    for loc_id, loc in self.physical_locations.items()
                    if loc.is_active and loc.current_load < self.load_balance_threshold * 0.5
                ]
                underloaded.sort()

                # Perform load balancing (simplified)
                for _, overloaded_id in overloaded[:5]:  # Limit to top 5
                    for _, underloaded_id in underloaded[:3]:  # Move to top 3
                        if self._migrate_data(overloaded_id, underloaded_id):
                            break

                self.allocation_stats['load_balancing_events'] += 1
                logger.info("Load balancing completed")

            except Exception as e:
                logger.error(f"Load balancing failed: {e}")

    def _migrate_data(self, source_id: int, target_id: int) -> bool:
        """Migrate some data from source to target location.

        Returns:
            True if one item was moved, False if the target was full.
        """
        # This is a simplified migration - in practice would involve
        # actual data movement and address table updates
        source_loc = self.physical_locations[source_id]
        target_loc = self.physical_locations[target_id]

        if target_loc.stored_items >= target_loc.max_capacity:
            return False

        # Simulate migration of one item
        source_loc.stored_items = max(0, source_loc.stored_items - 1)
        target_loc.stored_items += 1

        # Update load calculations
        source_loc.current_load = source_loc.stored_items / source_loc.max_capacity
        target_loc.current_load = target_loc.stored_items / target_loc.max_capacity

        logger.debug(f"Migrated data from location {source_id} to {target_id}")
        return True

    def perform_garbage_collection(self):
        """Perform garbage collection to free unused memory.

        An allocation is reclaimed when none of its locations has been
        accessed within the last hour (based on last_access_time).
        """
        logger.info("Starting garbage collection")

        try:
            # Identify unused allocations (simplified)
            current_time = time.time()
            cleanup_threshold = current_time - 3600  # 1 hour threshold

            addresses_to_cleanup = []
            for address_key, location_ids in self.address_allocations.items():
                # Check if any location hasn't been accessed recently
                all_stale = True
                for location_id in location_ids:
                    location = self.physical_locations[location_id]
                    if location.last_access_time > cleanup_threshold:
                        all_stale = False
                        break

                if all_stale:
                    addresses_to_cleanup.append(address_key)

            # Clean up stale allocations (deletion happens after the scan so
            # the dict is never mutated while being iterated).
            for address_key in addresses_to_cleanup:
                location_ids = self.address_allocations[address_key]
                for location_id in location_ids:
                    location = self.physical_locations[location_id]
                    location.stored_items = max(0, location.stored_items - 1)
                    location.current_load = location.stored_items / location.max_capacity

                del self.address_allocations[address_key]

            self.allocation_stats['garbage_collections'] += 1
            logger.info(f"Garbage collection completed, cleaned up {len(addresses_to_cleanup)} allocations")

        except Exception as e:
            logger.error(f"Garbage collection failed: {e}")

    def handle_location_failure(self, failed_location_id: int):
        """Handle failure of a memory location.

        Deactivates the location, then rewrites every allocation that used
        it to rely on its surviving replicas; allocations with no surviving
        replica are dropped (data loss).
        """
        try:
            logger.warning(f"Handling failure of location {failed_location_id}")

            failed_location = self.physical_locations[failed_location_id]
            failed_location.is_active = False
            failed_location.error_count += 1

            # Find affected allocations
            affected_allocations = []
            for address_key, location_ids in self.address_allocations.items():
                if failed_location_id in location_ids:
                    affected_allocations.append((address_key, location_ids))

            # Attempt recovery using replicas
            for address_key, location_ids in affected_allocations:
                remaining_replicas = [
                    lid for lid in location_ids
                    if lid != failed_location_id and self.physical_locations[lid].is_active
                ]

                if remaining_replicas:
                    # Update allocation to exclude failed location
                    self.address_allocations[address_key] = remaining_replicas
                    logger.debug(f"Recovered allocation {address_key} using replicas")
                else:
                    # Complete data loss for this allocation
                    logger.error(f"Data loss for allocation {address_key} - no available replicas")
                    del self.address_allocations[address_key]

            self.allocation_stats['error_recoveries'] += 1
            logger.info(f"Location {failed_location_id} failure handled")

        except Exception as e:
            logger.error(f"Error handling location failure: {e}")

    def get_storage_efficiency(self) -> float:
        """Calculate current storage efficiency.

        Returns:
            Fraction of total active capacity currently in use (0.0 when
            there is no active capacity).
        """
        total_capacity = sum(
            loc.max_capacity
            for loc in self.physical_locations.values()
            if loc.is_active
        )
        total_used = sum(
            loc.stored_items
            for loc in self.physical_locations.values()
            if loc.is_active
        )

        if total_capacity == 0:
            return 0.0

        return total_used / total_capacity

    def get_manager_stats(self) -> Dict:
        """Get comprehensive manager statistics."""
        active_locations = sum(1 for loc in self.physical_locations.values() if loc.is_active)
        total_stored_items = sum(loc.stored_items for loc in self.physical_locations.values())
        # Guard the empty case: np.mean([]) yields NaN plus a RuntimeWarning
        # when every location is inactive.
        active_loads = [loc.current_load for loc in self.physical_locations.values() if loc.is_active]
        average_load = float(np.mean(active_loads)) if active_loads else 0.0

        return {
            'allocation_stats': self.allocation_stats.copy(),
            'active_locations': active_locations,
            'total_physical_locations': self.num_physical_locations,
            'total_stored_items': total_stored_items,
            'storage_efficiency': self.get_storage_efficiency(),
            'target_efficiency': self.target_storage_efficiency,
            'average_load': average_load,
            'total_allocations': len(self.address_allocations),
            'memory_usage_mb': self._estimate_manager_memory_usage()
        }

    def _estimate_manager_memory_usage(self) -> float:
        """Estimate memory manager's memory usage in MB."""
        # Estimate based on data structures
        location_size = len(self.physical_locations) * 200  # Rough estimate per location
        allocation_size = len(self.address_allocations) * 50  # Rough estimate per allocation
        total_bytes = location_size + allocation_size
        return total_bytes / (1024 * 1024)

    def _start_background_tasks(self):
        """Start background maintenance tasks.

        Spawns a daemon thread that performs garbage collection and a load
        balancing check every 5 minutes until shutdown() clears the flag.
        """
        def maintenance_loop():
            while self.background_tasks_active:
                try:
                    time.sleep(300)  # Run every 5 minutes
                    # Re-check the flag: shutdown() may have been called
                    # during the sleep.
                    if self.background_tasks_active:
                        self.perform_garbage_collection()
                        self._check_load_balancing()
                except Exception as e:
                    logger.error(f"Background maintenance error: {e}")

        maintenance_thread = threading.Thread(target=maintenance_loop, daemon=True)
        maintenance_thread.start()
        # Keep a handle for observability / potential joining by callers.
        self.maintenance_thread = maintenance_thread
        logger.debug("Background maintenance tasks started")

    def shutdown(self):
        """Shutdown the memory manager (stops the maintenance loop)."""
        self.background_tasks_active = False
        logger.info("Memory manager shutdown")

    async def initialize(self):
        """Initialize the memory manager asynchronously."""
        # Initialize any async components if needed
        logger.info("Memory manager initialized asynchronously")

    async def cleanup(self):
        """Clean up memory manager resources."""
        self.shutdown()
        logger.info("Memory manager cleanup completed")


class AdvancedMemorySystem:
    """
    Unified interface for all memory operations in the AI Historical Simulation Platform.
    
    Features:
    - Unified interface for all memory operations
    - Dynamic memory allocation and garbage collection
    - Memory consistency and integrity validation
    - Distributed storage support for scalability
    - Support for >50 concurrent historical figure personalities
    """
    
    def __init__(
        self,
        event_config: Dict = None,
        pattern_config: Dict = None,
        personality_config: Dict = None,
        searchhd_config: SearcHDConfig = None,
        manager_config: Dict = None
    ):
        """
        Initialize advanced memory system.

        Args:
            event_config: Configuration for event memory layer
            pattern_config: Configuration for pattern memory layer
            personality_config: Configuration for personality memory layer
            searchhd_config: Configuration for SearcHD personality engine
            manager_config: Configuration for memory manager; overrides the
                defaults below key-by-key.
        """
        # Initialize hierarchical memory
        self.hierarchy = MemoryHierarchy(
            event_config=event_config,
            pattern_config=pattern_config,
            personality_config=personality_config,
            searchhd_config=searchhd_config
        )

        # Initialize memory manager
        manager_defaults = {
            'num_physical_locations': 1000,
            'target_storage_efficiency': 0.05,
            'max_redundancy_level': 3,
            'load_balance_threshold': 0.8
        }
        if manager_config:
            manager_defaults.update(manager_config)

        # BUGFIX: MemoryManager's signature takes `hdc_ops` as its first
        # parameter and (in the original form) no `hierarchy` keyword, so
        # passing hierarchy=self.hierarchy raised TypeError at construction.
        # The manager builds/owns its own MemoryHierarchy internally.
        self.memory_manager = MemoryManager(
            hdc_ops=None,
            **manager_defaults
        )

        # System-wide statistics
        self.system_stats = {
            'total_operations': 0,
            'successful_operations': 0,
            'failed_operations': 0,
            'average_response_time_ms': 0.0,
            'personality_types_created': 0,
            'integrity_checks_performed': 0,
            'consistency_violations': 0
        }

        # Integrity and consistency tracking
        self.integrity_violations = []
        self.consistency_checks = defaultdict(int)

        # Distributed storage support
        self.distributed_nodes = {}
        self.replication_factor = 2

        logger.info("Advanced memory system initialized")
    
    def store_historical_figure(
        self, 
        figure_name: str, 
        personality_data: np.ndarray,
        events: List[Tuple[np.ndarray, np.ndarray]] = None,
        patterns: List[Tuple[np.ndarray, np.ndarray]] = None
    ) -> Dict:
        """
        Store a complete historical figure with personality, events, and patterns.

        Args:
            figure_name: Name of the historical figure
            personality_data: Core personality trait data
            events: List of (address, data) tuples for historical events
            patterns: List of (address, data) tuples for behavioral patterns

        Returns:
            Dictionary with storage results and assigned IDs
        """
        t0 = time.time()

        try:
            self.system_stats['total_operations'] += 1

            # Register the figure as a new personality type.
            personality_id = self.hierarchy.add_personality_type(figure_name)
            if personality_id == -1:
                raise ValueError(f"Failed to create personality type for {figure_name}")

            # Persist the core personality trait vector.
            trait_ok = self.hierarchy.store_personality_trait(
                trait_data=personality_data,
                personality_id=personality_id,
                trait_name=f"{figure_name}_core_personality",
                stability=1.0
            )

            # Persist any historical events tied to this figure.
            event_flags = [
                self.hierarchy.store_event(
                    address=event_addr,
                    data=event_data,
                    timestamp=time.time(),
                    context={'figure': figure_name, 'personality_id': personality_id}
                )
                for event_addr, event_data in (events or [])
            ]

            # Persist any behavioral patterns tied to this figure.
            pattern_flags = [
                self.hierarchy.store_pattern(
                    address=pattern_addr,
                    data=pattern_data,
                    frequency=1,
                    associations=None
                )
                for pattern_addr, pattern_data in (patterns or [])
            ]

            # Verify the stored personality is retrievable.
            integrity_ok = self._validate_storage_integrity(personality_id)

            elapsed_ms = (time.time() - t0) * 1000

            # Everything must have succeeded for the operation to count.
            stored_everything = trait_ok and all(event_flags) and all(pattern_flags)
            if stored_everything:
                self.system_stats['successful_operations'] += 1
                self.system_stats['personality_types_created'] += 1
            else:
                self.system_stats['failed_operations'] += 1

            self._update_response_time(elapsed_ms)

            outcome = {
                'success': stored_everything,
                'personality_id': personality_id,
                'figure_name': figure_name,
                'events_stored': sum(event_flags),
                'patterns_stored': sum(pattern_flags),
                'response_time_ms': elapsed_ms,
                'integrity_valid': integrity_ok
            }

            logger.info(f"Historical figure {figure_name} stored with ID {personality_id} "
                       f"in {elapsed_ms:.2f}ms")

            return outcome

        except Exception as e:
            self.system_stats['failed_operations'] += 1
            logger.error(f"Failed to store historical figure {figure_name}: {e}")
            return {
                'success': False,
                'error': str(e),
                'response_time_ms': (time.time() - t0) * 1000
            }
    
    def retrieve_historical_figure(
        self, 
        query: np.ndarray, 
        figure_name: str = None,
        include_events: bool = True,
        include_patterns: bool = True,
        max_results: int = 5
    ) -> Dict:
        """
        Retrieve historical figure information based on query.

        Args:
            query: Query vector for retrieval
            figure_name: Specific figure name to retrieve
            include_events: Whether to include related events
            include_patterns: Whether to include behavioral patterns
            max_results: Maximum number of results to return

        Returns:
            Dictionary with retrieval results
        """
        t0 = time.time()

        try:
            self.system_stats['total_operations'] += 1

            # Query the personality layer first.
            matches = self.hierarchy.retrieve_from_layer(
                MemoryLayer.PERSONALITY,
                query
            )

            if not matches:
                self.system_stats['failed_operations'] += 1
                return {
                    'success': False,
                    'message': 'No personality matches found',
                    'response_time_ms': (time.time() - t0) * 1000
                }

            # Assemble per-figure entries, capped at max_results.
            figures = []
            for match in matches[:max_results]:
                entry = {
                    'personality_id': match.class_id,
                    'confidence': match.confidence,
                    'similarity': match.hamming_similarity,
                    'metadata': match.metadata
                }

                # Optionally attach related events for this query.
                if include_events:
                    related_events = self.hierarchy.retrieve_from_layer(MemoryLayer.EVENT, query)
                    entry['events'] = related_events if related_events is not None else []

                # Optionally attach related behavioral patterns.
                if include_patterns:
                    related_patterns = self.hierarchy.retrieve_from_layer(MemoryLayer.PATTERN, query)
                    entry['patterns'] = related_patterns if related_patterns is not None else []

                figures.append(entry)

            elapsed_ms = (time.time() - t0) * 1000

            # Ensure <100ms retrieval requirement
            if elapsed_ms > 100:
                logger.warning(f"Retrieval time {elapsed_ms:.2f}ms exceeds 100ms target")

            self.system_stats['successful_operations'] += 1
            self._update_response_time(elapsed_ms)

            outcome = {
                'success': True,
                'figures': figures,
                'total_found': len(figures),
                'response_time_ms': elapsed_ms,
                'within_target_time': elapsed_ms <= 100
            }

            logger.debug(f"Retrieved {len(figures)} historical figures in {elapsed_ms:.2f}ms")

            return outcome

        except Exception as e:
            self.system_stats['failed_operations'] += 1
            logger.error(f"Failed to retrieve historical figure: {e}")
            return {
                'success': False,
                'error': str(e),
                'response_time_ms': (time.time() - t0) * 1000
            }
    
    def perform_memory_consolidation(self) -> Dict:
        """
        Perform system-wide memory consolidation.

        Runs hierarchy consolidation, then memory-manager garbage
        collection, then a full integrity check.

        Returns:
            Consolidation results and statistics
        """
        t0 = time.time()

        try:
            logger.info("Starting system-wide memory consolidation")

            # Step 1: consolidate the hierarchical memory layers.
            self.hierarchy.consolidate_memories()

            # Step 2: reclaim stale allocations in the memory manager.
            self.memory_manager.perform_garbage_collection()

            # Step 3: validate overall system integrity.
            integrity = self._perform_integrity_check()

            elapsed_ms = (time.time() - t0) * 1000

            report = {
                'success': True,
                'consolidation_time_ms': elapsed_ms,
                'integrity_check': integrity,
                'memory_stats': self.get_system_stats()
            }

            logger.info(f"Memory consolidation completed in {elapsed_ms:.2f}ms")
            return report

        except Exception as e:
            logger.error(f"Memory consolidation failed: {e}")
            return {
                'success': False,
                'error': str(e),
                'consolidation_time_ms': (time.time() - t0) * 1000
            }
    
    def _validate_storage_integrity(self, personality_id: int) -> bool:
        """Validate the integrity of stored data."""
        try:
            self.system_stats['integrity_checks_performed'] += 1
            
            # Check if personality exists
            hypervector = self.hierarchy.personality_layer.get_class_hypervector(personality_id)
            if hypervector is None:
                self.integrity_violations.append(f"Missing hypervector for personality {personality_id}")
                self.system_stats['consistency_violations'] += 1
                return False
            
            # Additional integrity checks can be added here
            return True
            
        except Exception as e:
            logger.error(f"Integrity validation failed: {e}")
            self.system_stats['consistency_violations'] += 1
            return False
    
    def _perform_integrity_check(self) -> Dict:
        """Perform comprehensive system integrity check."""
        try:
            check_results = {
                'personality_layer_intact': True,
                'event_layer_intact': True,
                'pattern_layer_intact': True,
                'cross_layer_consistency': True,
                'violations_found': len(self.integrity_violations),
                'total_checks': self.system_stats['integrity_checks_performed']
            }
            
            # Check hierarchy stats for anomalies
            hierarchy_stats = self.hierarchy.get_hierarchy_stats()
            
            if hierarchy_stats['total_personalities'] == 0:
                check_results['personality_layer_intact'] = False
            
            # Additional consistency checks can be added here
            
            return check_results
            
        except Exception as e:
            logger.error(f"Integrity check failed: {e}")
            return {'error': str(e)}
    
    def _update_response_time(self, response_time: float):
        """Update average response time statistics."""
        current_avg = self.system_stats['average_response_time_ms']
        total_ops = self.system_stats['total_operations']
        
        if current_avg == 0:
            self.system_stats['average_response_time_ms'] = response_time
        else:
            # Calculate new average
            new_avg = ((current_avg * (total_ops - 1)) + response_time) / total_ops
            self.system_stats['average_response_time_ms'] = new_avg
    
    def add_distributed_node(self, node_id: str, node_config: Dict) -> bool:
        """
        Register a distributed storage node with this system.

        Args:
            node_id: Unique identifier for the node; an existing entry
                with the same id is overwritten.
            node_config: Arbitrary configuration mapping for the node.

        Returns:
            True when the node was registered, False on error.
        """
        try:
            self.distributed_nodes[node_id] = {
                'config': node_config,
                'status': 'active',
                'added_time': time.time(),
            }
            logger.info(f"Added distributed node {node_id}")
        except Exception as e:
            logger.error(f"Failed to add distributed node {node_id}: {e}")
            return False
        return True
    
    def get_system_stats(self) -> Dict:
        """
        Assemble a comprehensive snapshot of system statistics.

        Returns:
            Dict combining internal operation counters, hierarchy and
            memory-manager statistics, node/violation counts, and derived
            performance indicators (target-met flag, success rate).
        """
        hierarchy_stats = self.hierarchy.get_hierarchy_stats()
        manager_stats = self.memory_manager.get_manager_stats()
        stats = self.system_stats

        # max(..., 1) guards against division by zero before any
        # operation has been recorded.
        success_rate = stats['successful_operations'] / max(stats['total_operations'], 1)

        return {
            'system_stats': stats.copy(),
            'hierarchy_stats': hierarchy_stats,
            'memory_manager_stats': manager_stats,
            'distributed_nodes': len(self.distributed_nodes),
            'integrity_violations': len(self.integrity_violations),
            'total_memory_usage_mb': hierarchy_stats['total_memory_mb'],
            'performance_target_met': stats['average_response_time_ms'] <= 100,
            'success_rate': success_rate,
        }
    
    def clear_system(self):
        """
        Wipe all stored memory and reset every statistic.

        Clears the hierarchy layers, resets the operation counters to
        zero, and drops all recorded violations, consistency checks and
        distributed nodes.
        """
        self.hierarchy.clear_all_layers()

        # Fresh counters with the same keys the system tracks elsewhere.
        self.system_stats = {
            'total_operations': 0,
            'successful_operations': 0,
            'failed_operations': 0,
            'average_response_time_ms': 0.0,
            'personality_types_created': 0,
            'integrity_checks_performed': 0,
            'consistency_violations': 0
        }

        for tracker in (self.integrity_violations,
                        self.consistency_checks,
                        self.distributed_nodes):
            tracker.clear()

        logger.info("Advanced memory system cleared")
    
    def shutdown(self):
        """Shutdown the advanced memory system.

        Delegates cleanup to the underlying memory manager and logs the
        shutdown; stored data is not explicitly cleared here.
        """
        # NOTE(review): the hierarchy has no shutdown hook called here —
        # presumably it holds no background resources; confirm.
        self.memory_manager.shutdown()
        logger.info("Advanced memory system shutdown")