"""
Unified Consciousness Engine for Historical Figure Simulation.

This module integrates Global Workspace Theory, Attention Schema Theory, and 
Integrated Information Theory into a unified consciousness system. It provides
real-time consciousness state tracking, cultural context integration, and
measurable consciousness metrics for authentic historical figure behavior.

Key Features:
- Unified consciousness integration across all theories
- Cultural consciousness embedding for historical periods  
- Dynamic consciousness state tracking and validation
- Real-time consciousness metrics computation
"""

import torch
import numpy as np
from typing import Dict, List, Optional, Tuple, Any, Union
from dataclasses import dataclass, field
import logging
from datetime import datetime
import time

from .models import (
    ConsciousnessState,
    GlobalWorkspaceState,
    AttentionState, 
    IITMetrics,
    ModalityState,
    ModalityType,
    AttentionMode,
    ConsciousnessLevel,
    ConsciousnessTransition
)
from .gwt import GlobalWorkspaceEngine, CognitiveMod
from .ast import AttentionSchemaEngine
from .iit import IITMetricsCalculator
from .fusion_engine import ConsciousnessFusionEngine, FusionStrategy


logger = logging.getLogger(__name__)


@dataclass
class ConsciousnessConfiguration:
    """
    Configuration parameters for the consciousness engine.

    Groups the dimensional, engine, temporal, cultural, fusion and
    performance settings consumed by ConsciousnessEngine. Call validate()
    after construction to confirm the parameters are usable.
    """
    # Dimensional parameters
    workspace_dimension: int = 1024
    attention_dimension: int = 512
    unified_dimension: int = 2048
    
    # Engine parameters
    num_cognitive_modules: int = 7
    integration_rate: float = 0.1
    consciousness_threshold: float = 0.3
    
    # Temporal parameters
    coherence_window: int = 10
    consistency_threshold: float = 0.8
    update_frequency: float = 10.0  # Hz
    
    # Cultural integration parameters  
    cultural_weight: float = 0.2
    historical_context_strength: float = 0.15
    personality_influence: float = 0.25
    
    # Fusion parameters
    fusion_dimension: int = 2048
    cultural_embedding_dim: int = 128
    fusion_strategy: FusionStrategy = FusionStrategy.ATTENTION_MODULATED
    
    # Performance parameters
    max_history_size: int = 100
    enable_real_time: bool = True
    enable_learning: bool = True
    
    def validate(self) -> bool:
        """
        Validate configuration parameters.

        Checks every dimensional/count parameter is positive and every
        rate/weight/threshold lies in [0, 1]. Previously several fields
        (fusion dims, module count, window/history sizes, update frequency,
        consistency threshold) were not checked at all, so invalid
        configurations could slip through.

        Returns:
            True when all parameters are valid.
        """
        dimensions_valid = (self.workspace_dimension > 0 and
                            self.attention_dimension > 0 and
                            self.unified_dimension > 0 and
                            self.fusion_dimension > 0 and
                            self.cultural_embedding_dim > 0)
        counts_valid = (self.num_cognitive_modules > 0 and
                        self.coherence_window > 0 and
                        self.max_history_size > 0 and
                        self.update_frequency > 0)
        ratios_valid = all(0 <= value <= 1.0 for value in (
            self.integration_rate,
            self.consciousness_threshold,
            self.consistency_threshold,
            self.cultural_weight,
            self.historical_context_strength,
            self.personality_influence,
        ))
        return dimensions_valid and counts_valid and ratios_valid


class ConsciousnessMetricsTracker:
    """
    Tracker for consciousness metrics and performance statistics.

    Maintains bounded, parallel per-cycle histories of performance and
    consciousness-quality metrics and summarizes them on demand.
    """
    
    def __init__(self, max_history: int = 1000):
        """
        Initialize metrics tracker.
        
        Args:
            max_history: Maximum number of entries to keep per metric series
        """
        self.max_history = max_history
        
        # Performance metrics (one entry per processing cycle)
        self.processing_times = []
        self.memory_usage = []
        self.consciousness_levels = []
        self.integration_scores = []
        
        # Quality metrics
        self.coherence_scores = []
        self.consistency_scores = []
        self.phi_values = []
        self.unity_indices = []
        
        # Temporal metrics
        self.timestamps = []
        self.state_transitions = []
        
        logger.debug("Consciousness metrics tracker initialized")
    
    def record_processing_cycle(self, processing_time: float, 
                               consciousness_state: "ConsciousnessState",
                               memory_mb: float = 0.0):
        """
        Record metrics from a processing cycle.
        
        Args:
            processing_time: Time taken for the processing cycle (seconds)
            consciousness_state: Current consciousness state  
            memory_mb: Memory usage in MB
        """
        timestamp = time.time()
        
        # Performance metrics
        self.processing_times.append(processing_time)
        self.memory_usage.append(memory_mb)
        self.timestamps.append(timestamp)
        
        # Consciousness metrics
        self.consciousness_levels.append(consciousness_state.consciousness_level.value)
        self.integration_scores.append(consciousness_state.global_workspace.integration_level)
        self.coherence_scores.append(consciousness_state.iit_metrics.temporal_coherence)
        self.consistency_scores.append(consciousness_state.iit_metrics.self_consistency)
        self.phi_values.append(consciousness_state.iit_metrics.phi)
        self.unity_indices.append(consciousness_state.global_workspace.unity_index)
        
        # Limit history size
        self._trim_histories()
    
    def _trim_histories(self):
        """Trim all history series to at most max_history entries."""
        histories = (
            self.processing_times, self.memory_usage, self.timestamps,
            self.consciousness_levels, self.integration_scores, 
            self.coherence_scores, self.consistency_scores,
            self.phi_values, self.unity_indices,
        )
        
        for history in histories:
            overflow = len(history) - self.max_history
            if overflow > 0:
                # Drop the oldest entries with one slice deletion instead of
                # repeated O(n) pop(0) calls (the old loop was O(n^2)).
                del history[:overflow]
    
    def get_performance_summary(self) -> Dict[str, float]:
        """
        Get performance summary statistics.
        
        Returns:
            Dictionary of performance metrics; each key group is present
            only when its underlying series has data
        """
        summary = {}
        
        if self.processing_times:
            summary['avg_processing_time'] = np.mean(self.processing_times)
            summary['max_processing_time'] = np.max(self.processing_times)
            summary['processing_time_std'] = np.std(self.processing_times)
            
            # Guard against division by zero when all recorded times are 0
            if len(self.processing_times) > 1 and summary['avg_processing_time'] > 0:
                summary['processing_hz'] = 1.0 / summary['avg_processing_time']
        
        if self.memory_usage:
            summary['avg_memory_mb'] = np.mean(self.memory_usage)
            summary['max_memory_mb'] = np.max(self.memory_usage)
        
        if self.integration_scores:
            summary['avg_integration'] = np.mean(self.integration_scores)
            summary['integration_stability'] = 1.0 - np.std(self.integration_scores)
        
        if self.coherence_scores:
            summary['avg_coherence'] = np.mean(self.coherence_scores) 
            summary['coherence_stability'] = 1.0 - np.std(self.coherence_scores)
        
        if self.phi_values:
            summary['avg_phi'] = np.mean(self.phi_values)
            summary['phi_stability'] = 1.0 - np.std(self.phi_values)
        
        return summary


class ConsciousnessEngine:
    """
    Unified consciousness engine integrating GWT, AST, and IIT.
    
    This engine provides the main interface for consciousness simulation,
    integrating all theoretical frameworks into a cohesive system for
    historical figure simulation with measurable consciousness metrics.
    """
    
    def __init__(self, config: Optional[ConsciousnessConfiguration] = None,
                 cultural_context: Optional[Dict[str, Any]] = None):
        """
        Initialize the unified consciousness engine.
        
        Constructs the four theory engines (GWT, AST, IIT metrics, fusion),
        the cross-theory integration networks, and all tracking structures.

        Args:
            config: Configuration parameters; defaults are used when None
            cultural_context: Optional cultural context information (e.g.
                'historical_period', 'cultural_values' keys — see
                _create_cultural_embedding / _calculate_historical_activation)

        Raises:
            ValueError: If the supplied configuration fails validate()
        """
        self.config = config if config is not None else ConsciousnessConfiguration()
        if not self.config.validate():
            raise ValueError("Invalid consciousness configuration parameters")
        
        self.cultural_context = cultural_context or {}
        
        # Initialize theoretical framework engines
        self.gwt_engine = GlobalWorkspaceEngine(
            workspace_dimension=self.config.workspace_dimension,
            num_modules=self.config.num_cognitive_modules,
            integration_threshold=self.config.consciousness_threshold
        )
        
        self.ast_engine = AttentionSchemaEngine(
            attention_dimension=self.config.attention_dimension,
            num_modalities=self.config.num_cognitive_modules,
            self_monitoring_rate=self.config.integration_rate
        )
        
        self.iit_calculator = IITMetricsCalculator(
            system_dimension=self.config.unified_dimension,
            coherence_window=self.config.coherence_window,
            consistency_threshold=self.config.consistency_threshold
        )
        
        self.fusion_engine = ConsciousnessFusionEngine(
            fusion_dimension=self.config.fusion_dimension,
            cultural_embedding_dim=self.config.cultural_embedding_dim,
            fusion_strategy=self.config.fusion_strategy
        )
        
        # Unified consciousness state; figure identity starts empty
        # (presumably assigned later by the caller — TODO confirm) and the
        # consciousness vector starts as all zeros
        self.current_consciousness_state = ConsciousnessState(
            figure_identity="",
            unified_consciousness=torch.zeros(self.config.unified_dimension)
        )
        
        # Integration networks projecting GWT/AST states into the unified
        # dimension (used by _integrate_gwt_ast)
        self.gwt_to_unified = self._build_integration_network(
            self.config.workspace_dimension, self.config.unified_dimension
        )
        self.ast_to_unified = self._build_integration_network(
            self.config.attention_dimension, self.config.unified_dimension  
        )
        
        # Cultural integration network (used by _apply_cultural_context)
        self.cultural_integration_network = self._build_cultural_network()
        
        # Performance tracking
        self.metrics_tracker = ConsciousnessMetricsTracker()
        
        # State tracking (bounded by config.max_history_size in the cycle)
        self.consciousness_history = []
        self.transition_history = []
        
        # Real-time processing
        self.last_update_time = time.time()
        self.processing_enabled = True
        
        logger.info(f"Consciousness Engine initialized with config: {self.config}")
    
    def _build_integration_network(self, input_dim: int, output_dim: int) -> torch.nn.Module:
        """
        Build neural network for cross-theory integration.
        
        Args:
            input_dim: Input dimension
            output_dim: Output dimension
            
        Returns:
            Integration network
        """
        return torch.nn.Sequential(
            torch.nn.Linear(input_dim, input_dim // 2),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.1),
            torch.nn.Linear(input_dim // 2, output_dim // 2),
            torch.nn.ReLU(),
            torch.nn.Linear(output_dim // 2, output_dim),
            torch.nn.Tanh()
        )
    
    def _build_cultural_network(self) -> torch.nn.Module:
        """
        Build network for cultural context integration.
        
        Returns:
            Cultural integration network
        """
        return torch.nn.Sequential(
            torch.nn.Linear(self.config.unified_dimension + 64, 256),  # +64 for cultural embeddings
            torch.nn.ReLU(),
            torch.nn.Linear(256, self.config.unified_dimension),
            torch.nn.Tanh()
        )
    
    def process_consciousness_cycle(self, 
                                   sensory_inputs: Dict[ModalityType, torch.Tensor],
                                   personality_context: Optional[Dict[str, float]] = None,
                                   task_context: Optional[Dict[str, Any]] = None,
                                   voluntary_attention: Optional[torch.Tensor] = None) -> ConsciousnessState:
        """
        Process a complete consciousness cycle integrating all theories.
        
        Pipeline: (1) GWT broadcast, (2) AST attention update, (3) multi-modal
        fusion (which also applies cultural context), (4) IIT metrics on the
        fused vector, (5-7) consciousness-level assessment and state assembly,
        (8-10) transition detection, history and performance bookkeeping.

        Args:
            sensory_inputs: Multi-modal sensory inputs
            personality_context: Personality influence factors (stored on the
                resulting state; not otherwise consumed here)
            task_context: Current task context
            voluntary_attention: Optional voluntary attention control
            
        Returns:
            Updated unified consciousness state

        Raises:
            Exception: Any error from the underlying engines is logged and
                re-raised unchanged.
        """
        cycle_start_time = time.time()
        
        try:
            # 1. Process through Global Workspace Theory
            gwt_inputs = self._prepare_gwt_inputs(sensory_inputs)
            attention_weights = self._extract_attention_weights(voluntary_attention)
            
            gwt_state = self.gwt_engine.process_multimodal_input(gwt_inputs, attention_weights)
            
            # 2. Process through Attention Schema Theory  
            ast_state = self.ast_engine.update_attention_state(
                sensory_inputs, task_context, voluntary_attention
            )
            
            # 3. Use fusion engine for multi-modal consciousness integration
            modality_activations = self._extract_modality_activations(sensory_inputs)
            
            # Extract individual consciousness modalities (None when absent)
            visual_consciousness = modality_activations.get(ModalityType.VISUAL, None)
            auditory_consciousness = modality_activations.get(ModalityType.AUDITORY, None)
            textual_consciousness = modality_activations.get(ModalityType.TEXTUAL, None)
            emotional_consciousness = modality_activations.get(ModalityType.EMOTIONAL, None)
            memory_consciousness = modality_activations.get(ModalityType.MEMORY, None)
            reasoning_consciousness = modality_activations.get(ModalityType.REASONING, None)
            
            # Temporal context = previous cycle's unified vector; the hasattr
            # guard looks defensive only, since __init__ constructs the state
            # with unified_consciousness set — TODO confirm
            temporal_context = (self.current_consciousness_state.unified_consciousness 
                              if hasattr(self.current_consciousness_state, 'unified_consciousness') 
                              else None)
            
            # Perform multi-modal consciousness fusion
            unified_consciousness = self.fusion_engine.fuse_consciousness(
                visual_consciousness=visual_consciousness,
                auditory_consciousness=auditory_consciousness,
                textual_consciousness=textual_consciousness,
                emotional_consciousness=emotional_consciousness,
                memory_consciousness=memory_consciousness,
                reasoning_consciousness=reasoning_consciousness,
                cultural_context=self.cultural_context,
                attention_state=ast_state,
                temporal_context=temporal_context
            )
            
            # 4. Calculate IIT metrics on the fused consciousness
            iit_metrics = self.iit_calculator.update_metrics(
                unified_consciousness, 
                modality_activations,
                self._extract_self_model_state(ast_state)
            )
            
            # 5. The fusion engine already handles cultural integration, so
            # no separate _apply_cultural_context pass is performed here
            culturally_integrated_consciousness = unified_consciousness
            
            # 6. Determine consciousness level
            consciousness_level = self.iit_calculator.assess_consciousness_level(iit_metrics)
            consciousness_strength = self._calculate_consciousness_strength(iit_metrics)
            
            # 7. Create unified consciousness state
            new_consciousness_state = ConsciousnessState(
                timestamp=datetime.now(),
                figure_identity=self.current_consciousness_state.figure_identity,
                consciousness_level=consciousness_level,
                consciousness_strength=consciousness_strength,
                global_workspace=gwt_state,
                attention_state=ast_state,
                iit_metrics=iit_metrics,
                modality_states=self._create_modality_states(sensory_inputs),
                unified_consciousness=culturally_integrated_consciousness,
                fusion_weights=self._calculate_fusion_weights(sensory_inputs),
                self_awareness=ast_state.metacognitive_awareness,
                introspective_access=ast_state.self_monitoring_strength,
                reflective_consciousness=self._calculate_reflective_consciousness(ast_state, iit_metrics),
                personality_influence=personality_context or {},
                historical_context_activation=self._calculate_historical_activation(),
                cultural_framework_strength=self.config.cultural_weight
            )
            
            # 8. Track transition if significant change occurred
            if self._is_significant_transition(new_consciousness_state):
                transition = self._create_transition(
                    self.current_consciousness_state, new_consciousness_state
                )
                self.transition_history.append(transition)
            
            # 9. Update state and history (bounded by config.max_history_size)
            self.current_consciousness_state = new_consciousness_state
            self.current_consciousness_state.update_history(self.config.max_history_size)
            
            self.consciousness_history.append(new_consciousness_state)
            if len(self.consciousness_history) > self.config.max_history_size:
                self.consciousness_history.pop(0)
            
            # 10. Record performance metrics
            processing_time = time.time() - cycle_start_time
            self.current_consciousness_state.processing_latency = processing_time
            
            self.metrics_tracker.record_processing_cycle(
                processing_time, new_consciousness_state
            )
            
            self.last_update_time = time.time()
            
            return new_consciousness_state
            
        except Exception as e:
            logger.error(f"Error in consciousness processing cycle: {e}")
            raise
    
    def _prepare_gwt_inputs(self, sensory_inputs: Dict[ModalityType, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Prepare inputs for Global Workspace processing.
        
        Args:
            sensory_inputs: Raw sensory inputs
            
        Returns:
            Dictionary of inputs for GWT modules
        """
        gwt_inputs = {}
        
        for modality_type, input_tensor in sensory_inputs.items():
            module_name = f"{modality_type.value}_module"
            
            # Adapt input dimension to workspace dimension
            if input_tensor.shape[-1] != self.config.workspace_dimension:
                if input_tensor.shape[-1] > self.config.workspace_dimension:
                    adapted_input = input_tensor[..., :self.config.workspace_dimension]
                else:
                    padding = torch.zeros(*input_tensor.shape[:-1], 
                                        self.config.workspace_dimension - input_tensor.shape[-1])
                    adapted_input = torch.cat([input_tensor, padding], dim=-1)
            else:
                adapted_input = input_tensor
            
            gwt_inputs[module_name] = adapted_input
        
        return gwt_inputs
    
    def _extract_attention_weights(self, voluntary_attention: Optional[torch.Tensor]) -> Dict[str, float]:
        """
        Extract attention weights for GWT modules.
        
        Args:
            voluntary_attention: Voluntary attention control signal
            
        Returns:
            Dictionary of attention weights
        """
        if voluntary_attention is None:
            # Equal attention allocation
            num_modules = self.config.num_cognitive_modules
            return {f"module_{i}": 1.0 / num_modules for i in range(num_modules)}
        
        # Convert attention tensor to module weights
        attention_per_module = voluntary_attention.shape[-1] // self.config.num_cognitive_modules
        weights = {}
        
        start_idx = 0
        for i in range(self.config.num_cognitive_modules):
            end_idx = min(start_idx + attention_per_module, voluntary_attention.shape[-1])
            module_attention = voluntary_attention[start_idx:end_idx]
            weight = torch.norm(module_attention).item()
            weights[f"module_{i}"] = max(0.1, min(2.0, weight))  # Clamp weights
            start_idx = end_idx
        
        return weights
    
    def _integrate_gwt_ast(self, gwt_state: GlobalWorkspaceState, 
                          ast_state: AttentionState) -> torch.Tensor:
        """
        Integrate GWT and AST into unified consciousness.
        
        Args:
            gwt_state: Global workspace state
            ast_state: Attention schema state
            
        Returns:
            Unified consciousness tensor
        """
        # Transform GWT and AST states to unified dimension
        with torch.no_grad():
            gwt_unified = self.gwt_to_unified(gwt_state.workspace_content)
            ast_unified = self.ast_to_unified(ast_state.attention_focus)
        
        # Weighted integration
        gwt_weight = 0.6
        ast_weight = 0.4
        
        # Apply attention modulation
        attention_modulation = torch.sigmoid(ast_state.attention_focus.mean()) 
        gwt_weight *= attention_modulation.item()
        ast_weight *= (2.0 - attention_modulation.item())
        
        # Normalize weights
        total_weight = gwt_weight + ast_weight
        gwt_weight /= total_weight
        ast_weight /= total_weight
        
        # Integrate
        unified_consciousness = gwt_weight * gwt_unified + ast_weight * ast_unified
        
        # Apply integration function: C(t) = GWT(I(t)) ∘ AST(A(t))
        # Using element-wise multiplication as composition operator
        composition_factor = torch.tanh(gwt_unified) * torch.sigmoid(ast_unified)
        
        unified_consciousness = unified_consciousness + 0.2 * composition_factor
        
        return unified_consciousness
    
    def _extract_modality_activations(self, sensory_inputs: Dict[ModalityType, torch.Tensor]) -> Dict[ModalityType, torch.Tensor]:
        """
        Extract modality activations for IIT calculation.
        
        Currently an identity passthrough: the raw sensory inputs are used
        directly as activations. Kept as a seam so modality-specific
        preprocessing can be added later without touching callers.

        Args:
            sensory_inputs: Raw sensory inputs
            
        Returns:
            Dictionary of modality activations — the SAME dict object that
            was passed in, not a copy; callers must not mutate it
        """
        return sensory_inputs  # Direct mapping for now
    
    def _extract_self_model_state(self, ast_state: AttentionState) -> torch.Tensor:
        """
        Extract self-model state from attention state.
        
        Args:
            ast_state: Attention schema state
            
        Returns:
            Self-model state tensor
        """
        # Self-model as weighted combination of attention components
        self_model = (ast_state.attention_focus * ast_state.self_monitoring_strength + 
                     torch.randn_like(ast_state.attention_focus) * 0.1)  # Add introspective noise
        
        return self_model
    
    def _apply_cultural_context(self, unified_consciousness: torch.Tensor,
                               personality_context: Optional[Dict[str, float]] = None) -> torch.Tensor:
        """
        Apply cultural context to consciousness.
        
        Blends the unified consciousness vector with the output of the
        cultural integration network, weighted by config.cultural_weight.

        NOTE(review): personality_context only gates the early return; its
        values are never used in the blend below — confirm whether that is
        intentional.

        Args:
            unified_consciousness: Base unified consciousness
            personality_context: Personality context factors
            
        Returns:
            Culturally contextualized consciousness
        """
        # No context of either kind: nothing to blend in.
        if not self.cultural_context and not personality_context:
            return unified_consciousness
        
        # Create cultural embedding (64-dim, matching the network's +64 input)
        cultural_embedding = self._create_cultural_embedding()
        
        # Combine consciousness and cultural context
        combined_input = torch.cat([unified_consciousness, cultural_embedding])
        
        # Apply cultural integration (inference only)
        with torch.no_grad():
            culturally_integrated = self.cultural_integration_network(combined_input)
        
        # Blend with original consciousness, keeping (1 - weight) of the original
        blend_factor = self.config.cultural_weight
        final_consciousness = ((1 - blend_factor) * unified_consciousness + 
                             blend_factor * culturally_integrated)
        
        return final_consciousness
    
    def _create_cultural_embedding(self) -> torch.Tensor:
        """
        Create cultural embedding vector from context.
        
        Returns:
            Cultural embedding tensor
        """
        embedding_dim = 64
        embedding = torch.zeros(embedding_dim)
        
        if 'historical_period' in self.cultural_context:
            period = self.cultural_context['historical_period']
            # Simple encoding - could be enhanced with learned embeddings
            period_hash = hash(str(period)) % embedding_dim
            embedding[period_hash] = 1.0
        
        if 'cultural_values' in self.cultural_context:
            values = self.cultural_context['cultural_values']
            for i, (value_name, strength) in enumerate(values.items()):
                if i < embedding_dim:
                    embedding[i] = strength
        
        return embedding
    
    def _calculate_consciousness_strength(self, iit_metrics: IITMetrics) -> float:
        """
        Calculate overall consciousness strength.
        
        Args:
            iit_metrics: IIT metrics
            
        Returns:
            Consciousness strength [0, 1]
        """
        phi_component = min(1.0, iit_metrics.phi / 10.0)
        coherence_component = abs(iit_metrics.temporal_coherence)
        consistency_component = iit_metrics.self_consistency
        richness_component = iit_metrics.phenomenological_richness
        
        # Weighted combination
        strength = (0.3 * phi_component +
                   0.25 * coherence_component + 
                   0.25 * consistency_component +
                   0.2 * richness_component)
        
        return max(0.0, min(1.0, strength))
    
    def _create_modality_states(self, sensory_inputs: Dict[ModalityType, torch.Tensor]) -> Dict[ModalityType, ModalityState]:
        """
        Create per-modality state objects from raw sensory inputs.
        
        Each input is truncated or zero-padded along its last dimension to
        the standard 256-dim modality size. Confidence is the sigmoid of
        the mean activation; processing depth is the activation norm scaled
        by the modality dimension.

        Args:
            sensory_inputs: Raw sensory inputs keyed by modality
            
        Returns:
            Dictionary of modality states keyed by modality type
        """
        modality_states = {}
        modality_dim = 256  # standard internal modality dimension
        
        for modality_type, input_tensor in sensory_inputs.items():
            current_dim = input_tensor.shape[-1]
            if current_dim > modality_dim:
                adapted_activation = input_tensor[..., :modality_dim]
            elif current_dim < modality_dim:
                # Zero-pad; match dtype/device of the input so torch.cat does
                # not fail for non-default dtypes or non-CPU tensors.
                padding = torch.zeros(*input_tensor.shape[:-1],
                                      modality_dim - current_dim,
                                      dtype=input_tensor.dtype,
                                      device=input_tensor.device)
                adapted_activation = torch.cat([input_tensor, padding], dim=-1)
            else:
                adapted_activation = input_tensor
            
            modality_states[modality_type] = ModalityState(
                modality_type=modality_type,
                activation=adapted_activation,
                confidence=torch.sigmoid(torch.mean(adapted_activation)).item(),
                processing_depth=torch.norm(adapted_activation).item() / modality_dim
            )
        
        return modality_states
    
    def _calculate_fusion_weights(self, sensory_inputs: Dict[ModalityType, torch.Tensor]) -> Dict[ModalityType, float]:
        """
        Calculate fusion weights for modalities.
        
        Args:
            sensory_inputs: Raw sensory inputs
            
        Returns:
            Dictionary of fusion weights
        """
        weights = {}
        total_strength = 0.0
        
        # Calculate strength for each modality
        for modality_type, input_tensor in sensory_inputs.items():
            strength = torch.norm(input_tensor).item()
            weights[modality_type] = strength
            total_strength += strength
        
        # Normalize weights
        if total_strength > 0:
            for modality_type in weights:
                weights[modality_type] /= total_strength
        else:
            # Equal weights if no input
            uniform_weight = 1.0 / len(weights) if weights else 0.0
            for modality_type in weights:
                weights[modality_type] = uniform_weight
        
        return weights
    
    def _calculate_reflective_consciousness(self, ast_state: AttentionState, 
                                         iit_metrics: IITMetrics) -> float:
        """
        Calculate reflective consciousness level.
        
        Args:
            ast_state: Attention state
            iit_metrics: IIT metrics
            
        Returns:
            Reflective consciousness level [0, 1]
        """
        metacognitive_component = ast_state.metacognitive_awareness
        self_monitoring_component = ast_state.self_monitoring_strength
        consistency_component = iit_metrics.self_consistency
        
        # Reflective consciousness as combination of meta-cognitive factors
        reflective_consciousness = (0.4 * metacognitive_component +
                                  0.3 * self_monitoring_component + 
                                  0.3 * consistency_component)
        
        return max(0.0, min(1.0, reflective_consciousness))
    
    def _calculate_historical_activation(self) -> float:
        """
        Calculate historical context activation level.
        
        Returns:
            Historical activation level [0, 1]
        """
        if not self.cultural_context:
            return 0.0
        
        activation = 0.0
        
        # Historical period factor
        if 'historical_period' in self.cultural_context:
            activation += 0.3
        
        # Cultural values factor
        if 'cultural_values' in self.cultural_context:
            values = self.cultural_context['cultural_values']
            activation += min(0.4, len(values) * 0.1)
        
        # Social norms factor
        if 'social_norms' in self.cultural_context:
            activation += 0.2
        
        # Language/communication style factor
        if 'communication_style' in self.cultural_context:
            activation += 0.1
        
        return min(1.0, activation * self.config.historical_context_strength)
    
    def _is_significant_transition(self, new_state: ConsciousnessState) -> bool:
        """
        Check if this represents a significant consciousness transition.
        
        Args:
            new_state: New consciousness state
            
        Returns:
            True if transition is significant
        """
        if not hasattr(self, 'current_consciousness_state'):
            return False
        
        current = self.current_consciousness_state
        
        # Check consciousness level change
        if new_state.consciousness_level != current.consciousness_level:
            return True
        
        # Check significant strength change
        strength_change = abs(new_state.consciousness_strength - current.consciousness_strength)
        if strength_change > 0.2:
            return True
        
        # Check significant attention mode change
        if new_state.attention_state.attention_mode != current.attention_state.attention_mode:
            return True
        
        # Check significant unity change
        unity_change = abs(new_state.global_workspace.unity_index - 
                          current.global_workspace.unity_index)
        if unity_change > 0.15:
            return True
        
        return False
    
    def _create_transition(self, from_state: ConsciousnessState, 
                          to_state: ConsciousnessState) -> ConsciousnessTransition:
        """
        Create consciousness transition object.
        
        The transition vector is the difference between the unified vectors
        of the two states; its L2 norm is the transition strength.

        Args:
            from_state: Previous consciousness state
            to_state: New consciousness state
            
        Returns:
            Consciousness transition record
        """
        # Calculate transition vector between the states' unified vectors
        from_vector = from_state.get_unified_vector()
        to_vector = to_state.get_unified_vector()
        
        transition_vector = to_vector - from_vector
        transition_strength = torch.norm(transition_vector).item()
        
        # Determine transition type
        transition_type = self._classify_transition_type(from_state, to_state)
        
        return ConsciousnessTransition(
            from_state=from_state,
            to_state=to_state,
            transition_vector=transition_vector,
            transition_strength=transition_strength,
            transition_type=transition_type,
            duration=0.0,  # Placeholder — expected to be set by the calling system
            smoothness=self._calculate_transition_smoothness(from_state, to_state)
        )
    
    def _classify_transition_type(self, from_state: ConsciousnessState, 
                                 to_state: ConsciousnessState) -> str:
        """
        Classify the type of consciousness transition.
        
        Args:
            from_state: Previous state
            to_state: New state
            
        Returns:
            Transition type string
        """
        # Level-based transitions
        if to_state.consciousness_level.value > from_state.consciousness_level.value:
            return "consciousness_enhancement"
        elif to_state.consciousness_level.value < from_state.consciousness_level.value:
            return "consciousness_reduction"
        
        # Attention-based transitions
        if (to_state.attention_state.attention_mode != from_state.attention_state.attention_mode):
            return "attention_shift"
        
        # Integration-based transitions
        integration_change = (to_state.global_workspace.integration_level - 
                            from_state.global_workspace.integration_level)
        if abs(integration_change) > 0.15:
            return "integration_change"
        
        return "gradual_evolution"
    
    def _calculate_transition_smoothness(self, from_state: ConsciousnessState,
                                       to_state: ConsciousnessState) -> float:
        """
        Calculate smoothness of the consciousness transition.
        
        Args:
            from_state: Previous state
            to_state: New state
            
        Returns:
            Smoothness value [0, 1]
        """
        from_vector = from_state.get_unified_vector()
        to_vector = to_state.get_unified_vector()
        
        # Smoothness as cosine similarity between states
        similarity = torch.cosine_similarity(
            from_vector.unsqueeze(0), to_vector.unsqueeze(0)
        ).item()
        
        # Convert to [0, 1] where 1 is smoothest
        smoothness = (similarity + 1.0) / 2.0
        
        return smoothness
    
    def set_figure_identity(self, figure_name: str, historical_period: str,
                           cultural_context: Dict[str, Any]):
        """
        Configure the engine with a historical figure's identity and context.

        Args:
            figure_name: Name of the historical figure.
            historical_period: Historical period/era the figure belongs to.
            cultural_context: Cultural context entries, merged into the
                engine's existing cultural context.
        """
        self.current_consciousness_state.figure_identity = figure_name

        # Merge the caller's context first, then pin the period key so a
        # 'historical_period' entry in cultural_context cannot override it.
        context = self.cultural_context
        context.update(cultural_context)
        context['historical_period'] = historical_period

        logger.info(f"Consciousness engine configured for {figure_name} in {historical_period}")
    
    def get_consciousness_report(self) -> Dict[str, Any]:
        """
        Generate comprehensive consciousness report.

        Returns:
            Dictionary with the figure's identity, per-engine core metrics,
            performance summary, historical/cultural context values, and a
            short temporal analysis of recent history.
        """
        state = self.current_consciousness_state

        # Identity and headline state values.
        report: Dict[str, Any] = {
            'figure_identity': state.figure_identity,
            'timestamp': state.timestamp.isoformat(),
            'consciousness_level': state.consciousness_level.value,
            'consciousness_strength': state.consciousness_strength,
        }

        # Core metrics gathered from each component engine.
        report['consciousness_signature'] = state.get_consciousness_signature()
        report['iit_signature'] = self.iit_calculator.get_consciousness_signature()
        report['gwt_metrics'] = self.gwt_engine.get_consciousness_metrics()
        report['ast_report'] = self.ast_engine.get_metacognitive_report()
        report['fusion_report'] = self.fusion_engine.get_fusion_report()

        # Performance metrics.
        report['performance_summary'] = self.metrics_tracker.get_performance_summary()

        # Historical / cultural context values carried on the state.
        report['historical_activation'] = state.historical_context_activation
        report['cultural_strength'] = state.cultural_framework_strength
        report['personality_influence'] = state.personality_influence

        # Temporal analysis: count of transitions in the last 10 entries.
        report['recent_transitions'] = (
            len(self.transition_history[-10:]) if self.transition_history else 0
        )
        report['consciousness_stability'] = self._calculate_consciousness_stability()

        return report
    
    def _calculate_consciousness_stability(self) -> float:
        """
        Calculate stability of consciousness over recent history.
        
        Returns:
            Stability measure [0, 1]
        """
        if len(self.consciousness_history) < 5:
            return 0.5  # Not enough data
        
        recent_states = self.consciousness_history[-10:]
        
        # Calculate stability based on consciousness strength variance
        strengths = [state.consciousness_strength for state in recent_states]
        stability = 1.0 - np.std(strengths) if len(strengths) > 1 else 1.0
        
        return max(0.0, min(1.0, stability))
    
    def reset_engine(self):
        """Reset the consciousness engine to initial state."""
        # Reset every component engine first, in the same order as before.
        for reset_component in (
            self.gwt_engine.reset_workspace,
            self.ast_engine.reset_engine,
            self.iit_calculator.reset_calculator,
            self.fusion_engine.reset_fusion_engine,
        ):
            reset_component()

        # Start from a fresh, zeroed unified consciousness state.
        self.current_consciousness_state = ConsciousnessState(
            unified_consciousness=torch.zeros(self.config.unified_dimension)
        )

        # Drop accumulated history and begin metric tracking anew.
        self.consciousness_history.clear()
        self.transition_history.clear()
        self.metrics_tracker = ConsciousnessMetricsTracker()

        logger.info("Consciousness Engine reset to initial state")
    
    def enable_real_time_processing(self, enable: bool = True):
        """
        Enable or disable real-time processing.

        Args:
            enable: Whether to enable real-time processing.
        """
        # NOTE(review): enable_real_time is not among the fields declared on
        # ConsciousnessConfiguration at the top of this file — it is attached
        # to the config dynamically here; confirm that is intended.
        self.config.enable_real_time = enable
        self.processing_enabled = enable

        logger.info(f"Real-time processing {'enabled' if enable else 'disabled'}")
    
    def get_current_state(self) -> ConsciousnessState:
        """
        Get current consciousness state.
        
        Returns:
            Current consciousness state
        """
        return self.current_consciousness_state