"""
Global Workspace Theory (GWT) Engine for Consciousness Integration.

This module implements Baars' Global Workspace Theory, where consciousness arises from
the global broadcasting of information across cognitive modules. The workspace serves
as a central hub where diverse information streams are integrated and broadcast to
create unified conscious experience.

Key Features:
- Information broadcast system across cognitive modules
- Consciousness integration: C(t) = GWT(I(t)) ∘ AST(A(t))
- Unity measurement: U = 1 - σ(C)/μ(C)
- Multi-modal consciousness fusion: C_unified = Fusion(C_visual, C_auditory, C_textual, C_emotional)
"""

import torch
import numpy as np
from typing import Dict, List, Optional, Tuple, Any, Union
from dataclasses import dataclass, field
import math
import logging

from .models import (
    GlobalWorkspaceState, 
    ModalityType, 
    ModalityState,
    ConsciousnessLevel
)

logger = logging.getLogger(__name__)


class CognitiveMod:
    """
    Represents a cognitive module that contributes to the global workspace.

    Each module processes specific types of information (visual, auditory, memory, etc.)
    and contributes to the global conscious experience through broadcasting.
    """

    # Maximum number of past activations / broadcast strengths retained.
    MAX_HISTORY = 100

    def __init__(self, module_name: str, modality_type: ModalityType,
                 dimension: int = 1024, activation_threshold: float = 0.1):
        """
        Initialize a cognitive module.

        Args:
            module_name: Unique identifier for the module
            modality_type: Type of sensory/cognitive modality this module handles
            dimension: Dimensionality of module representations
            activation_threshold: Minimum activation level for broadcasting
        """
        self.module_name = module_name
        self.modality_type = modality_type
        self.dimension = dimension
        self.activation_threshold = activation_threshold

        # Module state
        self.current_activation = torch.zeros(dimension)
        self.broadcast_strength = 0.0
        self.attention_weight = 0.0

        # Processing parameters
        self.processing_capacity = 1.0
        self.interference_resistance = 0.8
        self.adaptation_rate = 0.1

        # History tracking (bounded to MAX_HISTORY entries)
        self.activation_history: List[torch.Tensor] = []
        self.broadcast_history: List[float] = []

        logger.debug(f"Initialized cognitive module '{module_name}' for {modality_type.value}")

    def process_input(self, input_data: torch.Tensor, attention_weight: float = 1.0) -> torch.Tensor:
        """
        Process input data through this cognitive module.

        The input's last dimension is adapted to ``self.dimension`` by
        truncation or zero-padding, transformed according to the module's
        modality, and scaled by the attention weight. Module state and
        bounded histories are updated as a side effect.

        Args:
            input_data: Input tensor to process (last dim is adapted if needed)
            attention_weight: Attention weight from attention system

        Returns:
            Processed activation tensor
        """
        if input_data.shape[-1] != self.dimension:
            # Adapt input to module dimension
            if input_data.shape[-1] > self.dimension:
                input_data = input_data[..., :self.dimension]
            else:
                # Fix: pad with zeros matching the input's dtype and device;
                # a bare torch.zeros(...) would default to float32 on CPU and
                # make torch.cat fail for other dtypes/devices.
                padding = torch.zeros(
                    *input_data.shape[:-1],
                    self.dimension - input_data.shape[-1],
                    dtype=input_data.dtype,
                    device=input_data.device,
                )
                input_data = torch.cat([input_data, padding], dim=-1)

        # Apply module-specific processing
        processed = self._apply_module_transform(input_data)

        # Apply attention modulation
        self.attention_weight = attention_weight
        processed = processed * attention_weight

        # Update module state
        self.current_activation = processed
        self.broadcast_strength = self._compute_broadcast_strength()

        # Update bounded history
        self.activation_history.append(processed.clone())
        self.broadcast_history.append(self.broadcast_strength)
        if len(self.activation_history) > self.MAX_HISTORY:
            self.activation_history.pop(0)
            self.broadcast_history.pop(0)

        return processed

    def _apply_module_transform(self, input_data: torch.Tensor) -> torch.Tensor:
        """
        Apply module-specific transformation to input data.

        Each modality uses a distinct nonlinearity; most inject a small amount
        of Gaussian noise (making outputs stochastic by design).

        Args:
            input_data: Input tensor

        Returns:
            Transformed tensor
        """
        # Modality-specific processing
        if self.modality_type == ModalityType.VISUAL:
            # Visual processing with spatial attention
            return torch.tanh(input_data * 1.2 + torch.randn_like(input_data) * 0.05)

        elif self.modality_type == ModalityType.AUDITORY:
            # Auditory processing with temporal dynamics
            return torch.sigmoid(input_data * 0.8 + torch.randn_like(input_data) * 0.03)

        elif self.modality_type == ModalityType.TEXTUAL:
            # Textual processing with semantic enhancement
            return torch.relu(input_data * 1.1 + torch.randn_like(input_data) * 0.02)

        elif self.modality_type == ModalityType.EMOTIONAL:
            # Emotional processing with enhanced salience
            return torch.softmax(input_data * 2.0, dim=-1)

        elif self.modality_type == ModalityType.MEMORY:
            # Memory processing with decay and reinforcement
            decay_factor = 0.95
            return input_data * decay_factor + torch.randn_like(input_data) * 0.01

        elif self.modality_type == ModalityType.REASONING:
            # Reasoning processing with logical enhancement
            return torch.clamp(input_data * 0.9, -2.0, 2.0)

        else:
            # Default processing
            return torch.tanh(input_data)

    def _compute_broadcast_strength(self) -> float:
        """
        Compute the strength of this module's contribution to global broadcast.

        Strength is the activation norm (thresholded, capped by processing
        capacity), modulated by attention and by temporal consistency with the
        previous activation.

        Returns:
            Broadcast strength value [0, 1]
        """
        # Compute activation magnitude
        activation_magnitude = torch.norm(self.current_activation).item()

        # Sub-threshold activations do not broadcast at all
        if activation_magnitude < self.activation_threshold:
            return 0.0

        # Normalize by processing capacity
        strength = min(activation_magnitude / self.processing_capacity, 1.0)

        # Apply attention modulation
        strength *= self.attention_weight

        # Add temporal consistency bonus: stable activations broadcast up to
        # 20% more strongly (inconsistent ones are penalized symmetrically).
        if len(self.activation_history) > 1:
            prev_activation = self.activation_history[-2]
            consistency = torch.cosine_similarity(
                self.current_activation.unsqueeze(0),
                prev_activation.unsqueeze(0)
            ).item()
            strength *= (1.0 + consistency * 0.2)

        return max(0.0, min(strength, 1.0))

    def get_contribution(self) -> Tuple[torch.Tensor, float]:
        """
        Get this module's contribution to the global workspace.

        Returns:
            Tuple of (activation_vector, broadcast_strength). The activation
            is cloned so callers cannot mutate module state.
        """
        return self.current_activation.clone(), self.broadcast_strength

    def reset(self):
        """Reset module to initial state (zero activation, cleared histories)."""
        self.current_activation.zero_()
        self.broadcast_strength = 0.0
        self.attention_weight = 0.0
        self.activation_history.clear()
        self.broadcast_history.clear()

class GlobalWorkspaceEngine:
    """
    Global Workspace Theory engine for consciousness integration.

    This engine implements the core mechanisms of Global Workspace Theory:
    1. Information broadcasting from cognitive modules
    2. Competition and coalition formation
    3. Global integration and unity measurement
    4. Multi-modal consciousness fusion
    """

    # Maximum number of workspace/metric history entries retained.
    MAX_HISTORY = 100

    def __init__(self, workspace_dimension: int = 1024, num_modules: int = 8,
                 integration_threshold: float = 0.3, unity_alpha: float = 0.1):
        """
        Initialize the Global Workspace Engine.

        Args:
            workspace_dimension: Dimension of the global workspace
            num_modules: Number of cognitive modules requested. This is capped
                at the number of supported modalities; after initialization
                ``self.num_modules`` reflects the actual module count.
            integration_threshold: Threshold for information integration
            unity_alpha: Alpha parameter for unity calculation
        """
        self.workspace_dimension = workspace_dimension
        self.num_modules = num_modules
        self.integration_threshold = integration_threshold
        self.unity_alpha = unity_alpha

        # Initialize cognitive modules (one per supported modality, capped)
        self.cognitive_modules = self._initialize_modules()

        # Fix: _initialize_modules only defines modules for the modalities it
        # knows about (7), which can be fewer than requested (default 8).
        # Keep num_modules in sync with reality so ratios that use it (e.g.
        # the activation bonus in _calculate_overall_broadcast_strength) are
        # not diluted by phantom modules.
        if len(self.cognitive_modules) != self.num_modules:
            logger.warning(
                "Requested %d cognitive modules but only %d modalities are "
                "available; using %d modules",
                self.num_modules, len(self.cognitive_modules),
                len(self.cognitive_modules),
            )
            self.num_modules = len(self.cognitive_modules)

        # Workspace state
        self.workspace_state = GlobalWorkspaceState(
            workspace_content=torch.zeros(workspace_dimension)
        )

        # Integration parameters
        self.coalition_threshold = 0.5
        self.broadcast_decay = 0.95
        self.cross_modal_weight = 0.3

        # Performance tracking (bounded to MAX_HISTORY entries)
        self.integration_history: List[float] = []
        self.unity_history: List[float] = []
        self.broadcast_events: List[Any] = []

        logger.info(f"Global Workspace Engine initialized with {self.num_modules} modules")

    def _initialize_modules(self) -> Dict[str, CognitiveMod]:
        """
        Initialize the cognitive modules for different modalities.

        At most ``self.num_modules`` modules are created, one per supported
        modality, keyed as "<modality>_module".

        Returns:
            Dictionary of cognitive modules
        """
        modules = {}
        modalities = [
            ModalityType.VISUAL, ModalityType.AUDITORY, ModalityType.TEXTUAL,
            ModalityType.EMOTIONAL, ModalityType.MEMORY, ModalityType.REASONING,
            ModalityType.LINGUISTIC
        ]

        for modality in modalities[:self.num_modules]:
            module_name = f"{modality.value}_module"
            modules[module_name] = CognitiveMod(
                module_name=module_name,
                modality_type=modality,
                dimension=self.workspace_dimension
            )

        return modules

    def process_multimodal_input(self, inputs: Dict[str, torch.Tensor],
                                attention_weights: Optional[Dict[str, float]] = None) -> GlobalWorkspaceState:
        """
        Process multi-modal inputs through the global workspace.

        Inputs for unknown module names are silently ignored; only modules
        whose broadcast strength exceeds the integration threshold contribute
        to the integrated content.

        Args:
            inputs: Dictionary mapping module names to input tensors
            attention_weights: Optional attention weights for each module
                (defaults to 1.0 for every provided input)

        Returns:
            Updated global workspace state
        """
        if attention_weights is None:
            attention_weights = {name: 1.0 for name in inputs.keys()}

        # Process inputs through respective modules
        module_contributions = {}
        active_modules = []

        for module_name, input_data in inputs.items():
            if module_name in self.cognitive_modules:
                module = self.cognitive_modules[module_name]
                attention_weight = attention_weights.get(module_name, 1.0)

                # Process input (updates module state); read back the
                # contribution and its broadcast strength.
                module.process_input(input_data, attention_weight)
                contribution, broadcast_strength = module.get_contribution()

                # Store contributions above threshold
                if broadcast_strength > self.integration_threshold:
                    module_contributions[module_name] = contribution
                    active_modules.append(module_name)

        # Perform global integration
        integrated_content = self._perform_global_integration(module_contributions)

        # Calculate consciousness metrics
        unity_index = self._calculate_unity_index(module_contributions)
        integration_level = self._calculate_integration_level(module_contributions)
        broadcast_strength = self._calculate_overall_broadcast_strength()

        # Update workspace state
        self.workspace_state = GlobalWorkspaceState(
            workspace_content=integrated_content,
            broadcast_strength=broadcast_strength,
            integration_level=integration_level,
            unity_index=unity_index,
            module_contributions=module_contributions,
            active_modules=active_modules,
            previous_state=self.workspace_state.workspace_content.clone(),
            information_flow=self._calculate_information_flow(module_contributions),
            cross_module_binding=self._calculate_cross_modal_binding(module_contributions)
        )

        # Update bounded histories
        self.workspace_state.state_history.append(integrated_content.clone())
        self.integration_history.append(integration_level)
        self.unity_history.append(unity_index)

        if len(self.workspace_state.state_history) > self.MAX_HISTORY:
            self.workspace_state.state_history.pop(0)
            self.integration_history.pop(0)
            self.unity_history.pop(0)

        return self.workspace_state

    def _perform_global_integration(self, contributions: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Perform global integration of module contributions.

        Contributions are combined as a broadcast-strength-weighted average,
        enhanced by cross-modal binding, then temporally smoothed against the
        previous workspace content.

        Args:
            contributions: Dictionary of module contributions

        Returns:
            Integrated workspace content
        """
        if not contributions:
            return torch.zeros(self.workspace_dimension)

        # Weighted sum of contributions (weight = module broadcast strength)
        integrated = torch.zeros(self.workspace_dimension)
        total_weight = 0.0

        for module_name, contribution in contributions.items():
            weight = self.cognitive_modules[module_name].broadcast_strength
            integrated += contribution * weight
            total_weight += weight

        # Normalize by total weight
        if total_weight > 0:
            integrated = integrated / total_weight

        # Apply cross-modal binding enhancement
        integrated = self._apply_cross_modal_binding(integrated, contributions)

        # Temporal smoothing with the previous workspace state.
        # (The original guarded this with an always-true hasattr check;
        # GlobalWorkspaceState always carries workspace_content.)
        smoothing_factor = 0.2
        prev_content = self.workspace_state.workspace_content
        integrated = (1 - smoothing_factor) * integrated + smoothing_factor * prev_content

        return integrated

    def _apply_cross_modal_binding(self, integrated: torch.Tensor,
                                 contributions: Dict[str, torch.Tensor]) -> torch.Tensor:
        """
        Apply cross-modal binding to enhance integration.

        Pairs of contributions whose cosine similarity exceeds 0.3 add an
        element-wise-product binding term, averaged over all pairs and scaled
        by ``cross_modal_weight``.

        Args:
            integrated: Current integrated content
            contributions: Module contributions

        Returns:
            Enhanced integrated content
        """
        if len(contributions) < 2:
            return integrated

        # Calculate cross-modal correlations
        binding_enhancement = torch.zeros_like(integrated)
        contribution_list = list(contributions.values())

        for i, contrib_i in enumerate(contribution_list):
            for contrib_j in contribution_list[i+1:]:
                # Compute binding strength
                correlation = torch.cosine_similarity(
                    contrib_i.unsqueeze(0), contrib_j.unsqueeze(0)
                ).item()

                if correlation > 0.3:  # Significant correlation
                    binding_vector = contrib_i * contrib_j  # Element-wise product
                    binding_enhancement += binding_vector * correlation

        # Average over all pairs (including sub-threshold ones, matching the
        # normalization of the pairwise double loop above)
        num_pairs = len(contribution_list) * (len(contribution_list) - 1) / 2
        if num_pairs > 0:
            binding_enhancement = binding_enhancement / num_pairs
            integrated = integrated + self.cross_modal_weight * binding_enhancement

        return integrated

    def _calculate_unity_index(self, contributions: Dict[str, torch.Tensor]) -> float:
        """
        Calculate consciousness unity index: U = 1 - σ(C)/μ(C)

        Unity is one minus the coefficient of variation of contribution
        magnitudes, raised to ``unity_alpha`` for sensitivity adjustment.

        Args:
            contributions: Module contributions

        Returns:
            Unity index [0, 1]
        """
        if not contributions:
            return 0.0

        # Calculate contribution magnitudes
        magnitudes = np.array([torch.norm(c).item() for c in contributions.values()])

        # A single contribution is trivially unified
        if len(magnitudes) < 2:
            return 1.0

        mean_magnitude = np.mean(magnitudes)
        std_magnitude = np.std(magnitudes)

        if mean_magnitude == 0:
            return 1.0 if std_magnitude == 0 else 0.0

        coefficient_of_variation = std_magnitude / mean_magnitude
        unity_index = max(0.0, 1.0 - coefficient_of_variation)

        # Apply alpha parameter for sensitivity adjustment
        unity_index = unity_index ** self.unity_alpha

        return unity_index

    def _calculate_integration_level(self, contributions: Dict[str, torch.Tensor]) -> float:
        """
        Calculate the level of information integration.

        Integration is the mean absolute pairwise cosine similarity between
        contributions, boosted by 20% (capped at 1.0) once it exceeds the
        integration threshold.

        Args:
            contributions: Module contributions

        Returns:
            Integration level [0, 1]
        """
        if len(contributions) < 2:
            return 0.0

        # Calculate average pairwise correlation
        correlations = []
        contribution_list = list(contributions.values())

        for i, contrib_i in enumerate(contribution_list):
            for contrib_j in contribution_list[i+1:]:
                correlation = torch.cosine_similarity(
                    contrib_i.unsqueeze(0), contrib_j.unsqueeze(0)
                ).item()
                correlations.append(abs(correlation))

        if not correlations:
            return 0.0

        # Integration level as average absolute correlation
        integration_level = np.mean(correlations)

        # Apply threshold-based enhancement
        if integration_level > self.integration_threshold:
            integration_level = min(1.0, integration_level * 1.2)

        return integration_level

    def _calculate_overall_broadcast_strength(self) -> float:
        """
        Calculate overall broadcast strength across all modules.

        Returns:
            Overall broadcast strength [0, 1]: average strength of active
            modules plus a bonus proportional to the fraction of modules
            that are active.
        """
        total_strength = 0.0
        active_modules = 0

        for module in self.cognitive_modules.values():
            if module.broadcast_strength > 0:
                total_strength += module.broadcast_strength
                active_modules += 1

        if active_modules == 0:
            return 0.0

        # Average strength with activation bonus (num_modules is reconciled
        # with the real module count in __init__)
        avg_strength = total_strength / active_modules
        activation_bonus = min(active_modules / self.num_modules, 1.0) * 0.2

        return min(1.0, avg_strength + activation_bonus)

    def _calculate_information_flow(self, contributions: Dict[str, torch.Tensor]) -> Dict[str, float]:
        """
        Calculate information flow metrics for each module.

        Flow is a normalized Shannon-style entropy of the contribution's
        softmax-ed magnitudes, weighted by the module's broadcast strength.

        Args:
            contributions: Module contributions

        Returns:
            Dictionary of information flow values
        """
        flow_metrics = {}

        # Theoretical maximum entropy; guard against log(1) == 0 for a
        # degenerate one-dimensional workspace.
        max_entropy = math.log(self.workspace_dimension) if self.workspace_dimension > 1 else 1.0

        for module_name, contribution in contributions.items():
            module = self.cognitive_modules[module_name]

            # Information content as entropy-like measure
            activation_probs = torch.softmax(torch.abs(contribution), dim=0)
            entropy = -torch.sum(activation_probs * torch.log(activation_probs + 1e-10)).item()

            normalized_entropy = entropy / max_entropy

            # Flow as combination of entropy and broadcast strength
            flow_metrics[module_name] = normalized_entropy * module.broadcast_strength

        return flow_metrics

    def _calculate_cross_modal_binding(self, contributions: Dict[str, torch.Tensor]) -> Dict[Tuple[str, str], float]:
        """
        Calculate cross-modal binding strengths.

        Binding between a pair is the absolute cosine similarity of their
        contributions, scaled by the geometric mean of the two modules'
        broadcast strengths.

        Args:
            contributions: Module contributions

        Returns:
            Dictionary of binding strengths between module pairs
        """
        binding_strengths = {}
        module_names = list(contributions.keys())

        for i, module_i in enumerate(module_names):
            for module_j in module_names[i+1:]:
                contrib_i = contributions[module_i]
                contrib_j = contributions[module_j]

                # Calculate binding strength as correlation
                correlation = torch.cosine_similarity(
                    contrib_i.unsqueeze(0), contrib_j.unsqueeze(0)
                ).item()

                # Enhanced by both modules' broadcast strengths
                module_i_strength = self.cognitive_modules[module_i].broadcast_strength
                module_j_strength = self.cognitive_modules[module_j].broadcast_strength

                binding_strength = abs(correlation) * np.sqrt(module_i_strength * module_j_strength)
                binding_strengths[(module_i, module_j)] = binding_strength

        return binding_strengths

    def compute_consciousness_integration(self, information_input: torch.Tensor,
                                        attention_state: torch.Tensor) -> torch.Tensor:
        """
        Compute consciousness integration: C(t) = GWT(I(t)) ∘ AST(A(t))

        The information input is partitioned evenly (by last dimension) across
        the cognitive modules, processed through the workspace under attention
        weights derived from the attention state, then modulated by the
        attention schema.

        NOTE(review): if ``information_input.shape[-1]`` is smaller than the
        module count, the integer split yields empty slices, which the modules
        zero-pad — the result is then a near-zero workspace. Confirm callers
        always supply inputs at least as wide as the module count.

        Args:
            information_input: Information input tensor I(t)
            attention_state: Attention state tensor A(t)

        Returns:
            Integrated consciousness tensor C(t)
        """
        # Distribute information input across modules
        module_inputs = {}
        input_per_module = information_input.shape[-1] // len(self.cognitive_modules)

        start_idx = 0
        for module_name in self.cognitive_modules.keys():
            end_idx = min(start_idx + input_per_module, information_input.shape[-1])
            module_inputs[module_name] = information_input[..., start_idx:end_idx]
            start_idx = end_idx

        # Apply attention modulation
        attention_weights = self._compute_attention_weights(attention_state)

        # Process through global workspace
        workspace_state = self.process_multimodal_input(module_inputs, attention_weights)

        # Integration operation (composition of GWT and AST)
        gwt_output = workspace_state.workspace_content
        ast_modulation = self._apply_attention_modulation(gwt_output, attention_state)

        # Compose GWT and AST: C(t) = GWT(I(t)) ∘ AST(A(t))
        consciousness_state = gwt_output * ast_modulation

        return consciousness_state

    def _compute_attention_weights(self, attention_state: torch.Tensor) -> Dict[str, float]:
        """
        Compute attention weights for each module from attention state.

        The attention state is partitioned evenly across modules; each
        module's weight is the clamped norm of its segment.

        Args:
            attention_state: Attention state tensor

        Returns:
            Dictionary of attention weights in [0, 1]
        """
        # Distribute attention state across modules
        num_modules = len(self.cognitive_modules)
        attention_per_module = attention_state.shape[-1] // num_modules

        weights = {}
        start_idx = 0

        for module_name in self.cognitive_modules.keys():
            end_idx = min(start_idx + attention_per_module, attention_state.shape[-1])

            # Attention weight as norm of assigned attention segment
            module_attention = attention_state[..., start_idx:end_idx]
            weight = torch.norm(module_attention).item()

            # Clamp to [0, 1]
            weights[module_name] = max(0.0, min(weight, 1.0))
            start_idx = end_idx

        return weights

    def _apply_attention_modulation(self, workspace_content: torch.Tensor,
                                  attention_state: torch.Tensor) -> torch.Tensor:
        """
        Apply attention schema theory modulation to workspace content.

        Args:
            workspace_content: Current workspace content
            attention_state: Attention state for modulation (truncated or
                zero-padded to the workspace width)

        Returns:
            Attention-modulated tensor
        """
        # Ensure compatibility
        if attention_state.shape[-1] != workspace_content.shape[-1]:
            if attention_state.shape[-1] > workspace_content.shape[-1]:
                attention_state = attention_state[..., :workspace_content.shape[-1]]
            else:
                padding = torch.zeros(*attention_state.shape[:-1],
                                    workspace_content.shape[-1] - attention_state.shape[-1])
                attention_state = torch.cat([attention_state, padding], dim=-1)

        # Apply gating based on attention strength
        attention_gates = torch.sigmoid(attention_state * 2.0)
        modulation = attention_gates * (1.0 + torch.tanh(attention_state))

        return modulation

    def compute_multimodal_fusion(self, visual_consciousness: torch.Tensor,
                                auditory_consciousness: torch.Tensor,
                                textual_consciousness: torch.Tensor,
                                emotional_consciousness: torch.Tensor) -> torch.Tensor:
        """
        Compute multi-modal consciousness fusion:
        C_unified = Fusion(C_visual, C_auditory, C_textual, C_emotional)

        Each modality tensor is normalized to the workspace width, fused as a
        norm-weighted average, then enhanced by cross-modal binding and a
        global coherence factor.

        Args:
            visual_consciousness: Visual consciousness tensor
            auditory_consciousness: Auditory consciousness tensor
            textual_consciousness: Textual consciousness tensor
            emotional_consciousness: Emotional consciousness tensor

        Returns:
            Unified multi-modal consciousness tensor
        """
        # Collect all consciousness modalities
        modalities = {
            'visual': visual_consciousness,
            'auditory': auditory_consciousness,
            'textual': textual_consciousness,
            'emotional': emotional_consciousness
        }

        # Ensure all tensors have same dimension
        target_dim = self.workspace_dimension
        normalized_modalities = {}

        for name, consciousness in modalities.items():
            if consciousness.shape[-1] != target_dim:
                if consciousness.shape[-1] > target_dim:
                    normalized_modalities[name] = consciousness[..., :target_dim]
                else:
                    padding = torch.zeros(*consciousness.shape[:-1],
                                        target_dim - consciousness.shape[-1])
                    normalized_modalities[name] = torch.cat([consciousness, padding], dim=-1)
            else:
                normalized_modalities[name] = consciousness

        # Calculate fusion weights based on consciousness strength (L2 norm)
        fusion_weights = {}
        total_strength = 0.0

        for name, consciousness in normalized_modalities.items():
            strength = torch.norm(consciousness).item()
            fusion_weights[name] = strength
            total_strength += strength

        # Normalize weights
        if total_strength > 0:
            for name in fusion_weights:
                fusion_weights[name] /= total_strength
        else:
            # Equal weights if no consciousness detected
            uniform_weight = 1.0 / len(fusion_weights)
            for name in fusion_weights:
                fusion_weights[name] = uniform_weight

        # Perform weighted fusion
        unified_consciousness = torch.zeros(target_dim)

        for name, consciousness in normalized_modalities.items():
            unified_consciousness += fusion_weights[name] * consciousness

        # Apply cross-modal binding enhancement
        unified_consciousness = self._apply_cross_modal_binding(
            unified_consciousness, normalized_modalities
        )

        # Apply global coherence enhancement
        coherence_factor = self._calculate_global_coherence(normalized_modalities)
        unified_consciousness = unified_consciousness * (1.0 + coherence_factor * 0.3)

        return unified_consciousness

    def _calculate_global_coherence(self, modalities: Dict[str, torch.Tensor]) -> float:
        """
        Calculate global coherence across modalities.

        Coherence is the mean absolute pairwise cosine similarity.

        Args:
            modalities: Dictionary of modality consciousness tensors

        Returns:
            Global coherence value [0, 1]
        """
        if len(modalities) < 2:
            return 0.0

        # Calculate pairwise correlations
        correlations = []
        modality_list = list(modalities.values())

        for i, mod_i in enumerate(modality_list):
            for mod_j in modality_list[i+1:]:
                correlation = torch.cosine_similarity(
                    mod_i.unsqueeze(0), mod_j.unsqueeze(0)
                ).item()
                correlations.append(abs(correlation))

        # Global coherence as average correlation
        return np.mean(correlations) if correlations else 0.0

    def get_consciousness_metrics(self) -> Dict[str, float]:
        """
        Get comprehensive consciousness metrics from the global workspace.

        Returns:
            Dictionary of consciousness metrics (unity, integration,
            broadcast strength, module counts, plus stability and flow
            metrics when history is available)
        """
        metrics = {
            'unity_index': self.workspace_state.unity_index,
            'integration_level': self.workspace_state.integration_level,
            'broadcast_strength': self.workspace_state.broadcast_strength,
            'active_modules': len(self.workspace_state.active_modules),
            'total_modules': len(self.cognitive_modules),
        }

        # Add temporal metrics over the last 10 samples; clamp to [0, 1] so a
        # large spread cannot produce a negative "stability".
        if len(self.unity_history) > 1:
            metrics['unity_stability'] = max(0.0, 1.0 - float(np.std(self.unity_history[-10:])))
            metrics['integration_stability'] = max(0.0, 1.0 - float(np.std(self.integration_history[-10:])))

        # Add information flow metrics
        if self.workspace_state.information_flow:
            flow_values = list(self.workspace_state.information_flow.values())
            metrics['avg_information_flow'] = np.mean(flow_values)
            metrics['max_information_flow'] = max(flow_values)

        # Add cross-modal binding metrics
        if self.workspace_state.cross_module_binding:
            binding_values = list(self.workspace_state.cross_module_binding.values())
            metrics['avg_cross_modal_binding'] = np.mean(binding_values)
            metrics['max_cross_modal_binding'] = max(binding_values)

        return metrics

    def reset_workspace(self):
        """Reset the global workspace and all modules to their initial state."""
        self.workspace_state = GlobalWorkspaceState(
            workspace_content=torch.zeros(self.workspace_dimension)
        )

        # Reset all modules
        for module in self.cognitive_modules.values():
            module.reset()

        # Clear histories
        self.integration_history.clear()
        self.unity_history.clear()
        self.broadcast_events.clear()

        logger.info("Global Workspace Engine reset to initial state")