"""
Multi-Modal Consciousness Fusion Engine.

This module implements the fusion system for combining different modalities of 
consciousness (visual, auditory, textual, emotional) into a unified conscious 
experience. It provides cross-modal binding mechanisms and cultural consciousness 
embedding for historical period authenticity.

Key Features:
- Multi-modal consciousness fusion: C_unified = Fusion(C_visual, C_auditory, C_textual, C_emotional)
- Cross-modal binding: C_bound = (M₁ ⊗ C₁) + (M₂ ⊗ C₂) + ... + (Mₙ ⊗ Cₙ)
- Cultural consciousness embedding with historical period values
- Adaptive fusion weights based on modality salience and context
- Temporal consistency maintenance across fusion operations
"""

import torch
import numpy as np
from typing import Dict, List, Optional, Tuple, Any, Union
from dataclasses import dataclass, field
import math
import logging
from datetime import datetime
from enum import Enum

from .models import (
    ConsciousnessState,
    ModalityType,
    ModalityState,
    ConsciousnessLevel,
    GlobalWorkspaceState,
    AttentionState,
    IITMetrics
)

logger = logging.getLogger(__name__)


class FusionStrategy(Enum):
    """Different strategies for consciousness fusion.

    Selected at engine construction time and dispatched in
    ConsciousnessFusionEngine.fuse_consciousness (step 4).
    """
    WEIGHTED_AVERAGE = "weighted_average"        # salience-weighted mean of modality tensors
    ATTENTION_MODULATED = "attention_modulated"  # weighted mean, then gated by attention focus
    HIERARCHICAL = "hierarchical"                # fixed priority: cognition outranks perception
    COMPETITIVE = "competitive"                  # strongest modality dominates
    COOPERATIVE = "cooperative"                  # fallback branch in the dispatch (impl. defined elsewhere in file)


@dataclass
class FusionWeights:
    """
    Represents fusion weights for different modalities and contexts.
    """
    modality_weights: Dict[ModalityType, float] = field(default_factory=dict)
    attention_weights: Dict[str, float] = field(default_factory=dict)
    cultural_weight: float = 0.0
    temporal_weight: float = 0.0
    context_weight: float = 0.0
    
    def normalize_weights(self):
        """Rescale grouped weights to sum to 1.0; clamp scalar weights into [0, 1]."""
        def _clamp01(value: float) -> float:
            # Scalar weights are bounded rather than normalized.
            return max(0.0, min(1.0, value))
        
        # Rescale the modality weights in place (skip when all-zero/empty).
        modality_total = sum(self.modality_weights.values())
        if modality_total > 0:
            for key, value in list(self.modality_weights.items()):
                self.modality_weights[key] = value / modality_total
        
        # Same treatment for the attention weights.
        attention_total = sum(self.attention_weights.values())
        if attention_total > 0:
            for key, value in list(self.attention_weights.items()):
                self.attention_weights[key] = value / attention_total
        
        self.cultural_weight = _clamp01(self.cultural_weight)
        self.temporal_weight = _clamp01(self.temporal_weight)
        self.context_weight = _clamp01(self.context_weight)


@dataclass 
class CulturalEmbedding:
    """
    Represents cultural consciousness embedding for historical authenticity.

    The 128-dim embedding vector is laid out in four 32-dim sections:
    [0:32] historical period, [32:64] cultural values,
    [64:96] social norms, [96:128] worldview.
    """
    historical_period: str = ""
    cultural_values: Dict[str, float] = field(default_factory=dict)
    social_norms: Dict[str, float] = field(default_factory=dict)
    worldview_context: Dict[str, float] = field(default_factory=dict)
    language_patterns: Dict[str, float] = field(default_factory=dict)
    embedding_vector: torch.Tensor = field(default_factory=lambda: torch.zeros(128))
    embedding_strength: float = 0.0
    
    def encode_cultural_context(self, context: Dict[str, Any]) -> torch.Tensor:
        """
        Encode cultural context into a deterministic embedding vector.

        Args:
            context: Cultural context dictionary. Recognized keys:
                'historical_period' (str) and 'cultural_values',
                'social_norms', 'worldview' (each a mapping of name -> float
                strength; only the first 32 entries of each are encoded).

        Returns:
            Cultural embedding tensor of shape (128,). The result is also
            stored on ``self.embedding_vector``, with its L2 norm in
            ``self.embedding_strength``.
        """
        import zlib  # local import: stable, process-independent hashing
        
        embedding = torch.zeros(128)
        
        # Historical period encoding (first 32 dimensions).
        if 'historical_period' in context:
            period = context['historical_period']
            # BUGFIX: the built-in hash() is salted per process
            # (PYTHONHASHSEED), and torch.randn drew from the global RNG, so
            # the same period produced a different "encoding" on every run.
            # crc32 + a period-seeded generator make the encoding reproducible.
            period_seed = zlib.crc32(period.encode('utf-8'))
            embedding[period_seed % 32] = 1.0
            
            generator = torch.Generator()
            generator.manual_seed(period_seed)
            
            # Add period-specific patterns in era-dedicated sub-ranges.
            if 'ancient' in period.lower():
                embedding[:8] += torch.randn(8, generator=generator) * 0.3
            elif 'medieval' in period.lower():
                embedding[8:16] += torch.randn(8, generator=generator) * 0.3
            elif 'renaissance' in period.lower():
                embedding[16:24] += torch.randn(8, generator=generator) * 0.3
            elif 'modern' in period.lower():
                embedding[24:32] += torch.randn(8, generator=generator) * 0.3
        
        # Named-strength sections, 32 dims each (insertion order of the
        # context dicts determines slot assignment).
        for key, offset in (('cultural_values', 32),
                            ('social_norms', 64),
                            ('worldview', 96)):
            if key in context:
                for i, strength in enumerate(context[key].values()):
                    if i < 32:
                        embedding[offset + i] = strength
        
        self.embedding_vector = embedding
        self.embedding_strength = torch.norm(embedding).item()
        
        return embedding


class CrossModalBinder:
    """
    Implements cross-modal binding mechanisms for consciousness fusion.
    
    Cross-modal binding creates coherent unified experiences by linking 
    related information across different sensory and cognitive modalities.
    """
    
    def __init__(self, binding_dimension: int = 512, binding_threshold: float = 0.3):
        """
        Initialize cross-modal binder.
        
        Args:
            binding_dimension: Dimension for binding operations
            binding_threshold: Minimum correlation for binding
        """
        self.binding_dimension = binding_dimension
        self.binding_threshold = binding_threshold
        
        # Binding networks for supported modality pairs (randomly
        # initialized; only ever applied under torch.no_grad()).
        self.binding_networks = self._build_binding_networks()
        
        # Per-call binding state: active_bindings / binding_strengths describe
        # the most recent compute_cross_modal_binding() call only.
        self.active_bindings = {}
        self.binding_strengths = {}
        self.binding_history = []
        
        logger.debug(f"Cross-modal binder initialized with dimension {binding_dimension}")
    
    def _build_binding_networks(self) -> Dict[Tuple[ModalityType, ModalityType], torch.nn.Module]:
        """
        Build neural networks for cross-modal binding.
        
        Returns:
            Dictionary of binding networks for modality pairs (one orientation
            per pair; lookup checks both orderings).
        """
        modality_pairs = [
            (ModalityType.VISUAL, ModalityType.AUDITORY),
            (ModalityType.VISUAL, ModalityType.TEXTUAL),
            (ModalityType.VISUAL, ModalityType.EMOTIONAL),
            (ModalityType.AUDITORY, ModalityType.TEXTUAL),
            (ModalityType.AUDITORY, ModalityType.EMOTIONAL),
            (ModalityType.TEXTUAL, ModalityType.EMOTIONAL),
            (ModalityType.MEMORY, ModalityType.VISUAL),
            (ModalityType.MEMORY, ModalityType.TEXTUAL),
            (ModalityType.REASONING, ModalityType.TEXTUAL),
            (ModalityType.LINGUISTIC, ModalityType.TEXTUAL)
        ]
        
        networks = {}
        for modality1, modality2 in modality_pairs:
            # Concatenated pair activations in, bound representation out.
            networks[(modality1, modality2)] = torch.nn.Sequential(
                torch.nn.Linear(self.binding_dimension * 2, 256),
                torch.nn.ReLU(),
                torch.nn.Linear(256, 128),
                torch.nn.ReLU(),
                torch.nn.Linear(128, self.binding_dimension),
                torch.nn.Tanh()
            )
        
        return networks
    
    def compute_cross_modal_binding(self, modality_states: Dict[ModalityType, ModalityState]) -> torch.Tensor:
        """
        Compute cross-modal binding: C_bound = (M₁ ⊗ C₁) + (M₂ ⊗ C₂) + ... + (Mₙ ⊗ Cₙ)
        
        Args:
            modality_states: Dictionary of modality states
            
        Returns:
            Cross-modal bound consciousness tensor
        """
        # BUGFIX: reset per-call binding state. Previously these dicts were
        # only ever added to, so bindings from earlier calls lingered forever
        # and get_binding_report() reported stale pairs as "active".
        self.active_bindings = {}
        self.binding_strengths = {}
        
        if len(modality_states) < 2:
            # Binding needs at least two modalities; pass a lone modality
            # through (dimension-adapted), or return zeros when there is none.
            if modality_states:
                state = next(iter(modality_states.values()))
                return self._adapt_dimension(state.activation)
            return torch.zeros(self.binding_dimension)
        
        bound_consciousness = torch.zeros(self.binding_dimension)
        binding_contributions = []
        total_binding_strength = 0.0
        
        # Evaluate every unordered modality pair that has a binding network.
        modality_items = list(modality_states.items())
        for i, (mod1_type, mod1_state) in enumerate(modality_items):
            for mod2_type, mod2_state in modality_items[i + 1:]:
                
                # Networks are stored for one orientation only.
                binding_network = self.binding_networks.get((mod1_type, mod2_type))
                if binding_network is None:
                    binding_network = self.binding_networks.get((mod2_type, mod1_type))
                if binding_network is None:
                    continue
                
                binding_result, binding_strength = self._compute_pair_binding(
                    mod1_state, mod2_state, binding_network
                )
                
                if binding_strength > self.binding_threshold:
                    binding_contributions.append(binding_result * binding_strength)
                    total_binding_strength += binding_strength
                    
                    # Track this binding for reporting.
                    pair_key = (mod1_type, mod2_type)
                    self.active_bindings[pair_key] = binding_strength
                    self.binding_strengths[pair_key] = binding_strength
        
        # Strength-weighted average of all contributing pairs.
        if binding_contributions and total_binding_strength > 0:
            for contribution in binding_contributions:
                bound_consciousness += contribution
            bound_consciousness = bound_consciousness / total_binding_strength
        
        # Amplify the result when modalities are mutually coherent.
        bound_consciousness = self._apply_cross_modal_enhancement(bound_consciousness, modality_states)
        
        # Keep a bounded history of bound states for stability reporting.
        self.binding_history.append(bound_consciousness.clone())
        if len(self.binding_history) > 100:
            self.binding_history.pop(0)
        
        return bound_consciousness
    
    def _compute_pair_binding(self, state1: ModalityState, state2: ModalityState, 
                            binding_network: torch.nn.Module) -> Tuple[torch.Tensor, float]:
        """
        Compute binding between two modality states.
        
        Args:
            state1: First modality state
            state2: Second modality state  
            binding_network: Binding network for this pair
            
        Returns:
            Tuple of (binding_result, binding_strength). Returns a zero
            tensor and 0.0 when the confidence-modulated correlation does
            not exceed the binding threshold.
        """
        # Adapt both activations to the binding dimension.
        act1_adapted = self._adapt_dimension(state1.activation)
        act2_adapted = self._adapt_dimension(state2.activation)
        
        # Absolute cosine similarity serves as the base binding strength.
        correlation = torch.cosine_similarity(
            act1_adapted.unsqueeze(0), act2_adapted.unsqueeze(0)
        ).item()
        base_binding_strength = abs(correlation)
        
        # Modulate by the geometric mean of both modality confidences.
        confidence_modulation = math.sqrt(state1.confidence * state2.confidence)
        binding_strength = base_binding_strength * confidence_modulation
        
        if binding_strength <= self.binding_threshold:
            # No significant binding.
            return torch.zeros(self.binding_dimension), 0.0
        
        # Run the (untrained) binding network on the concatenated pair.
        combined_input = torch.cat([act1_adapted, act2_adapted])
        with torch.no_grad():
            binding_result = binding_network(combined_input)
        
        # Boost strength when the states already report an explicit binding
        # to each other (up to +50%).
        if state1.cross_modal_bindings and state2.modality_type in state1.cross_modal_bindings:
            existing_binding = state1.cross_modal_bindings[state2.modality_type]
            binding_strength = binding_strength * (1.0 + existing_binding * 0.5)
        
        return binding_result, binding_strength
    
    def _apply_cross_modal_enhancement(self, bound_consciousness: torch.Tensor,
                                     modality_states: Dict[ModalityType, ModalityState]) -> torch.Tensor:
        """
        Apply cross-modal enhancement to bound consciousness.
        
        Scales the bound tensor by up to +30% proportionally to the average
        pairwise (absolute cosine) coherence of the modality activations.
        
        Args:
            bound_consciousness: Base bound consciousness
            modality_states: Modality states for context
            
        Returns:
            Enhanced bound consciousness
        """
        if len(modality_states) < 2:
            return bound_consciousness
        
        modality_activations = [self._adapt_dimension(state.activation) 
                              for state in modality_states.values()]
        
        # Average absolute pairwise coherence.
        coherence_bonus = 0.0
        coherence_pairs = 0
        for i in range(len(modality_activations)):
            for j in range(i + 1, len(modality_activations)):
                coherence = torch.cosine_similarity(
                    modality_activations[i].unsqueeze(0),
                    modality_activations[j].unsqueeze(0)
                ).item()
                coherence_bonus += abs(coherence)
                coherence_pairs += 1
        
        if coherence_pairs > 0:
            avg_coherence = coherence_bonus / coherence_pairs
            enhancement_factor = 1.0 + avg_coherence * 0.3
            bound_consciousness = bound_consciousness * enhancement_factor
        
        return bound_consciousness
    
    def _adapt_dimension(self, tensor: torch.Tensor) -> torch.Tensor:
        """Truncate or zero-pad the last dimension to the binding dimension."""
        current = tensor.shape[-1]
        if current == self.binding_dimension:
            return tensor
        if current > self.binding_dimension:
            return tensor[..., :self.binding_dimension]
        # BUGFIX: match dtype/device so torch.cat cannot fail for non-float32
        # or non-CPU tensors (padding previously used default float32 CPU zeros).
        padding = torch.zeros(*tensor.shape[:-1],
                              self.binding_dimension - current,
                              dtype=tensor.dtype, device=tensor.device)
        return torch.cat([tensor, padding], dim=-1)
    
    def get_binding_report(self) -> Dict[str, Any]:
        """
        Get report on the bindings from the most recent binding computation.
        
        Returns:
            Dictionary with binding counts, per-pair strengths, and a
            stability score in [0, 1] derived from recent binding norms.
        """
        strengths = list(self.binding_strengths.values())
        
        if len(self.binding_history) >= 5:
            recent_norms = [torch.norm(state).item() for state in self.binding_history[-10:]]
            # BUGFIX: std can exceed 1, which previously produced a negative
            # "stability"; clamp to keep the score in [0, 1].
            binding_stability = max(0.0, 1.0 - float(np.std(recent_norms)))
        else:
            binding_stability = 0.5
        
        return {
            'active_bindings': len(self.active_bindings),
            'binding_pairs': list(self.active_bindings.keys()),
            'binding_strengths': dict(self.binding_strengths),
            'avg_binding_strength': float(np.mean(strengths)) if strengths else 0.0,
            'max_binding_strength': max(strengths) if strengths else 0.0,
            'binding_stability': binding_stability
        }


class ConsciousnessFusionEngine:
    """
    Main consciousness fusion engine for multi-modal consciousness integration.
    
    This engine implements the fusion mechanisms to combine different modalities 
    of consciousness (visual, auditory, textual, emotional) with cultural context
    into a unified conscious experience suitable for historical figure simulation.
    """
    
    def __init__(self, fusion_dimension: int = 2048, 
                 cultural_embedding_dim: int = 128,
                 fusion_strategy: FusionStrategy = FusionStrategy.ATTENTION_MODULATED):
        """
        Initialize consciousness fusion engine.
        
        Args:
            fusion_dimension: Dimension of fused consciousness representations
            cultural_embedding_dim: Dimension of cultural embeddings
            fusion_strategy: Strategy for fusion operations
        """
        self.fusion_dimension = fusion_dimension
        self.cultural_embedding_dim = cultural_embedding_dim
        self.fusion_strategy = fusion_strategy
        
        # Cross-modal binding operates at a quarter of the fusion dimension.
        self.cross_modal_binder = CrossModalBinder(
            binding_dimension=fusion_dimension // 4,
            binding_threshold=0.3
        )
        
        # Fusion networks (applied under torch.no_grad() in fuse_consciousness).
        self.modality_fusion_network = self._build_modality_fusion_network()
        self.cultural_integration_network = self._build_cultural_integration_network()
        self.temporal_consistency_network = self._build_temporal_consistency_network()
        
        # Mutable fusion state, refreshed on each fuse_consciousness call.
        self.current_fusion_weights = FusionWeights()
        self.cultural_embedding = CulturalEmbedding()
        self.fused_consciousness_history = []
        
        # Rolling per-call metric series, one list per tracked quantity.
        tracked = ('coherence', 'diversity', 'cultural_strength', 'temporal_consistency')
        self.fusion_metrics = {f'{name}_history': [] for name in tracked}
        
        logger.info(f"Consciousness Fusion Engine initialized with dimension {fusion_dimension}")
    
    def _build_modality_fusion_network(self) -> torch.nn.Module:
        """
        Build network for modality fusion.
        
        A bottleneck stack (fusion_dim -> 1024 -> 512 -> fusion_dim) with
        layer-norm, dropout, and a Tanh output.
        
        Returns:
            Modality fusion network
        """
        nn = torch.nn
        layers = [
            nn.Linear(self.fusion_dimension, 1024),
            nn.LayerNorm(1024),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(1024, 512),
            nn.LayerNorm(512),
            nn.ReLU(),
            nn.Linear(512, self.fusion_dimension),
            nn.Tanh(),
        ]
        return nn.Sequential(*layers)
    
    def _build_cultural_integration_network(self) -> torch.nn.Module:
        """
        Build network for cultural context integration.
        
        Input is fused consciousness concatenated with a cultural embedding,
        projected back down to the fusion dimension.
        
        Returns:
            Cultural integration network
        """
        combined_dim = self.fusion_dimension + self.cultural_embedding_dim
        nn = torch.nn
        return nn.Sequential(
            nn.Linear(combined_dim, 512),
            nn.LayerNorm(512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, self.fusion_dimension),
            nn.Tanh(),
        )
    
    def _build_temporal_consistency_network(self) -> torch.nn.Module:
        """
        Build network for maintaining temporal consistency.
        
        Takes the current and previous fused states concatenated (hence twice
        the fusion dimension) and produces a consistency-adjusted state.
        
        Returns:
            Temporal consistency network
        """
        nn = torch.nn
        hidden_widths = [self.fusion_dimension * 2, 512, 256]
        layers = []
        for in_dim, out_dim in zip(hidden_widths, hidden_widths[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(256, self.fusion_dimension))
        layers.append(nn.Tanh())
        return nn.Sequential(*layers)
    
    def fuse_consciousness(self, 
                          visual_consciousness: Optional[torch.Tensor] = None,
                          auditory_consciousness: Optional[torch.Tensor] = None,
                          textual_consciousness: Optional[torch.Tensor] = None,
                          emotional_consciousness: Optional[torch.Tensor] = None,
                          memory_consciousness: Optional[torch.Tensor] = None,
                          reasoning_consciousness: Optional[torch.Tensor] = None,
                          cultural_context: Optional[Dict[str, Any]] = None,
                          attention_state: Optional[AttentionState] = None,
                          temporal_context: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Fuse multi-modal consciousness: C_unified = Fusion(C_visual, C_auditory, C_textual, C_emotional)
        
        Any subset of the modality tensors may be supplied; each is adapted
        to the fusion dimension, bound cross-modally, fused per the engine's
        strategy, and optionally conditioned on cultural and temporal context.
        
        Args:
            visual_consciousness: Visual consciousness tensor
            auditory_consciousness: Auditory consciousness tensor
            textual_consciousness: Textual consciousness tensor
            emotional_consciousness: Emotional consciousness tensor
            memory_consciousness: Memory consciousness tensor
            reasoning_consciousness: Reasoning consciousness tensor
            cultural_context: Cultural context for historical authenticity
            attention_state: Current attention state for modulation
            temporal_context: Previous consciousness state for consistency
            
        Returns:
            Unified consciousness tensor (zeros when no modality is supplied)
        """
        # Gather whichever modality tensors were supplied, adapted to the
        # fusion dimension (insertion order matters downstream).
        supplied = {
            ModalityType.VISUAL: visual_consciousness,
            ModalityType.AUDITORY: auditory_consciousness,
            ModalityType.TEXTUAL: textual_consciousness,
            ModalityType.EMOTIONAL: emotional_consciousness,
            ModalityType.MEMORY: memory_consciousness,
            ModalityType.REASONING: reasoning_consciousness,
        }
        modality_inputs = {
            modality: self._adapt_dimension(tensor)
            for modality, tensor in supplied.items()
            if tensor is not None
        }
        
        if not modality_inputs:
            # Nothing to fuse.
            return torch.zeros(self.fusion_dimension)
        
        # 1. Wrap raw tensors in modality states for binding.
        modality_states = self._create_modality_states(modality_inputs)
        
        # 2. Compute fusion weights from salience, attention, and culture.
        self.current_fusion_weights = self._compute_fusion_weights(
            modality_states, attention_state, cultural_context
        )
        weights = self.current_fusion_weights
        
        # 3. Cross-modal binding.
        bound_consciousness = self.cross_modal_binder.compute_cross_modal_binding(modality_states)
        
        # 4. Strategy-specific fusion; anything unrecognized falls back to
        #    cooperative fusion (matching the original if/elif chain's else).
        strategy_handlers = {
            FusionStrategy.WEIGHTED_AVERAGE:
                lambda: self._weighted_average_fusion(modality_inputs, weights),
            FusionStrategy.ATTENTION_MODULATED:
                lambda: self._attention_modulated_fusion(modality_inputs, attention_state, weights),
            FusionStrategy.HIERARCHICAL:
                lambda: self._hierarchical_fusion(modality_inputs, weights),
            FusionStrategy.COMPETITIVE:
                lambda: self._competitive_fusion(modality_inputs, weights),
        }
        fallback = lambda: self._cooperative_fusion(modality_inputs, weights)
        fused_consciousness = strategy_handlers.get(self.fusion_strategy, fallback)()
        
        # 5. Blend in the cross-modal binding result (fixed 30% share),
        #    dimension-adapted so the linear combination is well-formed.
        binding_weight = 0.3
        bound_adapted = self._adapt_dimension(bound_consciousness)
        fused_consciousness = (1 - binding_weight) * fused_consciousness + binding_weight * bound_adapted
        
        # 6. Pass through the modality fusion network without gradients.
        with torch.no_grad():
            fused_consciousness = self.modality_fusion_network(fused_consciousness)
        
        # 7. Cultural context integration, when any context is provided.
        if cultural_context:
            cultural_vec = self.cultural_embedding.encode_cultural_context(cultural_context)
            fused_consciousness = self._integrate_cultural_context(fused_consciousness, cultural_vec)
        
        # 8. Temporal consistency against the supplied previous state (only
        #    once a fusion history exists).
        if temporal_context is not None and self.fused_consciousness_history:
            fused_consciousness = self._apply_temporal_consistency(fused_consciousness, temporal_context)
        
        # 9. Record metrics and keep a bounded history of fused states.
        self._update_fusion_metrics(fused_consciousness, modality_states)
        self.fused_consciousness_history.append(fused_consciousness.clone())
        if len(self.fused_consciousness_history) > 100:
            self.fused_consciousness_history.pop(0)
        
        return fused_consciousness
    
    def _create_modality_states(self, modality_inputs: Dict[ModalityType, torch.Tensor],
                                modality_dim: int = 256) -> Dict[ModalityType, ModalityState]:
        """
        Create modality states from input tensors.
        
        Args:
            modality_inputs: Dictionary of modality input tensors
            modality_dim: Target per-modality dimension; each tensor is
                truncated or zero-padded to this size (default 256, the
                value previously hard-coded).
            
        Returns:
            Dictionary of modality states
        """
        modality_states = {}
        
        for modality_type, input_tensor in modality_inputs.items():
            current_dim = input_tensor.shape[-1]
            if current_dim > modality_dim:
                adapted_activation = input_tensor[..., :modality_dim]
            elif current_dim < modality_dim:
                # BUGFIX: match dtype/device so torch.cat cannot fail for
                # non-float32 or non-CPU tensors.
                padding = torch.zeros(*input_tensor.shape[:-1], 
                                      modality_dim - current_dim,
                                      dtype=input_tensor.dtype,
                                      device=input_tensor.device)
                adapted_activation = torch.cat([input_tensor, padding], dim=-1)
            else:
                adapted_activation = input_tensor
            
            modality_states[modality_type] = ModalityState(
                modality_type=modality_type,
                activation=adapted_activation,
                # Confidence: sigmoid-squashed mean activation, in (0, 1).
                confidence=torch.sigmoid(torch.mean(adapted_activation)).item(),
                # Processing depth: activation magnitude per dimension.
                processing_depth=torch.norm(adapted_activation).item() / modality_dim
            )
        
        return modality_states
    
    def _compute_fusion_weights(self, modality_states: Dict[ModalityType, ModalityState],
                              attention_state: Optional[AttentionState] = None,
                              cultural_context: Optional[Dict[str, Any]] = None) -> FusionWeights:
        """
        Compute fusion weights for different modalities and contexts.
        
        Args:
            modality_states: Dictionary of modality states
            attention_state: Current attention state
            cultural_context: Cultural context information
            
        Returns:
            Computed, normalized fusion weights. An empty modality dict
            yields default (all-zero) weights.
        """
        weights = FusionWeights()
        
        # BUGFIX: with no modalities there is nothing to weight; the previous
        # implementation divided by len(modality_states) and raised
        # ZeroDivisionError when called with an empty dict.
        if not modality_states:
            return weights
        
        # 1. Modality weights from salience = activation strength * confidence.
        total_salience = 0.0
        modality_saliences = {}
        
        for modality_type, state in modality_states.items():
            activation_strength = torch.norm(state.activation).item()
            salience = activation_strength * state.confidence
            modality_saliences[modality_type] = salience
            total_salience += salience
        
        if total_salience > 0:
            for modality_type, salience in modality_saliences.items():
                weights.modality_weights[modality_type] = salience / total_salience
        else:
            # Degenerate (all-zero salience) case: uniform weights.
            uniform_weight = 1.0 / len(modality_states)
            weights.modality_weights = {mod: uniform_weight for mod in modality_states.keys()}
        
        # 2. Attention-derived weights, plus modulation of modality weights.
        if attention_state is not None:
            weights.attention_weights = {
                'focus_strength': attention_state.attention_strength,
                'metacognitive_awareness': attention_state.metacognitive_awareness,
                'self_monitoring': attention_state.self_monitoring_strength
            }
            
            # Boost modalities the attention system currently favors (up to +50%);
            # normalize_weights() below restores the sum-to-one invariant.
            if attention_state.modality_weights:
                for modality_type in weights.modality_weights.keys():
                    if modality_type in attention_state.modality_weights:
                        attention_modulation = attention_state.modality_weights[modality_type]
                        weights.modality_weights[modality_type] *= (1.0 + attention_modulation * 0.5)
        
        # 3. Cultural weight: base influence plus bonuses for richer context.
        if cultural_context:
            weights.cultural_weight = 0.2  # Base cultural influence
            
            if 'historical_period' in cultural_context:
                weights.cultural_weight += 0.1
            if 'cultural_values' in cultural_context:
                num_values = len(cultural_context['cultural_values'])
                weights.cultural_weight += min(0.2, num_values * 0.05)
        
        # 4. Temporal weight only once a fusion history exists.
        if len(self.fused_consciousness_history) > 0:
            weights.temporal_weight = 0.15  # Moderate temporal consistency
        
        # 5. Normalize grouped weights and clamp scalar weights.
        weights.normalize_weights()
        
        return weights
    
    def _weighted_average_fusion(self, modality_inputs: Dict[ModalityType, torch.Tensor],
                               fusion_weights: FusionWeights) -> torch.Tensor:
        """
        Combine modality tensors as a weighted sum.
        
        Modalities absent from the weight table fall back to a uniform
        1/N share.
        
        Args:
            modality_inputs: Dictionary of modality inputs
            fusion_weights: Fusion weights
            
        Returns:
            Fused consciousness tensor (zeros when no inputs are given)
        """
        fused = torch.zeros(self.fusion_dimension)
        if not modality_inputs:
            return fused
        
        uniform_share = 1.0 / len(modality_inputs)
        for modality, tensor in modality_inputs.items():
            weight = fusion_weights.modality_weights.get(modality, uniform_share)
            fused = fused + weight * tensor
        
        return fused
    
    def _attention_modulated_fusion(self, modality_inputs: Dict[ModalityType, torch.Tensor],
                                  attention_state: Optional[AttentionState],
                                  fusion_weights: FusionWeights) -> torch.Tensor:
        """
        Fuse modalities, then gate the result by the attention focus.
        
        Args:
            modality_inputs: Dictionary of modality inputs
            attention_state: Current attention state (None disables modulation)
            fusion_weights: Fusion weights
            
        Returns:
            Attention-modulated fused consciousness
        """
        base_fused = self._weighted_average_fusion(modality_inputs, fusion_weights)
        
        if attention_state is None:
            return base_fused
        
        # Sigmoid gates derived from the attention focus emphasize attended
        # regions of the fused representation.
        focus = self._adapt_dimension(attention_state.attention_focus)
        gates = torch.sigmoid(focus * 2.0)
        gated = base_fused * gates
        
        # Interpolate between raw and gated signals by attention strength.
        strength = attention_state.attention_strength
        return (1 - strength) * base_fused + strength * gated
    
    def _hierarchical_fusion(self, modality_inputs: Dict[ModalityType, torch.Tensor],
                           fusion_weights: FusionWeights) -> torch.Tensor:
        """
        Fuse modalities according to a fixed cognitive hierarchy.
        
        Higher-level cognition (reasoning, memory) outranks lower-level
        perception; each modality's base weight is scaled by its rank and
        the result is normalized by the accumulated weight.
        
        Args:
            modality_inputs: Dictionary of modality inputs
            fusion_weights: Fusion weights
            
        Returns:
            Hierarchically fused consciousness
        """
        # Rank 0 = highest priority.
        hierarchy_order = [
            ModalityType.REASONING,
            ModalityType.MEMORY,
            ModalityType.TEXTUAL,
            ModalityType.EMOTIONAL,
            ModalityType.VISUAL,
            ModalityType.AUDITORY,
            ModalityType.LINGUISTIC
        ]
        rank_count = len(hierarchy_order)
        
        fused = torch.zeros(self.fusion_dimension)
        accumulated_weight = 0.0
        
        for rank, modality in enumerate(hierarchy_order):
            tensor = modality_inputs.get(modality)
            if tensor is None:
                continue
            # Top rank gets full scale; each step down shrinks it linearly.
            rank_scale = (rank_count - rank) / rank_count
            weight = rank_scale * fusion_weights.modality_weights.get(modality, 0.0)
            fused = fused + weight * tensor
            accumulated_weight += weight
        
        return fused / accumulated_weight if accumulated_weight > 0 else fused
    
    def _competitive_fusion(self, modality_inputs: Dict[ModalityType, torch.Tensor],
                          fusion_weights: FusionWeights) -> torch.Tensor:
        """
        Winner-take-most fusion: the highest-weighted modality dominates.

        The dominant modality contributes at 70% strength while every other
        modality is suppressed to 30% of its weighted contribution.  If no
        modality has a strictly positive weight, or the winner has no input
        tensor, falls back to plain weighted-average fusion.

        Args:
            modality_inputs: Dictionary of modality inputs
            fusion_weights: Fusion weights

        Returns:
            Competitively fused consciousness
        """
        # Pick the modality with the largest strictly-positive weight.  On
        # ties, max() keeps the first maximal entry, matching the original
        # first-strictly-greater scan semantics.
        positive = [(w, m) for m, w in fusion_weights.modality_weights.items() if w > 0.0]
        winner = max(positive, key=lambda pair: pair[0])[1] if positive else None

        if winner is None or winner not in modality_inputs:
            # No clear winner available: degrade gracefully to averaging.
            return self._weighted_average_fusion(modality_inputs, fusion_weights)

        # Dominant contribution at 70% strength.
        fused = modality_inputs[winner] * 0.7

        # Losers still contribute, suppressed to 30% of their weighted input.
        for modality, tensor in modality_inputs.items():
            if modality != winner:
                w = fusion_weights.modality_weights.get(modality, 0.0)
                fused = fused + tensor * w * 0.3

        return fused
    
    def _cooperative_fusion(self, modality_inputs: Dict[ModalityType, torch.Tensor],
                          fusion_weights: FusionWeights) -> torch.Tensor:
        """
        Fusion with mutual enhancement between correlated modalities.

        Starts from the weighted average, then for every unordered pair of
        modalities whose cosine similarity exceeds 0.3 adds a reciprocal
        enhancement: each member of the pair is boosted in proportion to the
        correlation and the *other* member's fusion weight.

        Args:
            modality_inputs: Dictionary of modality inputs
            fusion_weights: Fusion weights

        Returns:
            Cooperatively fused consciousness
        """
        fused = self._weighted_average_fusion(modality_inputs, fusion_weights)

        entries = list(modality_inputs.items())
        n = len(entries)

        # Examine each unordered pair of modalities exactly once.
        for a in range(n):
            type_a, tensor_a = entries[a]
            for b in range(a + 1, n):
                type_b, tensor_b = entries[b]

                similarity = torch.cosine_similarity(
                    tensor_a.unsqueeze(0), tensor_b.unsqueeze(0)
                ).item()

                # Only significantly positively correlated pairs reinforce
                # each other.
                if similarity <= 0.3:
                    continue

                boost = similarity * 0.2
                weight_a = fusion_weights.modality_weights.get(type_a, 0.0)
                weight_b = fusion_weights.modality_weights.get(type_b, 0.0)

                # Each modality is enhanced in proportion to the partner's
                # weight — the more important the partner, the bigger the
                # boost it confers.
                enhancement = tensor_a * boost * weight_b + tensor_b * boost * weight_a
                fused = fused + enhancement

        return fused
    
    def _integrate_cultural_context(self, fused_consciousness: torch.Tensor,
                                  cultural_embedding: torch.Tensor) -> torch.Tensor:
        """
        Blend cultural context into the fused consciousness.

        The consciousness vector and cultural embedding are concatenated and
        passed through the cultural integration network (inference only); the
        network output is then mixed with the original consciousness
        according to the current cultural weight.

        Args:
            fused_consciousness: Base fused consciousness
            cultural_embedding: Cultural embedding tensor

        Returns:
            Culturally integrated consciousness
        """
        network_input = torch.cat([fused_consciousness, cultural_embedding])

        # Inference only — no gradients should flow through integration.
        with torch.no_grad():
            integrated = self.cultural_integration_network(network_input)

        # Linear blend: w = 0 keeps the original vector, w = 1 fully adopts
        # the culturally integrated one.
        w = self.current_fusion_weights.cultural_weight
        return (1 - w) * fused_consciousness + w * integrated
    
    def _apply_temporal_consistency(self, current_consciousness: torch.Tensor,
                                  temporal_context: torch.Tensor) -> torch.Tensor:
        """
        Smooth the consciousness trajectory against the previous fused state.

        The most recent entry of ``fused_consciousness_history`` is combined
        with the current state through the temporal consistency network, and
        the result is blended in according to the temporal weight.

        NOTE(review): the ``temporal_context`` argument is currently ignored —
        the previous state is always read from the history buffer. Confirm
        with callers whether it was meant to be used.

        Args:
            current_consciousness: Current fused consciousness
            temporal_context: Previous consciousness state (currently unused)

        Returns:
            Temporally consistent consciousness
        """
        # First fusion ever: nothing to be consistent with.
        if not self.fused_consciousness_history:
            return current_consciousness

        previous = self.fused_consciousness_history[-1]
        network_input = torch.cat([current_consciousness, previous])

        # Inference only — the consistency network is not trained here.
        with torch.no_grad():
            smoothed = self.temporal_consistency_network(network_input)

        w = self.current_fusion_weights.temporal_weight
        return (1 - w) * current_consciousness + w * smoothed
    
    def _update_fusion_metrics(self, fused_consciousness: torch.Tensor,
                             modality_states: Dict[ModalityType, ModalityState]):
        """
        Record per-fusion quality metrics into the rolling histories.

        Appended metrics:
        - coherence: mean |cosine similarity| between the fused vector and
          each contributing modality activation;
        - diversity: normalized entropy of the softmax over the fused
          vector's absolute activations;
        - cultural strength: current cultural embedding strength;
        - temporal consistency: cosine similarity with the previous fused
          state (only once history exists).

        Each history is capped at 100 entries.

        Args:
            fused_consciousness: Current fused consciousness
            modality_states: Modality states used in fusion
        """
        fused_row = fused_consciousness.unsqueeze(0)

        # Coherence: how well the fusion agrees with each input modality.
        similarities = [
            abs(torch.cosine_similarity(
                fused_row, self._adapt_dimension(state.activation).unsqueeze(0)
            ).item())
            for state in modality_states.values()
        ]
        mean_coherence = np.mean(similarities) if similarities else 0.0
        self.fusion_metrics['coherence_history'].append(mean_coherence)

        # Diversity: entropy of softmax(|activations|), normalized by the
        # maximum possible entropy log(dim); +1e-10 guards log(0).
        probs = torch.softmax(torch.abs(fused_consciousness), dim=0)
        entropy = -torch.sum(probs * torch.log(probs + 1e-10)).item()
        entropy_cap = math.log(len(fused_consciousness))
        self.fusion_metrics['diversity_history'].append(
            entropy / entropy_cap if entropy_cap > 0 else 0.0
        )

        # Cultural strength is tracked directly from the embedding.
        self.fusion_metrics['cultural_strength_history'].append(
            self.cultural_embedding.embedding_strength
        )

        # Temporal consistency requires at least one previous fused state.
        if self.fused_consciousness_history:
            previous = self.fused_consciousness_history[-1]
            consistency = torch.cosine_similarity(
                fused_row, previous.unsqueeze(0)
            ).item()
            self.fusion_metrics['temporal_consistency_history'].append(consistency)

        # Keep every history bounded (one append per call, so dropping the
        # single oldest entry suffices).
        for history in self.fusion_metrics.values():
            if len(history) > 100:
                history.pop(0)
    
    def _adapt_dimension(self, tensor: torch.Tensor) -> torch.Tensor:
        """Adapt tensor to fusion dimension."""
        if tensor.shape[-1] == self.fusion_dimension:
            return tensor
        elif tensor.shape[-1] > self.fusion_dimension:
            return tensor[..., :self.fusion_dimension]
        else:
            padding = torch.zeros(*tensor.shape[:-1], 
                                self.fusion_dimension - tensor.shape[-1])
            return torch.cat([tensor, padding], dim=-1)
    
    def set_cultural_context(self, historical_period: str, cultural_context: Dict[str, Any]):
        """
        Set cultural context for consciousness fusion.

        Copies the recognized fields out of ``cultural_context`` onto the
        cultural embedding, then re-encodes the embedding so subsequent
        fusions reflect the new period.

        Args:
            historical_period: Historical period identifier
            cultural_context: Cultural context information
        """
        embedding = self.cultural_embedding
        embedding.historical_period = historical_period

        # Map context keys to the embedding attributes they populate; only
        # keys actually present in the context are copied over.
        field_map = {
            'cultural_values': 'cultural_values',
            'social_norms': 'social_norms',
            'worldview': 'worldview_context',
            'language_patterns': 'language_patterns',
        }
        for context_key, attr_name in field_map.items():
            if context_key in cultural_context:
                setattr(embedding, attr_name, cultural_context[context_key])

        # Re-encode so the embedding reflects the updated context.
        embedding.encode_cultural_context(cultural_context)

        logger.info(f"Cultural context set for period: {historical_period}")
    
    def get_fusion_report(self) -> Dict[str, Any]:
        """
        Generate a comprehensive fusion report.

        The report covers the active strategy and weights, cross-modal
        binding statistics, rolling averages/stabilities over the last ten
        metric samples, and a summary of the cultural context.

        Returns:
            Dictionary containing fusion analysis
        """
        weights = self.current_fusion_weights
        report: Dict[str, Any] = {
            'fusion_strategy': self.fusion_strategy.value,
            'fusion_dimension': self.fusion_dimension,
            'cultural_embedding_strength': self.cultural_embedding.embedding_strength,
            'current_fusion_weights': {
                'modality_weights': {
                    modality.value: w
                    for modality, w in weights.modality_weights.items()
                },
                'cultural_weight': weights.cultural_weight,
                'temporal_weight': weights.temporal_weight,
            },
        }

        # Merge in the cross-modal binding statistics.
        report.update(self.cross_modal_binder.get_binding_report())

        # Rolling statistics over the ten most recent metric samples.
        recent_coherence = self.fusion_metrics['coherence_history'][-10:]
        if recent_coherence:
            report['avg_coherence'] = np.mean(recent_coherence)
            report['coherence_stability'] = 1.0 - np.std(recent_coherence)

        recent_diversity = self.fusion_metrics['diversity_history'][-10:]
        if recent_diversity:
            report['avg_diversity'] = np.mean(recent_diversity)

        recent_temporal = self.fusion_metrics['temporal_consistency_history'][-10:]
        if recent_temporal:
            report['avg_temporal_consistency'] = np.mean(recent_temporal)
            report['temporal_stability'] = 1.0 - np.std(recent_temporal)

        # Summarize the cultural configuration currently in effect.
        report['cultural_context'] = {
            'historical_period': self.cultural_embedding.historical_period,
            'num_cultural_values': len(self.cultural_embedding.cultural_values),
            'num_social_norms': len(self.cultural_embedding.social_norms),
            'worldview_aspects': len(self.cultural_embedding.worldview_context),
        }

        return report
    
    def reset_fusion_engine(self):
        """Reset the fusion engine to its initial state.

        Restores default fusion weights and cultural embedding, clears the
        fused-consciousness history and every rolling metric series, and
        empties the cross-modal binder's bindings and history.
        """
        # Fresh default weights and cultural embedding.
        self.current_fusion_weights = FusionWeights()
        self.cultural_embedding = CulturalEmbedding()
        self.fused_consciousness_history.clear()

        # Drop every recorded metric series (in place, keeping the keys).
        for history in self.fusion_metrics.values():
            history.clear()

        # Forget all cross-modal bindings.
        binder = self.cross_modal_binder
        binder.active_bindings.clear()
        binder.binding_strengths.clear()
        binder.binding_history.clear()

        logger.info("Consciousness Fusion Engine reset to initial state")