"""
Attention Schema Theory (AST) Engine for Meta-Cognitive Awareness.

This module implements Graziano's Attention Schema Theory, where consciousness
emerges from the brain's internal model of its own attention processes. The system
maintains a schema that tracks, predicts, and controls attention allocation across
cognitive resources.

Key Features:
- Attention state modeling and prediction
- Meta-cognitive awareness of attention allocation  
- Internal models for attention resource management
- Self-monitoring mechanisms for consciousness validation
"""

import torch
import numpy as np
from typing import Dict, List, Optional, Tuple, Any, Union, Callable
from dataclasses import dataclass, field
import math
import logging
from enum import Enum

from .models import (
    AttentionState, 
    AttentionMode, 
    ModalityType,
    ConsciousnessLevel
)

logger = logging.getLogger(__name__)


class AttentionControlSignal(Enum):
    """Types of attention control signals."""
    ENHANCE = "enhance"    # amplify attention at a location
    SUPPRESS = "suppress"  # damp attention at a location
    MAINTAIN = "maintain"  # hold the current allocation steady
    SWITCH = "switch"      # redirect attention (emitted when prediction error exceeds the switching threshold)
    DIVIDE = "divide"      # split attention across multiple targets


@dataclass
class AttentionEvent:
    """
    Represents an attention switching or control event.

    Instances are recorded by AttentionController when a control action
    fires (e.g. a SWITCH when prediction error exceeds the threshold).
    """
    timestamp: float  # event time; the controller records 0.0 and leaves it to the calling system
    event_type: AttentionControlSignal  # which control action occurred
    source_location: Optional[torch.Tensor] = None  # presumably where attention moved from — unset in visible code
    target_location: Optional[torch.Tensor] = None  # presumably where attention moved to — unset in visible code
    strength: float = 0.0  # magnitude of the control action (e.g. the error norm for SWITCH)
    duration: float = 0.0  # how long the event lasted, if known
    metadata: Dict[str, Any] = field(default_factory=dict)  # free-form extra information


class AttentionModel:
    """
    Internal model of attention processes for meta-cognitive awareness.

    This model maintains predictions about attention allocation, switching patterns,
    and resource utilization to enable conscious control over attention.

    NOTE: attention states are treated as 1-D tensors of length
    ``attention_dimension``; the prediction network flattens the history
    window, so batched inputs are not supported.
    """

    def __init__(self, attention_dimension: int = 512, prediction_window: int = 10):
        """
        Initialize attention model.

        Args:
            attention_dimension: Dimension of attention representations
            prediction_window: Number of time steps used as prediction context
        """
        self.attention_dimension = attention_dimension
        self.prediction_window = prediction_window

        # Model parameters. The networks are randomly initialized and used
        # inference-only (under torch.no_grad()); no training happens here.
        self.prediction_network = self._build_prediction_network()
        self.confidence_estimator = self._build_confidence_estimator()

        # State tracking (all histories are bounded; see update_model)
        self.attention_history = []
        self.prediction_history = []
        self.confidence_history = []
        self.error_history = []

        # Meta-cognitive parameters
        self.self_awareness_threshold = 0.7
        self.prediction_accuracy = 0.5   # EMA of recent prediction accuracy
        self.attention_volatility = 0.0  # std of recent prediction errors

        logger.debug("Attention model initialized")

    def _build_prediction_network(self) -> torch.nn.Module:
        """
        Build neural network for attention prediction.

        Maps a flattened window of past attention states to the next state.

        Returns:
            PyTorch model for attention prediction
        """
        return torch.nn.Sequential(
            torch.nn.Linear(self.attention_dimension * self.prediction_window, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, self.attention_dimension),
            torch.nn.Tanh()  # predictions are bounded to [-1, 1]
        )

    def _build_confidence_estimator(self) -> torch.nn.Module:
        """
        Build network for confidence estimation.

        Returns:
            PyTorch model mapping a (current, prediction) pair to a [0, 1] score
        """
        return torch.nn.Sequential(
            torch.nn.Linear(self.attention_dimension * 2, 64),  # Current + prediction
            torch.nn.ReLU(),
            torch.nn.Linear(64, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, 1),
            torch.nn.Sigmoid()
        )

    def update_model(self, current_attention: torch.Tensor) -> Tuple[torch.Tensor, float]:
        """
        Update attention model with current attention state.

        Args:
            current_attention: Current attention state (1-D tensor)

        Returns:
            Tuple of (predicted_attention, confidence). Until the history
            window is full, the input itself is returned with confidence 0.5.
        """
        # Ensure correct dimension
        if current_attention.shape[-1] != self.attention_dimension:
            current_attention = self._adapt_dimension(current_attention)

        # Update history (keep at most two prediction windows of context)
        self.attention_history.append(current_attention.clone())
        if len(self.attention_history) > self.prediction_window * 2:
            self.attention_history.pop(0)

        # Make a prediction only once a full window of history is available
        if len(self.attention_history) >= self.prediction_window:
            predicted_attention = self._predict_next_attention()
            confidence = self._estimate_confidence(current_attention, predicted_attention)

            # Update prediction history
            self.prediction_history.append(predicted_attention.clone())
            self.confidence_history.append(confidence)

            # Score the PREVIOUS prediction against the state just observed
            if len(self.prediction_history) > 1:
                previous_prediction = self.prediction_history[-2]
                error = torch.nn.functional.mse_loss(current_attention, previous_prediction).item()
                self.error_history.append(error)

                # Update prediction accuracy
                self._update_prediction_accuracy()

            # Limit history sizes
            if len(self.prediction_history) > 100:
                self.prediction_history.pop(0)
                self.confidence_history.pop(0)

            if len(self.error_history) > 50:
                self.error_history.pop(0)

            return predicted_attention, confidence

        else:
            # Not enough history: echo the current state at neutral confidence
            return current_attention, 0.5

    def _predict_next_attention(self) -> torch.Tensor:
        """
        Predict the next attention state from the most recent history window.

        Returns:
            Predicted attention state (1-D tensor)
        """
        # Prepare input: (window, dim) stacked, then flattened to (window * dim,)
        history_sequence = torch.stack(self.attention_history[-self.prediction_window:])
        history_flat = history_sequence.flatten()

        # Inference only; the network is never trained in this class
        with torch.no_grad():
            predicted = self.prediction_network(history_flat)

        return predicted

    def _estimate_confidence(self, current: torch.Tensor, predicted: torch.Tensor) -> float:
        """
        Estimate confidence in the attention prediction.

        Args:
            current: Current attention state
            predicted: Predicted attention state

        Returns:
            Confidence value [0, 1]
        """
        # Concatenate current and predicted states as estimator input
        combined_input = torch.cat([current, predicted])

        with torch.no_grad():
            confidence = self.confidence_estimator(combined_input)

        return confidence.item()

    def _update_prediction_accuracy(self):
        """Update the running prediction-accuracy estimate from recent errors."""
        if not self.error_history:
            return

        # Average over up to the last 10 recorded errors
        recent_errors = self.error_history[-10:]
        avg_error = np.mean(recent_errors)

        # Convert error to accuracy (lower error = higher accuracy).
        # NOTE(review): implicitly assumes MSE stays roughly within [0, 1];
        # larger errors simply clamp accuracy to 0.
        accuracy = max(0.0, 1.0 - avg_error)

        # Update with exponential moving average
        alpha = 0.1
        self.prediction_accuracy = (1 - alpha) * self.prediction_accuracy + alpha * accuracy

        # Update volatility measure
        if len(recent_errors) > 1:
            self.attention_volatility = np.std(recent_errors)

    def _adapt_dimension(self, tensor: torch.Tensor) -> torch.Tensor:
        """Truncate or zero-pad the last dim of a tensor to attention_dimension."""
        if tensor.shape[-1] > self.attention_dimension:
            return tensor[..., :self.attention_dimension]
        else:
            # Fix: allocate padding with the input's dtype/device so GPU or
            # non-default-dtype tensors no longer fail inside torch.cat.
            padding = torch.zeros(*tensor.shape[:-1],
                                self.attention_dimension - tensor.shape[-1],
                                dtype=tensor.dtype, device=tensor.device)
            return torch.cat([tensor, padding], dim=-1)

    def get_meta_cognitive_awareness(self) -> float:
        """
        Calculate meta-cognitive awareness level based on model performance.

        Returns:
            Meta-cognitive awareness [0, 1]
        """
        # Base awareness on prediction accuracy
        accuracy_component = self.prediction_accuracy

        # Confidence consistency: low variance of recent confidences = consistent
        if len(self.confidence_history) > 5:
            confidence_std = np.std(self.confidence_history[-5:])
            confidence_consistency = max(0.0, 1.0 - confidence_std)
        else:
            confidence_consistency = 0.5

        # Temporal stability: low error volatility = stable attention
        stability_component = max(0.0, 1.0 - self.attention_volatility)

        # Weighted combination of the three components
        awareness = (0.5 * accuracy_component +
                    0.3 * confidence_consistency +
                    0.2 * stability_component)

        return min(1.0, max(0.0, awareness))


class AttentionController:
    """
    Attention control system for directing and switching attention.
    """

    def __init__(self, control_dimension: int = 512):
        """
        Initialize attention controller.

        Args:
            control_dimension: Dimension of control signals
        """
        self.control_dimension = control_dimension

        # Control parameters
        self.switching_threshold = 0.3   # error norm above which a switch fires
        self.enhancement_gain = 1.5      # gain applied to strong predictions
        self.suppression_factor = 0.3    # scale used to damp high-error regions
        self.maintenance_decay = 0.95    # NOTE(review): unused in visible code

        # Control state
        self.current_control_signal = torch.zeros(control_dimension)
        self.control_history = []    # bounded to 100 entries
        self.switching_events = []   # bounded to 50 entries

        # Resource management
        self.total_resources = 1.0      # attention budget (cap on signal norm)
        self.resource_allocation = {}
        self.resource_efficiency = 0.8  # usable fraction of the budget

        logger.debug("Attention controller initialized")

    def generate_control_signal(self, attention_prediction: torch.Tensor,
                               attention_error: torch.Tensor,
                               task_demands: Optional[Dict[str, float]] = None) -> torch.Tensor:
        """
        Generate attention control signal based on predictions and errors.

        Args:
            attention_prediction: Predicted attention state
            attention_error: Prediction error signal
            task_demands: Optional task-specific attention demands

        Returns:
            Control signal tensor of shape (control_dimension,)
        """
        control_signal = torch.zeros(self.control_dimension)

        # Error-based control: a large prediction error triggers a switch
        error_magnitude = torch.norm(attention_error).item()

        if error_magnitude > self.switching_threshold:
            control_signal += self._generate_switching_signal(attention_error)

            # Record switching event (timestamp is filled in by the caller)
            switch_event = AttentionEvent(
                timestamp=0.0,  # Would be set by calling system
                event_type=AttentionControlSignal.SWITCH,
                strength=error_magnitude
            )
            self.switching_events.append(switch_event)

        # Prediction-based enhancement: amplify confident predictions
        prediction_strength = torch.norm(attention_prediction).item()
        if prediction_strength > 0.7:
            enhancement_signal = attention_prediction * self.enhancement_gain
            # Fix: pad as well as truncate, so a prediction shorter than
            # control_dimension can no longer raise on the addition.
            control_signal += self._fit_dimension(enhancement_signal)

        # Task-demand based control
        if task_demands:
            control_signal += self._process_task_demands(task_demands)

        # Resource management
        control_signal = self._apply_resource_constraints(control_signal)

        # Update control state
        self.current_control_signal = control_signal
        self.control_history.append(control_signal.clone())

        # Limit history sizes
        if len(self.control_history) > 100:
            self.control_history.pop(0)

        if len(self.switching_events) > 50:
            self.switching_events.pop(0)

        return control_signal

    def _fit_dimension(self, tensor: torch.Tensor) -> torch.Tensor:
        """Truncate or zero-pad a 1-D tensor to exactly control_dimension."""
        size = tensor.shape[-1]
        if size == self.control_dimension:
            return tensor
        if size > self.control_dimension:
            return tensor[..., :self.control_dimension]
        padding = torch.zeros(self.control_dimension - size,
                              dtype=tensor.dtype, device=tensor.device)
        return torch.cat([tensor, padding], dim=-1)

    def _generate_switching_signal(self, error: torch.Tensor) -> torch.Tensor:
        """
        Generate attention switching signal based on error.

        Args:
            error: Attention error tensor

        Returns:
            Switching control signal of shape (control_dimension,)
        """
        # Find locations of highest error
        error_magnitude = torch.abs(error)

        # The switching signal suppresses high-error locations and mildly
        # enhances the complementary (low-error) locations.
        switch_signal = torch.zeros_like(error)

        # Suppress regions whose error exceeds the mean error
        high_error_mask = error_magnitude > torch.mean(error_magnitude)
        switch_signal[high_error_mask] = -error_magnitude[high_error_mask] * self.suppression_factor

        # Enhance low-error regions
        low_error_mask = ~high_error_mask
        switch_signal[low_error_mask] = torch.mean(error_magnitude) * 0.5

        # Fix: pad as well as truncate so a short error tensor cannot make
        # the caller's addition fail on a dimension mismatch.
        return self._fit_dimension(switch_signal)

    def _process_task_demands(self, demands: Dict[str, float]) -> torch.Tensor:
        """
        Process task demands into a control signal.

        The control dimension is partitioned into equal contiguous segments,
        one per demand, each filled with that demand's strength.

        Args:
            demands: Dictionary of task demands

        Returns:
            Task-based control signal of shape (control_dimension,)
        """
        signal = torch.zeros(self.control_dimension)

        # Fix: guard against an empty demand dict (previously a potential
        # ZeroDivisionError if called directly with {}).
        if not demands:
            return signal

        demand_items = list(demands.items())
        signal_per_demand = self.control_dimension // len(demand_items)

        start_idx = 0
        for demand_name, demand_strength in demand_items:
            end_idx = min(start_idx + signal_per_demand, self.control_dimension)
            signal[start_idx:end_idx] = demand_strength
            start_idx = end_idx

        return signal

    def _apply_resource_constraints(self, control_signal: torch.Tensor) -> torch.Tensor:
        """
        Apply resource constraints to a control signal.

        Args:
            control_signal: Unconstrained control signal

        Returns:
            Resource-constrained control signal (norm capped at
            total_resources * resource_efficiency when over budget)
        """
        # Current resource usage is measured as the signal norm
        signal_magnitude = torch.norm(control_signal).item()

        if signal_magnitude > self.total_resources:
            # Normalize down to the available budget, then apply efficiency
            scaling_factor = self.total_resources / signal_magnitude
            control_signal = control_signal * scaling_factor * self.resource_efficiency

        return control_signal

    def allocate_resources(self, modalities: Dict[str, float]) -> Dict[str, float]:
        """
        Allocate attention resources across modalities.

        Args:
            modalities: Dictionary of modality importance scores

        Returns:
            Dictionary of resource allocations (empty if `modalities` is empty)
        """
        # Fix: an empty dict previously raised ZeroDivisionError on the
        # equal-allocation branch.
        if not modalities:
            self.resource_allocation = {}
            return {}

        # Normalize importance scores
        total_importance = sum(modalities.values())
        if total_importance == 0:
            # Equal allocation if no preferences
            allocation = {mod: self.total_resources / len(modalities)
                         for mod in modalities.keys()}
        else:
            # Proportional allocation
            allocation = {mod: (importance / total_importance) * self.total_resources
                         for mod, importance in modalities.items()}

        # Apply efficiency factor
        for mod in allocation:
            allocation[mod] *= self.resource_efficiency

        self.resource_allocation = allocation
        return allocation


class AttentionSchemaEngine:
    """
    Main Attention Schema Theory engine for meta-cognitive awareness.

    This engine implements the core AST mechanisms:
    1. Attention state tracking and modeling
    2. Prediction of attention dynamics
    3. Meta-cognitive awareness through self-monitoring
    4. Attention control and resource management
    """

    def __init__(self, attention_dimension: int = 512,
                 num_modalities: int = 7,
                 self_monitoring_rate: float = 0.1):
        """
        Initialize the Attention Schema Engine.

        Args:
            attention_dimension: Dimension of attention representations
            num_modalities: Number of attention modalities to track
            self_monitoring_rate: Rate (EMA factor) of self-monitoring updates
        """
        self.attention_dimension = attention_dimension
        self.num_modalities = num_modalities
        self.self_monitoring_rate = self_monitoring_rate

        # Core components (controller shares the attention dimension)
        self.attention_model = AttentionModel(attention_dimension)
        self.attention_controller = AttentionController(attention_dimension)

        # Attention state
        self.current_attention_state = AttentionState(
            attention_focus=torch.zeros(attention_dimension)
        )

        # Self-monitoring system
        self.self_monitoring_strength = 0.0
        self.introspective_access = 0.0
        self.metacognitive_confidence = 0.0

        # Schema predictions
        self.attention_schema = torch.zeros(attention_dimension)
        self.schema_confidence = 0.5
        self.schema_update_rate = 0.05

        # Performance tracking
        self.monitoring_history = []
        self.schema_accuracy_history = []
        self.control_effectiveness_history = []

        logger.info(f"Attention Schema Engine initialized with dimension {attention_dimension}")

    def update_attention_state(self, sensory_inputs: Dict[ModalityType, torch.Tensor],
                             task_context: Optional[Dict[str, Any]] = None,
                             voluntary_control: Optional[torch.Tensor] = None) -> AttentionState:
        """
        Update attention state based on inputs and context.

        Args:
            sensory_inputs: Dictionary of sensory input tensors
            task_context: Optional task context information
            voluntary_control: Optional voluntary attention control signal

        Returns:
            Updated attention state
        """
        # Process sensory inputs to determine attention demands
        attention_demands = self._compute_attention_demands(sensory_inputs)

        # Apply task context modulation
        if task_context:
            attention_demands = self._apply_task_context(attention_demands, task_context)

        # Update attention model. NOTE: the model sees the pre-voluntary-
        # control focus; voluntary control is layered on afterwards.
        current_focus = self._compute_attention_focus(attention_demands)
        predicted_attention, prediction_confidence = self.attention_model.update_model(current_focus)

        # Apply voluntary control if provided
        if voluntary_control is not None:
            current_focus = self._apply_voluntary_control(current_focus, voluntary_control)

        # Fix: preserve the rolling attention history across state objects.
        # A fresh AttentionState is built on every update, which previously
        # reset the history to a single entry despite the trim logic below.
        previous_history = self.current_attention_state.attention_history

        # Update attention state
        self.current_attention_state = AttentionState(
            attention_focus=current_focus,
            attention_strength=torch.norm(current_focus).item(),
            attention_mode=self._determine_attention_mode(current_focus),
            modality_weights=self._compute_modality_weights(sensory_inputs),
            resource_allocation=self.attention_controller.resource_allocation,
            attention_confidence=prediction_confidence,
            self_monitoring_strength=self._update_self_monitoring(),
            metacognitive_awareness=self.attention_model.get_meta_cognitive_awareness(),
            predicted_attention=predicted_attention,
            prediction_confidence=prediction_confidence
        )
        self.current_attention_state.attention_history = previous_history

        # Update attention history (bounded to the last 100 states)
        self.current_attention_state.attention_history.append(current_focus.clone())
        if len(self.current_attention_state.attention_history) > 100:
            self.current_attention_state.attention_history.pop(0)

        # Update schema
        self._update_attention_schema()

        return self.current_attention_state

    def _compute_attention_demands(self, sensory_inputs: Dict[ModalityType, torch.Tensor]) -> torch.Tensor:
        """
        Compute attention demands from sensory inputs.

        The attention dimension is partitioned into equal contiguous segments,
        one per modality; any remainder dimensions stay zero.

        Args:
            sensory_inputs: Dictionary of sensory inputs

        Returns:
            Attention demand tensor of shape (attention_dimension,)
        """
        demands = torch.zeros(self.attention_dimension)

        if not sensory_inputs:
            return demands

        # Allocate attention dimension across modalities
        dim_per_modality = self.attention_dimension // len(sensory_inputs)

        start_idx = 0
        for modality_type, input_tensor in sensory_inputs.items():
            end_idx = min(start_idx + dim_per_modality, self.attention_dimension)

            # Compute salience of this input
            salience = self._compute_input_salience(input_tensor, modality_type)

            # Fill attention demand for this modality
            demands[start_idx:end_idx] = salience
            start_idx = end_idx

        return demands

    def _compute_input_salience(self, input_tensor: torch.Tensor, modality_type: ModalityType) -> float:
        """
        Compute salience of an input for attention allocation.

        Args:
            input_tensor: Input tensor
            modality_type: Type of modality

        Returns:
            Salience value, capped at 2.0
        """
        # Base salience on input magnitude and variability
        magnitude = torch.norm(input_tensor).item()

        if input_tensor.numel() > 1:
            variability = torch.std(input_tensor).item()
        else:
            variability = 0.0

        # Modality-specific salience weighting (heuristic priors; e.g.
        # emotional input is weighted highest, memory lowest)
        modality_weights = {
            ModalityType.VISUAL: 1.2,
            ModalityType.AUDITORY: 1.1,
            ModalityType.EMOTIONAL: 1.5,
            ModalityType.TEXTUAL: 1.0,
            ModalityType.MEMORY: 0.8,
            ModalityType.REASONING: 0.9,
            ModalityType.LINGUISTIC: 1.0
        }

        weight = modality_weights.get(modality_type, 1.0)
        salience = (magnitude + variability) * weight

        return min(salience, 2.0)  # Cap maximum salience

    def _apply_task_context(self, attention_demands: torch.Tensor,
                           task_context: Dict[str, Any]) -> torch.Tensor:
        """
        Apply task context to modulate attention demands.

        Args:
            attention_demands: Base attention demands
            task_context: Task context information

        Returns:
            Context-modulated attention demands
        """
        modulated_demands = attention_demands.clone()

        # Apply task-specific attention biases
        if 'focus_modalities' in task_context:
            focus_modalities = task_context['focus_modalities']
            for modality, weight in focus_modalities.items():
                # Simplified implementation: each focused modality scales the
                # WHOLE demand vector, not just its own segment.
                modulated_demands *= (1.0 + weight * 0.2)

        if 'attention_type' in task_context:
            attention_type = task_context['attention_type']
            if attention_type == 'focused':
                # Enhance the (up to 10) strongest demands
                peak_indices = torch.topk(torch.abs(modulated_demands),
                                        k=min(10, len(modulated_demands))).indices
                modulated_demands[peak_indices] *= 1.3
            elif attention_type == 'diffuse':
                # Smooth out demands while preserving the total mass
                modulated_demands = torch.softmax(modulated_demands, dim=0) * torch.sum(modulated_demands)

        return modulated_demands

    def _compute_attention_focus(self, attention_demands: torch.Tensor) -> torch.Tensor:
        """
        Compute attention focus from demands.

        Args:
            attention_demands: Attention demand tensor

        Returns:
            Attention focus tensor, squashed to (-1, 1) by tanh
        """
        # Prediction error relative to the last cycle's predicted attention
        prediction_error = torch.zeros_like(attention_demands)
        if hasattr(self.current_attention_state, 'predicted_attention') and \
           self.current_attention_state.predicted_attention is not None:
            prediction_error = attention_demands - self.current_attention_state.predicted_attention

        control_signal = self.attention_controller.generate_control_signal(
            attention_demands, prediction_error
        )

        # Combine demands and control
        attention_focus = attention_demands + 0.3 * control_signal

        # Apply normalization
        attention_focus = torch.tanh(attention_focus)

        return attention_focus

    def _apply_voluntary_control(self, current_focus: torch.Tensor,
                               voluntary_control: torch.Tensor) -> torch.Tensor:
        """
        Apply voluntary attention control.

        Args:
            current_focus: Current attention focus
            voluntary_control: Voluntary control signal

        Returns:
            Modified attention focus
        """
        # Ensure compatible dimensions (truncate or zero-pad the control)
        if voluntary_control.shape[-1] != current_focus.shape[-1]:
            if voluntary_control.shape[-1] > current_focus.shape[-1]:
                voluntary_control = voluntary_control[:current_focus.shape[-1]]
            else:
                # Fix: allocate padding with the control's dtype/device
                padding = torch.zeros(current_focus.shape[-1] - voluntary_control.shape[-1],
                                      dtype=voluntary_control.dtype,
                                      device=voluntary_control.device)
                voluntary_control = torch.cat([voluntary_control, padding])

        # Apply control with limited strength
        control_strength = 0.4  # Voluntary control has limited influence
        modified_focus = (1 - control_strength) * current_focus + control_strength * voluntary_control

        return modified_focus

    def _determine_attention_mode(self, attention_focus: torch.Tensor) -> AttentionMode:
        """
        Determine current attention mode from the focus pattern.

        Args:
            attention_focus: Current attention focus

        Returns:
            Attention mode
        """
        # Entropy of the (softmaxed) absolute focus, normalized by max entropy
        focus_entropy = -torch.sum(torch.softmax(torch.abs(attention_focus), dim=0) *
                                  torch.log_softmax(torch.abs(attention_focus), dim=0)).item()
        max_entropy = math.log(len(attention_focus))
        normalized_entropy = focus_entropy / max_entropy if max_entropy > 0 else 0

        focus_peak = torch.max(torch.abs(attention_focus)).item()
        focus_std = torch.std(attention_focus).item()

        # Sharp peak + low entropy -> focused; high entropy -> diffuse;
        # high spread -> divided; otherwise selective.
        if focus_peak > 1.5 and normalized_entropy < 0.3:
            return AttentionMode.FOCUSED
        elif normalized_entropy > 0.8:
            return AttentionMode.DIFFUSE
        elif focus_std > 0.5:
            return AttentionMode.DIVIDED
        else:
            return AttentionMode.SELECTIVE

    def _compute_modality_weights(self, sensory_inputs: Dict[ModalityType, torch.Tensor]) -> Dict[ModalityType, float]:
        """
        Compute attention weights for each modality.

        Args:
            sensory_inputs: Dictionary of sensory inputs

        Returns:
            Dictionary of modality weights (empty if there are no inputs)
        """
        weights = {}

        # Fix: an empty input dict previously raised ZeroDivisionError on
        # the uniform-weight fallback below.
        if not sensory_inputs:
            return weights

        total_salience = 0.0

        # Calculate salience for each modality
        saliences = {}
        for modality_type, input_tensor in sensory_inputs.items():
            salience = self._compute_input_salience(input_tensor, modality_type)
            saliences[modality_type] = salience
            total_salience += salience

        # Normalize to weights
        if total_salience > 0:
            for modality_type, salience in saliences.items():
                weights[modality_type] = salience / total_salience
        else:
            # Equal weights if no salience
            uniform_weight = 1.0 / len(sensory_inputs)
            weights = {mod: uniform_weight for mod in sensory_inputs.keys()}

        return weights

    def _update_self_monitoring(self) -> float:
        """
        Update self-monitoring strength.

        Returns:
            Updated self-monitoring strength
        """
        # Base monitoring on metacognitive awareness
        base_monitoring = self.attention_model.get_meta_cognitive_awareness()

        # Add prediction accuracy component
        prediction_component = self.attention_model.prediction_accuracy

        # Add schema confidence component
        schema_component = self.schema_confidence

        # Weighted combination
        monitoring_strength = (0.4 * base_monitoring +
                             0.3 * prediction_component +
                             0.3 * schema_component)

        # Exponential moving average update
        self.self_monitoring_strength = ((1 - self.self_monitoring_rate) * self.self_monitoring_strength +
                                       self.self_monitoring_rate * monitoring_strength)

        # Update history (bounded to 100 entries)
        self.monitoring_history.append(self.self_monitoring_strength)
        if len(self.monitoring_history) > 100:
            self.monitoring_history.pop(0)

        return self.self_monitoring_strength

    def _update_attention_schema(self):
        """Update the internal attention schema (EMA of the focus pattern)."""
        # Current attention pattern
        current_pattern = self.current_attention_state.attention_focus

        # Update schema with exponential moving average
        self.attention_schema = ((1 - self.schema_update_rate) * self.attention_schema +
                               self.schema_update_rate * current_pattern)

        # Update schema confidence based on recent prediction accuracy
        if len(self.attention_model.error_history) > 0:
            recent_error = np.mean(self.attention_model.error_history[-5:])
            accuracy = max(0.0, 1.0 - recent_error)

            # Update confidence with moving average
            alpha = 0.1
            self.schema_confidence = (1 - alpha) * self.schema_confidence + alpha * accuracy

        # Track schema accuracy (bounded to 100 entries)
        self.schema_accuracy_history.append(self.schema_confidence)
        if len(self.schema_accuracy_history) > 100:
            self.schema_accuracy_history.pop(0)

    def predict_attention_trajectory(self, steps: int = 5) -> List[torch.Tensor]:
        """
        Predict future attention trajectory.

        Args:
            steps: Number of future steps to predict

        Returns:
            List of predicted attention states
        """
        predictions = []
        current_state = self.current_attention_state.attention_focus.clone()

        # Fix: snapshot the model's state so this speculative roll-out does
        # not pollute its real history and accuracy statistics (previously
        # each predicted state was fed back in as if it were observed).
        model = self.attention_model
        saved_state = (list(model.attention_history),
                       list(model.prediction_history),
                       list(model.confidence_history),
                       list(model.error_history),
                       model.prediction_accuracy,
                       model.attention_volatility)
        try:
            for step in range(steps):
                # Use attention model to predict next state
                predicted_next, confidence = model.update_model(current_state)
                predictions.append(predicted_next.clone())

                # Use prediction as input for next step
                current_state = predicted_next
        finally:
            (model.attention_history,
             model.prediction_history,
             model.confidence_history,
             model.error_history,
             model.prediction_accuracy,
             model.attention_volatility) = saved_state

        return predictions

    def get_metacognitive_report(self) -> Dict[str, Any]:
        """
        Generate metacognitive report about attention state.

        Returns:
            Dictionary containing metacognitive information
        """
        report = {
            'self_monitoring_strength': self.self_monitoring_strength,
            'metacognitive_awareness': self.current_attention_state.metacognitive_awareness,
            'attention_confidence': self.current_attention_state.attention_confidence,
            'prediction_accuracy': self.attention_model.prediction_accuracy,
            'attention_volatility': self.attention_model.attention_volatility,
            'schema_confidence': self.schema_confidence,
            'attention_mode': self.current_attention_state.attention_mode.value,
            'resource_allocation': self.current_attention_state.resource_allocation,
            'modality_weights': {mod.value: weight for mod, weight in
                               self.current_attention_state.modality_weights.items()},
        }

        # Add temporal stability metrics (1 - std of recent values)
        if len(self.monitoring_history) > 5:
            report['monitoring_stability'] = 1.0 - np.std(self.monitoring_history[-10:])

        if len(self.schema_accuracy_history) > 5:
            report['schema_stability'] = 1.0 - np.std(self.schema_accuracy_history[-10:])

        # Add switching event summary
        recent_switches = [event for event in self.attention_controller.switching_events
                          if hasattr(event, 'timestamp')]  # Would filter by recent timestamp
        report['recent_attention_switches'] = len(recent_switches)

        return report

    def reset_engine(self):
        """Reset the attention schema engine to initial state."""
        self.attention_model = AttentionModel(self.attention_dimension)
        self.attention_controller = AttentionController(self.attention_dimension)

        self.current_attention_state = AttentionState(
            attention_focus=torch.zeros(self.attention_dimension)
        )

        self.self_monitoring_strength = 0.0
        self.introspective_access = 0.0
        self.metacognitive_confidence = 0.0

        self.attention_schema = torch.zeros(self.attention_dimension)
        self.schema_confidence = 0.5

        # Clear histories
        self.monitoring_history.clear()
        self.schema_accuracy_history.clear()
        self.control_effectiveness_history.clear()

        logger.info("Attention Schema Engine reset to initial state")