"""
Integrated Information Theory (IIT) Metrics Calculator.

This module implements mathematical frameworks from Integrated Information Theory
to quantify consciousness through measures of integrated information (Phi),
temporal coherence, and self-consistency. IIT provides objective metrics
for consciousness assessment in historical figure simulation.

Key Features:
- Temporal coherence indicator: T = corr(C(t), C(t-Δt))
- Self-consistency metric: S = sim(C_self(t₁), C_self(t₂))
- Consciousness feature measurement and quantification
- Phenomenological consciousness assessment
"""

import torch
import numpy as np
from typing import Dict, List, Optional, Tuple, Any, Union, Set
from dataclasses import dataclass, field
import math
import logging
from itertools import combinations
from scipy.stats import pearsonr
from scipy.spatial.distance import cosine

from .models import (
    IITMetrics,
    ConsciousnessState, 
    ModalityType,
    ConsciousnessLevel
)

logger = logging.getLogger(__name__)


@dataclass
class PhiComponent:
    """
    One candidate subset in the integrated-information (Phi) decomposition.

    Holds the per-subset quantities produced during the Phi search: the
    subset's Phi value, the average wiring strength among its elements,
    and the magnitude of its activations.
    """
    component_id: str
    elements: Set[int]
    phi_value: float = 0.0
    integration_strength: float = 0.0
    causal_power: float = 0.0

    def __post_init__(self):
        """Log a warning when the Phi value is out of range (negative)."""
        if self.phi_value < 0.0:
            logger.warning(
                f"Negative Phi value {self.phi_value} in component {self.component_id}"
            )


@dataclass 
class CausalStructure:
    """
    Represents the causal structure of a conscious system.

    Bundles the element indices, connectivity, and activations consumed by
    the Phi calculators. The two optional fields are accepted but not read
    by any calculation in this module.
    """
    elements: List[int]  # Indices of the system's elements
    connections: torch.Tensor  # Adjacency matrix (element x element connection weights)
    activation_states: torch.Tensor  # Current activation states of the elements
    transition_probabilities: Optional[torch.Tensor] = None  # Optional state-transition probabilities; unused by this module
    causal_effects: Optional[Dict[Tuple[int, int], float]] = None  # Optional pairwise causal-effect sizes; unused by this module


class InformationCalculator:
    """
    Calculator for information-theoretic measures in IIT.

    Inputs are treated as unnormalized non-negative weights and are
    normalized into probability distributions internally, so callers may
    pass raw activation magnitudes.
    """
    
    def __init__(self, precision: float = 1e-8):
        """
        Initialize information calculator.
        
        Args:
            precision: Numerical floor used to avoid log(0) and division by zero
        """
        self.precision = precision
        logger.debug("Information calculator initialized")
    
    def calculate_entropy(self, probability_distribution: torch.Tensor) -> float:
        """
        Calculate Shannon entropy of a probability distribution.
        
        Args:
            probability_distribution: Probability distribution tensor
                (unnormalized weights are accepted)
            
        Returns:
            Shannon entropy in bits; 0.0 for an empty or all-zero input
        """
        total = torch.sum(probability_distribution)
        
        # Fix: an all-zero input previously produced 0/0 -> NaN here. A
        # degenerate distribution carries no information, so return 0.0.
        if float(total) <= self.precision:
            return 0.0
        
        # Ensure probabilities sum to 1
        probs = probability_distribution / total
        
        # Floor probabilities to avoid log(0)
        probs = torch.clamp(probs, min=self.precision)
        
        # Calculate entropy: H(X) = -Σ p(x) log2 p(x)
        return -torch.sum(probs * torch.log2(probs)).item()
    
    def calculate_conditional_entropy(self, joint_dist: torch.Tensor, 
                                    marginal_dist: torch.Tensor) -> float:
        """
        Calculate conditional entropy H(X|Y).
        
        Args:
            joint_dist: Joint probability distribution P(X,Y)
            marginal_dist: Marginal probability distribution P(Y)
            
        Returns:
            Conditional entropy value (clamped to be non-negative)
        """
        # H(X|Y) = H(X,Y) - H(Y); normalization happens inside
        # calculate_entropy, so no explicit normalization is needed here.
        joint_entropy = self.calculate_entropy(joint_dist)
        marginal_entropy = self.calculate_entropy(marginal_dist)
        
        # Numerical noise can make the difference slightly negative
        return max(0.0, joint_entropy - marginal_entropy)
    
    def calculate_mutual_information(self, x_states: torch.Tensor, 
                                   y_states: torch.Tensor) -> float:
        """
        Calculate mutual information between two sets of states.
        
        NOTE: the joint distribution is approximated as the outer product of
        the two marginals (an independence assumption), so this is a
        simplified surrogate rather than a true empirical MI.
        
        Args:
            x_states: States of system X (any shape; flattened internally)
            y_states: States of system Y (any shape; flattened internally)
            
        Returns:
            Mutual information I(X;Y), clamped to be non-negative
        """
        # Fix: torch.outer requires 1-D inputs; flatten so batched/multi-dim
        # states no longer raise.
        x_probs = torch.softmax(x_states.flatten(), dim=-1)
        y_probs = torch.softmax(y_states.flatten(), dim=-1)
        
        # Calculate joint distribution (simplified as outer product)
        joint_probs = torch.outer(x_probs, y_probs)
        
        # Calculate entropies
        h_x = self.calculate_entropy(x_probs)
        h_y = self.calculate_entropy(y_probs)
        h_joint = self.calculate_entropy(joint_probs.flatten())
        
        # Mutual information: I(X;Y) = H(X) + H(Y) - H(X,Y)
        return max(0.0, h_x + h_y - h_joint)
    
    def calculate_effective_information(self, cause_states: torch.Tensor,
                                      effect_states: torch.Tensor) -> float:
        """
        Calculate effective information (EI) between cause and effect.
        
        Args:
            cause_states: Cause state distribution
            effect_states: Effect state distribution
            
        Returns:
            Effective information value (identical to the MI surrogate above)
        """
        return self.calculate_mutual_information(cause_states, effect_states)


class IntegratedInformationCalculator:
    """
    Calculator for integrated information (Phi) and related IIT measures.

    Phi is approximated by searching small subsets of system elements and
    comparing the information in each subset as a whole against its pairwise
    parts. NOTE: the subset search is combinatorial in the number of
    elements; it is only tractable for small systems or small subset caps.
    """
    
    def __init__(self, system_dimension: int = 512):
        """
        Initialize integrated information calculator.
        
        Args:
            system_dimension: Dimension of the consciousness system
        """
        self.system_dimension = system_dimension
        self.info_calculator = InformationCalculator()
        
        # Subset-search parameters; the max size is capped to keep the
        # combinatorial enumeration bounded.
        self.min_subset_size = 2
        self.max_subset_size = min(8, system_dimension // 4)  # Computational constraint
        self.phi_threshold = 1e-6  # Subsets with Phi at or below this are discarded
        
        logger.debug(f"Integrated information calculator initialized for dimension {system_dimension}")
    
    def calculate_phi(self, system_state: torch.Tensor, 
                     causal_structure: "CausalStructure") -> "Tuple[float, List[PhiComponent]]":
        """
        Calculate integrated information (Phi) for the system.
        
        Args:
            system_state: Current system state; the LAST dimension indexes
                the system's elements
            causal_structure: Causal structure of the system
            
        Returns:
            Tuple of (phi_value, phi_components); phi_value is the maximum
            subset Phi, or 0.0 when no subset exceeds the threshold
        """
        phi_components = []
        
        # Fix: enumerate elements along the feature (last) dimension, matching
        # the `state[..., indices]` indexing used throughout. The previous
        # `len(system_state)` returned the batch size for multi-dim inputs.
        num_elements = system_state.shape[-1]
        elements = list(range(num_elements))
        
        for subset_size in range(self.min_subset_size, min(self.max_subset_size + 1, num_elements + 1)):
            for subset in combinations(elements, subset_size):
                subset_set = set(subset)
                
                # Calculate Phi for this subset
                phi_value = self._calculate_subset_phi(subset_set, system_state, causal_structure)
                
                # Keep only subsets with non-negligible integration
                if phi_value > self.phi_threshold:
                    component = PhiComponent(
                        component_id=f"subset_{'_'.join(map(str, subset))}",
                        elements=subset_set,
                        phi_value=phi_value,
                        integration_strength=self._calculate_integration_strength(subset_set, causal_structure),
                        causal_power=self._calculate_causal_power(subset_set, system_state)
                    )
                    phi_components.append(component)
        
        # Overall Phi is the maximum Phi across all qualifying subsets
        if phi_components:
            max_phi = max(comp.phi_value for comp in phi_components)
        else:
            max_phi = 0.0
        
        return max_phi, phi_components
    
    def _calculate_subset_phi(self, subset: Set[int], system_state: torch.Tensor,
                            causal_structure: "CausalStructure") -> float:
        """
        Calculate Phi for a specific subset of elements.
        
        Simplified scheme: Phi is the information (entropy) of the whole
        subset minus the average entropy of its pairwise parts.
        
        Args:
            subset: Set of element indices
            system_state: System state tensor
            causal_structure: Causal structure (currently unused by this
                simplified calculation; kept for interface stability)
            
        Returns:
            Phi value for the subset (non-negative)
        """
        subset_indices = list(subset)
        
        # Guard: subset cannot be larger than the feature dimension
        if len(subset_indices) > system_state.shape[-1]:
            return 0.0
            
        subset_state = system_state[..., subset_indices]
        
        # Information in the whole subset (magnitudes serve as weights)
        whole_entropy = self.info_calculator.calculate_entropy(torch.abs(subset_state))
        
        # Information in parts: average entropy over all element pairs
        parts_entropy = 0.0
        num_pairs = 0
        
        for i in range(len(subset_indices)):
            for j in range(i + 1, len(subset_indices)):
                pair_state = subset_state[..., [i, j]]
                pair_entropy = self.info_calculator.calculate_entropy(torch.abs(pair_state))
                parts_entropy += pair_entropy
                num_pairs += 1
        
        if num_pairs > 0:
            avg_parts_entropy = parts_entropy / num_pairs
        else:
            avg_parts_entropy = 0.0
        
        # Phi as the (non-negative) excess of whole over parts
        return max(0.0, whole_entropy - avg_parts_entropy)
    
    def _calculate_integration_strength(self, subset: Set[int], 
                                      causal_structure: "CausalStructure") -> float:
        """
        Calculate integration strength for a subset.
        
        Defined as the mean absolute connection weight over all ordered
        pairs of distinct elements within the subset.
        
        Args:
            subset: Set of element indices
            causal_structure: Causal structure providing the adjacency matrix
            
        Returns:
            Integration strength value (0.0 for subsets smaller than 2)
        """
        subset_list = list(subset)
        
        if len(subset_list) < 2:
            return 0.0
        
        total_strength = 0.0
        num_connections = 0
        
        for i in subset_list:
            for j in subset_list:
                # Bounds check guards against indices beyond the adjacency matrix
                if i != j and i < causal_structure.connections.shape[0] and j < causal_structure.connections.shape[1]:
                    connection_strength = causal_structure.connections[i, j].item()
                    total_strength += abs(connection_strength)
                    num_connections += 1
        
        if num_connections > 0:
            return total_strength / num_connections
        else:
            return 0.0
    
    def _calculate_causal_power(self, subset: Set[int], system_state: torch.Tensor) -> float:
        """
        Calculate causal power of a subset.
        
        Defined as the L2 norm of the subset's activations.
        
        Args:
            subset: Set of element indices
            system_state: System state tensor
            
        Returns:
            Causal power value (0.0 for an empty or out-of-range subset)
        """
        subset_indices = list(subset)
        
        if len(subset_indices) == 0 or len(subset_indices) > system_state.shape[-1]:
            return 0.0
        
        subset_activations = system_state[..., subset_indices]
        return torch.norm(subset_activations).item()


class IITMetricsCalculator:
    """
    Main IIT metrics calculator for consciousness quantification.
    
    This calculator implements the core IIT metrics:
    1. Temporal coherence T = corr(C(t), C(t-Δt))
    2. Self-consistency S = sim(C_self(t₁), C_self(t₂))  
    3. Integrated information Phi
    4. Phenomenological consciousness assessment
    """
    
    def __init__(self, system_dimension: int = 512, coherence_window: int = 10,
                 consistency_threshold: float = 0.8):
        """
        Initialize IIT metrics calculator.
        
        Args:
            system_dimension: Dimension of consciousness representations
            coherence_window: Window size for temporal coherence calculation
            consistency_threshold: Threshold for self-consistency validation
        """
        self.system_dimension = system_dimension
        self.coherence_window = coherence_window
        self.consistency_threshold = consistency_threshold
        
        # Core calculators
        self.phi_calculator = IntegratedInformationCalculator(system_dimension)
        self.info_calculator = InformationCalculator()
        
        # Current metrics state
        self.current_metrics = IITMetrics(
            coherence_window=coherence_window,
            consistency_threshold=consistency_threshold
        )
        
        # Rolling histories for temporal analysis (each capped at 100 entries)
        self.consciousness_history = []
        self.phi_history = []
        self.coherence_history = []
        self.consistency_history = []
        
        logger.info(f"IIT Metrics Calculator initialized with dimension {system_dimension}")
    
    def calculate_temporal_coherence(self, current_consciousness: torch.Tensor,
                                   consciousness_history: List[torch.Tensor],
                                   delta_t: int = 1) -> float:
        """
        Calculate temporal coherence: T = corr(C(t), C(t-Δt))
        
        Args:
            current_consciousness: Current consciousness state C(t)
            consciousness_history: Historical consciousness states (oldest first)
            delta_t: Time step difference
            
        Returns:
            Temporal coherence value in [-1, 1]; 0.0 when history is too
            short or either state is constant
        """
        if len(consciousness_history) <= delta_t:
            return 0.0  # Not enough history
        
        # Get consciousness state at t-Δt
        past_consciousness = consciousness_history[-(delta_t + 1)]
        
        # Truncate both states to a shared dimensionality if they differ
        if current_consciousness.shape != past_consciousness.shape:
            min_dim = min(current_consciousness.shape[-1], past_consciousness.shape[-1])
            current_consciousness = current_consciousness[..., :min_dim]
            past_consciousness = past_consciousness[..., :min_dim]
        
        # Fix: route through .cpu() so CUDA tensors can be converted to numpy
        current_flat = current_consciousness.flatten().detach().cpu().numpy()
        past_flat = past_consciousness.flatten().detach().cpu().numpy()
        
        # Pearson correlation is undefined for <2 points or constant inputs
        if len(current_flat) < 2 or np.std(current_flat) == 0 or np.std(past_flat) == 0:
            return 0.0
        
        correlation, _ = pearsonr(current_flat, past_flat)
        
        return float(correlation) if not np.isnan(correlation) else 0.0
    
    def calculate_self_consistency(self, self_state_t1: torch.Tensor,
                                 self_state_t2: torch.Tensor) -> float:
        """
        Calculate self-consistency: S = sim(C_self(t₁), C_self(t₂))
        
        Args:
            self_state_t1: Self-consciousness state at time t₁
            self_state_t2: Self-consciousness state at time t₂
            
        Returns:
            Self-consistency similarity in [0, 1] (cosine similarity
            rescaled from [-1, 1])
        """
        # Truncate both states to a shared dimensionality if they differ
        if self_state_t1.shape != self_state_t2.shape:
            min_dim = min(self_state_t1.shape[-1], self_state_t2.shape[-1])
            self_state_t1 = self_state_t1[..., :min_dim]
            self_state_t2 = self_state_t2[..., :min_dim]
        
        similarity = torch.cosine_similarity(
            self_state_t1.flatten().unsqueeze(0),
            self_state_t2.flatten().unsqueeze(0)
        ).item()
        
        # Map cosine similarity [-1, 1] to [0, 1]
        return (similarity + 1.0) / 2.0
    
    def calculate_integrated_information(self, consciousness_state: torch.Tensor,
                                       connectivity_matrix: Optional[torch.Tensor] = None) -> Tuple[float, Dict[str, float]]:
        """
        Calculate integrated information (Phi) and related measures.
        
        Args:
            consciousness_state: Current consciousness state
            connectivity_matrix: Optional connectivity matrix; a random
                weak connectivity is generated when omitted
            
        Returns:
            Tuple of (phi_value, phi_components_dict) where the dict maps
            "component_i" to that component's Phi value
        """
        if connectivity_matrix is None:
            # Default: weak random connectivity (non-deterministic!)
            dim = consciousness_state.shape[-1]
            connectivity_matrix = torch.randn(dim, dim) * 0.1
        
        causal_structure = CausalStructure(
            elements=list(range(consciousness_state.shape[-1])),
            connections=connectivity_matrix,
            activation_states=consciousness_state
        )
        
        phi_value, phi_components = self.phi_calculator.calculate_phi(
            consciousness_state, causal_structure
        )
        
        # Flatten component objects into a name -> Phi mapping
        phi_components_dict = {}
        for i, component in enumerate(phi_components):
            phi_components_dict[f"component_{i}"] = component.phi_value
        
        return phi_value, phi_components_dict
    
    def calculate_phenomenological_richness(self, consciousness_state: torch.Tensor,
                                          modality_activations: "Dict[ModalityType, torch.Tensor]") -> float:
        """
        Calculate phenomenological richness of consciousness.
        
        Averages four normalized components: modality-entropy diversity,
        overall state entropy, dynamic range, and cross-modal binding.
        
        Args:
            consciousness_state: Current consciousness state
            modality_activations: Activations across different modalities
            
        Returns:
            Phenomenological richness value in [0, 1]
        """
        richness_components = []
        
        # 1. Diversity of modality activations (mean entropy, normalized by
        #    the maximum entropy of a system_dimension-sized distribution)
        if modality_activations:
            modality_entropies = []
            for modality, activation in modality_activations.items():
                entropy = self.info_calculator.calculate_entropy(torch.abs(activation))
                modality_entropies.append(entropy)
            
            if modality_entropies:
                diversity_component = np.mean(modality_entropies) / math.log2(self.system_dimension)
                richness_components.append(diversity_component)
        
        # 2. Overall consciousness state entropy, normalized by its maximum
        consciousness_entropy = self.info_calculator.calculate_entropy(torch.abs(consciousness_state))
        max_entropy = math.log2(consciousness_state.shape[-1])
        entropy_component = consciousness_entropy / max_entropy if max_entropy > 0 else 0
        richness_components.append(entropy_component)
        
        # 3. Dynamic range of activations
        consciousness_range = (torch.max(consciousness_state) - torch.min(consciousness_state)).item()
        expected_range = 4.0  # Assumes activations roughly span [-2, 2] — TODO confirm upstream
        range_component = min(1.0, consciousness_range / expected_range)
        richness_components.append(range_component)
        
        # 4. Cross-modal binding complexity (pairwise mutual information)
        if len(modality_activations) > 1:
            normalized_bindings = []
            modality_pairs = list(combinations(modality_activations.values(), 2))
            
            for activation1, activation2 in modality_pairs:
                # Truncate to a shared dimensionality for this pair
                min_dim = min(activation1.shape[-1], activation2.shape[-1])
                act1_adapted = activation1[..., :min_dim]
                act2_adapted = activation2[..., :min_dim]
                
                mutual_info = self.info_calculator.calculate_mutual_information(act1_adapted, act2_adapted)
                # Fix: normalize each pair by its own maximum possible MI.
                # Previously the last pair's min_dim leaked out of the loop
                # and was used to normalize the whole average.
                normalized_bindings.append(mutual_info / math.log2(min_dim) if min_dim > 1 else 0.0)
            
            if normalized_bindings:
                richness_components.append(sum(normalized_bindings) / len(normalized_bindings))
        
        # Overall richness as the mean of the available components
        if richness_components:
            phenomenological_richness = np.mean(richness_components)
        else:
            phenomenological_richness = 0.0
        
        return min(1.0, max(0.0, phenomenological_richness))
    
    def calculate_content_diversity(self, consciousness_components: List[torch.Tensor]) -> float:
        """
        Calculate diversity of consciousness content.
        
        Diversity is 1 minus the mean absolute pairwise cosine similarity
        between components.
        
        Args:
            consciousness_components: List of consciousness component vectors
            
        Returns:
            Content diversity value in [0, 1]; 0.0 with fewer than two components
        """
        if len(consciousness_components) < 2:
            return 0.0
        
        similarities = []
        for i in range(len(consciousness_components)):
            for j in range(i + 1, len(consciousness_components)):
                comp1, comp2 = consciousness_components[i], consciousness_components[j]
                
                # Truncate to a shared dimensionality for this pair
                min_dim = min(comp1.shape[-1], comp2.shape[-1])
                comp1_adapted = comp1[..., :min_dim]
                comp2_adapted = comp2[..., :min_dim]
                
                similarity = torch.cosine_similarity(
                    comp1_adapted.flatten().unsqueeze(0),
                    comp2_adapted.flatten().unsqueeze(0)
                ).item()
                similarities.append(abs(similarity))
        
        # Diversity is the inverse of average similarity
        if similarities:
            diversity = 1.0 - np.mean(similarities)
        else:
            diversity = 0.0
        
        return max(0.0, min(1.0, diversity))
    
    def update_metrics(self, consciousness_state: torch.Tensor,
                      modality_activations: "Dict[ModalityType, torch.Tensor]",
                      self_model_state: Optional[torch.Tensor] = None,
                      connectivity_matrix: Optional[torch.Tensor] = None) -> "IITMetrics":
        """
        Update all IIT metrics with current consciousness state.
        
        Appends to the rolling histories, recomputes every metric, and
        replaces self.current_metrics.
        
        Args:
            consciousness_state: Current consciousness state
            modality_activations: Current modality activations
            self_model_state: Current self-model state (self-consistency is
                only computed/scored when this is provided)
            connectivity_matrix: Optional connectivity matrix
            
        Returns:
            Updated IIT metrics
        """
        # Record current state; cap history to bound memory
        self.consciousness_history.append(consciousness_state.clone())
        if len(self.consciousness_history) > 100:
            self.consciousness_history.pop(0)
        
        # Temporal coherence against the recorded history
        temporal_coherence = self.calculate_temporal_coherence(
            consciousness_state, self.consciousness_history
        )
        
        # Self-consistency (only when a self-model state is available).
        # Fix: previously the default 0.0 was scored against the threshold,
        # counting a "violation" on every update without a self-model state.
        self_consistency = 0.0
        consistency_computed = False
        if self_model_state is not None and len(self.consciousness_history) > 1:
            # Compare against the previous consciousness state
            prev_self_state = self.consciousness_history[-2]
            self_consistency = self.calculate_self_consistency(self_model_state, prev_self_state)
            consistency_computed = True
        
        # Integrated information
        phi_value, phi_components = self.calculate_integrated_information(
            consciousness_state, connectivity_matrix
        )
        
        # Phenomenological measures
        phenomenological_richness = self.calculate_phenomenological_richness(
            consciousness_state, modality_activations
        )
        
        content_diversity = self.calculate_content_diversity(
            list(modality_activations.values()) + [consciousness_state]
        )
        
        # Pairwise mutual information between modalities
        mutual_information = {}
        modality_items = list(modality_activations.items())
        for i, (mod1, act1) in enumerate(modality_items):
            for j, (mod2, act2) in enumerate(modality_items[i+1:], i+1):
                min_dim = min(act1.shape[-1], act2.shape[-1])
                act1_adapted = act1[..., :min_dim]
                act2_adapted = act2[..., :min_dim]
                
                mi = self.info_calculator.calculate_mutual_information(act1_adapted, act2_adapted)
                mutual_information[(mod1.value, mod2.value)] = mi
        
        # Causal strength: each modality's MI with the overall state
        causal_strength = {}
        for modality, activation in modality_activations.items():
            min_dim = min(activation.shape[-1], consciousness_state.shape[-1])
            activation_adapted = activation[..., :min_dim]
            consciousness_adapted = consciousness_state[..., :min_dim]
            
            influence = self.info_calculator.calculate_mutual_information(
                activation_adapted, consciousness_adapted
            )
            causal_strength[modality.value] = influence
        
        # Assemble the new metrics object
        self.current_metrics = IITMetrics(
            phi=phi_value,
            phi_max=max(self.phi_history + [phi_value]),
            phi_components=phi_components,
            temporal_coherence=temporal_coherence,
            coherence_window=self.coherence_window,
            self_consistency=self_consistency,
            consistency_threshold=self.consistency_threshold,
            consistency_violations=self.current_metrics.consistency_violations + (
                1 if consistency_computed and self_consistency < self.consistency_threshold else 0
            ),
            mutual_information=mutual_information,
            causal_strength=causal_strength,
            phenomenological_richness=phenomenological_richness,
            content_diversity=content_diversity
        )
        
        # Update rolling histories
        self.phi_history.append(phi_value)
        self.coherence_history.append(temporal_coherence)
        self.consistency_history.append(self_consistency)
        
        # Expose the recent coherence window on the metrics object
        self.current_metrics.coherence_history = self.coherence_history[-self.coherence_window:]
        
        # Cap history sizes to bound memory
        if len(self.phi_history) > 100:
            self.phi_history.pop(0)
        if len(self.coherence_history) > 100:
            self.coherence_history.pop(0)
        if len(self.consistency_history) > 100:
            self.consistency_history.pop(0)
        
        return self.current_metrics
    
    def assess_consciousness_level(self, metrics: "IITMetrics") -> "ConsciousnessLevel":
        """
        Assess consciousness level based on IIT metrics.
        
        Args:
            metrics: IIT metrics object
            
        Returns:
            Assessed consciousness level (5-band mapping of a weighted score)
        """
        # Normalize each contribution to [0, 1]
        phi_component = min(1.0, metrics.phi / 5.0)  # Phi normalized against a nominal max of 5
        coherence_component = abs(metrics.temporal_coherence)
        consistency_component = metrics.self_consistency
        richness_component = metrics.phenomenological_richness
        
        # Weighted combination (weights sum to 1.0)
        consciousness_score = (0.3 * phi_component + 
                             0.25 * coherence_component +
                             0.25 * consistency_component +
                             0.2 * richness_component)
        
        # Map the composite score onto discrete consciousness levels
        if consciousness_score < 0.2:
            return ConsciousnessLevel.UNCONSCIOUS
        elif consciousness_score < 0.4:
            return ConsciousnessLevel.PRECONSCIOUS  
        elif consciousness_score < 0.6:
            return ConsciousnessLevel.CONSCIOUS
        elif consciousness_score < 0.8:
            return ConsciousnessLevel.METACONSCIOUS
        else:
            return ConsciousnessLevel.HYPERCONSCIOUS
    
    def get_consciousness_signature(self) -> Dict[str, float]:
        """
        Get consciousness signature from current metrics.
        
        Returns:
            Dictionary of key consciousness metrics, plus stability and
            causal/mutual-information summaries when enough history exists
        """
        signature = {
            'phi': self.current_metrics.phi,
            'temporal_coherence': self.current_metrics.temporal_coherence,
            'self_consistency': self.current_metrics.self_consistency,
            'phenomenological_richness': self.current_metrics.phenomenological_richness,
            'content_diversity': self.current_metrics.content_diversity,
        }
        
        # Stability = 1 - std over the last 10 samples.
        # Fix: clamp at 0.0 — a std above 1 previously produced a negative
        # "stability".
        if len(self.coherence_history) > 5:
            signature['coherence_stability'] = max(0.0, 1.0 - float(np.std(self.coherence_history[-10:])))
        
        if len(self.consistency_history) > 5:
            signature['consistency_stability'] = max(0.0, 1.0 - float(np.std(self.consistency_history[-10:])))
        
        if len(self.phi_history) > 5:
            signature['phi_stability'] = max(0.0, 1.0 - float(np.std(self.phi_history[-10:])))
        
        # Causal-strength summaries
        if self.current_metrics.causal_strength:
            signature['max_causal_strength'] = max(self.current_metrics.causal_strength.values())
            signature['avg_causal_strength'] = np.mean(list(self.current_metrics.causal_strength.values()))
        
        # Mutual-information summaries
        if self.current_metrics.mutual_information:
            signature['max_mutual_information'] = max(self.current_metrics.mutual_information.values())
            signature['avg_mutual_information'] = np.mean(list(self.current_metrics.mutual_information.values()))
        
        return signature
    
    def reset_calculator(self):
        """Reset the IIT calculator to initial state (metrics and histories)."""
        self.current_metrics = IITMetrics(
            coherence_window=self.coherence_window,
            consistency_threshold=self.consistency_threshold
        )
        
        self.consciousness_history.clear()
        self.phi_history.clear()
        self.coherence_history.clear()
        self.consistency_history.clear()
        
        logger.info("IIT Metrics Calculator reset to initial state")