"""
Enhanced Cognitive Map Reasoning System
Complete implementation with comprehensive validation, error handling, and advanced features
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from typing import Union, List, Tuple, Optional, Dict, Any
import matplotlib.pyplot as plt
import sys


class CognitiveMapUtils:
    """
    Stateless utility functions for cognitive map operations.

    Provides activation functions, limit-cycle detection, weight-matrix
    validation/analysis, state-evolution plotting, and a canned example
    problem.  All methods are staticmethods and hold no state.
    """

    @staticmethod
    def binary_activation(x: torch.Tensor) -> torch.Tensor:
        """Binary transformation function: f(x) = 1 if x > 0, else 0."""
        return (x > 0).float()

    @staticmethod
    def trivalent_activation(x: torch.Tensor) -> torch.Tensor:
        """Trivalent transformation function: f(x) = 1 if x > 0, -1 if x < 0, else 0."""
        # Use *_like constants so the result stays on x's device and dtype
        # (the previous torch.tensor(...) scalars were CPU-only).
        one = torch.ones_like(x)
        return torch.where(x > 0, one, torch.where(x < 0, -one, torch.zeros_like(x)))

    @staticmethod
    def logistic_activation(x: torch.Tensor, c: float = 1.0) -> torch.Tensor:
        """Logistic transformation function 1 / (1 + exp(-c*x)) with configurable slope c."""
        # torch.sigmoid is the numerically-stable library form of the same expression.
        return torch.sigmoid(c * x)

    @staticmethod
    def check_limit_cycle(state_history: List[torch.Tensor], cycle_length: int = 5) -> bool:
        """Detect a limit cycle of period 2..cycle_length at the end of state_history.

        For each candidate period L, compares the last 2*L states pairwise
        (state[-2L+i] vs state[-L+i]).

        FIX: the previous version sliced only the last L states and then
        required at least 2*L of them, so it unconditionally skipped every
        candidate and could never return True.

        Args:
            state_history: Chronological list of state tensors (any matching shape).
            cycle_length: Largest period to test for.

        Returns:
            True if a repeating tail of period 2..cycle_length is found.
        """
        if len(state_history) < cycle_length * 2:
            return False

        longest = min(cycle_length, len(state_history) // 2)
        for length in range(2, longest + 1):
            window = state_history[-2 * length:]
            if all(torch.allclose(window[i], window[i + length], atol=1e-6)
                   for i in range(length)):
                return True
        return False

    @staticmethod
    def validate_weight_matrix(weight_matrix: Union[torch.Tensor, np.ndarray]) -> torch.Tensor:
        """Validate a weight matrix and return a cleaned copy.

        Accepts torch.Tensor or numpy.ndarray (generalized: the ndarray case
        previously crashed on `.clone()`), zeroes the diagonal (enforcing the
        j != i constraint of the update rule), and rejects NaN/Inf entries.

        Raises:
            ValueError: If the matrix contains NaN or infinite values.
        """
        if isinstance(weight_matrix, np.ndarray):
            weight_matrix = torch.from_numpy(weight_matrix).float()

        cleaned = weight_matrix.clone()
        cleaned.fill_diagonal_(0)

        if torch.any(torch.isnan(cleaned)) or torch.any(torch.isinf(cleaned)):
            raise ValueError("Weight matrix contains NaN or infinite values")

        return cleaned

    @staticmethod
    def analyze_weight_matrix(weight_matrix: torch.Tensor) -> Dict[str, Any]:
        """Return summary statistics (sign counts, extrema, mean/std, density) of a weight matrix."""
        # detach() guards against tensors that still require grad; counts are
        # cast to plain int so the dict holds only native Python scalars.
        weight_data = weight_matrix.detach().cpu().numpy()

        return {
            'positive_count': int(np.sum(weight_data > 0)),
            'negative_count': int(np.sum(weight_data < 0)),
            'zero_count': int(np.sum(weight_data == 0)),
            'max_weight': float(np.max(weight_data)),
            'min_weight': float(np.min(weight_data)),
            'mean_weight': float(np.mean(weight_data)),
            'std_weight': float(np.std(weight_data)),
            'density': float(np.sum(weight_data != 0) / weight_data.size)
        }

    @staticmethod
    def plot_state_evolution(history: List[torch.Tensor],
                             node_names: List[str] = None,
                             title: str = "Cognitive Map State Evolution",
                             figsize: Tuple[int, int] = (12, 8)):
        """Plot per-node state trajectories and total system energy over iterations.

        Args:
            history: Chronological list of state tensors; entries may be 1-D
                (n,) or batched (1, n) — they are flattened before plotting.
            node_names: Optional labels per node; auto-generated if None.
            title: Figure title prefix.
            figsize: Matplotlib figure size.
        """
        if not history:
            print("No history data to plot")
            return

        # Flatten each snapshot so both (n,) and (1, n) histories plot correctly.
        flat_history = [h.detach().reshape(-1) for h in history]

        if node_names is None:
            node_names = [f"Node_{i + 1}" for i in range(flat_history[0].shape[0])]

        history_array = torch.stack(flat_history).cpu().numpy()

        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize)

        # Per-node trajectories.
        for i, name in enumerate(node_names):
            ax1.plot(history_array[:, i], label=name, marker='o', linewidth=2, markersize=4)

        ax1.set_xlabel('Iteration Step')
        ax1.set_ylabel('Node State Value')
        ax1.set_title(f'{title} - Individual States')
        ax1.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
        ax1.grid(True, alpha=0.3)

        # System "energy": sum of squared node states at each step.
        system_energy = np.sum(history_array ** 2, axis=1)
        ax2.plot(system_energy, 'r-', linewidth=2, label='System Energy')
        ax2.set_xlabel('Iteration Step')
        ax2.set_ylabel('System Energy')
        ax2.set_title('System Energy Evolution')
        ax2.grid(True, alpha=0.3)
        ax2.legend()

        plt.tight_layout()
        plt.show()

    @staticmethod
    def create_standard_example() -> Dict[str, Any]:
        """Create the standard 5-node social-problem example used for testing/demos."""
        node_names = ["Population", "Crime", "Economic_Situation", "Poverty", "Unemployment"]

        # Signed causal links between the five concepts (rows = target node).
        weight_matrix = np.array([
            [0, 0, -1, 0, 1],
            [0, 0, 0, -1, 0],
            [0, -1, 0, 0, -1],
            [-1, 1, 0, 0, 0],
            [0, 0, 0, 1, 1]
        ], dtype=np.float32)

        init_state = torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0])

        return {
            'num_nodes': 5,
            'node_names': node_names,
            'init_state': init_state,
            'weight_matrix': weight_matrix
        }


class EnhancedCognitiveMap(nn.Module):
    """
    Enhanced Cognitive Map Reasoning System.

    Iterates the update rule x(t+1) = f(W x(t)) — where W has a zero diagonal
    (no self-influence) and f is a configurable activation — until a fixed
    point, a limit cycle, or the iteration limit is reached.  Tracks the full
    state history, convergence information, and simple performance metrics.
    """

    def __init__(
            self,
            num_nodes: int,
            node_names: List[str],
            init_state: torch.Tensor,
            weight_matrix: Union[torch.Tensor, np.ndarray],
            activation_fn: str = 'sigmoid',
            max_iterations: int = 50,
            convergence_threshold: float = 1e-6,
            device: str = 'cpu'
    ):
        """
        Initialize Enhanced Cognitive Map with comprehensive validation.

        Args:
            num_nodes: Number of nodes in the cognitive map.
            node_names: List of node names (length must equal num_nodes).
            init_state: Initial 1-D state vector of length num_nodes.
            weight_matrix: num_nodes x num_nodes causal weight matrix
                (torch.Tensor or numpy.ndarray); its diagonal is zeroed.
            activation_fn: One of 'binary', 'trivalent', 'sigmoid', 'tanh',
                'logistic'.
            max_iterations: Maximum number of reasoning iterations.
            convergence_threshold: L2-norm threshold for fixed-point detection.
            device: Computation device ('cpu' or e.g. 'cuda').

        Raises:
            ValueError: If any parameter fails validation.
        """
        super(EnhancedCognitiveMap, self).__init__()

        self.utils = CognitiveMapUtils()

        # Comprehensive input validation (raises ValueError on bad input).
        self._validate_construction_parameters(
            num_nodes, node_names, init_state, weight_matrix,
            activation_fn, max_iterations, convergence_threshold
        )

        # Set core parameters.
        self.num_nodes = num_nodes
        self.node_names = node_names
        self.device = device
        self.activation_fn = activation_fn
        self.max_iterations = max_iterations
        self.convergence_threshold = convergence_threshold

        # FIX: convert numpy input up front.  Previously the tensor produced
        # during validation was discarded, so a numpy weight matrix reached
        # validate_weight_matrix(), whose .clone() call raised AttributeError
        # (the demo passes an np.ndarray and crashed at construction).
        if isinstance(weight_matrix, np.ndarray):
            weight_matrix = torch.from_numpy(weight_matrix).float()

        # Weight matrix held as a Parameter so the map is trainable if needed.
        self.weight_matrix = nn.Parameter(
            torch.zeros(num_nodes, num_nodes, device=device)
        )
        self._initialize_weight_matrix(weight_matrix)

        # Initial state, moved to the computation device.
        self.init_state = init_state.to(device)

        # Registry mapping activation names to callables.
        # FIX: torch.sigmoid replaces the deprecated F.sigmoid alias.
        self.activation_registry = {
            'binary': self.utils.binary_activation,
            'trivalent': self.utils.trivalent_activation,
            'sigmoid': torch.sigmoid,
            'tanh': torch.tanh,
            'logistic': lambda x: self.utils.logistic_activation(x, c=1.0)
        }

        # Reasoning state tracking.
        self._reset_reasoning_state()

        print(f"Enhanced Cognitive Map initialized with {self.num_nodes} nodes")
        self._print_system_summary()

    def _validate_construction_parameters(self, num_nodes: int, node_names: List[str],
                                          init_state: torch.Tensor,
                                          weight_matrix: Union[torch.Tensor, np.ndarray],
                                          activation_fn: str, max_iterations: int,
                                          convergence_threshold: float):
        """Validate all constructor parameters; raise ValueError on any problem."""

        # Validate num_nodes.
        if not isinstance(num_nodes, int) or num_nodes <= 0:
            raise ValueError(f"num_nodes must be positive integer, got {num_nodes}")

        # Validate node_names.
        if not isinstance(node_names, list) or len(node_names) != num_nodes:
            raise ValueError(f"node_names must be list of length {num_nodes}, got {len(node_names)}")

        if not all(isinstance(name, str) for name in node_names):
            raise ValueError("All node names must be strings")

        # Validate init_state.
        if not isinstance(init_state, torch.Tensor):
            raise ValueError("init_state must be torch.Tensor")

        if init_state.dim() != 1 or init_state.shape[0] != num_nodes:
            raise ValueError(f"init_state must be 1D tensor with length {num_nodes}")

        # Validate weight_matrix type and shape (conversion happens in __init__).
        if isinstance(weight_matrix, np.ndarray):
            weight_matrix = torch.from_numpy(weight_matrix).float()
        elif not isinstance(weight_matrix, torch.Tensor):
            raise ValueError("weight_matrix must be torch.Tensor or numpy.ndarray")

        if weight_matrix.shape != (num_nodes, num_nodes):
            raise ValueError(f"weight_matrix must be {num_nodes}x{num_nodes}, got {weight_matrix.shape}")

        # Validate activation function.
        valid_activations = ['binary', 'trivalent', 'sigmoid', 'tanh', 'logistic']
        if activation_fn not in valid_activations:
            raise ValueError(f"activation_fn must be one of {valid_activations}")

        # Validate iteration parameters.
        if not isinstance(max_iterations, int) or max_iterations <= 0:
            raise ValueError("max_iterations must be positive integer")

        if convergence_threshold <= 0:
            raise ValueError("convergence_threshold must be positive")

    def _initialize_weight_matrix(self, weight_matrix: torch.Tensor):
        """Validate the weight matrix (zero diagonal, no NaN/Inf) and load it into the Parameter."""
        try:
            validated_matrix = self.utils.validate_weight_matrix(weight_matrix)
            self.weight_matrix.data = validated_matrix.to(self.device)
        except Exception as e:
            raise ValueError(f"Weight matrix validation failed: {str(e)}")

    def _reset_reasoning_state(self):
        """Reset history, convergence bookkeeping, and performance metrics for a fresh run."""
        self.reasoning_history = []          # list of 1-D CPU state snapshots
        self.iteration_differences = []      # max L2 state change per iteration
        self.convergence_info = {
            'is_converged': False,
            'convergence_type': None,        # 'fixed_point' | 'limit_cycle' | 'max_iterations_reached'
            'convergence_iteration': None,
            'final_state_norm': 0.0
        }
        self.performance_metrics = {
            'total_iterations': 0,           # number of recorded states (incl. initial)
            'computation_time': 0.0,
            'memory_usage': 0.0
        }

    def _print_system_summary(self):
        """Print a human-readable summary of the configured system."""
        weight_stats = self.utils.analyze_weight_matrix(self.weight_matrix.data)

        print("\n" + "=" * 60)
        print("COGNITIVE MAP SYSTEM SUMMARY")
        print("=" * 60)
        print(f"Number of nodes: {self.num_nodes}")
        print(f"Node names: {self.node_names}")
        print(f"Activation function: {self.activation_fn}")
        print(f"Max iterations: {self.max_iterations}")
        print(f"Convergence threshold: {self.convergence_threshold}")
        print(f"Computation device: {self.device}")
        print(f"Weight matrix density: {weight_stats['density']:.3f}")
        print(f"Positive weights: {weight_stats['positive_count']}")
        print(f"Negative weights: {weight_stats['negative_count']}")
        print("=" * 60)

    def forward(self,
                initial_state: Optional[torch.Tensor] = None,
                activation_fn: Optional[str] = None,
                max_iterations: Optional[int] = None,
                convergence_threshold: Optional[float] = None,
                verbose: bool = True) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """
        Run the iterative reasoning loop with comprehensive state tracking.

        Args:
            initial_state: Optional initial state (uses the constructor default if None).
            activation_fn: Optional activation-function override.
            max_iterations: Optional iteration-limit override.
            convergence_threshold: Optional convergence-threshold override.
            verbose: Whether to print progress information.

        Returns:
            Tuple of (final 1-D state tensor, list of 1-D CPU state snapshots).

        Raises:
            ValueError: If any override fails validation.
        """
        import time

        # Fall back to construction-time defaults for unspecified options.
        if initial_state is None:
            initial_state = self.init_state
        if activation_fn is None:
            activation_fn = self.activation_fn
        if max_iterations is None:
            max_iterations = self.max_iterations
        if convergence_threshold is None:
            convergence_threshold = self.convergence_threshold

        # Validate inputs.
        self._validate_reasoning_inputs(initial_state, activation_fn,
                                        max_iterations, convergence_threshold)

        # Get activation function.
        activation_function = self.activation_registry[activation_fn]

        # Work with a (1, n) batch internally.
        if initial_state.dim() == 1:
            initial_state = initial_state.unsqueeze(0)

        # Reset state and start timing.
        self._reset_reasoning_state()
        start_time = time.time()

        if verbose:
            print(f"\nStarting reasoning with {activation_fn} activation...")
            print(f"Initial state: {initial_state.squeeze().cpu().numpy()}")

        # Initialize reasoning.
        current_state = initial_state.clone().to(self.device)
        # FIX: store 1-D CPU snapshots (not (1, n) batches) so per-node
        # indexing in analyze_results/get_node_state and plotting work.
        self.reasoning_history = [current_state.detach().cpu().reshape(-1)]

        # Main reasoning loop.
        for iteration in range(max_iterations):
            # Core update: x_i(t+1) = f(sum_{j!=i} w_ij * x_j(t));
            # the j != i constraint is enforced by W's zero diagonal.
            next_state = torch.matmul(current_state, self.weight_matrix.T)
            next_state = activation_function(next_state)

            # Calculate state difference.
            state_diff = torch.norm(next_state - current_state, dim=1)
            max_diff = state_diff.max().item()
            self.iteration_differences.append(max_diff)

            # Check stopping conditions (fixed point takes priority over cycle).
            convergence_reached = bool(torch.all(state_diff < convergence_threshold))
            limit_cycle_detected = self.utils.check_limit_cycle(self.reasoning_history)

            if convergence_reached or limit_cycle_detected:
                # FIX: advance to next_state before stopping so the returned
                # state (and final_state_norm) reflect the state that actually
                # satisfied the stop condition, not the previous one.
                current_state = next_state
                self._handle_convergence(
                    'fixed_point' if convergence_reached else 'limit_cycle',
                    iteration + 1, next_state)
                break

            current_state = next_state
            self.reasoning_history.append(current_state.detach().cpu().reshape(-1))

            # Progress reporting.
            if verbose and (iteration + 1) % 10 == 0:
                print(f"Iteration {iteration + 1}: Max state change = {max_diff:.6f}")

        # Handle non-convergence.
        if not self.convergence_info['is_converged']:
            self.convergence_info.update({
                'is_converged': False,
                'convergence_type': 'max_iterations_reached',
                'convergence_iteration': max_iterations
            })
            if verbose:
                print(f"Reached maximum iteration limit: {max_iterations}")

        # Calculate performance metrics.
        self.performance_metrics.update({
            'total_iterations': len(self.reasoning_history),
            'computation_time': time.time() - start_time,
            'final_state_norm': torch.norm(current_state).item()
        })

        if verbose:
            self._print_reasoning_summary()

        return current_state.squeeze(), self.reasoning_history

    def _validate_reasoning_inputs(self, initial_state: torch.Tensor,
                                   activation_fn: str, max_iterations: int,
                                   convergence_threshold: float):
        """Validate per-call reasoning overrides; raise ValueError on any problem."""
        if initial_state.shape[-1] != self.num_nodes:
            raise ValueError("Initial state dimension doesn't match number of nodes")

        if activation_fn not in self.activation_registry:
            raise ValueError(f"Unsupported activation function: {activation_fn}")

        if max_iterations <= 0:
            raise ValueError("max_iterations must be positive")

        if convergence_threshold <= 0:
            raise ValueError("convergence_threshold must be positive")

    def _handle_convergence(self, convergence_type: str, iteration: int,
                            final_state: torch.Tensor):
        """Record a convergence event and append the final state to the history."""
        self.convergence_info.update({
            'is_converged': True,
            'convergence_type': convergence_type,
            'convergence_iteration': iteration,
            'final_state_norm': torch.norm(final_state).item()
        })

        # Same 1-D CPU snapshot convention as the rest of the history.
        self.reasoning_history.append(final_state.detach().cpu().reshape(-1))

    def _print_reasoning_summary(self):
        """Print a short summary of the completed reasoning run."""
        print(f"\nReasoning completed:")
        print(f"  Total iterations: {self.performance_metrics['total_iterations']}")
        print(f"  Convergence: {self.convergence_info['convergence_type']}")
        print(f"  Computation time: {self.performance_metrics['computation_time']:.4f}s")
        print(f"  Final state norm: {self.convergence_info['final_state_norm']:.4f}")

    def analyze_results(self, detailed: bool = False):
        """Print a results analysis of the last reasoning run (no-op if none was run).

        Args:
            detailed: Also print convergence metrics and weight-matrix stats.
        """
        if not self.reasoning_history:
            print("No reasoning results available. Run forward() first.")
            return

        # 1-D snapshot of the final state (see forward()).
        final_state = self.reasoning_history[-1]

        print("\n" + "=" * 60)
        print("COMPREHENSIVE RESULTS ANALYSIS")
        print("=" * 60)

        print(f"Convergence Analysis:")
        print(f"  Type: {self.convergence_info['convergence_type']}")
        print(f"  Iteration: {self.convergence_info['convergence_iteration']}")
        print(f"  Total iterations: {len(self.reasoning_history)}")

        print(f"\nFinal State Values:")
        for i, name in enumerate(self.node_names):
            print(f"  {name}: {final_state[i].item():.4f}")

        if detailed and self.iteration_differences:
            print(f"\nConvergence Metrics:")
            print(f"  Max difference: {max(self.iteration_differences):.6f}")
            print(f"  Min difference: {min(self.iteration_differences):.6f}")
            print(f"  Avg difference: {np.mean(self.iteration_differences):.6f}")

            weight_stats = self.utils.analyze_weight_matrix(self.weight_matrix.data)
            print(f"\nWeight Matrix Analysis:")
            print(f"  Density: {weight_stats['density']:.3f}")
            print(f"  Mean weight: {weight_stats['mean_weight']:.3f}")

        print("=" * 60)

    def get_node_state(self, node_name: str, iteration: int = -1) -> float:
        """Return the state of a named node at a given iteration.

        Args:
            node_name: Name of the node (must be in node_names).
            iteration: History index; negative values count from the end.

        Returns:
            The node's scalar state at that iteration.

        Raises:
            ValueError: If the iteration is out of range or the name is unknown.
        """
        if iteration < 0:
            iteration = len(self.reasoning_history) + iteration

        if iteration >= len(self.reasoning_history) or iteration < 0:
            raise ValueError(f"Iteration {iteration} out of range")

        node_index = self.node_names.index(node_name)
        return self.reasoning_history[iteration][node_index].item()

    def plot_evolution(self, title: str = "Cognitive Map State Evolution"):
        """Plot the recorded state evolution via CognitiveMapUtils."""
        self.utils.plot_state_evolution(self.reasoning_history, self.node_names, title)

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Return merged performance metrics, convergence info, and weight-matrix stats."""
        metrics = self.performance_metrics.copy()
        metrics.update(self.convergence_info)
        metrics.update(self.utils.analyze_weight_matrix(self.weight_matrix.data))
        return metrics


def demo_enhanced_cognitive_map():
    """Comprehensive demonstration of enhanced cognitive map functionality"""
    print("ENHANCED COGNITIVE MAP DEMONSTRATION")
    print("=" * 70)

    # Shared example problem used for every activation variant.
    example = CognitiveMapUtils.create_standard_example()

    results = {}

    # Exercise the map with several activation functions.
    for fn_name in ('sigmoid', 'binary', 'tanh'):
        print(f"\n--- Testing {fn_name} activation ---")

        cognitive_map = EnhancedCognitiveMap(
            num_nodes=example['num_nodes'],
            node_names=example['node_names'],
            init_state=example['init_state'],
            weight_matrix=example['weight_matrix'],
            activation_fn=fn_name,
            max_iterations=30,
            convergence_threshold=1e-6,
            device='cpu'
        )

        # Run reasoning and print a detailed per-run analysis.
        final_state, history = cognitive_map(verbose=True)
        cognitive_map.analyze_results(detailed=True)

        # Record the outcome for the cross-activation comparison below.
        results[fn_name] = {
            'final_state': final_state,
            'convergence_type': cognitive_map.convergence_info['convergence_type'],
            'iterations': len(history),
        }

    # Side-by-side comparison of all runs.
    print("\n" + "=" * 70)
    print("COMPARATIVE ANALYSIS")
    print("=" * 70)

    for activation, result in results.items():
        print(f"{activation:8}: {result['convergence_type']:20} "
              f"({result['iterations']:2} iterations)")

    return results


if __name__ == "__main__":
    """Main execution with comprehensive testing"""

    # Run enhanced demonstration
    results = demo_enhanced_cognitive_map()

    print("\n" + "=" * 70)
    print("DEMONSTRATION COMPLETED")
    print("=" * 70)




