"""
RINN pretrainer implementation - Stage 1: Covariance-driven basis orthogonalization.
Based on Paper 2506.17654, Algorithm 3.1.
"""

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib
matplotlib.use('Agg')  # Use non-interactive backend
import matplotlib.pyplot as plt
from typing import Dict, Optional, Tuple, List
from pielm.solver import PIELMNetwork


class RINNPretrainer:
    """
    RINN Stage 1: Covariance-driven basis orthogonalization.

    Trains ONLY the hidden layer of a PIELM network so that the basis
    functions evaluated at a set of collocation points become approximately
    orthonormal (Paper 2506.17654, Algorithm 3.1).  After pretraining, the
    hidden layer is frozen again so later stages can solve for output
    weights against a well-conditioned basis.
    """

    # Supported optimizer constructors, keyed by lowercase name.
    _OPTIMIZERS = {
        'adam': optim.Adam,
        'sgd': optim.SGD,
        'rmsprop': optim.RMSprop,
    }

    def __init__(self,
                 network: "PIELMNetwork",
                 epsilon: float = 1e-2,
                 learning_rate: float = 1e-3,
                 optimizer_type: str = 'adam'):
        """
        Initialize RINN pretrainer.

        Args:
            network: PIELM network whose hidden layer will be pretrained.
            epsilon: Regularization coefficient weighting the diagonal loss.
            learning_rate: Learning rate for the hidden-layer optimizer.
            optimizer_type: Optimizer type ('adam', 'sgd', 'rmsprop').

        Raises:
            ValueError: If ``optimizer_type`` is not a supported name.
        """
        self.network = network
        self.epsilon = epsilon
        self.learning_rate = learning_rate

        # Stage 1 optimizes only the hidden layer; make sure it is trainable.
        self.network.unfreeze_hidden_layer()

        # The optimizer sees ONLY hidden-layer parameters, never the output layer.
        hidden_params = list(self.network.hidden_layer.parameters())

        optimizer_cls = self._OPTIMIZERS.get(optimizer_type.lower())
        if optimizer_cls is None:
            raise ValueError(f"Unsupported optimizer: {optimizer_type}")
        self.optimizer = optimizer_cls(hidden_params, lr=learning_rate)

        # Per-epoch training history; train() appends to these lists, so the
        # history is cumulative across repeated train() calls.
        self.history = {
            'total_loss': [],
            'ortho_loss': [],
            'diag_loss': [],
            'epoch': []
        }

    def compute_covariance_matrix(self, points: torch.Tensor) -> torch.Tensor:
        """
        Compute sample covariance matrix C = (1/(K-1)) * Φ^T * Φ.

        Args:
            points: Collocation points (K, input_dim)

        Returns:
            torch.Tensor: Covariance matrix (hidden_dim, hidden_dim)

        Raises:
            ValueError: If fewer than two collocation points are given
                (the unbiased 1/(K-1) normalization is undefined for K < 2).
        """
        # Basis function values at the collocation points.
        phi = self.network.get_basis_functions(points)  # (K, hidden_dim)
        K = phi.shape[0]
        if K < 2:
            raise ValueError(
                f"At least 2 collocation points are required to compute the "
                f"sample covariance matrix, got {K}"
            )

        # Unbiased sample covariance (Gram matrix scaled by 1/(K-1)).
        return torch.matmul(phi.T, phi) / (K - 1)  # (hidden_dim, hidden_dim)

    def compute_orthogonality_loss(self, C: torch.Tensor) -> torch.Tensor:
        """
        Compute orthogonality loss as defined in Eq. 2.11:
        L_ortho = ||C ⊙ (J - E)||_F

        Penalizes the off-diagonal entries of C: the loss is zero exactly
        when the basis functions are mutually uncorrelated on the sample.

        Args:
            C: Covariance matrix (hidden_dim, hidden_dim)

        Returns:
            torch.Tensor: Scalar orthogonality loss
        """
        hidden_dim = C.shape[0]

        # J - E: all-ones matrix minus identity, i.e. a mask selecting
        # the off-diagonal entries of C.
        J = torch.ones_like(C)
        E = torch.eye(hidden_dim, device=C.device, dtype=C.dtype)
        off_diagonal_mask = J - E

        # Frobenius norm of the masked (Hadamard product) matrix.
        return torch.norm(C * off_diagonal_mask, p='fro')

    def compute_diagonal_loss(self, C: torch.Tensor) -> torch.Tensor:
        """
        Compute diagonal loss to encourage unit norm:
        L_diag = Σ |log10(C_ii^2)|

        The log is zero when C_ii = 1, so this term pushes each basis
        function toward unit sample variance.

        Args:
            C: Covariance matrix (hidden_dim, hidden_dim)

        Returns:
            torch.Tensor: Scalar diagonal loss
        """
        diagonal = torch.diagonal(C)

        # Small epsilon prevents log10(0) when a basis function collapses
        # to (near) zero variance.
        log_terms = torch.abs(torch.log10(diagonal**2 + 1e-12))

        return torch.sum(log_terms)

    def compute_total_loss(self, points: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Compute total loss: L_total = ε * L_diag + L_ortho

        Args:
            points: Collocation points (K, input_dim)

        Returns:
            Tuple of (total_loss, loss_components) where loss_components maps
            'total', 'orthogonality', 'diagonal' to scalar tensors and
            'covariance_matrix' to the covariance matrix used.
        """
        C = self.compute_covariance_matrix(points)

        ortho_loss = self.compute_orthogonality_loss(C)
        diag_loss = self.compute_diagonal_loss(C)

        # Diagonal term is down-weighted by epsilon; orthogonality dominates.
        total_loss = self.epsilon * diag_loss + ortho_loss

        loss_components = {
            'total': total_loss,
            'orthogonality': ortho_loss,
            'diagonal': diag_loss,
            'covariance_matrix': C
        }

        return total_loss, loss_components

    def train_step(self, points: torch.Tensor) -> Dict[str, float]:
        """
        Perform one optimizer step on the hidden-layer parameters.

        Args:
            points: Collocation points (K, input_dim)

        Returns:
            dict: Scalar loss values for this step
                  ('total_loss', 'ortho_loss', 'diag_loss').
        """
        self.optimizer.zero_grad()

        total_loss, loss_components = self.compute_total_loss(points)

        total_loss.backward()
        self.optimizer.step()

        # Detach to plain Python floats for logging.
        return {
            'total_loss': total_loss.item(),
            'ortho_loss': loss_components['orthogonality'].item(),
            'diag_loss': loss_components['diagonal'].item()
        }

    def train(self,
              points: torch.Tensor,
              max_epochs: int = 1000,
              print_every: int = 100,
              tolerance: float = 1e-6,
              patience: Optional[int] = None) -> Dict[str, List]:
        """
        Train the network to orthogonalize basis functions.

        The hidden layer is frozen when training finishes, regardless of
        whether it stopped by convergence, early stopping, or epoch limit.

        Args:
            points: Collocation points for computing covariance
            max_epochs: Maximum number of training epochs
            print_every: Print progress every N epochs
            tolerance: Convergence tolerance on the total loss
            patience: Early stopping patience (None to disable)

        Returns:
            dict: Training history (cumulative across train() calls).
        """
        print(f"Starting RINN pretraining for {max_epochs} epochs...")
        print(f"Using {len(points)} collocation points")
        print(f"Network: {self.network.input_dim} -> {self.network.hidden_dim} -> {self.network.output_dim}")

        best_loss = float('inf')
        no_improve = 0
        # Track the most recent step's losses; stays None when max_epochs <= 0
        # so the final report below cannot reference an undefined value.
        last_losses: Optional[Dict[str, float]] = None

        for epoch in range(max_epochs):
            last_losses = self.train_step(points)

            self.history['epoch'].append(epoch)
            self.history['total_loss'].append(last_losses['total_loss'])
            self.history['ortho_loss'].append(last_losses['ortho_loss'])
            self.history['diag_loss'].append(last_losses['diag_loss'])

            if epoch % print_every == 0:
                print(f"Epoch {epoch:6d}: "
                      f"Total={last_losses['total_loss']:.6e}, "
                      f"Ortho={last_losses['ortho_loss']:.6e}, "
                      f"Diag={last_losses['diag_loss']:.6e}")

            # Absolute convergence check on the total loss.
            if last_losses['total_loss'] < tolerance:
                print(f"Converged at epoch {epoch} with loss {last_losses['total_loss']:.6e}")
                break

            # Early stopping on lack of improvement.
            if patience is not None:
                if last_losses['total_loss'] < best_loss:
                    best_loss = last_losses['total_loss']
                    no_improve = 0
                else:
                    no_improve += 1
                    if no_improve >= patience:
                        print(f"Early stopping at epoch {epoch} (no improvement for {patience} epochs)")
                        break

        # Freeze hidden layer after pretraining so later stages only solve
        # for the output weights.
        self.network.freeze_hidden_layer()

        if last_losses is not None:
            print(f"Pretraining completed. Final loss: {last_losses['total_loss']:.6e}")
        else:
            print("Pretraining completed. No epochs were run.")

        return self.history

    def evaluate_orthogonality(self, points: torch.Tensor) -> Dict[str, float]:
        """
        Evaluate the orthogonality of basis functions (no gradients).

        Args:
            points: Points to evaluate orthogonality

        Returns:
            dict: Orthogonality metrics: 'off_diagonal_norm',
                  'diagonal_mean', 'diagonal_std', 'condition_number',
                  'covariance_frobenius_norm'.
        """
        with torch.no_grad():
            C = self.compute_covariance_matrix(points)

            # Norm of the off-diagonal entries: 0 for a perfectly
            # orthogonal basis on this sample.
            mask = ~torch.eye(C.shape[0], dtype=torch.bool, device=C.device)
            off_diag_norm = torch.norm(C[mask])

            # Diagonal statistics.  NOTE(review): torch.std uses the unbiased
            # estimator, so diagonal_std is NaN when hidden_dim == 1.
            diagonal = torch.diagonal(C)
            diag_mean = torch.mean(diagonal)
            diag_std = torch.std(diagonal)

            condition_number = torch.linalg.cond(C)

            metrics = {
                'off_diagonal_norm': off_diag_norm.item(),
                'diagonal_mean': diag_mean.item(),
                'diagonal_std': diag_std.item(),
                'condition_number': condition_number.item(),
                'covariance_frobenius_norm': torch.norm(C, p='fro').item()
            }

        return metrics

    def visualize_covariance_matrix(self, points: torch.Tensor, save_path: Optional[str] = None):
        """
        Visualize the covariance matrix and save the figure to disk.

        Args:
            points: Points to compute covariance
            save_path: Path to save plot (defaults to "covariance_matrix.png")
        """
        with torch.no_grad():
            C = self.compute_covariance_matrix(points)
            C_np = C.detach().cpu().numpy()

        fig, axes = plt.subplots(1, 2, figsize=(12, 5))

        # Left panel: the full covariance matrix.
        im1 = axes[0].imshow(C_np, cmap='RdBu_r', aspect='equal')
        axes[0].set_title('Covariance Matrix')
        axes[0].set_xlabel('Basis Function Index')
        axes[0].set_ylabel('Basis Function Index')
        plt.colorbar(im1, ax=axes[0])

        # Right panel: off-diagonal elements only (should be near zero
        # after successful pretraining).
        C_off_diag = C_np.copy()
        np.fill_diagonal(C_off_diag, 0)
        im2 = axes[1].imshow(C_off_diag, cmap='RdBu_r', aspect='equal')
        axes[1].set_title('Off-Diagonal Elements')
        axes[1].set_xlabel('Basis Function Index')
        axes[1].set_ylabel('Basis Function Index')
        plt.colorbar(im2, ax=axes[1])

        plt.tight_layout()

        output_path = save_path if save_path else "covariance_matrix.png"
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        print(f"Covariance matrix visualization saved to {output_path}")

        plt.close()

    def get_training_summary(self) -> str:
        """Get a human-readable summary of the training process."""
        if not self.history['total_loss']:
            return "No training performed yet."

        initial_loss = self.history['total_loss'][0]
        final_loss = self.history['total_loss'][-1]
        total_epochs = len(self.history['total_loss'])
        # Guard against a zero initial loss (already converged at start).
        if initial_loss != 0:
            reduction = (initial_loss - final_loss) / initial_loss * 100
        else:
            reduction = 0.0

        summary = f"""
RINN Pretraining Summary:
========================
Total epochs: {total_epochs}
Initial loss: {initial_loss:.6e}
Final loss: {final_loss:.6e}
Loss reduction: {reduction:.2f}%
Regularization ε: {self.epsilon}
Learning rate: {self.learning_rate}
        """

        return summary