"""
RINN with Early Stopping (RINN-es) implementation.
Based on Paper 2506.17654, Algorithm 4.1.
"""

import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Callable, Optional, Tuple, List
import copy

from pielm.solver import PIELMNetwork, PIELMSolver
from rinn.pretrainer import RINNPretrainer
from common.metrics import compute_total_pde_loss, MetricsTracker


class RINNEarlyStopping:
    """
    RINN with Early Stopping implementation.

    Alternates between one gradient step of basis orthogonalization on the
    hidden layer (pretrainer) and a least-squares solve for the output weights
    (PIELM), using the total PDE loss as the early-stopping criterion.
    """
    
    def __init__(self,
                 network: PIELMNetwork,
                 pretrainer: Optional[RINNPretrainer] = None,
                 pielm_solver: Optional[PIELMSolver] = None,
                 patience: int = 50):
        """
        Initialize RINN-es solver.
        
        Args:
            network: PIELM network
            pretrainer: RINN pretrainer (will create if None)
            pielm_solver: PIELM solver (will create if None)
            patience: Early stopping patience
        """
        self.network = network
        self.patience = patience
        
        # Build default components when the caller does not inject them.
        self.pretrainer = pretrainer if pretrainer is not None else RINNPretrainer(network)
        self.pielm_solver = pielm_solver if pielm_solver is not None else PIELMSolver(network)
        
        # Early-stopping state (reset again at the start of train()).
        self.best_network_state = None
        self.best_loss = float('inf')
        self.best_epoch = 0
        # Fix: initialize here so should_stop_early() is safe to call
        # before train() has run (previously raised AttributeError).
        self.no_improve = 0
        
        # Training history
        self.metrics_tracker = MetricsTracker()
        
    def train_single_epoch(self,
                          collocation_points: torch.Tensor,
                          boundary_points: torch.Tensor,
                          pde_operator: Callable,
                          pde_rhs: torch.Tensor,
                          boundary_values: torch.Tensor,
                          boundary_operator: Callable = None) -> Dict[str, float]:
        """
        Perform one epoch of RINN-es training.
        
        Args:
            collocation_points: Interior collocation points
            boundary_points: Boundary points
            pde_operator: PDE operator function
            pde_rhs: Right-hand side values
            boundary_values: Boundary condition values
            boundary_operator: Boundary operator (default: identity)
            
        Returns:
            dict: Metrics for this epoch
        """
        # Stage 1: One step of basis orthogonalization on the hidden layer.
        self.network.unfreeze_hidden_layer()
        ortho_losses = self.pretrainer.train_step(collocation_points)
        
        # Stage 2: Solve for output weights with the hidden layer frozen.
        self.network.freeze_hidden_layer()
        pielm_info = self.pielm_solver.train(
            collocation_points=collocation_points,
            boundary_points=boundary_points,
            pde_operator=pde_operator,
            pde_rhs=pde_rhs,
            boundary_values=boundary_values,
            boundary_operator=boundary_operator
        )
        
        # Stage 3: Compute PDE residual loss for early stopping.
        # Steady-state problems only, hence no initial-condition points/values.
        # (The previous version also built an unused concatenation of all
        # points here; removed as dead work.)
        pde_losses = compute_total_pde_loss(
            network=self.network,
            res_points=collocation_points,
            bc_points=boundary_points,
            bc_values=boundary_values,
            pde_operator=pde_operator,
            target_values=pde_rhs,
            ic_points=None,
            ic_values=None
        )
        
        # Combine metrics from all three stages into one flat dict.
        epoch_metrics = {
            'ortho_total_loss': ortho_losses['total_loss'],
            'ortho_ortho_loss': ortho_losses['ortho_loss'],
            'ortho_diag_loss': ortho_losses['diag_loss'],
            'pielm_residual': pielm_info['residual'],
            'pielm_relative_residual': pielm_info['relative_residual'],
            'pielm_condition_number': pielm_info['condition_number'],
            'pde_total_loss': pde_losses['L_pde'].item(),
            'pde_residual_loss': pde_losses['L_res'].item(),
            'pde_boundary_loss': pde_losses['L_bcs'].item(),
        }
        
        return epoch_metrics
    
    def should_stop_early(self, current_pde_loss: float, epoch: int) -> bool:
        """
        Check if training should stop early.

        On improvement, snapshots the network state and resets the
        no-improvement counter; otherwise increments it and triggers a stop
        once `patience` consecutive non-improving epochs have been seen.
        
        Args:
            current_pde_loss: Current PDE loss
            epoch: Current epoch
            
        Returns:
            bool: True if should stop early
        """
        if current_pde_loss < self.best_loss:
            self.best_loss = current_pde_loss
            self.best_epoch = epoch
            # Save best model state (deep copy so later training steps
            # cannot mutate the snapshot in place).
            self.best_network_state = copy.deepcopy(self.network.state_dict())
            self.no_improve = 0
            return False
        else:
            self.no_improve += 1
            return self.no_improve >= self.patience
    
    def restore_best_model(self):
        """Restore the best model from training (no-op if never snapshotted)."""
        if self.best_network_state is not None:
            self.network.load_state_dict(self.best_network_state)
            print(f"Restored best model from epoch {self.best_epoch} with PDE loss {self.best_loss:.6e}")
    
    def train(self,
              collocation_points: torch.Tensor,
              boundary_points: torch.Tensor,
              pde_operator: Callable,
              pde_rhs: torch.Tensor,
              boundary_values: torch.Tensor,
              boundary_operator: Callable = None,
              max_epochs: int = 1000,
              print_every: int = 10) -> Dict:
        """
        Train RINN-es with early stopping.
        
        Args:
            collocation_points: Interior collocation points
            boundary_points: Boundary points
            pde_operator: PDE operator function
            pde_rhs: Right-hand side values
            boundary_values: Boundary condition values
            boundary_operator: Boundary operator
            max_epochs: Maximum number of epochs
            print_every: Print progress every N epochs
            
        Returns:
            dict: Training results and history
        """
        print(f"Starting RINN-es training for up to {max_epochs} epochs...")
        print(f"Early stopping patience: {self.patience}")
        print(f"Interior points: {len(collocation_points)}, Boundary points: {len(boundary_points)}")
        
        # Initialize tracking variables
        self.best_loss = float('inf')
        self.best_epoch = 0
        self.no_improve = 0
        
        # Fix: pre-seed loop variables so the summary below is well defined
        # even when max_epochs == 0 or the first epoch raises (previously a
        # NameError on `epoch` / `epoch_metrics`).
        epoch = -1
        epoch_metrics = {}
        stopped_early = False
        
        # Training loop
        for epoch in range(max_epochs):
            try:
                # Perform one epoch of training
                epoch_metrics = self.train_single_epoch(
                    collocation_points=collocation_points,
                    boundary_points=boundary_points,
                    pde_operator=pde_operator,
                    pde_rhs=pde_rhs,
                    boundary_values=boundary_values,
                    boundary_operator=boundary_operator
                )
                
                # Update metrics tracker
                self.metrics_tracker.update(epoch_metrics, epoch)
                
                # Print progress
                if epoch % print_every == 0:
                    print(f"Epoch {epoch:6d}: "
                          f"PDE Loss={epoch_metrics['pde_total_loss']:.6e}, "
                          f"PDE Res={epoch_metrics['pde_residual_loss']:.6e}, "
                          f"PDE BC={epoch_metrics['pde_boundary_loss']:.6e}, "
                          f"Ortho={epoch_metrics['ortho_total_loss']:.6e}")
                
                # Early stopping check
                if self.should_stop_early(epoch_metrics['pde_total_loss'], epoch):
                    print(f"\nEarly stopping at epoch {epoch}")
                    print(f"No improvement for {self.patience} consecutive epochs")
                    stopped_early = True
                    break
                    
            except Exception as e:
                # Best-effort loop: a failing epoch aborts training but still
                # lets us restore the best snapshot seen so far.
                print(f"Error at epoch {epoch}: {str(e)}")
                break
        
        # Restore best model
        self.restore_best_model()
        
        # Training summary. Fix: `stopped_early` is now an explicit flag so an
        # exception break is no longer reported as early stopping.
        training_results = {
            'total_epochs': epoch + 1,
            'best_epoch': self.best_epoch,
            'best_pde_loss': self.best_loss,
            'final_pde_loss': epoch_metrics.get('pde_total_loss', float('nan')),
            'training_history': self.metrics_tracker.history,
            'stopped_early': stopped_early
        }
        
        print(f"\nTraining completed:")
        print(f"  Total epochs: {training_results['total_epochs']}")
        print(f"  Best epoch: {training_results['best_epoch']}")
        print(f"  Best PDE loss: {training_results['best_pde_loss']:.6e}")
        print(f"  Stopped early: {training_results['stopped_early']}")
        
        return training_results
    
    def evaluate_final_state(self, 
                           test_points: torch.Tensor,
                           true_solution: Optional[torch.Tensor] = None) -> Dict[str, float]:
        """
        Evaluate the final trained model.
        
        Args:
            test_points: Test points for evaluation
            true_solution: True solution values (if available)
            
        Returns:
            dict: Evaluation metrics
        """
        # Fix: absolute import to match the module-level import style
        # (`from common.metrics import ...`); the previous relative form
        # (`from ..common.metrics`) is inconsistent with how this module is
        # imported elsewhere in the file.
        from common.metrics import evaluate_model_comprehensive
        
        if true_solution is not None:
            metrics = evaluate_model_comprehensive(
                network=self.network,
                test_points=test_points,
                true_solution=true_solution
            )
        else:
            # Basic evaluation without true solution: summary statistics only.
            with torch.no_grad():
                predictions = self.network(test_points)
            
            metrics = {
                'prediction_mean': torch.mean(predictions).item(),
                'prediction_std': torch.std(predictions).item(),
                'prediction_min': torch.min(predictions).item(),
                'prediction_max': torch.max(predictions).item()
            }
        
        # Add orthogonality metrics
        ortho_metrics = self.pretrainer.evaluate_orthogonality(test_points)
        metrics.update(ortho_metrics)
        
        return metrics
    
    def get_training_summary(self) -> str:
        """Get comprehensive training summary."""
        # No epochs have run until a best snapshot exists; `best_loss` itself
        # is always set in __init__, so checking hasattr() was dead code.
        if self.best_network_state is None:
            return "No training performed yet."
        
        # Get latest metrics
        latest_ortho = self.metrics_tracker.get_latest('ortho_total_loss')
        latest_pde = self.metrics_tracker.get_latest('pde_total_loss')
        
        # Fix: the previous f-strings embedded a conditional inside the format
        # spec (`{x:.6e if x else 'N/A'}`), which raises ValueError at format
        # time. Format the optional values up front instead.
        latest_pde_str = f"{latest_pde:.6e}" if latest_pde is not None else "N/A"
        latest_ortho_str = f"{latest_ortho:.6e}" if latest_ortho is not None else "N/A"
        
        summary = f"""
RINN-es Training Summary:
========================
Best PDE Loss: {self.best_loss:.6e} (epoch {self.best_epoch})
Latest PDE Loss: {latest_pde_str}
Latest Ortho Loss: {latest_ortho_str}
Patience: {self.patience}
Network Architecture: {self.network.input_dim} -> {self.network.hidden_dim} -> {self.network.output_dim}
        """
        
        return summary


def create_rinn_es_solver(input_dim: int,
                         hidden_dim: int,
                         activation: str = 'tanh',
                         epsilon: float = 1e-2,
                         learning_rate: float = 1e-3,
                         patience: int = 50,
                         seed: Optional[int] = None) -> RINNEarlyStopping:
    """
    Create and initialize a RINN-es solver.
    
    Args:
        input_dim: Input dimension
        hidden_dim: Number of hidden neurons
        activation: Activation function
        epsilon: Regularization coefficient for diagonal loss
        learning_rate: Learning rate for pretrainer
        patience: Early stopping patience
        seed: Random seed
        
    Returns:
        RINNEarlyStopping: Initialized solver
    """
    # Seed torch first so network initialization is reproducible.
    if seed is not None:
        torch.manual_seed(seed)
    
    # Wire the shared network into all three components and hand the
    # assembled solver back to the caller.
    net = PIELMNetwork(input_dim, hidden_dim, activation=activation)
    return RINNEarlyStopping(
        network=net,
        pretrainer=RINNPretrainer(
            network=net,
            epsilon=epsilon,
            learning_rate=learning_rate
        ),
        pielm_solver=PIELMSolver(net),
        patience=patience
    )


class RINNComparison:
    """Utility class for comparing PIELM, RINN, and RINN-es methods."""
    
    @staticmethod
    def run_comparison(problem_config: Dict,
                      network_configs: List[Dict],
                      n_trials: int = 5) -> Dict:
        """
        Run comparison between PIELM, RINN, and RINN-es.
        
        Args:
            problem_config: Problem configuration
            network_configs: List of network configurations to try
            n_trials: Number of random trials
            
        Returns:
            dict: Comparison results
        """
        # Fix: absolute imports to match the module-level import style
        # (`from pielm.solver import ...`); the previous relative forms
        # (`from ..pielm.solver`, `from ..common.metrics`) were inconsistent
        # with the top-of-file imports. Also hoisted: the metrics import used
        # to live inside a conditional branch but was referenced again later.
        from pielm.solver import create_pielm_solver
        from common.metrics import evaluate_model_comprehensive
        
        methods = ['PIELM', 'RINN-es']
        results = {method: [] for method in methods}
        
        for trial in range(n_trials):
            print(f"\n--- Trial {trial + 1}/{n_trials} ---")
            
            for config in network_configs:
                hidden_dim = config['hidden_dim']
                seed = trial * 42  # Different seed for each trial
                
                # Shared evaluation fixtures (may be absent from the config).
                test_points = problem_config.get('test_points')
                true_solution = problem_config.get('true_solution')
                
                # PIELM
                print(f"Running PIELM (hidden_dim={hidden_dim})...")
                pielm_solver = create_pielm_solver(
                    input_dim=2,
                    hidden_dim=hidden_dim,
                    seed=seed
                )
                
                pielm_info = pielm_solver.train(**problem_config)
                
                # Evaluate PIELM; inf marks a run that could not be scored.
                if test_points is not None and true_solution is not None:
                    pielm_metrics = evaluate_model_comprehensive(
                        pielm_solver.network, test_points, true_solution
                    )
                else:
                    pielm_metrics = {'rel_l2_error': float('inf')}
                
                results['PIELM'].append({
                    'hidden_dim': hidden_dim,
                    'trial': trial,
                    'rel_l2_error': pielm_metrics['rel_l2_error'],
                    'training_info': pielm_info
                })
                
                # RINN-es
                print(f"Running RINN-es (hidden_dim={hidden_dim})...")
                rinn_es = create_rinn_es_solver(
                    input_dim=2,
                    hidden_dim=hidden_dim,
                    seed=seed
                )
                
                # RINN-es train() does not accept the evaluation fixtures,
                # so strip them from its copy of the config.
                rinn_config = problem_config.copy()
                rinn_config.pop('test_points', None)
                rinn_config.pop('true_solution', None)
                
                rinn_results = rinn_es.train(**rinn_config)
                
                # Evaluate RINN-es
                if test_points is not None and true_solution is not None:
                    rinn_metrics = evaluate_model_comprehensive(
                        rinn_es.network, test_points, true_solution
                    )
                else:
                    rinn_metrics = {'rel_l2_error': float('inf')}
                
                results['RINN-es'].append({
                    'hidden_dim': hidden_dim,
                    'trial': trial,
                    'rel_l2_error': rinn_metrics['rel_l2_error'],
                    'training_info': rinn_results
                })
        
        return results
    
    @staticmethod
    def summarize_results(results: Dict) -> None:
        """Print mean/std/min/max relative L2 error and success rate per method."""
        print("\n" + "="*60)
        print("COMPARISON SUMMARY")
        print("="*60)
        
        for method, method_results in results.items():
            if not method_results:
                continue
                
            errors = [r['rel_l2_error'] for r in method_results]
            # Fix: drop ALL non-finite values before computing statistics.
            # The previous filter (`e != float('inf')`) let -inf and NaN
            # through, poisoning np.mean/np.std.
            errors = [e for e in errors if np.isfinite(e)]
            
            if errors:
                mean_error = np.mean(errors)
                std_error = np.std(errors)
                min_error = np.min(errors)
                max_error = np.max(errors)
                
                print(f"\n{method}:")
                print(f"  Mean L2 Error: {mean_error:.6e} ± {std_error:.6e}")
                print(f"  Min L2 Error:  {min_error:.6e}")
                print(f"  Max L2 Error:  {max_error:.6e}")
                print(f"  Success Rate:  {len(errors)}/{len(method_results)}")
            else:
                print(f"\n{method}: No successful runs")