"""
Error metrics and evaluation functions for RINN implementation.
Based on equations from Paper 2506.17654 (Eq. 2.7).
"""

import torch
import numpy as np


def relative_l2_error(u_true, u_pred):
    """
    Compute relative L2 error as defined in Eq. 2.7:
    E_{L^2} = sqrt(sum|u_true - u_pred|^2) / sqrt(sum|u_true|^2)

    Args:
        u_true: True solution values (torch.Tensor, ndarray, or array-like)
        u_pred: Predicted solution values (torch.Tensor, ndarray, or array-like)

    Returns:
        float: Relative L2 error. Returns inf when u_true is identically
            zero but u_pred is not, and 0.0 when both are zero.
    """
    if isinstance(u_true, torch.Tensor):
        u_true = u_true.detach().cpu().numpy()
    if isinstance(u_pred, torch.Tensor):
        u_pred = u_pred.detach().cpu().numpy()
    # Accept plain lists/scalars in addition to ndarrays.
    u_true = np.asarray(u_true)
    u_pred = np.asarray(u_pred)

    # np.linalg.norm on a flattened array is exactly sqrt(sum(|x|^2)).
    numerator = np.linalg.norm(np.ravel(u_true - u_pred))
    denominator = np.linalg.norm(np.ravel(u_true))

    if denominator == 0:
        # Undefined (inf) unless the prediction also matches the all-zero
        # truth exactly, in which case the error is zero.
        return float('inf') if numerator > 0 else 0.0

    return float(numerator / denominator)


def mean_absolute_error(u_true, u_pred):
    """
    Compute mean absolute error as defined in Eq. 2.7:
    E_{L^1} = (1/N) * sum|u_true - u_pred|

    Args:
        u_true: True solution values (torch.Tensor, ndarray, or array-like)
        u_pred: Predicted solution values (torch.Tensor, ndarray, or array-like)

    Returns:
        float: Mean absolute error
    """
    if isinstance(u_true, torch.Tensor):
        u_true = u_true.detach().cpu().numpy()
    if isinstance(u_pred, torch.Tensor):
        u_pred = u_pred.detach().cpu().numpy()
    # Accept plain lists/scalars in addition to ndarrays.
    u_true = np.asarray(u_true)
    u_pred = np.asarray(u_pred)

    # Cast so the documented return type (builtin float) holds, not np.float64.
    return float(np.mean(np.abs(u_true - u_pred)))


def max_absolute_error(u_true, u_pred):
    """
    Compute maximum absolute error (L-infinity norm of the error).

    Args:
        u_true: True solution values (torch.Tensor, ndarray, or array-like)
        u_pred: Predicted solution values (torch.Tensor, ndarray, or array-like)

    Returns:
        float: Maximum absolute error
    """
    if isinstance(u_true, torch.Tensor):
        u_true = u_true.detach().cpu().numpy()
    if isinstance(u_pred, torch.Tensor):
        u_pred = u_pred.detach().cpu().numpy()
    # Accept plain lists/scalars in addition to ndarrays.
    u_true = np.asarray(u_true)
    u_pred = np.asarray(u_pred)

    # Cast so the documented return type (builtin float) holds, not np.float64.
    return float(np.max(np.abs(u_true - u_pred)))


def root_mean_square_error(u_true, u_pred):
    """
    Compute root mean square error.

    Args:
        u_true: True solution values (torch.Tensor, ndarray, or array-like)
        u_pred: Predicted solution values (torch.Tensor, ndarray, or array-like)

    Returns:
        float: RMSE
    """
    if isinstance(u_true, torch.Tensor):
        u_true = u_true.detach().cpu().numpy()
    if isinstance(u_pred, torch.Tensor):
        u_pred = u_pred.detach().cpu().numpy()
    # Accept plain lists/scalars in addition to ndarrays.
    u_true = np.asarray(u_true)
    u_pred = np.asarray(u_pred)

    # Cast so the documented return type (builtin float) holds, not np.float64.
    return float(np.sqrt(np.mean((u_true - u_pred)**2)))


def compute_pde_residual_loss(network, points, pde_operator, target_values=None):
    """
    Compute PDE residual loss as defined in Eq. 3.1:
    L_res = sqrt((1/K_res) * sum|A*u(x) - f(x)|^2)

    Args:
        network: Neural network (callable mapping points -> predictions)
        points: Collocation points; requires_grad is enabled on them in
            place so the operator can differentiate through the network.
        pde_operator: Callable (network, points) -> A*u(x) residual term
        target_values: Target values f(x); if None assumes homogeneous
            equation (f = 0)

    Returns:
        torch.Tensor: Scalar PDE residual loss (RMSE of the residual)
    """
    # Input gradients are needed for autograd-based differential operators
    # (e.g. a Laplacian for the Poisson equation).
    points.requires_grad_(True)

    # The operator evaluates the network internally, so no separate
    # forward pass is needed here (the original dead `u = network(points)`
    # has been removed).
    residual = pde_operator(network, points)

    if target_values is not None:
        residual = residual - target_values

    # RMSE of the residual over all collocation points.
    return torch.sqrt(torch.mean(residual**2))


def compute_boundary_loss(network, boundary_points, boundary_values):
    """
    Compute boundary condition loss as defined in Eq. 3.1:
    L_bcs = sqrt((1/K_bcs) * sum|B*u(x) - g(x)|^2)

    Args:
        network: Neural network
        boundary_points: Points on domain boundary
        boundary_values: Target boundary values

    Returns:
        torch.Tensor: Boundary condition loss (scalar RMSE)
    """
    # RMSE between the network output on the boundary and the prescribed
    # boundary data.
    mismatch = network(boundary_points) - boundary_values
    return torch.sqrt((mismatch ** 2).mean())


def compute_initial_condition_loss(network, initial_points, initial_values):
    """
    Compute initial condition loss as defined in Eq. 3.1:
    L_ics = sqrt((1/K_ics) * sum|u(x) - h(x)|^2)

    Args:
        network: Neural network
        initial_points: Points for initial conditions
        initial_values: Target initial values

    Returns:
        torch.Tensor: Initial condition loss (scalar RMSE)
    """
    # RMSE between the network output at the initial-time points and the
    # prescribed initial data.
    mismatch = network(initial_points) - initial_values
    return torch.sqrt((mismatch ** 2).mean())


def compute_total_pde_loss(network, res_points, bc_points, bc_values, 
                          pde_operator, target_values=None, ic_points=None, ic_values=None):
    """
    Compute total PDE loss as defined in Eq. 3.1:
    L_pde = L_res + L_bcs + L_ics

    Args:
        network: Neural network
        res_points: Residual (interior) collocation points
        bc_points: Boundary points
        bc_values: Boundary values
        pde_operator: PDE operator function
        target_values: Target values for PDE (f in Au=f)
        ic_points: Initial condition points (optional)
        ic_values: Initial condition values (optional)

    Returns:
        dict: Dictionary with individual losses ('L_res', 'L_bcs', 'L_ics')
            and the total ('L_pde'), all as torch scalars
    """
    # Residual loss over interior collocation points
    loss_res = compute_pde_residual_loss(network, res_points, pde_operator, target_values)

    # Boundary condition loss
    loss_bcs = compute_boundary_loss(network, bc_points, bc_values)

    # Initial condition loss (only for time-dependent problems). The zero
    # placeholder is created on the same dtype/device as loss_res so the
    # sum below never mixes devices (e.g. CPU scalar + CUDA tensor) or
    # silently promotes dtypes.
    if ic_points is not None and ic_values is not None:
        loss_ics = compute_initial_condition_loss(network, ic_points, ic_values)
    else:
        loss_ics = torch.zeros((), dtype=loss_res.dtype, device=loss_res.device)

    # Total PDE loss (unweighted sum of the three terms)
    loss_total = loss_res + loss_bcs + loss_ics

    return {
        'L_res': loss_res,
        'L_bcs': loss_bcs,
        'L_ics': loss_ics,
        'L_pde': loss_total
    }


def evaluate_model_comprehensive(network, test_points, true_solution, 
                                pde_operator=None, bc_points=None, bc_values=None):
    """
    Comprehensive model evaluation with multiple metrics.

    Args:
        network: Trained neural network
        test_points: Test points for evaluation
        true_solution: True solution values at test points
        pde_operator: PDE operator for residual computation (optional)
        bc_points: Boundary points (optional)
        bc_values: Boundary values (optional)

    Returns:
        dict: Dictionary with comprehensive evaluation metrics
    """
    # Forward pass without building a graph: only point-wise error
    # metrics are computed from these predictions.
    with torch.no_grad():
        predictions = network(test_points)

    metrics = {}
    metrics['rel_l2_error'] = relative_l2_error(true_solution, predictions)
    metrics['mae'] = mean_absolute_error(true_solution, predictions)
    metrics['max_error'] = max_absolute_error(true_solution, predictions)
    metrics['rmse'] = root_mean_square_error(true_solution, predictions)

    # Optionally report how well the network satisfies the PDE itself
    # (this runs outside no_grad, since the operator may need autograd).
    if pde_operator is not None:
        residual_loss = compute_pde_residual_loss(network, test_points, pde_operator)
        metrics['pde_residual'] = residual_loss.item()

    # Optionally report the boundary-condition mismatch.
    if bc_points is not None and bc_values is not None:
        metrics['boundary_error'] = compute_boundary_loss(network, bc_points, bc_values).item()

    return metrics


def print_metrics(metrics, title="Model Evaluation"):
    """Print metrics in a formatted table."""
    print(f"\n{title}")
    print("=" * len(title))
    for name, value in metrics.items():
        if isinstance(value, (int, float)):
            print(f"{name:<20}: {value:.6e}")
        else:
            print(f"{name:<20}: {value}")
    print()


class MetricsTracker:
    """Track named metrics as (epoch, value) pairs over a training run."""

    def __init__(self):
        # Maps metric name -> list of (epoch, value) pairs, in call order.
        self.history = {}

    def update(self, metrics_dict, epoch):
        """Record every metric in metrics_dict under the given epoch."""
        for name, value in metrics_dict.items():
            self.history.setdefault(name, []).append((epoch, value))

    def get_best(self, metric_name, minimize=True):
        """Return (best_value, epoch) for a metric, or (None, None) if unseen."""
        if metric_name not in self.history:
            return None, None

        # Ties resolve to the earliest epoch (first occurrence), matching
        # argmin/argmax semantics.
        select = min if minimize else max
        best_epoch, best_value = select(self.history[metric_name],
                                        key=lambda pair: pair[1])
        return best_value, best_epoch

    def get_latest(self, metric_name):
        """Return the most recently recorded value for a metric, or None."""
        records = self.history.get(metric_name)
        if not records:
            return None
        return records[-1][1]