"""
Multi-term loss function for AE-PINN with PDE residual, boundary, and interface conditions.

This module implements the comprehensive loss function from Equation 2.9 of paper 2506.18332:

L = τ (1/N_I) Σ |∇·(α∇u_nn) - f|² 
  + τ_b (1/N_b) Σ |u_nn - h|²
  + τ_Γ¹ (1/N_Γ¹) Σ |[[u_nn]] - β|²
  + τ_Γ² (1/N_Γ²) Σ |[[α∇u_nn]]·n - ρ|²

where:
- First term: PDE residual loss in interior
- Second term: Boundary condition loss  
- Third term: Interface jump condition loss for solution
- Fourth term: Interface flux jump condition loss
"""

import torch
import torch.nn as nn
import numpy as np
from typing import Callable, Dict, List, Tuple, Optional


class AEPINNLoss(nn.Module):
    """
    Multi-term loss function for Attention-Enhanced Physics-Informed Neural Networks.

    Implements all loss components for elliptic interface problems:
    - PDE residual ∇·(α∇u) - f at interior collocation points
    - Dirichlet boundary condition u = h on the domain boundary
    - Interface jump condition [[u]] = β for the solution
    - Interface flux jump condition [[α∇u]]·n = ρ
    """
    
    def __init__(self, alpha_func, f_func, level_set_func, 
                 tau_pde=1.0, tau_bc=100.0, tau_interface_u=100.0, tau_interface_flux=100.0):
        """
        Initialize AE-PINN loss function.
        
        Args:
            alpha_func (callable): Diffusion coefficient α(x,y) (can be discontinuous)
            f_func (callable): Source term f(x,y)
            level_set_func (callable): Level-set function φ(x,y) whose zero
                contour defines the interface Γ
            tau_pde (float): Weight for PDE residual loss
            tau_bc (float): Weight for boundary condition loss
            tau_interface_u (float): Weight for interface jump condition (solution)
            tau_interface_flux (float): Weight for interface flux jump condition
        """
        super().__init__()
        
        self.alpha_func = alpha_func
        self.f_func = f_func
        self.level_set_func = level_set_func
        
        # Loss weights τ, τ_b, τ_Γ¹, τ_Γ² from Eq. 2.9
        self.tau_pde = tau_pde
        self.tau_bc = tau_bc
        self.tau_interface_u = tau_interface_u
        self.tau_interface_flux = tau_interface_flux
        
        # Per-term loss values of the most recent forward() call, for monitoring.
        self.loss_components = {}
    
    def _interface_normals(self, x_interface):
        """
        Compute unit normal vectors n = ∇φ/|∇φ| at interface points.
        
        The normal is obtained by differentiating the level-set function via
        autograd; a small epsilon guards against division by zero where |∇φ|
        vanishes.
        
        Args:
            x_interface (torch.Tensor): Interface points of shape (N, 2);
                `requires_grad` is enabled in place.
        
        Returns:
            tuple: (normal_x, normal_y), each of shape (N, 1)
        """
        x_interface.requires_grad_(True)
        phi_vals = self.level_set_func(x_interface[:, 0], x_interface[:, 1])
        
        phi_grad = torch.autograd.grad(
            phi_vals, x_interface, grad_outputs=torch.ones_like(phi_vals),
            create_graph=True, retain_graph=True
        )[0]
        
        # Normalize; 1e-10 prevents 0/0 at degenerate level-set points.
        normal_norm = torch.sqrt(phi_grad[:, 0:1]**2 + phi_grad[:, 1:2]**2 + 1e-10)
        normal_x = phi_grad[:, 0:1] / normal_norm
        normal_y = phi_grad[:, 1:2] / normal_norm
        return normal_x, normal_y
    
    def pde_residual_loss(self, model, x_interior):
        """
        Compute PDE residual loss: |∇·(α∇u) - f|²
        
        For the elliptic PDE ∇·(α∇u) = f, expanded with the product rule as
        ∇·(α∇u) = α∇²u + ∇α·∇u, where ∇α comes from autograd (zero when α is
        constant / non-differentiable w.r.t. the inputs).
        
        Args:
            model: Neural network model (composite AE-PINN) exposing
                ``compute_gradients`` that returns (u, u_x, u_y, u_xx, u_yy, u_xy)
            x_interior (torch.Tensor): Interior collocation points, shape (N, 2)
            
        Returns:
            torch.Tensor: Scalar mean-squared PDE residual loss
        """
        x_interior.requires_grad_(True)
        
        # Network output and first/second derivatives.
        u, u_x, u_y, u_xx, u_yy, u_xy = model.compute_gradients(x_interior)
        
        # Diffusion coefficient α(x,y), shaped (N, 1) to broadcast with u_x.
        alpha_vals = self.alpha_func(x_interior[:, 0], x_interior[:, 1]).unsqueeze(1)
        
        # ∇α via autograd. `allow_unused=True` returns None when α is not a
        # differentiable function of the inputs (e.g. piecewise constant via
        # torch.where of literals); a RuntimeError is treated the same way.
        # NOTE: was a bare `except:` which also swallowed KeyboardInterrupt.
        try:
            alpha_grad = torch.autograd.grad(
                alpha_vals, x_interior, grad_outputs=torch.ones_like(alpha_vals),
                create_graph=True, retain_graph=True, allow_unused=True
            )[0]
        except RuntimeError:
            alpha_grad = None
        
        if alpha_grad is not None:
            alpha_x_val = alpha_grad[:, 0:1]
            alpha_y_val = alpha_grad[:, 1:2]
        else:
            # Constant-α case: ∇α ≡ 0.
            alpha_x_val = torch.zeros_like(u_x)
            alpha_y_val = torch.zeros_like(u_y)
        
        # ∇·(α∇u) = α(u_xx + u_yy) + α_x u_x + α_y u_y
        div_alpha_grad_u = (alpha_vals * (u_xx + u_yy) +
                            alpha_x_val * u_x + alpha_y_val * u_y)
        
        # Source term f(x,y), shaped (N, 1).
        f_vals = self.f_func(x_interior[:, 0], x_interior[:, 1]).unsqueeze(1)
        
        # Mean-squared residual of ∇·(α∇u) - f.
        pde_residual = div_alpha_grad_u - f_vals
        return torch.mean(pde_residual**2)
    
    def boundary_condition_loss(self, model, x_boundary, u_boundary):
        """
        Compute boundary condition loss: |u - h|²
        
        Args:
            model: Neural network model (callable on (N, 2) points)
            x_boundary (torch.Tensor): Boundary points, shape (N, 2)
            u_boundary (torch.Tensor): Boundary values h(x), shape (N, 1)
            
        Returns:
            torch.Tensor: Scalar mean-squared boundary condition loss
        """
        u_pred = model(x_boundary)
        bc_residual = u_pred - u_boundary
        return torch.mean(bc_residual**2)
    
    def interface_jump_loss(self, model, x_interface, jump_values):
        """
        Compute interface jump condition loss: |[[u]] - β|²
        
        The jump [[u]] = u⁺ - u⁻ is approximated by evaluating the network at
        points offset by ±eps along the interface normal (finite-difference
        approach; separate one-sided evaluations would be more accurate).
        
        Args:
            model: Neural network model
            x_interface (torch.Tensor): Interface points, shape (N, 2)
            jump_values (torch.Tensor): Prescribed jump values β, shape (N, 1)
            
        Returns:
            torch.Tensor: Scalar mean-squared interface jump loss
        """
        # Small perturbation across the interface.
        eps = 1e-6
        
        normal_x, normal_y = self._interface_normals(x_interface)
        offset = eps * torch.cat([normal_x, normal_y], dim=1)
        
        # Evaluate the solution slightly on each side of the interface.
        u_plus = model(x_interface + offset)
        u_minus = model(x_interface - offset)
        
        # Residual of [[u]] - β.
        jump_residual = (u_plus - u_minus) - jump_values
        return torch.mean(jump_residual**2)
    
    def interface_flux_jump_loss(self, model, x_interface, flux_jump_values):
        """
        Compute interface flux jump condition loss: |[[α∇u]]·n - ρ|²
        
        The flux jump [[α∇u]]·n = (α∇u)⁺·n - (α∇u)⁻·n is approximated by
        evaluating α and ∇u at points offset by ±eps along the normal.
        
        Args:
            model: Neural network model exposing ``compute_gradients``
            x_interface (torch.Tensor): Interface points, shape (N, 2)
            flux_jump_values (torch.Tensor): Prescribed flux jump values ρ,
                shape (N, 1)
            
        Returns:
            torch.Tensor: Scalar mean-squared flux jump loss
        """
        # Small perturbation across the interface.
        eps = 1e-6
        
        normal_x, normal_y = self._interface_normals(x_interface)
        offset = eps * torch.cat([normal_x, normal_y], dim=1)
        x_plus = x_interface + offset
        x_minus = x_interface - offset
        
        # Gradients of the network on each side of the interface.
        _, u_x_plus, u_y_plus, _, _, _ = model.compute_gradients(x_plus)
        _, u_x_minus, u_y_minus, _, _, _ = model.compute_gradients(x_minus)
        
        # One-sided diffusion coefficients α⁺, α⁻.
        alpha_plus = self.alpha_func(x_plus[:, 0], x_plus[:, 1]).unsqueeze(1)
        alpha_minus = self.alpha_func(x_minus[:, 0], x_minus[:, 1]).unsqueeze(1)
        
        # Normal flux α∇u·n on each side.
        flux_plus = alpha_plus * (u_x_plus * normal_x + u_y_plus * normal_y)
        flux_minus = alpha_minus * (u_x_minus * normal_x + u_y_minus * normal_y)
        
        # Residual of [[α∇u]]·n - ρ.
        flux_residual = (flux_plus - flux_minus) - flux_jump_values
        return torch.mean(flux_residual**2)
    
    def forward(self, model, training_data):
        """
        Compute the total weighted loss.
        
        Args:
            model: Neural network model
            training_data (dict): Dictionary containing any of:
                - 'x_interior': interior collocation points
                - 'x_boundary': boundary points
                - 'u_boundary': boundary values
                - 'x_interface': interface points
                - 'jump_values': interface jump values β
                - 'flux_jump_values': interface flux jump values ρ
                Missing keys simply skip the corresponding term.
                
        Returns:
            torch.Tensor: Total weighted loss (a zero scalar tensor when no
                loss term was triggered)
        """
        total_loss = 0.0
        self.loss_components = {}
        
        # PDE residual loss
        if 'x_interior' in training_data:
            loss_pde = self.pde_residual_loss(model, training_data['x_interior'])
            self.loss_components['pde'] = loss_pde.item()
            total_loss += self.tau_pde * loss_pde
        
        # Boundary condition loss
        if 'x_boundary' in training_data and 'u_boundary' in training_data:
            loss_bc = self.boundary_condition_loss(
                model, training_data['x_boundary'], training_data['u_boundary']
            )
            self.loss_components['boundary'] = loss_bc.item()
            total_loss += self.tau_bc * loss_bc
        
        # Interface jump condition loss
        if 'x_interface' in training_data and 'jump_values' in training_data:
            loss_interface = self.interface_jump_loss(
                model, training_data['x_interface'], training_data['jump_values']
            )
            self.loss_components['interface_jump'] = loss_interface.item()
            total_loss += self.tau_interface_u * loss_interface
        
        # Interface flux jump condition loss
        if 'x_interface' in training_data and 'flux_jump_values' in training_data:
            loss_flux = self.interface_flux_jump_loss(
                model, training_data['x_interface'], training_data['flux_jump_values']
            )
            self.loss_components['flux_jump'] = loss_flux.item()
            total_loss += self.tau_interface_flux * loss_flux
        
        # If no term was computed, total_loss is still the Python float 0.0;
        # wrap it so callers always receive a tensor and .item() is valid.
        if not torch.is_tensor(total_loss):
            total_loss = torch.tensor(total_loss)
        
        self.loss_components['total'] = total_loss.item()
        
        return total_loss


class AdaptiveLossWeights(nn.Module):
    """
    Adaptive loss weight scheduler for better training dynamics.

    Tracks a sliding window of recent loss-component values and nudges each
    weight so the weighted components drift toward a common magnitude.
    """

    def __init__(self, initial_weights, adaptation_rate=0.1, min_weight=1e-3, max_weight=1e3):
        """
        Initialize adaptive loss weights.

        Args:
            initial_weights (dict): Initial weight per loss-component name
            adaptation_rate (float): Fraction of the balancing correction
                applied per update (0 = frozen, 1 = full correction)
            min_weight (float): Minimum weight value (clamp)
            max_weight (float): Maximum weight value (clamp)
        """
        super().__init__()

        self.weights = initial_weights.copy()
        self.adaptation_rate = adaptation_rate
        self.min_weight = min_weight
        self.max_weight = max_weight

        # Sliding history (most recent 100 values) per tracked component.
        self.loss_history = {key: [] for key in initial_weights}

    def update_weights(self, loss_components):
        """
        Update loss weights based on current loss components.

        Args:
            loss_components (dict): Current loss-component values; keys not
                present in ``initial_weights`` are ignored
        """
        # Record the new values, keeping only the most recent 100 per key.
        for key, value in loss_components.items():
            if key in self.loss_history:
                history = self.loss_history[key]
                history.append(value)
                if len(history) > 100:
                    del history[:-100]

        # Wait for some history before adapting.
        # FIX: the original indexed self.loss_history['pde'] directly, which
        # raised KeyError whenever 'pde' was not a tracked component.
        if len(self.loss_history.get('pde', [])) <= 10:
            return

        # Average the last 10 recorded values of each standard component.
        avg_losses = {}
        for key in ('pde', 'boundary', 'interface_jump', 'flux_jump'):
            history = self.loss_history.get(key, [])
            if history:
                avg_losses[key] = np.mean(history[-10:])

        # Need at least two components to have anything to balance.
        if len(avg_losses) <= 1:
            return

        # Pull each weight toward the mean component magnitude; 1e-10 guards
        # against division by zero for vanished losses.
        target_loss = np.mean(list(avg_losses.values()))
        for key, avg in avg_losses.items():
            if key in self.weights:
                ratio = target_loss / (avg + 1e-10)
                adjustment = 1.0 + self.adaptation_rate * (ratio - 1.0)
                self.weights[key] = float(np.clip(
                    self.weights[key] * adjustment, self.min_weight, self.max_weight
                ))


def create_sample_problem_functions():
    """
    Build the coefficient, source, and level-set functions of a simple
    circular-interface test problem.

    Returns:
        tuple: (alpha_func, f_func, level_set_func) where the interface is
            the circle of radius 0.5 centered at the origin, α is 1 inside
            the circle and 2 outside, and f ≡ 1.
    """
    radius = 0.5

    def level_set_func(x, y):
        """Signed distance to the circular interface of radius 0.5."""
        return torch.sqrt(x**2 + y**2) - radius

    def alpha_func(x, y):
        """Piecewise-constant diffusion: 1 where φ < 0 (inside), else 2."""
        inside = level_set_func(x, y) < 0
        return torch.where(inside, 1.0, 2.0)

    def f_func(x, y):
        """Unit source term everywhere."""
        return torch.ones_like(x)

    return alpha_func, f_func, level_set_func


if __name__ == "__main__":
    # Smoke test: build a toy interface problem, evaluate every loss term on
    # random data, then run one adaptive-weight update.
    print("Testing AE-PINN loss function...")

    alpha_func, f_func, level_set_func = create_sample_problem_functions()
    loss_fn = AEPINNLoss(alpha_func, f_func, level_set_func)

    # Assemble the composite AE-PINN model from its two sub-networks.
    from networks.fcnn import FCNN
    from networks.ia_nn import IANN, CompositeAEPINN

    fcnn = FCNN(input_dim=2, output_dim=1, hidden_layers=[20, 20])
    ia_nn = IANN(input_dim=2, hidden_dim=20, num_attention_layers=2)
    ia_nn.set_level_set_function(level_set_func)
    model = CompositeAEPINN(fcnn, ia_nn)

    # Random collocation, boundary, and interface data suffice for a smoke test.
    n_interior, n_boundary, n_interface = 100, 50, 30
    training_data = {
        'x_interior': torch.randn(n_interior, 2, requires_grad=True),
        'x_boundary': torch.randn(n_boundary, 2, requires_grad=True),
        'u_boundary': torch.randn(n_boundary, 1),
        'x_interface': torch.randn(n_interface, 2, requires_grad=True),
        'jump_values': torch.randn(n_interface, 1),
        'flux_jump_values': torch.randn(n_interface, 1),
    }

    total_loss = loss_fn(model, training_data)
    print(f"Total loss: {total_loss.item():.6f}")
    print(f"Loss components: {loss_fn.loss_components}")

    # One adaptive-weight update driven by the recorded loss components.
    initial_weights = {
        'pde': 1.0,
        'boundary': 100.0,
        'interface_jump': 100.0,
        'flux_jump': 100.0,
    }
    adaptive_weights = AdaptiveLossWeights(initial_weights)
    adaptive_weights.update_weights(loss_fn.loss_components)
    print(f"Updated weights: {adaptive_weights.weights}")

    print("Loss function tests completed successfully!")