"""
Interface-Attention Neural Network (IA-NN) for the discontinuous component ω in AE-PINN.

This module implements the core innovation of AE-PINN: an attention-enhanced neural network
that can capture discontinuous behavior across interfaces in elliptic problems.

Based on Equation 2.6 from paper 2506.18332:
H^0 = x
Q^n = W^n_Q H^(n-1) + b^n_Q
K^n = W^n_K H^(n-1) + b^n_K  
V^n = W^n_V H^(n-1) + b^n_V
Z^n = σ(W^n(σ(Q^n ⊗ K^n) ⊗ V^n) + b^n)
H^n = (1-Z^n) ⊗ T(φ(x)) + Z^n ⊗ H^(n-1)
u_IA(x) = W^(M+1) H^M + b^(M+1)

where T(φ(x)) is the transmitter network and φ(x) is the level-set function.
"""

import torch
import torch.nn as nn
import numpy as np
from typing import Callable, Optional


class TransmitterNetwork(nn.Module):
    """
    Transmitter network T(φ(x)) that processes the level-set function.

    The transmitter encodes interface geometry through the scalar
    level-set value φ(x), producing a feature vector that the IA-NN
    attention layers fuse with their hidden states.
    """

    def __init__(self, input_dim=1, output_dim=50, hidden_layers=None, activation='tanh'):
        """
        Build the transmitter MLP.

        Args:
            input_dim (int): Input dimension (1 for the scalar φ(x)).
            output_dim (int): Output dimension (should match the IA-NN hidden dim).
            hidden_layers (list): Hidden layer widths; defaults to [20, 30].
            activation (str): One of 'tanh', 'relu', 'sigmoid', 'sin'.
        """
        super(TransmitterNetwork, self).__init__()

        widths = [input_dim] + ([20, 30] if hidden_layers is None else hidden_layers) + [output_dim]
        self.layers = nn.ModuleList(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(widths[:-1], widths[1:])
        )
        self.activation = self._get_activation(activation)
        self._initialize_weights()

    def _get_activation(self, activation):
        """Resolve an activation name to its torch callable."""
        table = {
            'tanh': torch.tanh,
            'relu': torch.relu,
            'sigmoid': torch.sigmoid,
            'sin': torch.sin,
        }
        if activation not in table:
            raise ValueError(f"Unsupported activation function: {activation}")
        return table[activation]

    def _initialize_weights(self):
        """Xavier-initialize all weights, zero all biases."""
        *hidden, final = self.layers
        for layer in hidden:
            nn.init.xavier_normal_(layer.weight)
            nn.init.zeros_(layer.bias)
        # Small gain keeps the initially transmitted features near zero.
        nn.init.xavier_normal_(final.weight, gain=0.1)
        nn.init.zeros_(final.bias)

    def forward(self, phi):
        """
        Apply T(φ(x)): activated hidden layers followed by a linear output.

        Args:
            phi (torch.Tensor): Level-set values, shape (batch_size, 1).

        Returns:
            torch.Tensor: Transmitted features, shape (batch_size, output_dim).
        """
        *hidden, final = self.layers
        h = phi
        for layer in hidden:
            h = self.activation(layer(h))
        return final(h)


class AttentionModule(nn.Module):
    """
    One attention layer of the IA-NN.

    Computes element-wise query/key/value features from the previous
    hidden state, derives a fusion gate Z^n from them, and blends the
    previous state with the transmitted interface features:

        Q^n = W^n_Q H^(n-1) + b^n_Q
        K^n = W^n_K H^(n-1) + b^n_K
        V^n = W^n_V H^(n-1) + b^n_V
        Z^n = sigmoid(W^n(σ(Q^n ⊗ K^n) ⊗ V^n) + b^n)
        H^n = (1 - Z^n) ⊗ T(φ(x)) + Z^n ⊗ H^(n-1)
    """

    def __init__(self, hidden_dim, activation='tanh'):
        """
        Args:
            hidden_dim (int): Width of the hidden state.
            activation (str): One of 'tanh', 'relu', 'sigmoid', 'sin'.
        """
        super(AttentionModule, self).__init__()

        self.hidden_dim = hidden_dim

        # Plain linear maps produce the Q, K, V features.
        self.W_Q = nn.Linear(hidden_dim, hidden_dim)
        self.W_K = nn.Linear(hidden_dim, hidden_dim)
        self.W_V = nn.Linear(hidden_dim, hidden_dim)

        # Produces the fusion gate Z^n.
        self.gate_network = nn.Linear(hidden_dim, hidden_dim)

        self.activation = self._get_activation(activation)
        self._initialize_weights()

    def _get_activation(self, activation):
        """Resolve an activation name to its torch callable."""
        table = {
            'tanh': torch.tanh,
            'relu': torch.relu,
            'sigmoid': torch.sigmoid,
            'sin': torch.sin,
        }
        if activation not in table:
            raise ValueError(f"Unsupported activation function: {activation}")
        return table[activation]

    def _initialize_weights(self):
        """Xavier init; small gate gain keeps Z^n near 0.5 at the start."""
        nn.init.xavier_normal_(self.W_Q.weight)
        nn.init.xavier_normal_(self.W_K.weight)
        nn.init.xavier_normal_(self.W_V.weight)
        nn.init.xavier_normal_(self.gate_network.weight, gain=0.1)
        for lin in (self.W_Q, self.W_K, self.W_V, self.gate_network):
            nn.init.zeros_(lin.bias)

    def forward(self, H_prev, T_phi):
        """
        Blend H^(n-1) with T(φ(x)) via the learned gate.

        Args:
            H_prev (torch.Tensor): Previous hidden state H^(n-1).
            T_phi (torch.Tensor): Transmitter output T(φ(x)).

        Returns:
            torch.Tensor: Updated hidden state H^n, same shape as H_prev.
        """
        q = self.W_Q(H_prev)
        k = self.W_K(H_prev)
        v = self.W_V(H_prev)

        # σ(Q ⊗ K) ⊗ V with ⊗ taken element-wise.
        attended = self.activation(q * k) * v

        # Gate interpolates between transmitted interface features and
        # the carried-over hidden state.
        gate = torch.sigmoid(self.gate_network(attended))
        return (1 - gate) * T_phi + gate * H_prev


class IANN(nn.Module):
    """
    Interface-Attention Neural Network (IA-NN) for the discontinuous component.

    Captures the discontinuous behavior ω across the interface by fusing a
    spatial hidden state with transmitted level-set features through a stack
    of attention modules (Eq. 2.6 of the paper).

    NOTE(review): H^0 here is a learned linear projection of x rather than
    x itself (as Eq. 2.6 writes H^0 = x); the projection decouples the
    hidden width from input_dim.
    """

    def __init__(self, input_dim=2, hidden_dim=50, num_attention_layers=3,
                 output_dim=1, level_set_func=None, activation='tanh'):
        """
        Initialize IA-NN.

        Args:
            input_dim (int): Input spatial dimension (typically 2).
            hidden_dim (int): Hidden layer dimension.
            num_attention_layers (int): Number of attention modules M.
            output_dim (int): Output dimension (typically 1).
            level_set_func (callable): Level-set function φ(x, y).
            activation (str): Activation function name.
        """
        super(IANN, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.num_attention_layers = num_attention_layers
        self.output_dim = output_dim
        self.level_set_func = level_set_func

        # Input projection: maps x to the initial hidden state H^0.
        self.input_projection = nn.Linear(input_dim, hidden_dim)

        # Transmitter network T(φ(x)); its output width must match the
        # hidden width so the attention gate can blend the two.
        self.transmitter = TransmitterNetwork(
            input_dim=1,
            output_dim=hidden_dim,
            activation=activation
        )

        # Stack of M attention modules applied sequentially.
        self.attention_modules = nn.ModuleList([
            AttentionModule(hidden_dim, activation)
            for _ in range(num_attention_layers)
        ])

        # Final linear readout: u_IA(x) = W^(M+1) H^M + b^(M+1).
        self.output_layer = nn.Linear(hidden_dim, output_dim)

        self._initialize_weights()

    def _initialize_weights(self):
        """Xavier-initialize projection and readout; damp the readout gain."""
        nn.init.xavier_normal_(self.input_projection.weight)
        nn.init.zeros_(self.input_projection.bias)

        nn.init.xavier_normal_(self.output_layer.weight, gain=0.1)
        nn.init.zeros_(self.output_layer.bias)

    def set_level_set_function(self, level_set_func):
        """
        Set the level-set function φ(x, y).

        Args:
            level_set_func (callable): Function mapping (x, y) tensors
                to φ(x, y).
        """
        self.level_set_func = level_set_func

    def forward(self, x):
        """
        Forward pass through IA-NN.

        Args:
            x (torch.Tensor): Input coordinates, shape (batch_size, input_dim).
                The level-set function is evaluated on the first two columns,
                so effectively only 2-D spatial inputs are supported here.

        Returns:
            torch.Tensor: Network output u_IA(x), shape (batch_size, output_dim).

        Raises:
            ValueError: If no level-set function has been set.
        """
        if self.level_set_func is None:
            raise ValueError("Level-set function not set. Use set_level_set_function().")

        # φ(x). No special grad handling is needed: if x carries grad the
        # graph must be kept for PDE residuals, and if it does not, autograd
        # builds no graph anyway (the old no_grad branch was a no-op).
        phi_vals = self.level_set_func(x[:, 0], x[:, 1]).unsqueeze(1)

        # Transmitter network: T(φ(x)), shape (batch_size, hidden_dim).
        T_phi = self.transmitter(phi_vals)

        # Initial hidden state H^0, shape (batch_size, hidden_dim).
        H = self.input_projection(x)

        # H^n = attention(H^(n-1), T(φ)) for n = 1..M.
        for attention_module in self.attention_modules:
            H = attention_module(H, T_phi)

        # u_IA(x) = W^(M+1) H^M + b^(M+1)
        return self.output_layer(H)

    def compute_gradients(self, x):
        """
        Compute u_IA and its first/second derivatives for PDE residuals.

        NOTE: enables ``requires_grad`` on ``x`` in place; callers that pass
        the same tensor elsewhere see that flag set.

        Args:
            x (torch.Tensor): Input points, shape (batch_size, 2).

        Returns:
            tuple: (u, u_x, u_y, u_xx, u_yy, u_xy)
        """
        x.requires_grad_(True)

        u = self.forward(x)

        # First derivatives: full gradient ∇u in one backward pass.
        grad_u = torch.autograd.grad(
            u, x, grad_outputs=torch.ones_like(u),
            create_graph=True, retain_graph=True
        )[0]
        u_x_val = grad_u[:, 0:1]  # ∂u/∂x
        u_y_val = grad_u[:, 1:2]  # ∂u/∂y

        # Second derivatives. ∂²u/∂x² and ∂²u/∂x∂y both differentiate u_x,
        # so extract them from a single grad call (one backward pass fewer
        # than differentiating u_x twice).
        grad_ux = torch.autograd.grad(
            u_x_val, x, grad_outputs=torch.ones_like(u_x_val),
            create_graph=True, retain_graph=True
        )[0]
        u_xx = grad_ux[:, 0:1]  # ∂²u/∂x²
        u_xy = grad_ux[:, 1:2]  # ∂²u/∂x∂y

        u_yy = torch.autograd.grad(
            u_y_val, x, grad_outputs=torch.ones_like(u_y_val),
            create_graph=True, retain_graph=True
        )[0][:, 1:2]  # ∂²u/∂y²

        return u, u_x_val, u_y_val, u_xx, u_yy, u_xy


class CompositeAEPINN(nn.Module):
    """
    Composite AE-PINN model: u_nn = μ_nn + u_IA.

    Superposes the continuous FCNN component μ_nn and the discontinuous
    IA-NN component u_IA to form the complete AE-PINN solution ansatz.
    """

    def __init__(self, fcnn, ia_nn):
        """
        Args:
            fcnn (FCNN): Fully connected network for the continuous component.
            ia_nn (IANN): Interface-attention network for the discontinuous
                component.
        """
        super(CompositeAEPINN, self).__init__()

        self.fcnn = fcnn    # μ_nn: continuous component
        self.ia_nn = ia_nn  # u_IA: discontinuous component

    def forward(self, x):
        """
        Evaluate u_nn(x) = μ_nn(x) + u_IA(x).

        Args:
            x (torch.Tensor): Input coordinates.

        Returns:
            torch.Tensor: Sum of both component outputs.
        """
        return self.fcnn(x) + self.ia_nn(x)

    def compute_gradients(self, x):
        """
        Derivatives of the composite model via linearity of differentiation.

        Args:
            x (torch.Tensor): Input points.

        Returns:
            tuple: (u, u_x, u_y, u_xx, u_yy, u_xy), each the sum of the
            corresponding FCNN and IA-NN quantity.
        """
        mu_terms = self.fcnn.compute_gradients(x)
        ia_terms = self.ia_nn.compute_gradients(x)
        # Every derivative of μ + u_IA is the sum of the derivatives.
        return tuple(m + w for m, w in zip(mu_terms, ia_terms))


def create_star_shaped_level_set(r0=0.5, betas=None, etas=None, thetas=None):
    """
    Create level-set function for a star-shaped interface (Eq. 3.17).

    φ(x, y) = √(x² + y²) − r₀(1 + Σₖ βₖ cos(ηₖ(atan2(y, x) − θₖ)))

    Args:
        r0 (float): Base radius of the interface.
        betas (list): Amplitude coefficients; default [0.3, 0.2, 0.1].
        etas (list): Frequency coefficients; default [3, 4, 5].
        thetas (list): Phase coefficients; default [0, π/4, π/2].

    Returns:
        callable: φ(x, y) taking two torch tensors; negative inside the
        interface, zero on it, positive outside.
    """
    if betas is None:
        betas = [0.3, 0.2, 0.1]
    if etas is None:
        etas = [3, 4, 5]
    if thetas is None:
        thetas = [0, np.pi / 4, np.pi / 2]

    def level_set_func(x, y):
        """Star-shaped level-set function φ(x, y)."""
        # atan2 performs no division and is well-defined for x = 0, so no
        # epsilon is needed; the old `x + 1e-10` slightly biased the angle
        # near the negative-x axis for no benefit.
        angle = torch.atan2(y, x)

        # r₀(1 + Σ βₖ cos(ηₖ(θ − θₖ))): angular modulation of the radius.
        radius_mod = r0 * torch.ones_like(x)
        for beta, eta, theta in zip(betas, etas, thetas):
            radius_mod = radius_mod + r0 * beta * torch.cos(eta * (angle - theta))

        # Distance from the origin minus the direction-dependent radius.
        return torch.sqrt(x**2 + y**2) - radius_mod

    return level_set_func


if __name__ == "__main__":
    # Smoke-test the IA-NN implementation end to end.
    print("Testing IA-NN implementation...")

    # Random 2-D coordinates with autograd enabled for derivative checks.
    n_points = 50
    coords = torch.randn(n_points, 2, requires_grad=True)

    # Star-shaped interface geometry.
    phi_func = create_star_shaped_level_set()

    # Build the IA-NN and report its size.
    ia_nn = IANN(
        input_dim=2,
        hidden_dim=30,
        num_attention_layers=3,
        level_set_func=phi_func
    )
    n_params = sum(p.numel() for p in ia_nn.parameters())
    print(f"IA-NN created with {n_params} parameters")

    # Forward pass.
    u_ia = ia_nn(coords)
    print(f"IA-NN output shape: {u_ia.shape}")

    # First/second derivative computation.
    u, u_x, u_y, u_xx, u_yy, u_xy = ia_nn.compute_gradients(coords)
    print(f"Gradient computation successful")

    # Composite model u_nn = μ_nn + u_IA.
    from fcnn import FCNN
    fcnn = FCNN(input_dim=2, output_dim=1, hidden_layers=[20, 20])
    composite = CompositeAEPINN(fcnn, ia_nn)

    u_total = composite(coords)
    print(f"Composite model output shape: {u_total.shape}")

    print("IA-NN tests completed successfully!")