"""
Fully Connected Neural Network (FCNN) for the continuous component μ in AE-PINN.

This module implements the standard multi-layer perceptron (MLP) used to approximate
the continuous component μ of the solution decomposition u = μ + ω.

Based on Equation 2.7 from paper 2506.18332:
Φ^0(x) = x
Φ^n(x) = σ(W^n Φ^(n-1)(x) + b^n), 1 ≤ n ≤ N
μ_nn(x, W, b) = W^(N+1) Φ^N(x) + b^(N+1)
"""

import torch
import torch.nn as nn
import numpy as np


class FCNN(nn.Module):
    """
    Fully Connected Neural Network for continuous component approximation.

    This network approximates the continuous part μ of the solution u = μ + ω
    in the AE-PINN decomposition for elliptic interface problems.

    Architecture (paper 2506.18332, Eq. 2.7):
        Φ^0(x) = x
        Φ^n(x) = σ(W^n Φ^(n-1)(x) + b^n), 1 ≤ n ≤ N
        μ_nn(x) = W^(N+1) Φ^N(x) + b^(N+1)
    """

    def __init__(self, input_dim=2, output_dim=1, hidden_layers=None, activation='tanh'):
        """
        Initialize the FCNN.

        Args:
            input_dim (int): Dimension of input (typically 2 for 2D problems)
            output_dim (int): Dimension of output (typically 1 for scalar problems)
            hidden_layers (list): List of hidden layer sizes [n1, n2, ..., nN];
                defaults to [50, 50, 50] when None
            activation (str): Activation function ('tanh', 'relu', 'sigmoid', 'sin')

        Raises:
            ValueError: If `activation` is not one of the supported names.
        """
        super().__init__()

        if hidden_layers is None:
            hidden_layers = [50, 50, 50]  # Default architecture

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_layers = hidden_layers
        self.activation_name = activation

        # Build affine layers: input -> hidden_1 -> ... -> hidden_N -> output
        layer_sizes = [input_dim] + list(hidden_layers) + [output_dim]
        self.layers = nn.ModuleList(
            [nn.Linear(layer_sizes[i], layer_sizes[i + 1])
             for i in range(len(layer_sizes) - 1)]
        )

        # Resolve the activation up front so an unsupported name fails fast.
        self.activation = self._get_activation(activation)

        self._initialize_weights()

    def _get_activation(self, activation):
        """Map an activation name to the corresponding torch function.

        Raises:
            ValueError: If `activation` is not a supported name.
        """
        if activation == 'tanh':
            return torch.tanh
        elif activation == 'relu':
            return torch.relu
        elif activation == 'sigmoid':
            return torch.sigmoid
        elif activation == 'sin':
            return torch.sin
        else:
            raise ValueError(f"Unsupported activation function: {activation}")

    def _initialize_weights(self):
        """Initialize network weights using Xavier initialization.

        The output layer uses a reduced gain (0.1) so the initial network
        output stays small; all biases start at zero.
        """
        for layer in self.layers[:-1]:  # Hidden layers
            nn.init.xavier_normal_(layer.weight)
            nn.init.zeros_(layer.bias)

        # Output layer - smaller initialization
        nn.init.xavier_normal_(self.layers[-1].weight, gain=0.1)
        nn.init.zeros_(self.layers[-1].bias)

    def forward(self, x):
        """
        Forward pass through the network.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, input_dim)

        Returns:
            torch.Tensor: Output tensor of shape (batch_size, output_dim)
        """
        # Φ^0(x) = x
        phi = x

        # Hidden layers: Φ^n(x) = σ(W^n Φ^(n-1)(x) + b^n)
        for i in range(len(self.layers) - 1):
            phi = self.layers[i](phi)
            phi = self.activation(phi)

        # Output layer: μ_nn(x) = W^(N+1) Φ^N(x) + b^(N+1)
        output = self.layers[-1](phi)

        return output

    def compute_gradients(self, x):
        """
        Compute first and second derivatives of the network output.

        This is essential for computing PDE residuals in PINN.

        Assumes the derivatives are taken w.r.t. the first two input
        coordinates (input_dim >= 2). NOTE(review): with output_dim > 1,
        grad_outputs=ones sums derivatives across output components —
        intended use appears to be scalar output (output_dim == 1).

        Side effect: requires_grad is enabled on `x` in place, so the
        caller's tensor is modified.

        Args:
            x (torch.Tensor): Input points, shape (batch_size, input_dim)

        Returns:
            tuple: (u, u_x, u_y, u_xx, u_yy, u_xy) where:
                - u: network output
                - u_x, u_y: first derivatives
                - u_xx, u_yy, u_xy: second derivatives
        """
        x.requires_grad_(True)

        # Forward pass
        u = self.forward(x)

        # First derivatives: one backward pass yields the full gradient ∇u
        grad_u = torch.autograd.grad(
            u, x, grad_outputs=torch.ones_like(u),
            create_graph=True, retain_graph=True
        )[0]

        u_x_val = grad_u[:, 0:1]  # ∂u/∂x
        u_y_val = grad_u[:, 1:2]  # ∂u/∂y

        # Second derivatives. Differentiating u_x once gives BOTH ∂²u/∂x²
        # and ∂²u/∂x∂y — the original code ran this backward pass twice.
        grad_u_x = torch.autograd.grad(
            u_x_val, x, grad_outputs=torch.ones_like(u_x_val),
            create_graph=True, retain_graph=True
        )[0]
        u_xx = grad_u_x[:, 0:1]  # ∂²u/∂x²
        u_xy = grad_u_x[:, 1:2]  # ∂²u/∂x∂y

        u_yy = torch.autograd.grad(
            u_y_val, x, grad_outputs=torch.ones_like(u_y_val),
            create_graph=True, retain_graph=True
        )[0][:, 1:2]  # ∂²u/∂y²

        return u, u_x_val, u_y_val, u_xx, u_yy, u_xy


class AdaptiveFCNN(FCNN):
    """
    Adaptive FCNN with automatic architecture scaling.

    This variant can automatically adjust its architecture based on
    problem complexity or training performance.
    """

    def __init__(self, input_dim=2, output_dim=1, base_width=50, num_layers=4,
                 activation='tanh', adaptive_scaling=True):
        """
        Initialize adaptive FCNN.

        Args:
            input_dim (int): Dimension of input (typically 2 for 2D problems)
            output_dim (int): Dimension of output (typically 1 for scalar problems)
            base_width (int): Base width for hidden layers
            num_layers (int): Number of hidden layers
            activation (str): Activation function name passed through to FCNN
            adaptive_scaling (bool): Whether to use adaptive layer scaling
        """
        if adaptive_scaling and num_layers > 1:
            # Parabolic scaling: width factor in [1.0, 1.5], widest at the
            # middle layer, narrowing towards input and output.
            hidden_layers = []
            for i in range(num_layers):
                t = i / (num_layers - 1)  # relative position in [0, 1]
                scale = 1.0 + 0.5 * (1.0 - 4 * (t - 0.5) ** 2)
                hidden_layers.append(int(base_width * scale))
        elif adaptive_scaling:
            # num_layers <= 1: the original formula divided by zero here.
            # A single layer sits at the parabola's peak, i.e. scale 1.5.
            hidden_layers = [int(base_width * 1.5)] * num_layers
        else:
            hidden_layers = [base_width] * num_layers

        super().__init__(input_dim, output_dim, hidden_layers, activation)

        self.base_width = base_width
        self.num_layers = num_layers
        self.adaptive_scaling = adaptive_scaling


def create_fcnn(config):
    """
    Factory function to create FCNN with configuration dictionary.

    Args:
        config (dict): Configuration dictionary with keys:
            - input_dim: input dimension
            - output_dim: output dimension
            - hidden_layers: list of layer sizes or None for default
            - activation: activation function name
            - adaptive: whether to use adaptive architecture

    Returns:
        FCNN: Configured FCNN instance
    """
    # Keyword arguments shared by both network variants.
    common = {
        'input_dim': config.get('input_dim', 2),
        'output_dim': config.get('output_dim', 1),
        'activation': config.get('activation', 'tanh'),
    }

    if config.get('adaptive', False):
        return AdaptiveFCNN(
            base_width=config.get('base_width', 50),
            num_layers=config.get('num_layers', 4),
            adaptive_scaling=config.get('adaptive_scaling', True),
            **common,
        )

    return FCNN(hidden_layers=config.get('hidden_layers', None), **common)


if __name__ == "__main__":
    # Smoke test of the FCNN implementation.
    print("Testing FCNN implementation...")

    # Random 2D sample points with gradients enabled.
    n_points = 100
    inputs = torch.randn(n_points, 2, requires_grad=True)

    # Plain FCNN: construction and forward pass.
    model = FCNN(input_dim=2, output_dim=1, hidden_layers=[20, 30, 20])
    print(f"Network architecture: {model}")

    outputs = model(inputs)
    print(f"Output shape: {outputs.shape}")

    # Derivative computation used for PDE residuals.
    outputs, d_x, d_y, d_xx, d_yy, d_xy = model.compute_gradients(inputs)
    print(f"Gradient shapes: u_x={d_x.shape}, u_y={d_y.shape}")
    print(f"Second derivative shapes: u_xx={d_xx.shape}, u_yy={d_yy.shape}, u_xy={d_xy.shape}")

    # Adaptive variant: report the resulting layer shapes.
    scaled_model = AdaptiveFCNN(base_width=30, num_layers=5)
    print(f"Adaptive network layers: {[layer.weight.shape for layer in scaled_model.layers]}")

    print("FCNN tests completed successfully!")