#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Physics Informed Extreme Learning Machine (PIELM) solver implementation.
Based on Paper 2506.17654 - RINN for solving linear PDEs.
"""

import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Callable, Tuple, Optional


class PIELMNetwork(nn.Module):
    """
    Physics Informed Extreme Learning Machine network.

    The hidden layer acts as a fixed random feature map: its parameters are
    drawn once at construction and are meant to stay frozen. Only the linear
    output layer is fitted (externally, via least-squares) — never by backprop.
    """

    # Supported activation functions, keyed by name.
    _ACTIVATIONS = {
        'tanh': torch.tanh,
        'sigmoid': torch.sigmoid,
        'relu': torch.relu,
    }

    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int = 1,
                 activation: str = 'tanh', bias: bool = True):
        """
        Build the two-layer ELM architecture.

        Args:
            input_dim: Input dimension (typically 2 for 2D problems)
            hidden_dim: Number of hidden neurons
            output_dim: Output dimension (typically 1 for scalar PDEs)
            activation: Activation function ('tanh', 'sigmoid', 'relu')
            bias: Whether the hidden layer carries bias terms

        Raises:
            ValueError: If ``activation`` is not a supported name.
        """
        super().__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

        # Random feature map (frozen after initialization).
        self.hidden_layer = nn.Linear(input_dim, hidden_dim, bias=bias)

        # Linear read-out; its weights are solved for externally, no bias.
        self.output_layer = nn.Linear(hidden_dim, output_dim, bias=False)

        # Resolve the activation by name from the supported table.
        if activation not in self._ACTIVATIONS:
            raise ValueError(f"Unsupported activation: {activation}")
        self.activation = self._ACTIVATIONS[activation]

        # Draw the random hidden-layer parameters.
        self._initialize_hidden_layer()

    def _initialize_hidden_layer(self):
        """Draw hidden weights (and bias, if present) from U(-1, 1)."""
        nn.init.uniform_(self.hidden_layer.weight, -1, 1)
        if self.hidden_layer.bias is not None:
            nn.init.uniform_(self.hidden_layer.bias, -1, 1)

    def get_basis_functions(self, x: torch.Tensor) -> torch.Tensor:
        """
        Compute basis functions Phi(theta, x).

        Args:
            x: Input points (batch_size, input_dim)

        Returns:
            torch.Tensor: Basis function values (batch_size, hidden_dim)
        """
        return self.activation(self.hidden_layer(x))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Evaluate the network: a linear combination of the basis functions.

        Args:
            x: Input points (batch_size, input_dim)

        Returns:
            torch.Tensor: Network output (batch_size, output_dim)
        """
        return self.output_layer(self.get_basis_functions(x))

    def freeze_hidden_layer(self):
        """Disable gradient tracking for all hidden-layer parameters."""
        self.hidden_layer.requires_grad_(False)

    def unfreeze_hidden_layer(self):
        """Re-enable gradient tracking for all hidden-layer parameters."""
        self.hidden_layer.requires_grad_(True)


class PIELMSolver:
    """
    PIELM solver for linear PDEs using a least-squares approach.

    Assembles the collocation system H @ beta = S (Eq. 2.5) by applying the
    PDE operator (interior points) and boundary operator (boundary points) to
    each frozen basis function, then fits the network's output-layer weights
    beta in a single least-squares solve — no gradient-based training.
    """
    
    def __init__(self, network: "PIELMNetwork"):
        """
        Initialize PIELM solver.
        
        Args:
            network: PIELM network instance whose output weights are fitted
        """
        self.network = network
        self.is_trained = False
        
    def assemble_system_matrix(self, 
                             collocation_points: torch.Tensor,
                             boundary_points: torch.Tensor,
                             pde_operator: Callable,
                             boundary_operator: Callable = None) -> torch.Tensor:
        """
        Assemble the system matrix H as described in Eq. 2.5.

        Column i holds basis function i under the operators: the first
        n_interior rows are the PDE operator at the collocation points, the
        remaining rows the boundary operator at the boundary points.
        
        Args:
            collocation_points: Interior points for PDE residual
            boundary_points: Boundary points for boundary conditions
            pde_operator: Function (network, points) -> operator values
            boundary_operator: Function for boundary operator (default:
                identity, i.e. Dirichlet conditions on the network output)
            
        Returns:
            torch.Tensor: System matrix H of shape
                (n_interior + n_boundary, hidden_dim)
        """
        if boundary_operator is None:
            boundary_operator = lambda net, pts: net(pts)
        
        # Freeze the hidden layer so autograd only tracks derivatives
        # with respect to the evaluation points.
        self.network.freeze_hidden_layer()
        
        n_interior = collocation_points.shape[0]
        n_boundary = boundary_points.shape[0]
        n_total = n_interior + n_boundary
        
        # Match the inputs' dtype/device: a default-constructed (float32/CPU)
        # matrix would fail, or silently downcast, for float64 or GPU inputs.
        H = torch.zeros(n_total, self.network.hidden_dim,
                        dtype=collocation_points.dtype,
                        device=collocation_points.device)
        
        # The operators differentiate w.r.t. the points, so gradient tracking
        # must be enabled on them; do it once, hoisted out of the loop.
        collocation_points.requires_grad_(True)
        boundary_points.requires_grad_(True)
        
        # One column per basis function. A single pass over the basis (instead
        # of separate interior/boundary loops) builds each wrapper only once.
        for i in range(self.network.hidden_dim):
            temp_network = self._create_single_basis_network(i)
            
            # Interior rows: PDE operator applied to basis function i.
            pde_residual = pde_operator(temp_network, collocation_points)
            H[:n_interior, i] = pde_residual.flatten().detach()
            
            # Boundary rows: boundary operator applied to basis function i.
            boundary_residual = boundary_operator(temp_network, boundary_points)
            H[n_interior:, i] = boundary_residual.flatten().detach()
        
        return H
    
    def _create_single_basis_network(self, basis_index: int) -> nn.Module:
        """Create a wrapper network that outputs only one basis function."""
        class SingleBasisNetwork(nn.Module):
            def __init__(self, base_network, index):
                super().__init__()
                self.base_network = base_network
                self.index = index
            
            def forward(self, x):
                # Slice keeps a trailing dim of 1 so the output has the
                # same (batch, 1) shape as a full network evaluation.
                phi = self.base_network.get_basis_functions(x)
                return phi[:, self.index:self.index + 1]
        
        return SingleBasisNetwork(self.network, basis_index)
    
    def assemble_rhs_vector(self,
                           collocation_points: torch.Tensor,
                           boundary_points: torch.Tensor,
                           pde_rhs: torch.Tensor,
                           boundary_values: torch.Tensor) -> torch.Tensor:
        """
        Assemble right-hand side vector S.
        
        Args:
            collocation_points: Interior points
            boundary_points: Boundary points  
            pde_rhs: Right-hand side of PDE at interior points
            boundary_values: Boundary condition values
            
        Returns:
            torch.Tensor: RHS vector S of shape (n_interior + n_boundary, 1)
        """
        n_interior = collocation_points.shape[0]
        n_boundary = boundary_points.shape[0]
        
        # Match the RHS data's dtype/device (same reasoning as for H).
        S = torch.zeros(n_interior + n_boundary, 1,
                        dtype=pde_rhs.dtype, device=pde_rhs.device)
        S[:n_interior, 0] = pde_rhs.flatten()
        S[n_interior:, 0] = boundary_values.flatten()
        
        return S
    
    def solve_least_squares(self, H: torch.Tensor, S: torch.Tensor) -> torch.Tensor:
        """
        Solve the linear system H @ beta = S in the least-squares sense.
        
        Args:
            H: System matrix
            S: Right-hand side vector
            
        Returns:
            torch.Tensor: Solution weights beta, shape (hidden_dim, 1)
        """
        # torch.linalg.lstsq picks a rank-revealing driver ('gelsy') on CPU;
        # NOTE(review): on CUDA the 'gels' driver assumes full rank, which an
        # ill-conditioned ELM system may violate — consider solving on CPU.
        beta = torch.linalg.lstsq(H, S).solution
        return beta
    
    def train(self,
              collocation_points: torch.Tensor,
              boundary_points: torch.Tensor,
              pde_operator: Callable,
              pde_rhs: torch.Tensor,
              boundary_values: torch.Tensor,
              boundary_operator: Callable = None) -> Dict[str, float]:
        """
        Train PIELM by solving the least-squares problem.
        
        Args:
            collocation_points: Interior collocation points
            boundary_points: Boundary points
            pde_operator: PDE operator function
            pde_rhs: Right-hand side values at collocation points
            boundary_values: Boundary condition values
            boundary_operator: Boundary operator (default: identity)
            
        Returns:
            dict: Training information (residuals, condition number, etc.)
        """
        print("Assembling PIELM system matrix...")
        
        # Assemble system matrix H
        H = self.assemble_system_matrix(collocation_points, boundary_points, 
                                      pde_operator, boundary_operator)
        
        # Assemble RHS vector S
        S = self.assemble_rhs_vector(collocation_points, boundary_points, 
                                   pde_rhs, boundary_values)
        
        print(f"System size: {H.shape[0]} x {H.shape[1]}")
        
        # Compute condition number for diagnostics
        condition_number = torch.linalg.cond(H).item()
        
        # Solve least-squares problem
        print("Solving least-squares system...")
        beta = self.solve_least_squares(H, S)
        
        # Install the fitted output weights. In-place copy_ preserves the
        # parameter's identity, dtype, device and contiguity; assigning
        # `.data = beta.T` would swap in a non-contiguous view and could
        # silently change the parameter's dtype.
        with torch.no_grad():
            self.network.output_layer.weight.copy_(beta.T)
        
        # Compute residual diagnostics; guard against an all-zero RHS
        # (fully homogeneous problem) to avoid division by zero.
        residual = torch.norm(H @ beta - S).item()
        s_norm = torch.norm(S).item()
        relative_residual = residual / s_norm if s_norm > 0 else float('inf')
        
        self.is_trained = True
        
        training_info = {
            'residual': residual,
            'relative_residual': relative_residual,
            'condition_number': condition_number,
            'system_size': H.shape,
            'beta_norm': torch.norm(beta).item()
        }
        
        print(f"Training completed:")
        print(f"  Residual: {residual:.6e}")
        print(f"  Relative residual: {relative_residual:.6e}")
        print(f"  Condition number: {condition_number:.6e}")
        
        return training_info
    
    def predict(self, points: torch.Tensor) -> torch.Tensor:
        """
        Make predictions at given points.
        
        Args:
            points: Points to evaluate
            
        Returns:
            torch.Tensor: Predicted values
            
        Raises:
            RuntimeError: If called before ``train``.
        """
        if not self.is_trained:
            raise RuntimeError("Model must be trained before making predictions")
        
        with torch.no_grad():
            return self.network(points)


def create_pielm_solver(input_dim: int, 
                       hidden_dim: int, 
                       activation: str = 'tanh',
                       seed: Optional[int] = None) -> PIELMSolver:
    """
    Build a ready-to-train PIELM solver.

    Args:
        input_dim: Input dimension
        hidden_dim: Number of hidden neurons
        activation: Activation function
        seed: Random seed; when given, makes the random hidden-layer
            initialization reproducible

    Returns:
        PIELMSolver: Solver wrapping a freshly initialized network
    """
    # Seed before network construction so the random hidden weights
    # are reproducible.
    if seed is not None:
        torch.manual_seed(seed)

    return PIELMSolver(PIELMNetwork(input_dim, hidden_dim, activation=activation))


# Example PDE operators for common equations

def laplacian_operator(network: nn.Module, points: torch.Tensor) -> torch.Tensor:
    """
    Laplacian operator for Poisson equation: nabla^2 u.

    Computed via two nested autograd passes with respect to the first two
    input coordinates. Note: enables gradient tracking on ``points`` in place.

    Args:
        network: Neural network
        points: Evaluation points, shape (batch, 2)

    Returns:
        torch.Tensor: Laplacian values u_xx + u_yy, shape (batch, 1)
    """
    points.requires_grad_(True)
    u = network(points)

    # Gradient of u w.r.t. the input coordinates; keep the graph so the
    # second-order derivatives below can be taken.
    first = torch.autograd.grad(
        outputs=u, inputs=points,
        grad_outputs=torch.ones_like(u),
        create_graph=True, retain_graph=True
    )[0]

    def _second_derivative(axis: int) -> torch.Tensor:
        # d^2 u / d x_axis^2: differentiate the axis-th first derivative
        # again and select the same coordinate.
        partial = first[:, axis:axis + 1]
        return torch.autograd.grad(
            outputs=partial, inputs=points,
            grad_outputs=torch.ones_like(partial),
            create_graph=True, retain_graph=True
        )[0][:, axis:axis + 1]

    return _second_derivative(0) + _second_derivative(1)


def helmholtz_operator(k: float):
    """
    Build the Helmholtz operator nabla^2 u + k^2 u for a given wave number.

    Args:
        k: Wave number

    Returns:
        Callable: ``operator(network, points)`` evaluating the Helmholtz
        operator at ``points``
    """
    # Precompute the constant zeroth-order coefficient once per factory call.
    k_squared = k ** 2

    def operator(network: nn.Module, points: torch.Tensor) -> torch.Tensor:
        # nabla^2 u plus the k^2 * u reaction term.
        return laplacian_operator(network, points) + k_squared * network(points)

    return operator