"""
Linear solver module for DLR-FEM implementation.

This module implements Algorithm 3.1 (Exact Low-Rank Integrator for the Linear Problem)
from the DLR-FEM paper, which handles the linear part of the Allen-Cahn equation
using exact matrix exponentials and generalized QR decomposition.
"""

import numpy as np
from scipy.linalg import expm
from typing import Tuple, Optional
import warnings


class LinearSolver:
    """
    Exact low-rank integrator for the linear part of Allen-Cahn equation.
    
    Implements Algorithm 3.1 from the paper, which integrates:
    dW/dt = ε²(L_x W + W L_y^T)
    
    The solution is represented in low-rank form W = U S V^T, with U and V
    kept orthonormal with respect to the mass-matrix inner products
    <u, v>_M = u^T M v.
    """
    
    def __init__(self, M_x: np.ndarray, M_y: np.ndarray, 
                 L_x: np.ndarray, L_y: np.ndarray,
                 epsilon: float = 0.02):
        """
        Initialize linear solver.
        
        Args:
            M_x: Mass matrix in x-direction (symmetric positive definite)
            M_y: Mass matrix in y-direction (symmetric positive definite)
            L_x: Linear operator in x-direction (-M_x^{-1} A_x)
            L_y: Linear operator in y-direction (-M_y^{-1} A_y)
            epsilon: Interface parameter
        """
        self.M_x = M_x
        self.M_y = M_y
        self.L_x = L_x
        self.L_y = L_y
        self.epsilon = epsilon
        
        # Precompute Cholesky factors once; they are reused by every GQR call.
        self._precompute_cholesky_factors()
    
    def _precompute_cholesky_factors(self):
        """Precompute Cholesky factors of mass matrices for efficiency.
        
        Raises no exception for near-singular matrices: a tiny diagonal
        regularization (1e-12) is added and factorization is retried.
        """
        try:
            self.chol_M_x = np.linalg.cholesky(self.M_x)
            self.chol_M_y = np.linalg.cholesky(self.M_y)
        except np.linalg.LinAlgError:
            # Add small regularization if matrices are not positive definite
            reg = 1e-12
            self.chol_M_x = np.linalg.cholesky(self.M_x + reg * np.eye(self.M_x.shape[0]))
            self.chol_M_y = np.linalg.cholesky(self.M_y + reg * np.eye(self.M_y.shape[0]))
        
        # Precompute inverse factors (applied repeatedly in _generalized_qr).
        self.chol_M_x_inv = np.linalg.inv(self.chol_M_x)
        self.chol_M_y_inv = np.linalg.inv(self.chol_M_y)
    
    def step(self, U: np.ndarray, S: np.ndarray, V: np.ndarray, 
             dt: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Perform one step of the exact low-rank integrator (Algorithm 3.1).
        
        The propagated factors satisfy exactly
        U_new S_new V_new^T = exp(dt ε² L_x) U S V^T exp(dt ε² L_y)^T.
        
        Args:
            U: Left basis matrix (m × r)
            S: Core matrix (r × r) 
            V: Right basis matrix (n × r)
            dt: Time step
            
        Returns:
            (U_new, S_new, V_new): Updated low-rank factors; U_new and V_new
            are orthonormal w.r.t. M_x and M_y respectively.
        """
        # Step 1: Update bases in parallel via Generalized QR (GQR) decomposition
        
        # Compute exp(τ ε² L_x) U_n
        exp_L_x_U = self._matrix_exponential_action(self.L_x, U, dt * self.epsilon**2)
        
        # Compute exp(τ ε² L_y) V_n  
        exp_L_y_V = self._matrix_exponential_action(self.L_y, V, dt * self.epsilon**2)
        
        # Generalized QR decomposition with mass matrix weighting
        U_new, R = self._generalized_qr(exp_L_x_U, self.M_x)
        V_new, P = self._generalized_qr(exp_L_y_V, self.M_y)
        
        # Step 2: Update the core tensor so the product W = U S V^T is preserved:
        # (Q_u R) S (Q_v P)^T = Q_u (R S P^T) Q_v^T
        S_new = R @ S @ P.T
        
        return U_new, S_new, V_new
    
    def _matrix_exponential_action(self, A: np.ndarray, U: np.ndarray, 
                                  dt: float) -> np.ndarray:
        """
        Compute exp(dt * A) * U efficiently.
        
        Args:
            A: Matrix in the exponential (square)
            U: Matrix to act on
            dt: Effective time step (here τ ε², supplied by the caller)
            
        Returns:
            exp(dt * A) * U
        """
        # For moderate-sized matrices, a dense matrix exponential is acceptable.
        if A.shape[0] <= 200:
            exp_A = expm(dt * A)
            return exp_A @ U
        else:
            # For large matrices, could implement Krylov subspace methods.
            # For now, use direct computation with warning.
            warnings.warn(f"Computing matrix exponential for large matrix ({A.shape[0]}×{A.shape[0]}). "
                         "Consider implementing Krylov methods for better efficiency.")
            exp_A = expm(dt * A)
            return exp_A @ U
    
    def _generalized_qr(self, U: np.ndarray, M: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Generalized QR decomposition with respect to mass matrix M.
        
        Computes U = Q R where Q^T M Q = I (orthonormal w.r.t. the M-inner
        product).
        
        With the Cholesky factorization M = L L^T, the M-inner product of u
        and v equals the Euclidean inner product of L^T u and L^T v. Hence we
        QR-factor Ũ = L^T U and map back via Q = L^{-T} Q̃, which gives
            Q^T M Q = Q̃^T L^{-1} (L L^T) L^{-T} Q̃ = Q̃^T Q̃ = I
            Q R     = L^{-T} Q̃ R = L^{-T} (L^T U) = U.
        NOTE: a previous version transformed with L (not L^T), which only
        produces M-orthonormal columns when M is the identity.
        
        Args:
            U: Input matrix to decompose (m × r)
            M: Mass matrix (m × m, positive definite)
            
        Returns:
            (Q, R): M-orthonormal factor Q and upper triangular factor R
        """
        # Use precomputed Cholesky factor when M is one of our mass matrices.
        if M is self.M_x:
            L = self.chol_M_x
            L_inv = self.chol_M_x_inv
        elif M is self.M_y:
            L = self.chol_M_y  
            L_inv = self.chol_M_y_inv
        else:
            # Fallback for other mass matrices
            L = np.linalg.cholesky(M)
            L_inv = np.linalg.inv(L)
        
        # Transform to standard inner product: Ũ = L^T U
        U_tilde = L.T @ U
        
        # Standard QR decomposition in the transformed coordinates
        Q_tilde, R = np.linalg.qr(U_tilde)
        
        # Transform back: Q = L^{-T} Q̃  (note (L^{-1})^T = (L^T)^{-1})
        Q = L_inv.T @ Q_tilde
        
        return Q, R
    
    def verify_orthogonality(self, Q: np.ndarray, M: np.ndarray, 
                           tol: float = 1e-12) -> bool:
        """
        Verify that Q is orthogonal with respect to M-inner product.
        
        Args:
            Q: Orthogonal matrix candidate
            M: Mass matrix
            tol: Tolerance for orthogonality check (Frobenius norm)
            
        Returns:
            True if Q^T M Q ≈ I within tolerance
        """
        QtMQ = Q.T @ M @ Q
        identity = np.eye(Q.shape[1])
        error = np.linalg.norm(QtMQ - identity, 'fro')
        return error < tol
    
    def energy(self, U: np.ndarray, S: np.ndarray, V: np.ndarray) -> float:
        """
        Compute energy of the solution W = U S V^T.
        
        For Allen-Cahn equation: E = ε²/2 ∫|∇u|² + 1/4ε ∫(u²-1)²
        
        Args:
            U, S, V: Low-rank factors
            
        Returns:
            Energy value (crude finite-difference approximation; see helpers)
        """
        # Reconstruct full matrix (O(m·n) memory; acceptable for diagnostics)
        W = U @ S @ V.T
        
        # Gradient energy: ε²/2 ∫|∇u|²
        grad_energy = self._compute_gradient_energy(W)
        
        # Potential energy: 1/(4ε) ∫(u²-1)²  
        potential_energy = self._compute_potential_energy(W)
        
        return grad_energy + potential_energy
    
    def _compute_gradient_energy(self, W: np.ndarray) -> float:
        """Compute gradient energy contribution (finite-difference estimate).
        
        NOTE(review): the scaling by `L_x.shape[0]` / `L_y.shape[0]` assumes a
        particular uniform grid spacing — confirm against the mesh used by the
        caller before relying on absolute energy values.
        """
        grad_x = np.diff(W, axis=0)
        grad_y = np.diff(W, axis=1)
        
        # Approximate integral; scaled by the grid dimension.
        energy_x = np.sum(grad_x**2) * self.L_x.shape[0]
        energy_y = np.sum(grad_y**2) * self.L_y.shape[0]
        
        return 0.5 * self.epsilon**2 * (energy_x + energy_y)
    
    def _compute_potential_energy(self, W: np.ndarray) -> float:
        """Compute potential energy contribution 1/(4ε) · mean((W²-1)²)."""
        potential = (W**2 - 1)**2
        # Approximate integral by the mean value over all grid points.
        energy = np.sum(potential) / W.size
        return energy / (4 * self.epsilon)


class AdaptiveLinearSolver(LinearSolver):
    """
    Adaptive version of linear solver with time step control.
    
    Extends the basic linear solver with adaptive time stepping based on
    energy conservation and rank growth monitoring.
    """
    
    def __init__(self, M_x: np.ndarray, M_y: np.ndarray,
                 L_x: np.ndarray, L_y: np.ndarray,
                 epsilon: float = 0.02,
                 max_rank: int = 50,
                 energy_tol: float = 1e-6):
        """
        Initialize adaptive linear solver.
        
        Args:
            M_x, M_y: Mass matrices
            L_x, L_y: Linear operators
            epsilon: Interface parameter
            max_rank: Maximum allowed rank
            energy_tol: Relative energy conservation tolerance
        """
        super().__init__(M_x, M_y, L_x, L_y, epsilon)
        self.max_rank = max_rank
        self.energy_tol = energy_tol
    
    def adaptive_step(self, U: np.ndarray, S: np.ndarray, V: np.ndarray,
                     dt: float, min_dt: float = 1e-6) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
        """
        Perform adaptive time step with error control.
        
        The proposed step is halved until the relative energy drift falls
        below `energy_tol` and the rank stays within `max_rank`; if `min_dt`
        is reached the step is taken anyway with a warning.
        
        Args:
            U, S, V: Current low-rank factors
            dt: Proposed time step
            min_dt: Minimum allowed time step
            
        Returns:
            (U_new, S_new, V_new, dt_used): Updated factors and actual time step used
        """
        # Compute initial energy for the drift criterion.
        energy_initial = self.energy(U, S, V)
        
        dt_current = dt
        
        while dt_current >= min_dt:
            # Try step with current time step
            U_new, S_new, V_new = self.step(U, S, V, dt_current)
            
            # Relative energy drift; guard against division by zero when the
            # initial energy vanishes (fall back to the absolute drift).
            energy_final = self.energy(U_new, S_new, V_new)
            denom = abs(energy_initial)
            if denom > 0.0:
                energy_error = abs(energy_final - energy_initial) / denom
            else:
                energy_error = abs(energy_final)
            
            # Check rank growth. NOTE(review): the exact linear step keeps
            # S the same size, so this can only trip if the input rank
            # already exceeds max_rank — confirm intended behavior.
            current_rank = S_new.shape[0]
            
            if energy_error < self.energy_tol and current_rank <= self.max_rank:
                # Accept step
                return U_new, S_new, V_new, dt_current
            else:
                # Reduce time step and retry
                dt_current *= 0.5
                if energy_error >= self.energy_tol:
                    print(f"Reducing time step due to energy error: {energy_error:.2e}")
                if current_rank > self.max_rank:
                    print(f"Reducing time step due to rank growth: {current_rank}")
        
        # All trial steps rejected: fall back to the minimum time step.
        U_new, S_new, V_new = self.step(U, S, V, min_dt)
        print(f"Warning: Using minimum time step {min_dt}")
        return U_new, S_new, V_new, min_dt


def create_linear_solver(M_x: np.ndarray, M_y: np.ndarray,
                        L_x: np.ndarray, L_y: np.ndarray,
                        epsilon: float = 0.02,
                        adaptive: bool = False,
                        max_rank: int = 50,
                        energy_tol: float = 1e-6) -> LinearSolver:
    """
    Factory function to create linear solver.
    
    Args:
        M_x, M_y: Mass matrices
        L_x, L_y: Linear operators
        epsilon: Interface parameter
        adaptive: Whether to use adaptive time stepping
        max_rank: Maximum allowed rank (adaptive solver only)
        energy_tol: Energy conservation tolerance (adaptive solver only)
        
    Returns:
        LinearSolver instance (AdaptiveLinearSolver when adaptive=True)
    """
    if adaptive:
        # Forward the tuning knobs; previously they could not be customized
        # through the factory and the adaptive defaults were always used.
        return AdaptiveLinearSolver(M_x, M_y, L_x, L_y, epsilon,
                                    max_rank=max_rank, energy_tol=energy_tol)
    else:
        return LinearSolver(M_x, M_y, L_x, L_y, epsilon)


if __name__ == "__main__":
    # Test linear solver
    print("Testing linear solver...")
    
    # Create simple test matrices
    n = 10
    M_x = np.eye(n)  # Identity mass matrix
    M_y = np.eye(n)
    
    # Simple diffusion operators
    L_x = -np.diag(np.ones(n-1), 1) + 2*np.eye(n) - np.diag(np.ones(n-1), -1)
    L_y = L_x.copy()
    
    # Create solver
    solver = create_linear_solver(M_x, M_y, L_x, L_y, epsilon=0.1)
    
    # Test with random low-rank initial condition
    rank = 3
    U = np.random.randn(n, rank)
    S = np.random.randn(rank, rank)
    V = np.random.randn(n, rank)
    
    print(f"Initial rank: {rank}")
    print(f"Initial shapes: U={U.shape}, S={S.shape}, V={V.shape}")
    
    # Perform one step
    dt = 0.01
    U_new, S_new, V_new = solver.step(U, S, V, dt)
    
    print(f"After step shapes: U={U_new.shape}, S={S_new.shape}, V={V_new.shape}")
    
    # Test orthogonality
    is_ortho_x = solver.verify_orthogonality(U_new, M_x)
    is_ortho_y = solver.verify_orthogonality(V_new, M_y)
    
    print(f"U orthogonal w.r.t. M_x: {is_ortho_x}")
    print(f"V orthogonal w.r.t. M_y: {is_ortho_y}")
    
    # Test energy computation
    energy = solver.energy(U, S, V)
    print(f"Energy: {energy:.6f}")
    
    print("Linear solver test completed.")