"""
BUG integrator module for DLR-FEM implementation.

This module implements the augmented BUG (Basis Update & Galerkin) integrator
for the nonlinear part of the Allen-Cahn equation, as described in the
DLR-FEM paper. This is the core of Algorithm 3.2.
"""

import numpy as np
from scipy.linalg import svd
from typing import Tuple, Optional, Callable
import warnings


class BUGIntegrator:
    """
    Augmented BUG integrator for the nonlinear part of the Allen-Cahn equation.

    Implements the nonlinear step from Algorithm 3.2 of the DLR-FEM paper:

    1. Trapezoidal approximation (first-order predictor step)
    2. Galerkin step with augmented bases (SSP-RK2 in the projected space)
    3. Truncation step via SVD to control rank growth
    """

    def __init__(self, M_x: np.ndarray, M_y: np.ndarray,
                 nonlinear_op: Optional[Callable] = None,
                 max_rank: int = 50,
                 truncation_tol: float = 1e-12):
        """
        Initialize BUG integrator.

        Args:
            M_x: Mass matrix in x-direction (symmetric positive definite).
            M_y: Mass matrix in y-direction (symmetric positive definite).
            nonlinear_op: Nonlinear operator W -> N(W) applied to the
                reconstructed solution matrix. Defaults to the Allen-Cahn
                nonlinearity N(W) = W - W**3.
            max_rank: Hard upper bound on the rank kept after truncation.
            truncation_tol: Relative singular-value threshold used by the
                truncation step (values below sigma[0] * tol are dropped).
        """
        self.M_x = M_x
        self.M_y = M_y
        self.max_rank = max_rank
        self.truncation_tol = truncation_tol

        # Default nonlinear operator for Allen-Cahn: N(W) = W - W^3.
        if nonlinear_op is None:
            self.nonlinear_op = lambda W: W - np.power(W, 3)
        else:
            self.nonlinear_op = nonlinear_op

        # Precompute diagonal scaling factors (used for mass-lumped setups).
        self._precompute_qr_factors()

    def _precompute_qr_factors(self):
        """Precompute diagonal square-root scalings of the mass matrices.

        NOTE(review): assumes both mass-matrix diagonals are strictly
        positive (true for SPD mass matrices); a zero diagonal entry would
        produce inf in the inverse factors — confirm against the assembly.
        These attributes are kept for external consumers; the QR routines
        below recompute their own scalings from the matrix they are given.
        """
        self.sqrt_M_x = np.sqrt(np.diag(self.M_x))
        self.sqrt_M_y = np.sqrt(np.diag(self.M_y))
        self.inv_sqrt_M_x = 1.0 / self.sqrt_M_x
        self.inv_sqrt_M_y = 1.0 / self.sqrt_M_y

    def step(self, U: np.ndarray, S: np.ndarray, V: np.ndarray,
             dt: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Perform one step of the augmented BUG integrator.

        This implements the nonlinear step from Algorithm 3.2:
        predictor (2a), augmented Galerkin update (2b), truncation (2c).

        Args:
            U: Left basis matrix (m x r)
            S: Core matrix (r x r)
            V: Right basis matrix (n x r)
            dt: Time step tau

        Returns:
            (U_new, S_new, V_new): Updated low-rank factors.
        """
        # Step 2a: Trapezoidal approximation (Euler predictor).
        U_2, S_2, V_2 = self._trapezoidal_step(U, S, V, dt)

        # Step 2b: Galerkin step with augmented bases.
        U_3, S_3, V_3 = self._galerkin_step(U, S, V, U_2, S_2, V_2, dt)

        # Step 2c: Truncation step to control rank growth.
        return self._truncation_step(U_3, S_3, V_3)

    def _trapezoidal_step(self, U: np.ndarray, S: np.ndarray, V: np.ndarray,
                         dt: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Perform the trapezoidal approximation (first-order augmented BUG step).

        This corresponds to one explicit Euler step for the nonlinear problem,
        re-compressed to at most `max_rank` via an SVD of the full matrix.

        Args:
            U, S, V: Current low-rank factors.
            dt: Time step.

        Returns:
            Low-rank factors of the Euler-predicted solution.
        """
        # Reconstruct solution: W = U S V^T.
        W_current = U @ S @ V.T

        # Euler step: W_new = W + dt * N(W).
        W_new = W_current + dt * self.nonlinear_op(W_current)

        # Re-compress via SVD, capping the rank at max_rank.
        U_svd, s_svd, Vt_svd = svd(W_new, full_matrices=False)
        r = min(len(s_svd), self.max_rank)
        return U_svd[:, :r], np.diag(s_svd[:r]), Vt_svd[:r, :].T

    def _galerkin_step(self, U_1: np.ndarray, S_1: np.ndarray, V_1: np.ndarray,
                      U_2: np.ndarray, S_2: np.ndarray, V_2: np.ndarray,
                      dt: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Perform the Galerkin step with augmented bases using an SSP-RK2 scheme.

        This implements equations (3.22a) and (3.22b) from the paper.

        Args:
            U_1, S_1, V_1: Initial factors (from Step 1).
            U_2, S_2, V_2: Factors from the trapezoidal (predictor) step.
            dt: Time step.

        Returns:
            Updated (augmented-basis) low-rank factors; the rank is inflated
            here and reduced again by the subsequent truncation step.
        """
        # Reconstruct solutions and evaluate the nonlinearity on both.
        W_1 = U_1 @ S_1 @ V_1.T
        W_2 = U_2 @ S_2 @ V_2.T
        N_W_1 = self.nonlinear_op(W_1)
        N_W_2 = self.nonlinear_op(W_2)

        # Assemble augmented bases following Algorithm 3.2:
        # U_aug = [U_1, dt * N(W_1) V_1, dt * N(W_2) V_2]
        # V_aug = [V_1, dt * N(W_1)^T U_1, dt * N(W_2)^T U_2]
        U_aug = np.hstack([U_1, dt * N_W_1 @ V_1, dt * N_W_2 @ V_2])
        V_aug = np.hstack([V_1, dt * N_W_1.T @ U_1, dt * N_W_2.T @ U_2])

        # Orthonormalize augmented bases w.r.t. the mass-matrix inner products.
        U_bar, _ = self._generalized_qr(U_aug, self.M_x)
        V_bar, _ = self._generalized_qr(V_aug, self.M_y)

        # Project the initial core onto the augmented bases.
        S_bar_1 = (U_bar.T @ self.M_x @ U_1) @ S_1 @ (V_bar.T @ self.M_y @ V_1).T

        # SSP-RK2 scheme for the S-step (equations 3.22a and 3.22b).

        # First RK stage: S_bar_2 = S_bar_1 + dt * P[N(W_bar_1)].
        W_bar_1 = U_bar @ S_bar_1 @ V_bar.T
        projected_N_1 = U_bar.T @ self.M_x @ self.nonlinear_op(W_bar_1) @ self.M_y @ V_bar
        S_bar_2 = S_bar_1 + dt * projected_N_1

        # Second RK stage: convex combination with the stage-2 projection.
        W_bar_2 = U_bar @ S_bar_2 @ V_bar.T
        projected_N_2 = U_bar.T @ self.M_x @ self.nonlinear_op(W_bar_2) @ self.M_y @ V_bar
        S_bar_3 = 0.5 * S_bar_1 + 0.5 * S_bar_2 + 0.5 * dt * projected_N_2

        return U_bar, S_bar_3, V_bar

    def _truncation_step(self, U: np.ndarray, S: np.ndarray, V: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Perform the truncation step via SVD of the core to control rank growth.

        The retained rank satisfies BOTH criteria: relative singular values
        above `truncation_tol` AND at most `max_rank`. (Previously the
        tolerance was ignored whenever the input rank exceeded `max_rank`.)

        Args:
            U, S, V: Input low-rank factors (U, V with orthonormal columns).

        Returns:
            Truncated low-rank factors with diagonal core.
        """
        # SVD of the core matrix: S = R Sigma P^T.
        R, sigma, Pt = svd(S, full_matrices=False)

        # Tolerance-based rank: count relative singular values above tol.
        if sigma[0] > 0:
            tol_rank = int(np.sum(sigma / sigma[0] > self.truncation_tol))
        else:
            tol_rank = 1  # degenerate all-zero core: keep a single mode

        # Combine the tolerance criterion with the hard cap; keep at least 1.
        truncation_rank = max(1, min(tol_rank, self.max_rank, len(sigma)))

        # Truncate factors and rotate the bases.
        R_trunc = R[:, :truncation_rank]
        P_trunc = Pt[:truncation_rank, :]
        return U @ R_trunc, np.diag(sigma[:truncation_rank]), V @ P_trunc.T

    def _generalized_qr(self, A: np.ndarray, M: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Generalized QR decomposition A = Q R with Q orthonormal w.r.t. M,
        i.e. Q^T M Q = I.

        For diagonal mass matrices (mass lumping) this reduces to a row
        rescaling; for general SPD M a Cholesky factorization is used.

        Args:
            A: Matrix to decompose (assumed full column rank).
            M: Symmetric positive-definite mass matrix.

        Returns:
            (Q, R): M-orthonormal factor and upper-triangular factor.
        """
        if np.allclose(M - np.diag(np.diag(M)), 0):
            # Diagonal case: weight rows by sqrt(diag(M)), QR, then unweight.
            sqrt_d = np.sqrt(np.diag(M))
            Q_weighted, R = np.linalg.qr(sqrt_d[:, None] * A)
            return Q_weighted / sqrt_d[:, None], R

        # General SPD case with Cholesky M = L L^T:
        # QR-factor L^T A = Q_tilde R, then Q = L^{-T} Q_tilde gives
        # Q^T M Q = Q_tilde^T L^{-1} (L L^T) L^{-T} Q_tilde = I,
        # and Q R = L^{-T} L^T A = A.
        # (Fix: the previous code factored L @ A and unweighted with L^{-1},
        # which does not yield M-orthonormal columns.)
        L = np.linalg.cholesky(M)
        Q_tilde, R = np.linalg.qr(L.T @ A)
        # Triangular solve instead of forming an explicit inverse.
        Q = np.linalg.solve(L.T, Q_tilde)
        return Q, R

    def compute_residual(self, U: np.ndarray, S: np.ndarray, V: np.ndarray,
                        dt: float, U_old: np.ndarray, S_old: np.ndarray, V_old: np.ndarray) -> float:
        """
        Compute the residual || (W - W_old)/dt - N(W) ||_F for the nonlinear step.

        This can be used for adaptive time stepping or convergence monitoring.

        Args:
            U, S, V: Current solution factors.
            dt: Time step.
            U_old, S_old, V_old: Previous solution factors.

        Returns:
            Frobenius norm of the residual.
        """
        # Reconstruct full matrices.
        W = U @ S @ V.T
        W_old = U_old @ S_old @ V_old.T

        # Finite-difference time derivative minus the nonlinear right-hand side.
        residual = (W - W_old) / dt - self.nonlinear_op(W)
        return float(np.linalg.norm(residual, 'fro'))

    def adaptive_step(self, U: np.ndarray, S: np.ndarray, V: np.ndarray,
                     dt: float, tol: float = 1e-6,
                     min_dt: float = 1e-8) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
        """
        Adaptive step with time-step control based on the residual.

        The step is retried with halved dt until the residual falls below
        `tol` or dt drops under `min_dt`; in the latter case one final step
        with `min_dt` is taken unconditionally.

        Args:
            U, S, V: Current low-rank factors.
            dt: Initial time step.
            tol: Residual tolerance for accepting a step.
            min_dt: Minimum allowed time step.

        Returns:
            (U_new, S_new, V_new, dt_used): Updated factors and actual time step.
        """
        dt_current = dt

        while dt_current >= min_dt:
            # Try a step with the current dt and measure its residual.
            U_new, S_new, V_new = self.step(U, S, V, dt_current)
            residual = self.compute_residual(U_new, S_new, V_new, dt_current, U, S, V)

            if residual < tol:
                return U_new, S_new, V_new, dt_current

            dt_current *= 0.5
            print(f"Reducing nonlinear time step due to residual: {residual:.2e}")

        # Fall back to the minimum time step unconditionally.
        U_new, S_new, V_new = self.step(U, S, V, min_dt)
        warnings.warn(f"Using minimum nonlinear time step {min_dt}")
        return U_new, S_new, V_new, min_dt


def create_bug_integrator(M_x: np.ndarray, M_y: np.ndarray,
                         nonlinear_op: Optional[Callable] = None,
                         max_rank: int = 50,
                         truncation_tol: float = 1e-12) -> BUGIntegrator:
    """
    Factory function to create a BUG integrator.

    Args:
        M_x, M_y: Mass matrices in the x- and y-directions.
        nonlinear_op: Nonlinear operator (default: Allen-Cahn N(W) = W - W**3).
        max_rank: Maximum rank for truncation.
        truncation_tol: Relative SVD truncation tolerance. (Previously the
            factory silently dropped this setting; it is now forwarded, with
            the same default as BUGIntegrator, so existing calls are unchanged.)

    Returns:
        BUGIntegrator instance.
    """
    return BUGIntegrator(M_x, M_y, nonlinear_op, max_rank, truncation_tol)


if __name__ == "__main__":
    # Quick smoke test: build an integrator on identity mass matrices and
    # exercise one fixed step, the residual, and the adaptive step.
    print("Testing BUG integrator...")

    rows, cols = 8, 8
    mass_x = np.eye(rows)
    mass_y = np.eye(cols)

    bug = create_bug_integrator(mass_x, mass_y, max_rank=10)

    # Random rank-3 initial condition.
    r0 = 3
    left = np.random.randn(rows, r0)
    core = np.random.randn(r0, r0)
    right = np.random.randn(cols, r0)

    print(f"Initial rank: {r0}")
    print(f"Initial shapes: U={left.shape}, S={core.shape}, V={right.shape}")

    # One fixed-size step.
    tau = 0.01
    left2, core2, right2 = bug.step(left, core, right, tau)

    print(f"After step shapes: U={left2.shape}, S={core2.shape}, V={right2.shape}")
    print(f"New rank: {core2.shape[0]}")

    # Residual of the step just taken.
    res = bug.compute_residual(left2, core2, right2, tau, left, core, right)
    print(f"Residual: {res:.2e}")

    # Adaptive stepping from the same initial condition.
    left_a, core_a, right_a, tau_used = bug.adaptive_step(left, core, right, tau)
    print(f"Adaptive step used dt = {tau_used:.2e}")
    print(f"Adaptive shapes: U={left_a.shape}, S={core_a.shape}, V={right_a.shape}")

    print("BUG integrator test completed.")