"""
Linear PDE solver using locELM method.

Solves time-independent and time-dependent linear PDEs using
domain decomposition and local extreme learning machines.
"""

import numpy as np
import tensorflow as tf
from scipy.linalg import lstsq
from typing import Callable, Dict, List, Optional, Tuple
from ..core.domain import Domain, CollocationPoints
from ..core.networks import MultiSubdomainNetwork
from ..core.continuity import ContinuityConditions


class LinearPDESolver:
    """Solver for linear PDEs using locELM.

    Assembles one global linear least-squares system from three groups of
    equations — PDE residuals at interior collocation points, boundary
    conditions, and cross-subdomain continuity conditions — and solves it
    for the output-layer weights of every local ELM network.
    """

    def __init__(self,
                 domain: Domain,
                 networks: MultiSubdomainNetwork,
                 continuity_order: List[int],
                 collocation_type: str = 'uniform',
                 n_collocation_points: Optional[List[int]] = None):
        """
        Initialize linear PDE solver.

        Parameters
        ----------
        domain : Domain
            Domain decomposition
        networks : MultiSubdomainNetwork
            Multi-subdomain neural networks
        continuity_order : List[int]
            Continuity order for each dimension
        collocation_type : str
            Type of collocation points ('uniform', 'gll', 'random')
        n_collocation_points : Optional[List[int]]
            Number of collocation points per dimension per subdomain;
            defaults to 10 points per dimension when omitted.
        """
        self.domain = domain
        self.networks = networks
        self.continuity = ContinuityConditions(domain, networks, continuity_order)
        self.collocation_type = collocation_type
        self.n_collocation_points = n_collocation_points or [10] * domain.ndim

        # Collocation points are fixed for the solver's lifetime, so
        # generate them once for all subdomains up front.
        self.collocation_points = self._generate_collocation_points()

    def _generate_collocation_points(self) -> Dict[int, np.ndarray]:
        """Generate collocation points for all subdomains.

        Returns
        -------
        Dict[int, np.ndarray]
            Mapping from subdomain index to its (n_points, ndim) array.

        Raises
        ------
        ValueError
            If ``collocation_type`` is not one of 'uniform', 'gll', 'random'.
        """
        points_dict = {}

        for idx in range(self.domain.n_total):
            region = self.domain.get_subdomain_region(idx)

            if self.collocation_type == 'uniform':
                points = CollocationPoints.uniform(region, self.n_collocation_points)
            elif self.collocation_type == 'gll':
                points = CollocationPoints.gauss_lobatto_legendre(region, self.n_collocation_points)
            elif self.collocation_type == 'random':
                # Random sampling takes a total count, not a per-dimension list.
                n_total = np.prod(self.n_collocation_points)
                points = CollocationPoints.random(region, n_total)
            else:
                raise ValueError(f"Unknown collocation type: {self.collocation_type}")

            points_dict[idx] = points

        return points_dict

    def solve_steady_state(self,
                          pde_operator: Callable,
                          source_term: Callable,
                          boundary_conditions: Dict,
                          output_dim: int = 1) -> np.ndarray:
        """
        Solve time-independent linear PDE.

        Parameters
        ----------
        pde_operator : Callable
            Function that computes L[u] given network, points, and subdomain_idx
        source_term : Callable
            Function f(x) for the PDE: L[u] = f
        boundary_conditions : Dict
            Boundary condition specifications
        output_dim : int
            Number of output components

        Returns
        -------
        np.ndarray
            Solution weights for all subdomains (also installed into the
            networks via ``set_all_weights``).
        """
        # Build PDE equations
        A_pde, b_pde = self._build_pde_equations(pde_operator, source_term, output_dim)

        # Build boundary condition equations
        A_bc, b_bc = self._build_boundary_equations(boundary_conditions, output_dim)

        # Build continuity equations
        A_cont, b_cont = self.continuity.build_continuity_equations(
            self.collocation_points, output_dim
        )

        # Stack all equation groups into one (generally overdetermined) system.
        A = np.vstack([A_pde, A_bc, A_cont])
        b = np.hstack([b_pde, b_bc, b_cont])

        # Solve least squares problem; 'gelsy' handles rank deficiency robustly.
        weights, *_ = lstsq(A, b, lapack_driver='gelsy')

        # Install the solved weights so the networks can be evaluated directly.
        self.networks.set_all_weights(weights)

        return weights

    def _build_pde_equations(self,
                            pde_operator: Callable,
                            source_term: Callable,
                            output_dim: int) -> Tuple[np.ndarray, np.ndarray]:
        """Build PDE residual equations for all subdomains.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            Coefficient matrix A of shape (n_eq, n_total_params) and
            right-hand side b of shape (n_eq,).
        """
        equations = []
        rhs = []

        for subdomain_idx in range(self.domain.n_total):
            points = self.collocation_points[subdomain_idx]
            n_points = points.shape[0]

            # Number of neurons in the last hidden layer.
            network = self.networks.get_network(subdomain_idx)
            M = network.hidden_layers[-1]

            # Neither the operator nor the source term depends on the output
            # component, so evaluate them once per subdomain (the operator
            # typically involves expensive autodiff passes) instead of once
            # per component.
            LV_result = pde_operator(network, points, subdomain_idx)

            # Handle both old format (n_points, M) and new format (n_points, M+1)
            if LV_result.shape[1] == M:
                # Old format without bias - pad a zero column.
                # NOTE(review): this assumes L[1] == 0 for the operator;
                # operators with a zeroth-order term should return M+1 columns.
                LV = np.hstack([LV_result, np.zeros((n_points, 1))])
            else:
                LV = LV_result

            # Compute source term, normalized to shape (n_points, n_comp).
            f = source_term(points)
            if f.ndim == 1:
                f = f.reshape(-1, 1)

            for comp in range(output_dim):
                # Build equations: sum_j w_j * LV_j + bias * LV[:, M] = f
                for i in range(n_points):
                    eq_row = np.zeros(self.networks.n_total_params)
                    # Each subdomain owns network.n_params = (M+1)*output_dim
                    # parameters; component comp's slice starts at
                    # subdomain_idx * network.n_params + comp * (M+1).
                    idx_start = subdomain_idx * network.n_params + comp * (M + 1)
                    # Coefficients for basis functions and bias
                    eq_row[idx_start:idx_start + M + 1] = LV[i, :]
                    equations.append(eq_row)
                    # Reuse the last source column when f has fewer components.
                    rhs.append(f[i, min(comp, f.shape[1] - 1)])

        # Guard against an empty system so np.vstack in the caller still
        # works (mirrors _build_boundary_equations).
        if not equations:
            return np.zeros((0, self.networks.n_total_params)), np.zeros(0)

        return np.array(equations), np.array(rhs)

    def _build_boundary_equations(self,
                                 boundary_conditions: Dict,
                                 output_dim: int) -> Tuple[np.ndarray, np.ndarray]:
        """Build boundary condition equations.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            Coefficient matrix A (possibly 0-row) and right-hand side b.
        """
        equations = []
        rhs = []

        for subdomain_idx in range(self.domain.n_total):
            boundaries = self.continuity.interface_manager.get_boundary_interfaces(subdomain_idx)

            for boundary in boundaries:
                if boundary['dimension'] in boundary_conditions:
                    bc_spec = boundary_conditions[boundary['dimension']]

                    if boundary['type'] in bc_spec:
                        bc_func = bc_spec[boundary['type']]
                        points = self.collocation_points[subdomain_idx]

                        # Select collocation points lying on this boundary
                        # (tolerance absorbs floating-point round-off).
                        dim = boundary['dimension']
                        coord = boundary['coordinate']
                        mask = np.abs(points[:, dim] - coord) < 1e-10
                        boundary_points = points[mask]

                        if boundary_points.shape[0] > 0:
                            network = self.networks.get_network(subdomain_idx)
                            M = network.hidden_layers[-1]

                            # Last hidden layer output at the boundary,
                            # augmented with a ones column for the bias term.
                            V = network.compute_last_hidden_output(boundary_points)
                            V_with_bias = np.hstack([V, np.ones((boundary_points.shape[0], 1))])

                            # Prescribed boundary values, shape (n_points, n_comp).
                            g = bc_func(boundary_points)
                            if g.ndim == 1:
                                g = g.reshape(-1, 1)

                            for comp in range(output_dim):
                                for i in range(boundary_points.shape[0]):
                                    eq_row = np.zeros(self.networks.n_total_params)
                                    idx_start = subdomain_idx * network.n_params + comp * (M + 1)
                                    eq_row[idx_start:idx_start + M + 1] = V_with_bias[i, :]
                                    equations.append(eq_row)
                                    rhs.append(g[i, min(comp, g.shape[1] - 1)])

        # A PDE may have no boundary conditions on some runs; return a
        # well-shaped empty system so vstack/hstack in the caller still work.
        if not equations:
            A = np.zeros((0, self.networks.n_total_params))
            b = np.zeros(0)
        else:
            A = np.array(equations)
            b = np.array(rhs)

        return A, b


def helmholtz_operator_1d(network, points, subdomain_idx, lam=1.0):
    """
    1D Helmholtz operator: d^2/dx^2 - lambda.

    Parameters
    ----------
    network : LocalELMNetwork
        Network for the subdomain
    points : np.ndarray
        Collocation points, shape (n_points, 1)
    subdomain_idx : int
        Index of the subdomain (unused here; kept for the operator API
        expected by LinearPDESolver)
    lam : float
        Helmholtz parameter lambda

    Returns
    -------
    np.ndarray
        Operator applied to last hidden layer outputs + constant basis
        Shape: (n_points, M+1) where last column is operator on constant 1
    """
    x = tf.constant(points, dtype=tf.float32)
    M = network.hidden_layers[-1]
    n_points = points.shape[0]

    LV = np.zeros((n_points, M + 1))

    for j in range(M):
        # Nested tapes: tape1 records the forward pass for dV_j/dx; tape2
        # records that first derivative so it can be differentiated again.
        # Each tape is used for exactly one gradient call per iteration, so
        # neither needs persistent=True (avoids holding tape resources).
        with tf.GradientTape() as tape2:
            tape2.watch(x)
            with tf.GradientTape() as tape1:
                tape1.watch(x)
                V = network.last_hidden_model(x, training=False)
                V_j = V[:, j]
            dV_dx = tape1.gradient(V_j, x)
        d2V_dx2 = tape2.gradient(dV_dx, x)

        # Helmholtz operator: d2V/dx2 - lambda * V
        LV[:, j] = d2V_dx2[:, 0].numpy() - lam * V_j.numpy()

    # Last column: operator on constant 1
    # d²(1)/dx² - λ*1 = 0 - λ = -λ
    LV[:, M] = -lam

    return LV


def helmholtz_operator_2d(network, points, subdomain_idx, lam=1.0):
    """
    2D Helmholtz operator: d^2/dx^2 + d^2/dy^2 - lambda.

    Parameters
    ----------
    network : LocalELMNetwork
        Network for the subdomain
    points : np.ndarray
        Collocation points, shape (n_points, 2)
    subdomain_idx : int
        Index of the subdomain (unused here; kept for the operator API
        expected by LinearPDESolver)
    lam : float
        Helmholtz parameter lambda

    Returns
    -------
    np.ndarray
        Operator applied to last hidden layer outputs + constant basis
        Shape: (n_points, M+1) where last column is operator on constant 1
        (consistent with helmholtz_operator_1d; previously the constant
        column was omitted, which made the solver's zero-padding path treat
        L[1] as 0 instead of the correct -lambda)
    """
    x = tf.constant(points, dtype=tf.float32)
    M = network.hidden_layers[-1]
    n_points = points.shape[0]

    LV = np.zeros((n_points, M + 1))

    for j in range(M):
        # Compute d2V/dx2. Each tape is used for exactly one gradient call,
        # so persistent=True is unnecessary.
        with tf.GradientTape() as tape2:
            tape2.watch(x)
            with tf.GradientTape() as tape1:
                tape1.watch(x)
                V = network.last_hidden_model(x, training=False)
                V_j = V[:, j]
            # Slice inside tape2's context so the x-component is recorded.
            dV_dx = tape1.gradient(V_j, x)[:, 0]
        d2V_dx2 = tape2.gradient(dV_dx, x)[:, 0]

        # Compute d2V/dy2 (separate tapes to isolate the y-derivative).
        with tf.GradientTape() as tape2:
            tape2.watch(x)
            with tf.GradientTape() as tape1:
                tape1.watch(x)
                V = network.last_hidden_model(x, training=False)
                V_j = V[:, j]
            dV_dy = tape1.gradient(V_j, x)[:, 1]
        d2V_dy2 = tape2.gradient(dV_dy, x)[:, 1]

        # Helmholtz operator: d2V/dx2 + d2V/dy2 - lambda * V
        LV[:, j] = (d2V_dx2.numpy() + d2V_dy2.numpy() - lam * V_j.numpy())

    # Last column: operator on constant 1
    # ∇²(1) - λ*1 = 0 - λ = -λ
    LV[:, M] = -lam

    return LV
