from typing import Any
import torch
import numpy as np
import lightning.pytorch as pl
import torch.nn.functional as F
from utils.numerical_method import reaction_A

def fd_pad_diri_bc(x, pad=(1, 1, 1, 1), g = 0):
    """Pad *x* with the constant value *g* (homogeneous Dirichlet boundary)."""
    return F.pad(x, pad=pad, mode='constant', value=g)

class ReactionModule(pl.LightningModule):
    """Lightning module training ``net`` to solve a reaction problem on an
    n x n finite-difference grid with homogeneous Dirichlet boundaries.

    Parameters
    ----------
    net : torch.nn.Module
        Network mapping the input batch to the predicted interior solution.
    method : str
        Loss variant: ``'imconv'`` (Jacobi fixed-point consistency loss) or
        ``'real'`` (supervised loss against the reference solution).
    n : int
        Grid points per dimension, including the boundary.
    lr : float
        Initial Adam learning rate.
    a : float
        Domain half-width; grid spacing is ``h = 2a / (n - 1)``.
    loss_fn : callable
        Elementwise loss, e.g. ``F.mse_loss``.
    maxiter : int
        Number of Jacobi sweeps performed inside the ``'imconv'`` loss.
    """

    def __init__(self, net, method='imconv', n=32,
                lr=1e-3, a=1, loss_fn=F.mse_loss, maxiter=20):
        super().__init__()
        self.net = net
        self.loss = loss_fn
        self.lr = lr
        self.n = n
        self.h = 2 * a / (self.n - 1)
        self.method = method
        self.padder = fd_pad_diri_bc
        self.maxiter = maxiter

        # Build the Jacobi splitting A = diag(A) + M of the FD operator.
        # np.asarray: scipy's .todense() returns np.matrix, whose .diagonal()
        # is 2-D — np.diag would then EXTRACT instead of constructing a
        # diagonal matrix; a plain ndarray behaves as intended.
        A = np.asarray(reaction_A(self.n).todense())
        d = A.diagonal().astype(float)   # float: integer ** -1 raises in NumPy
        D = np.diag(1.0 / d)             # inverse of the diagonal part
        M = A - np.diag(d)               # off-diagonal remainder

        # NOTE(review): kept as plain tensors (not registered buffers) to
        # preserve the original state-dict layout; they stay on CPU unless
        # moved manually.
        self.D = torch.from_numpy(D).float()
        self.M = torch.from_numpy(M).float()
        del A, M, D

        # BUG FIX: the loss closure was never instantiated, so the
        # self.pinn_loss lookup in training_step failed at runtime.
        self.pinn_loss = self._make_pinn_loss()

    def _make_pinn_loss(self):
        """Return the loss callable selected by ``self.method``.

        Each callable has signature ``(u, f, ans)``: predicted interior
        solution, source term, and reference solution (with boundary).
        """

        def _jac(u, f):
            # Run self.maxiter Jacobi sweeps, v_{k+1} = D^{-1} (b - M v_k),
            # starting from the padded prediction; gradients are not needed
            # since the result only serves as a fixed-point target.
            with torch.no_grad():
                v = self.padder(u).reshape(-1, self.n**2).transpose(0, 1)
                # assumes b = 2 h^2 f is the correct FD right-hand side
                # scaling for reaction_A — TODO confirm against the operator.
                bs = f.reshape(-1, self.n**2).transpose(0, 1) * self.h ** 2 * 2
                # BUG FIX: the iterate was never fed back into the loop, so
                # every pass recomputed the same single sweep.
                for _ in range(self.maxiter):
                    v = self.D @ (bs - self.M @ v)
                new_u = v.transpose(0, 1).reshape(-1, 1, self.n, self.n)
            # Compare on the interior only; the boundary is fixed by the BC.
            return new_u[..., 1:-1, 1:-1]

        funcs = {
            'imconv' : lambda u, f, ans: self.loss(_jac(u, f), u),
            'real' : lambda u, f, ans: self.loss(self.padder(u), ans)
        }
        return funcs[self.method]

    def forward(self, x):
        """Forward the batch through the wrapped network."""
        y = self.net(x)
        return y

    def training_step(self, batch, batch_idx):
        """One optimization step: predict, evaluate the selected loss, log it."""
        x, f, ans = batch
        u = self(x)

        # BUG FIX: the pinn-loss callables take (u, f, ans); the source term
        # f was previously dropped from the call (wrong arity).
        loss_values = {
            f'{self.method}': self.pinn_loss(u, f, ans)}

        self.log_dict(loss_values)
        return {'loss' : loss_values[self.method]}

    def validation_step(self, batch, batch_idx):
        """Validate against the reference solution regardless of ``method``."""
        x, f, ans = batch
        u = self(x)
        loss_values = {
            'val_real': self.loss(self.padder(u), ans)}
        self.log_dict(loss_values)
        return loss_values

    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        """Defer to Lightning's default prediction behavior."""
        return super().predict_step(batch, batch_idx, dataloader_idx)

    def configure_optimizers(self):
        """Adam with exponential learning-rate decay (gamma=0.95 per epoch)."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        return [optimizer], [lr_scheduler]

if __name__ == '__main__':
    # No standalone entry point; this module is intended to be imported.
    pass