
import torch
import numpy as np
import lightning.pytorch as pl
import torch.nn.functional as F
# from .utils import *

def fd_pad_diri_bc(x, pad=(1, 1, 1, 1), g=0):
    """Enforce a constant (Dirichlet) boundary by padding *x* with value *g*.

    Args:
        x: input tensor; the last two dims are the spatial grid.
        pad: F.pad-style (left, right, top, bottom) widths.
        g: boundary value written into the padded cells.

    Returns:
        The padded tensor.
    """
    padded = F.pad(x, pad, 'constant', g)
    return padded

def padconv_with_kernel(x, kernel=((0, 0.25, 0), (0.25, 0, 0.25), (0, 0.25, 0))):
    """Zero-pad *x* by one cell on each side, then cross-correlate with *kernel*.

    For the default 3x3 four-point averaging stencil the output has the same
    spatial size as *x* (pad of 1 cancels the 3x3 valid convolution).

    Args:
        x: tensor of shape (N, 1, H, W) — assumed single-channel; TODO confirm.
        kernel: square 2-D stencil; default is immutable (tuple) to avoid the
            shared-mutable-default pitfall. Any odd size is accepted now, not
            just 3x3.

    Returns:
        F.conv2d of the padded input with the stencil.
    """
    weight = torch.tensor(kernel, dtype=torch.float).type_as(x)
    # Shape to (out_channels=1, in_channels=1, kH, kW); generalized from the
    # hard-coded view(1, 1, 3, 3). The former .repeat(1, 1, 1, 1) was a no-op.
    weight = weight.view(1, 1, *weight.shape)
    return F.conv2d(fd_pad_diri_bc(x), weight)

def conv_with_kernel(x, kernel=((0, 0.25, 0), (0.25, 0, 0.25), (0, 0.25, 0))):
    """Valid (un-padded) cross-correlation of *x* with a square stencil.

    Output spatial size shrinks by (k - 1) per dimension.

    Args:
        x: tensor of shape (N, 1, H, W) — assumed single-channel; TODO confirm.
        kernel: square 2-D stencil; default is immutable (tuple) to avoid the
            shared-mutable-default pitfall. Any odd size is accepted now, not
            just 3x3.

    Returns:
        F.conv2d(x, kernel) with no padding.
    """
    weight = torch.tensor(kernel, dtype=torch.float).type_as(x)
    # Shape to (out_channels=1, in_channels=1, kH, kW); generalized from the
    # hard-coded view(1, 1, 3, 3). The former .repeat(1, 1, 1, 1) was a no-op.
    weight = weight.view(1, 1, *weight.shape)
    return F.conv2d(x, weight)


class ReactionModule(pl.LightningModule):
    """Train ``net`` to solve a reaction-diffusion problem on an n x n grid.

    The 'imconv' method builds a gradient-free fixed-point (Jacobi-style)
    refinement of the network output and penalizes the distance between the
    output and its refined version; the 'real' method supervises directly
    against the reference solution.

    Args:
        net: network mapping a batch input ``x`` to interior solution values.
        method: 'imconv' (fixed-point PINN loss) or 'real' (supervised loss).
        n: grid points per side.
        lr: Adam learning rate.
        a: half-width of the domain; grid spacing h = 2a / (n - 1).
        loss_fn: elementwise loss, defaults to ``F.mse_loss``.
        maxiter: extra fixed-point sweeps per loss evaluation (maxiter + 1
            sweeps total, matching the original unrolled implementation).
    """

    def __init__(self, net, method='imconv', n=32,
                 lr=1e-3, a=1, loss_fn=F.mse_loss, maxiter=20):
        super().__init__()
        self.net = net
        self.loss = loss_fn
        self.lr = lr
        self.n = n
        self.h = 2 * a / (self.n - 1)  # uniform grid spacing on [-a, a]
        self.method = method
        self.padder = fd_pad_diri_bc   # zero-Dirichlet BC applied via padding
        self.maxiter = maxiter

        self.pinn_loss = self._make_pinn_loss()

        # Coefficient fields baked at construction time.
        # NOTE(review): assumes ./data/{n}/a.npy and r.npy exist and are
        # (n, n) arrays — confirm against the data-generation script.
        path = f'./data/{n}'
        mat_a = np.load(f'{path}/a.npy')[np.newaxis, np.newaxis]
        mat_r = np.load(f'{path}/r.npy')[np.newaxis, np.newaxis, 1:-1, 1:-1]

        # Buffers move with the module across devices and are checkpointed.
        self.register_buffer('a', torch.from_numpy(mat_a).float())
        r = torch.from_numpy(mat_r).float()

        # Inverse diagonal of the discrete operator on interior points:
        # (neighbour sum of a + 4a + 2 h^2 r)^-1, computed once.
        self.register_buffer(
            'bottom',
            (conv_with_kernel(self.a, [[0, 1, 0], [1, 4, 1], [0, 1, 0]])
             + 2 * self.h ** 2 * r) ** -1)

    def forward(self, x):
        """Delegate to the wrapped network."""
        return self.net(x)

    def training_step(self, batch, batch_idx):
        # batch is assumed to yield (input, forcing term, reference solution).
        x, f, ans = batch
        u = self(x)

        loss_values = {self.method: self.pinn_loss(u, f, ans)}
        self.log_dict(loss_values, on_step=True)
        return {'loss': loss_values[self.method]}

    def validation_step(self, batch, batch_idx):
        x, f, ans = batch
        u = self(x)
        # Validation always compares the padded output against the reference,
        # regardless of the training method.
        loss_values = {
            'ValLoss': F.mse_loss(self.padder(u), ans),
            'iter_times': float(self.maxiter)}
        self.log_dict(loss_values, on_step=True)
        return loss_values

    def _make_pinn_loss(self):
        """Return the loss callable selected by ``self.method``."""
        neighbor = [[0, 1, 0], [1, 0, 1], [0, 1, 0]]

        def _jacobi_sweep(u_pad, f):
            # One fixed-point update on the interior grid points.
            return (2 * self.h ** 2 * f[..., 1:-1, 1:-1]
                    + conv_with_kernel(u_pad, neighbor) * self.a[..., 1:-1, 1:-1]
                    + conv_with_kernel(self.a * u_pad, neighbor)) * self.bottom

        def _imconv(u, f):
            # Build the fixed-point target without tracking gradients; the
            # original unrolled the first sweep before the loop, so this runs
            # maxiter + 1 sweeps in total — behavior preserved.
            with torch.no_grad():
                new_u = u
                for _ in range(self.maxiter + 1):
                    new_u = _jacobi_sweep(self.padder(new_u), f)
            return new_u

        funcs = {
            'imconv': lambda u, f, ans: self.loss(_imconv(u, f), u),
            'real': lambda u, cofs, ans: self.loss(self.padder(u), ans),
        }
        return funcs[self.method]

    def configure_optimizers(self):
        """Adam with exponential LR decay (gamma=0.95)."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        # lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', 0.5, patience=5)
        return [optimizer], [lr_scheduler]
