import torch
import numpy as np
import lightning.pytorch as pl
import torch.nn.functional as F
from .utils import *

class LapModule(pl.LightningModule):
    """Lightning module that trains ``net`` to solve a Laplace/Poisson-type
    problem on a uniform grid with Dirichlet boundary conditions.

    The training loss is selected by ``method``:

    * ``'exconv'`` — explicit finite-difference residual (``explicit_convRhs``)
      driven to zero.
    * ``'imconv'`` — fixed-point consistency with the implicit operator
      (``implicit_convRhs``): the iterated update should reproduce ``u``.
    * ``'real'`` — direct supervision against the reference solution ``ans``.

    Args:
        net: the neural network mapping grid inputs ``x`` to a solution ``u``.
        method: loss variant, one of ``_METHODS`` (see above).
        n: number of grid points per dimension; must be >= 2.
        lr: Adam learning rate.
        a: half-width of the domain ``[-a, a]`` used to derive the spacing.
        loss_fn: elementwise loss used inside the PINN losses
            (default ``F.mse_loss``).
        maxiter: number of implicit-operator sweeps for ``'imconv'``
            (also logged during validation as ``iter_times``).

    Raises:
        ValueError: if ``method`` is unknown or ``n < 2``.
    """

    # Valid values for ``method``; each selects one entry of the loss table
    # built in ``_make_pinn_loss``.
    _METHODS = ('exconv', 'imconv', 'real')

    def __init__(self, net, method='exconv', n=32,
                lr=1e-3, a=1, loss_fn=F.mse_loss, maxiter=20):
        super().__init__()
        # Fail fast with a clear message instead of a bare KeyError from
        # the funcs[...] lookup at the end of __init__.
        if method not in self._METHODS:
            raise ValueError(
                f"unknown method {method!r}; expected one of {self._METHODS}")
        # h = 2a/(n-1) needs at least two grid points; n == 1 would divide
        # by zero and n <= 0 would yield a nonsensical negative spacing.
        if n < 2:
            raise ValueError(f"n must be >= 2 to define a grid spacing, got {n}")
        self.net = net
        self.loss = loss_fn
        self.lr = lr
        self.n = n
        # Uniform spacing of n points spanning [-a, a].
        self.h = 2 * a / (self.n - 1)
        self.method = method
        # Pads the network output with the Dirichlet boundary values
        # (defined in .utils) before comparing against the full-grid answer.
        self.padder = fd_pad_diri_bc
        self.maxiter = maxiter
        self.pinn_loss = self._make_pinn_loss()

    def forward(self, x):
        """Evaluate the wrapped network on ``x``."""
        return self.net(x)

    def training_step(self, batch, batch_idx):
        """One optimization step using the method-selected PINN loss.

        Expects ``batch = (x, f, ans)``: grid inputs, right-hand side, and
        reference solution (only used by the ``'real'`` method).
        """
        x, f, ans = batch
        u = self(x)

        loss_values = {
            f'{self.method}': self.pinn_loss(u, f, ans)}

        self.log_dict(loss_values, on_step=True)
        return {'loss': loss_values[self.method]}

    def validation_step(self, batch, batch_idx):
        """Validation: MSE of the boundary-padded prediction vs. ``ans``.

        ``iter_times`` logs the (constant) implicit iteration count so runs
        with different ``maxiter`` are distinguishable in the logger.
        """
        x, f, ans = batch
        u = self(x)

        loss_values = {
            'ValLoss': F.mse_loss(self.padder(u), ans),
            'iter_times': float(self.maxiter)}

        self.log_dict(loss_values, on_step=True)
        return loss_values

    def _make_pinn_loss(self):
        """Build and return the loss callable ``(u, f, ans) -> scalar``
        corresponding to ``self.method``.

        The finite-difference operators are constructed once here (they
        depend only on the grid spacing ``h``) and captured by closures.
        """
        im_conver = implicit_convRhs(self.h)
        ex_conver = explicit_convRhs(self.h)

        def _imconv(u, f):
            # NOTE(review): applies im_conver maxiter + 1 times in total
            # (once before the loop, then maxiter sweeps) — confirm the
            # extra initial application is intended and not an off-by-one.
            new_u = im_conver(u, f)
            for _ in range(self.maxiter):
                new_u = im_conver(new_u, f)
            return new_u

        def _exconv(u, f):
            return ex_conver(u, f)

        funcs = {
            # Explicit residual should vanish everywhere.
            'exconv': lambda u, f, ans: self.loss(_exconv(u, f), torch.zeros_like(u)),
            # Implicit iteration should leave u (approximately) unchanged.
            'imconv': lambda u, f, ans: self.loss(_imconv(u, f), u),
            # Direct supervision on the padded (full-grid) solution.
            'real': lambda u, f, ans: self.loss(self.padder(u), ans)
        }
        return funcs[self.method]

    def configure_optimizers(self):
        """Adam with an exponentially decaying LR (gamma=0.95 per epoch)."""
        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        # optimizer = torch.optim.LBFGS(self.parameters())
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
        return [optimizer], [lr_scheduler]
