from model import CNN_BE,UNet
from dataset import Trainset_PDE_generated,Trainset_noise_generated,Trainset_random_generated,Trainset_random_generated0

import time
import math
import os
import torch

import numpy as np
import torch.optim as optim
from torch import linalg
from torch.optim.lr_scheduler import StepLR
from loss import loss_be
from FFT import numerical_all
from utils import save_checkpoints
from Numerical_Ju import numerical
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# Run everything in double precision. torch.set_default_tensor_type is
# deprecated; torch.set_default_dtype is the documented replacement and
# has the same effect here (CPU float64 default).
torch.set_default_dtype(torch.float64)

# Tolerate duplicate OpenMP runtimes (common MKL/matplotlib clash,
# especially on Windows); without this the process can abort at import time.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class Trainer():
    """Train a backward-Euler CNN/UNet surrogate and compare it against a
    numerical reference solver.

    ``args`` must provide: device, height, width, intial_snap, mid_channels,
    deltaT, trainingBatch, testBatch, domain, type, epsilon, TotalSnaps,
    TotalTrainingBatch, model_type.
    """

    def __init__(self, args):
        self.args = args
        self.height = args.height
        self.width = args.width
        self.intial_snap = args.intial_snap  # spelling kept to match args
        self.mid_channels = args.mid_channels
        self.deltaT = args.deltaT
        self.trainingBatch = args.trainingBatch
        self.testBatch = args.testBatch
        self.domain = args.domain
        self.type = args.type  # time-stepping scheme tag passed to loss_be
        self.epsilon = args.epsilon
        self.TotalSnaps = args.TotalSnaps
        self.TotalTrainingBatch = args.TotalTrainingBatch
        self.model_type = args.model_type

        # Bug fix: the original unconditionally overwrote args.device with
        # 'cuda'. Respect the configured device, falling back to CPU when
        # CUDA is unavailable so the script still runs.
        self.device = args.device if torch.cuda.is_available() else 'cpu'

        if self.model_type == 'CNN':
            self.BECNN = CNN_BE(self.mid_channels)
        elif self.model_type == 'UNet':
            self.BECNN = UNet(self.intial_snap, 1)
        else:
            # Previously an unknown model type left self.BECNN undefined and
            # crashed later with AttributeError; fail fast instead.
            raise ValueError(f'Unknown model_type: {self.model_type!r}')

        # Randomly generated training snapshots (PDE-generated variant kept
        # for reference).
        self.PDETrainSet = Trainset_random_generated(
            self.trainingBatch, self.intial_snap, self.height, self.width)
        # self.PDETrainSet = Trainset_PDE_generated(self.intial_snap, self.height, self.width, self.epsilon,self.domain)
        self.Snap_Training = self.PDETrainSet()

        # Separate randomly generated set used as the evaluation initial state.
        self.PDETrainSet0 = Trainset_random_generated0(
            self.testBatch, self.intial_snap, self.height, self.width)
        # self.PDETrainSet0 = Trainset_PDE_generated(self.intial_snap, self.height, self.width,self.epsilon,self.domain)
        self.Snap_Training0 = self.PDETrainSet0()

        print(self.BECNN)

        # Bug fix: the DataLoader used to be created only on the 'cuda'
        # branch, so train() crashed on CPU. Moving tensors with .to() is a
        # no-op on CPU, so do it unconditionally and always build the loader.
        self.BECNN = self.BECNN.to(self.device)
        self.Snap_TrainingLB = self.Snap_Training.to(self.device)
        self.Snap_TrainingLB0 = self.Snap_Training0.to(self.device)
        self.dataloader = torch.utils.data.DataLoader(
            self.Snap_TrainingLB, batch_size=4, shuffle=True)

        self.model_name = f'NS_{self.model_type}_deltaT{self.deltaT}_{self.trainingBatch}_{self.width}_{self.height}_{self.type}_{self.TotalTrainingBatch}Batch{self.TotalSnaps}Snaps_{self.mid_channels}Channels'

    def full(self, miss):
        """Extend a periodic field to the full grid.

        Appends a copy of the first column on the right and prepends a copy
        of the (wrapped) last row on top, so an input of shape
        (batch, channel, H, W) becomes (batch, channel, H+1, W+1).
        """
        full_left = torch.clone(miss[:, :, :, [0]])
        full = torch.cat((miss, full_left), 3)
        full_bottom = torch.clone(full[:, :, [-1], :])
        full = torch.cat((full_bottom, full), 2)

        return full

    def train(self):
        """Run the training loop, checkpoint the best model, then roll the
        trained network forward and plot its error against the numerical
        reference solution."""
        self.optimizer_Adam = optim.Adam(self.BECNN.parameters(), lr=0.001)
        self.lr_scheduler = StepLR(self.optimizer_Adam, step_size=1, gamma=0.6)

        best_loss = 1e10
        tt = time.time()
        self.BECNN.train()
        step = 0
        print('Training Start...')

        for k in range(2):
            epoch = k
            for idx, (Snap_pre) in enumerate(self.dataloader):
                kk = idx
                self.optimizer_Adam.zero_grad()
                # Number of inner optimization steps per rollout stage;
                # decays by 10 per stage down to a floor of 100.
                m = 500
                for a in range(self.TotalTrainingBatch):
                    self.optimizer_Adam.zero_grad()
                    Snap_pre1 = torch.clone(Snap_pre)
                    for i in range(m):
                        self.optimizer_Adam.zero_grad()
                        Snap_Next = self.BECNN(Snap_pre1)
                        # Residual of the backward-Euler step; drive it to zero.
                        residual = loss_be(Snap_pre1, Snap_Next, self.deltaT, self.domain,
                                           self.epsilon, self.type)
                        Loss = torch.nn.MSELoss()(residual, torch.zeros_like(residual))
                        Loss.backward()
                        self.optimizer_Adam.step()
                    if m <= 100:
                        m = 100
                    elif m > 100:
                        m = m - 10
                    # Advance the rollout: next stage starts from the detached
                    # prediction. Bug fix: target device was hard-coded to
                    # 'cuda'; use the configured device instead.
                    Snap_pre = Snap_Next.detach().clone().to(self.device)
                self.lr_scheduler.step()

                if (epoch + kk + 1) % 1 == 0:
                    # NOTE(review): no real validation set is used here —
                    # Valid_Loss is just a scaled training loss placeholder.
                    Valid_Loss = Loss * 2
                    print(
                        f'#{epoch + kk + 1:5d}: TrainingLoss={Loss.item():.2e}, lr={self.lr_scheduler.get_last_lr()[0]:.2e}, time={time.time() - tt:.2f}s')
                    is_best = Valid_Loss < best_loss
                    state = {
                        'epoch': epoch,
                        'state_dict': self.BECNN.state_dict(),
                        'best_loss': best_loss
                    }
                    save_checkpoints(state, is_best, save_dir=f'{self.model_name}')
                    if is_best:
                        best_loss = Valid_Loss
                    tt = time.time()

        print('Training Finished!')
        self.BECNN.eval()

        # --- Evaluation: roll the network forward for TimeSteps steps. ---
        Snap_init = torch.clone(self.Snap_TrainingLB0)
        Snap_init0 = torch.clone(self.Snap_TrainingLB0)

        TimeSteps = 5000
        n = TimeSteps
        Pre = []
        with torch.no_grad():
            for i in range(n):
                Snap_Test_Next = self.BECNN(Snap_init)
                snap_full = self.full(Snap_Test_Next)
                Snap_init = Snap_Test_Next

                # Batch-averaged full-grid prediction for this step.
                predict = snap_full.mean(dim=0)[0].cpu().detach().numpy()
                Pre.append(predict)
                torch.cuda.empty_cache()

        # --- Reference: numerical solver averaged over the test batch. ---
        # The solver runs at dt=0.01, i.e. 10 reference steps per network
        # step (deltaT*n/0.01 total) — hence the 10x length below.
        Snap_Test_Sum = np.zeros((self.height + 1, self.width + 1))
        Numerical_1 = [0] * n * 10
        for i in range(self.testBatch):
            Snap_init1 = torch.unsqueeze(Snap_init0[i], 0)
            Snap_Numerical_Out0, Numerical = numerical_all(Snap_init1, self.domain, self.deltaT * n / 0.01, 0.01,
                                                           self.epsilon,
                                                           type='CN')
            Numerical_1 = [a + b for a, b in zip(Numerical, Numerical_1)]

            Snap_Test_Sum = np.add(Snap_Test_Sum, Snap_Numerical_Out0)
        Snap_Numerical_Out = Snap_Test_Sum / self.testBatch
        Numerical_all = [a / self.testBatch for a in (Numerical_1)]

        # Relative L2 error between network prediction i and the reference
        # snapshot at the matching physical time (every 10th solver step).
        Num = Numerical_all
        RL2_error_all = []
        for i in range(n):
            error = Num[10 * (i + 1) - 1] - Pre[i]
            RL2error = np.linalg.norm(error) / self.width
            RL2_error_all.append(RL2error)

        np.save(
            f'data/deltaT{self.deltaT}_{self.height}_3R_{self.mid_channels}_{self.TotalTrainingBatch}_{888}_error.npy',
            RL2_error_all)
        plt.plot(np.linspace(0.1, 0.1 * len(RL2_error_all), len(RL2_error_all)), RL2_error_all, label='0.1')
        plt.legend()
        plt.title("Error")
        plt.show()

if __name__ == '__main__':
    # Bug fix: Trainer.__init__ requires an ``args`` namespace, but the
    # original called Trainer() with no arguments, which raises TypeError.
    # Build the expected configuration via argparse. Defaults below are
    # plausible placeholders — TODO confirm against the project's usual
    # run configuration.
    import argparse

    parser = argparse.ArgumentParser(
        description='Train a backward-Euler CNN/UNet PDE surrogate')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--height', type=int, default=64)
    parser.add_argument('--width', type=int, default=64)
    parser.add_argument('--intial_snap', type=int, default=1)
    parser.add_argument('--mid_channels', type=int, default=16)
    parser.add_argument('--deltaT', type=float, default=0.1)
    parser.add_argument('--trainingBatch', type=int, default=4)
    parser.add_argument('--testBatch', type=int, default=1)
    parser.add_argument('--domain', type=float, default=1.0)
    parser.add_argument('--type', default='BE')
    parser.add_argument('--epsilon', type=float, default=0.01)
    parser.add_argument('--TotalSnaps', type=int, default=1)
    parser.add_argument('--TotalTrainingBatch', type=int, default=1)
    parser.add_argument('--model_type', choices=['CNN', 'UNet'], default='CNN')

    trainer = Trainer(parser.parse_args())
    trainer.train()
