# -*- coding: utf-8 -*-
# Jie Jiang

import os
from time import time
from collections import OrderedDict
from shutil import copyfile
import sys
import numpy as np
import scipy
import torch
from matplotlib import pyplot as plt
from scipy import io
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter

device = 'cuda' if torch.cuda.is_available() else 'cpu'


# Enable GPU support if available and set the floating point precision for MG
# set_up_backend("torch", data_type="float32")

# define a self activation function
class Act_fun(nn.Module):
    def __init__(self):
        super(Act_fun, self).__init__()

    def forward(self, x):
        x = torch.sin(x)
        return x


# Kaiming-normal initializer for Linear layers (biases set to 1); applied via Module.apply.
def weight_init(m):
    """Initializer intended for ``Module.apply``.

    Linear layers get Kaiming-normal weights (fan-in mode, a=0) and all
    biases set to the constant 1; every other module type is left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
    nn.init.constant_(m.bias, 1)


# the deep neural network
class DNN(torch.nn.Module):
    """Fully-connected network with sine activations on every hidden layer.

    ``layers`` lists the width of each layer, e.g. ``[1, 64, 64, 2]``; the
    final Linear layer ('layer_*_fin') has no activation.
    """
    def __init__(self, layers):
        super(DNN, self).__init__()

        # number of Linear layers
        self.depth = len(layers) - 1

        # Activation factories kept as attributes for external inspection.
        # NOTE: the original code selected between activation1 and activation2
        # with an if/else whose two branches were identical (both used
        # activation1), so the conditional was dead code and has been removed;
        # activation2 (ReLU) is retained for attribute compatibility but unused.
        self.activation1 = Act_fun
        self.activation2 = torch.nn.ReLU

        # Build (name, module) pairs: Linear + sine activation for each hidden
        # layer, then a bare output Linear.
        layer_list = list()
        for i in range(self.depth - 1):
            layer_list.append(
                ('layer_%d' % i, torch.nn.Linear(layers[i], layers[i + 1]))
            )
            layer_list.append(('activation_%d' % i, self.activation1()))

        layer_list.append(
            ('layer_%d_fin' % (self.depth - 1), torch.nn.Linear(layers[-2], layers[-1]))
        )
        layerDict = OrderedDict(layer_list)

        # deploy layers
        self.layers = torch.nn.Sequential(layerDict)

    def forward(self, x):
        """Run ``x`` through the stacked layers and return the raw output."""
        return self.layers(x)


class Mydataset(Dataset):
    """Minimal in-memory dataset pairing inputs (``imgs``) with labels (``targets``)."""

    def __init__(self, T, V):
        super(Mydataset, self).__init__()
        self.imgs = T       # input samples
        self.targets = V    # matching targets, indexed in lockstep with imgs

    def __getitem__(self, index):
        sample = self.imgs[index]
        label = self.targets[index]
        return sample, label

    def __len__(self):
        return len(self.imgs)


class PhysicsInformedNN:
    """Multi-grade deep-learning solver for an oscillatory integral equation.

    Three networks (grades) are trained in sequence: ``dnn1`` fits the data,
    ``dnn2`` fits the residual left by ``dnn1``, and ``dnn3`` fits the
    residual left by ``dnn2``.  Each later grade loads the previous grade's
    weights with ``strict=False`` (only layers with matching names transfer)
    and freezes those inherited layers, so only the newly added layers train.
    """
    def __init__(self, grid, layers, case_ID: int, norm_sol, true_kappa, true_K_coe, batch_size, num_epoch1, num_epoch2, num_epoch3, lr2, File_name, Write_into_file):
        # data
        self.true_K_coe = torch.Tensor(true_K_coe).to(device)  # kernel coefficients; only [0, 0] is used below
        self.kappa = torch.Tensor(true_kappa/1.0).to(device)   # wavenumber (/1.0 forces a float dtype)
        self.norm_sol = norm_sol                # reference-solution norm used to normalise cal_error
        self.case_ID = case_ID                  # tag used in saved-model file names
        self.file_name = File_name              # log file that print() output is redirected into
        self.Write_into_file = Write_into_file  # when True, redirect sys.stdout to file_name while logging
        self.grid = torch.Tensor(grid).to(device)
        # Number of quadrature nodes; presumably ~6 nodes per unit wavenumber — TODO confirm.
        self.point_num = 6 * int(true_kappa) +1

        ## using in the forward
        # help1: quadrature nodes on [-1, 1], one row per sample of a FULL batch.
        self.help1 = torch.linspace(-1, 1, self.point_num).repeat([batch_size, 1]).to(device)
        # help2: the same nodes as a column vector, fed through the network in yh().
        self.help2 = torch.linspace(-1, 1, self.point_num).view([-1, 1]).to(device)
        # Slot 0 of each list is an unused placeholder; slots 1-3 hold per-grade stats.
        self.time_list = [1., 1., 1., 1.]   # wall-clock training time per grade
        self.train_list = [1., 1., 1., 1.]  # last logged training-batch loss per grade
        self.val_list = [1., 1., 1., 1.]    # last logged validation loss per grade
        self.test_loss = [1., 1., 1., 1.]
        self.RE = [1., 1., 1., 1.]          # relative error of the accumulated solution per grade

        self.Writer = SummaryWriter('../logs')

        # multi_deep neural networks: each grade takes a longer prefix of the
        # hidden widths and appends the output width, so grades grow deeper.
        layer1 = np.hstack((layers[0:3], layers[-1]))
        layer2 = np.hstack((layers[0:5], layers[-1]))
        layer3 = np.hstack((layers[0:9], layers[-1]))

        self.dnn1 = DNN(layer1)
        self.dnn1.apply(weight_init)
        self.opt1 = torch.optim.Adam(self.dnn1.parameters(), lr=1e-2, weight_decay=lr2)
        # gamma chosen so the lr decays by a total factor of 1e-5 over num_epoch1 epochs.
        self.lr1 = torch.optim.lr_scheduler.ExponentialLR(self.opt1, gamma=1e-5 ** (1/num_epoch1))

        self.dnn2 = DNN(layer2)
        self.dnn2.apply(weight_init)
        # Freeze the first 4 parameter tensors (weight+bias of the two hidden
        # Linear layers inherited from dnn1); only the new layers train.
        i = -4
        for p in self.dnn2.parameters():
            i += 1
            if i <= 0:
                p.requires_grad = False
        self.opt2 = torch.optim.Adam(filter(lambda p: p.requires_grad, self.dnn2.parameters()), lr=1e-2,
                                     weight_decay=lr2)
        self.lr2 = torch.optim.lr_scheduler.ExponentialLR(self.opt2, gamma=1e-5 ** (1/num_epoch2))

        self.dnn3 = DNN(layer3)
        self.dnn3.apply(weight_init)
        # Likewise freeze the first 8 parameter tensors (the four hidden Linear
        # layers inherited from dnn2).
        i = -8
        for p in self.dnn3.parameters():
            i += 1
            if i <= 0:
                p.requires_grad = False
        self.opt3 = torch.optim.Adam(filter(lambda p: p.requires_grad, self.dnn3.parameters()), lr=1e-2,
                                     weight_decay=lr2)
        self.lr3 = torch.optim.lr_scheduler.ExponentialLR(self.opt3, gamma=1e-5 ** (1/num_epoch3))


        self.loss_fn = torch.nn.MSELoss()

        # Histories appended at every validation checkpoint across all grades.
        self.time = []
        self.time_val_loss = []
        self.time_train_loss = []
        self.index = 0

    def yh(self, imgs):
        """Evaluate the CURRENT grade's network; combine its two real outputs
        into one complex column via torch.view_as_complex."""
        return torch.view_as_complex(self.cur_dnn(imgs)).view([-1, 1])

    def forward(self, imgs):
        """Apply the integral operator to the current network for a FULL batch.

        Computes y(x) - K_coe[0,0] * trapz(exp(i*kappa*|x-t|) * y(t), t in [-1,1])
        via the trapezoid rule on the precomputed node grid (help1/help2), and
        returns the result as a real (N, 2) tensor.  Requires len(imgs) to
        equal the batch_size that help1 was built for.
        """
        temp1 = self.yh(imgs)
        # kappa * |x - t| for every sample x against every quadrature node t.
        temp2 = self.kappa*torch.abs(imgs.repeat([1, self.point_num])-self.help1)
        temp2 = torch.exp(1j*temp2)
        # Network values at the quadrature nodes, replicated once per sample.
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        # Trapezoidal quadrature with uniform spacing 2/(point_num-1).
        temp2 = self.true_K_coe[0, 0] * torch.trapz(y=temp2*cha, dx=2/(self.point_num-1))
        return torch.view_as_real(temp1.view([-1]) - temp2)

    def left_forward(self, imgs):
        """Same operator as forward(), but for a leftover batch of ANY size:
        the replicated node grid is built on the fly instead of using the
        batch_size-shaped help1 buffer."""
        temp1 = self.yh(imgs)
        row = temp1.shape[0]
        temp2 = self.kappa*torch.abs(imgs.repeat([1, self.point_num])-torch.linspace(-1, 1, self.point_num).repeat([row, 1]).to(device))
        temp2 = torch.exp(1j*temp2)
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        temp2 = self.true_K_coe[0, 0] * torch.trapz(y=temp2*cha, dx=2/(self.point_num-1))
        return torch.view_as_real(temp1.view([-1]) - temp2)

    def cal_error2(self, target, pred):
        """Trapezoid-weighted relative L2 error (not called in this file).

        NOTE(review): the denominator subtracts temp[0]/temp[-1] (the
        NUMERATOR's endpoint terms) rather than temp2[0]/temp2[-1]; this looks
        like a typo — confirm before relying on this metric.
        """
        diff = target - pred
        temp = np.sum(diff ** 2, 1)
        temp2 = np.sum(target ** 2, 1)
        return np.sqrt((2*np.sum(temp)-temp[0]-temp[-1]))/np.sqrt((2*np.sum(temp2)-temp[0]-temp[-1]))

    def cal_error(self, target, pred):
        """Relative error on the fixed 20481-point evaluation grid, normalised
        by norm_sol.  '2*sum - endpoints' is the trapezoid weighting (interior
        points counted twice, endpoints once, up to a constant factor)."""
        diff = target - pred
        temp = np.sum(diff ** 2, 1)
        return np.sqrt((2*np.sum(temp)-temp[0]-temp[-1])/20481)/self.norm_sol



    def train_dnn1(self, nEpochs, mydataloader, val_dataloader):
        """Train the first-grade network (dnn1) and save its weights to disk.

        A train/validation checkpoint is logged roughly every nEpochs/8
        epochs; the time spent validating and logging is subtracted from the
        measured training time.
        """
        start_time = time()
        self.index = 0
        self.cur_dnn = self.dnn1.to(device)
        self.cur_opt = self.opt1
        self.cur_lr = self.lr1
        self.cur_dnn.train()
        # NOTE(review): assigning sys.stdout to `file` inside `with open`
        # leaves sys.stdout pointing at a CLOSED file once the block exits; it
        # works here only because every later print re-opens and re-assigns.
        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file
            print('--------------------------- learn dnn1 ----------------------------')
        iter = 0
        show_gap = int(nEpochs / 8)
        init_epoch = 1
        for epoch in range(nEpochs):
            for img, target in mydataloader:
                iter += 1
                img = img.to(device)
                target = target.to(device)
                output = self.forward(img)
                loss = self.loss_fn(target, output)

                self.cur_opt.zero_grad()
                loss.backward()
                self.cur_opt.step()
                # Checkpoint fires only on the FIRST batch of a logging epoch:
                # once triggered, init_epoch jumps ahead by show_gap.
                if (epoch+1) == init_epoch:
                    init_epoch += show_gap
                    now_time = time()
                    total_val_loss = 0
                    # Full pass over the validation loader without gradients.
                    with torch.no_grad():
                        for img, target in val_dataloader:
                            img = img.to(device)
                            target = target.to(device)
                            output = self.forward(img)
                            val_loss = self.loss_fn(target, output)
                            total_val_loss += val_loss.item()
                    with open(self.file_name, 'a') as file:
                        if self.Write_into_file == True:
                            sys.stdout = file
                        print(
                            "The epoch: %d, the iter: %d, the training loss: %e, the validation loss: %e, the lr: %e:" %
                            (epoch, iter, loss.item(), total_val_loss / len(val_dataloader),
                             self.cur_opt.param_groups[0]['lr']))
                    # Exclude the validation/logging time from the timing.
                    start_time += time() - now_time
                    self.time.append(time() - start_time)
                    self.time_val_loss.append(total_val_loss / len(val_dataloader))
                    self.time_train_loss.append(loss.item())
                    self.train_list[1] = loss.item()
                    self.val_list[1] = total_val_loss / len(val_dataloader)
                    self.Writer.add_scalar('dnn1_train_loss', loss.item(), iter)
                    self.Writer.add_scalar('dnn1_val_loss', total_val_loss / len(val_dataloader), iter)
            self.cur_lr.step()

        torch.save(self.cur_dnn.state_dict(), '../models/case%d_dnn1.pth' % self.case_ID)
        end_time = time()
        self.time_list[1] = end_time - start_time


    def train_dnn2(self, nEpochs, mydataloader, val_dataloader):
        """Train the second-grade network (dnn2) on the grade-1 residual.

        Same loop as train_dnn1, except dnn1's saved weights are loaded first
        (strict=False: only matching layer names transfer) and checkpoints log
        every nEpochs/10 epochs instead of nEpochs/8.
        """
        start_time = time()
        self.dnn2.load_state_dict(torch.load('../models/case%d_dnn1.pth' % self.case_ID), strict=False)
        self.cur_dnn = self.dnn2.to(device)
        self.cur_opt = self.opt2
        self.cur_lr = self.lr2
        self.cur_dnn.train()
        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file
            print('--------------------------- learn dnn2 ----------------------------')
        iter = 0
        show_gap = int(nEpochs/10)
        init_epoch = 1
        for epoch in range(nEpochs):
            for img, target in mydataloader:
                iter += 1
                img = img.to(device)
                target = target.to(device)
                output = self.forward(img)
                loss = self.loss_fn(target, output)

                self.cur_opt.zero_grad()
                loss.backward()
                self.cur_opt.step()
                # Checkpoint on the first batch of each logging epoch.
                if (epoch+1) == init_epoch:
                    init_epoch += show_gap
                    now_time = time()
                    total_val_loss = 0
                    with torch.no_grad():
                        for img, target in val_dataloader:
                            img = img.to(device)
                            target = target.to(device)
                            output = self.forward(img)
                            val_loss = self.loss_fn(target, output)
                            total_val_loss += val_loss.item()
                    with open(self.file_name, 'a') as file:
                        if self.Write_into_file == True:
                            sys.stdout = file
                        print(
                            "The epoch: %d, the iter: %d, the training loss: %e, the validation loss: %e, the lr: %e:" %
                            (epoch, iter, loss.item(), total_val_loss / len(val_dataloader),
                             self.cur_opt.param_groups[0]['lr']))
                    # Exclude the validation/logging time from the timing.
                    start_time += time() - now_time
                    self.time.append(time() - start_time)
                    self.time_val_loss.append(total_val_loss / len(val_dataloader))
                    self.time_train_loss.append(loss.item())
                    self.train_list[2] = loss.item()
                    self.val_list[2] = total_val_loss / len(val_dataloader)
                    self.Writer.add_scalar('dnn2_train_loss', loss.item(), iter)
                    self.Writer.add_scalar('dnn2_val_loss', total_val_loss / len(val_dataloader), iter)
            self.cur_lr.step()

        torch.save(self.cur_dnn.state_dict(), '../models/case%d_dnn2.pth' % self.case_ID)
        end_time = time()
        self.time_list[2] = end_time - start_time

    def train_dnn3(self, nEpochs, mydataloader, val_dataloader):
        """Train the third-grade network (dnn3) on the grade-2 residual.

        Same loop as train_dnn2, seeded from dnn2's saved weights.
        """
        start_time = time()
        self.dnn3.load_state_dict(torch.load('../models/case%d_dnn2.pth' % self.case_ID), strict=False)
        self.cur_dnn = self.dnn3.to(device)
        self.cur_opt = self.opt3
        self.cur_lr = self.lr3
        self.cur_dnn.train()
        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file
            print('--------------------------- learn dnn3 ----------------------------')
        iter = 0
        show_gap = int(nEpochs / 10)
        init_epoch = 1
        for epoch in range(nEpochs):
            for img, target in mydataloader:
                iter += 1
                img = img.to(device)
                target = target.to(device)
                output = self.forward(img)
                loss = self.loss_fn(target, output)

                self.cur_opt.zero_grad()
                loss.backward()
                self.cur_opt.step()
                # Checkpoint on the first batch of each logging epoch.
                if (epoch+1) == init_epoch:
                    init_epoch += show_gap
                    now_time = time()
                    total_val_loss = 0
                    with torch.no_grad():
                        for img, target in val_dataloader:
                            img = img.to(device)
                            target = target.to(device)
                            output = self.forward(img)
                            val_loss = self.loss_fn(target, output)
                            total_val_loss += val_loss.item()
                    with open(self.file_name, 'a') as file:
                        if self.Write_into_file == True:
                            sys.stdout = file
                        print(
                            "The epoch: %d, the iter: %d, the training loss: %e, the validation loss: %e, the lr: %e:" %
                            (epoch, iter, loss.item(), total_val_loss / len(val_dataloader),
                             self.cur_opt.param_groups[0]['lr']))
                    # Exclude the validation/logging time from the timing.
                    start_time += time() - now_time
                    self.time.append(time() - start_time)
                    self.time_val_loss.append(total_val_loss / len(val_dataloader))
                    self.time_train_loss.append(loss.item())
                    self.train_list[3] = loss.item()
                    self.val_list[3] = total_val_loss / len(val_dataloader)
                    self.Writer.add_scalar('dnn3_train_loss', loss.item(), iter)
                    self.Writer.add_scalar('dnn3_val_loss', total_val_loss / len(val_dataloader), iter)
            self.cur_lr.step()

        torch.save(self.cur_dnn.state_dict(), '../models/case%d_dnn3.pth' % self.case_ID)
        end_time = time()
        self.time_list[3] = end_time - start_time



    def eval_model(self, u):
        """Evaluate all three grades against the reference solution `u`.

        Each network is sampled on the fixed 20481-point uniform grid on
        [-1, 1]; the grade-k prediction is the SUM of grades 1..k, since each
        later grade was trained on the previous grade's residual.  Relative
        errors are stored in self.RE, a summary is printed, and the final
        validation loss and relative error are returned.
        """
        self.cur_dnn = self.dnn1
        self.cur_dnn.eval()
        u_dnn1 = torch.view_as_real(self.yh(torch.linspace(-1, 1, 20481).to(device).view([-1, 1])).view([-1])).detach().cpu().numpy()
        self.RE[1] = self.cal_error(u, u_dnn1)

        self.cur_dnn = self.dnn2
        self.cur_dnn.eval()
        u_dnn2 = torch.view_as_real(self.yh(torch.linspace(-1, 1, 20481).to(device).view([-1, 1])).view([-1])).detach().cpu().numpy() + u_dnn1
        self.RE[2] = self.cal_error(u, u_dnn2)

        self.cur_dnn = self.dnn3
        self.cur_dnn.eval()
        u_dnn3 = torch.view_as_real(self.yh(torch.linspace(-1, 1, 20481).to(device).view([-1, 1]).to(device)).view([-1])).detach().cpu().numpy() + u_dnn2
        self.RE[3] = self.cal_error(u, u_dnn3)



        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file

            print("------------------------------------time-------------------------------------")
            print(self.time_list)
            print(np.sum(self.time_list[1:]))
            print("------------------------------------train_loss-------------------------------------")
            print(self.train_list)
            print("------------------------------------val_loss-------------------------------------")
            print(self.val_list)
            print("------------------------------------RE-------------------------------------")
            print(self.RE)
        return self.val_list[3], self.RE[3]


if __name__ == '__main__':
    # Hyper-parameter sweep driver for the multi-grade model defined above.
    best_u_loss = 100          # best relative error seen so far
    Case_ID = 2                # tag used in saved-model file names
    fin_batch_size = 0         # batch size of the best configuration
    fin_lr = -1                # weight decay (lr2) of the best configuration
    file_name = '../result/result_mgdl.txt'
    write_into_file = True     # when True, print() output is redirected to file_name

    # Epoch budget for each grade of the multi-grade model.
    num_epoch1 = 500
    num_epoch2 = 1000
    num_epoch3 = 2000

    def _residual_targets(model, imgs, targets, batch_size):
        """Return `targets` minus the model's current prediction over `imgs`.

        Predictions run in full batches of `batch_size` through model.forward
        (which requires exactly batch_size rows), plus one leftover partial
        batch through model.left_forward.  The result is the training target
        for the NEXT grade.  (Bug fix: the original duplicated this code and
        wrote the leftover VALIDATION predictions into the training buffer,
        corrupting both residual arrays.)
        """
        preds = np.zeros_like(targets)
        n = len(imgs)
        full = n // batch_size
        with torch.no_grad():
            for b in range(full):
                rows = slice(b * batch_size, (b + 1) * batch_size)
                preds[rows, :] = model.forward(
                    torch.Tensor(imgs[rows, :]).to(device)).detach().cpu().numpy()
            if n % batch_size != 0:
                # Leftover rows that do not fill a whole batch.
                preds[full * batch_size:, :] = model.left_forward(
                    torch.Tensor(imgs[full * batch_size:, :]).to(device)).detach().cpu().numpy()
        return targets - preds

    kappa_list = [200]
    # Number of train+validation samples grows linearly with the wavenumber.
    num_train_list = (np.array(kappa_list) * 12 + 1).tolist()
    batch_size_list = [128]
    lr2_list = [0]

    for i in range(len(num_train_list)):
        num_train_val = num_train_list[i]
        best_val_loss = 100
        kappa_val = kappa_list[i]
        for l in range(len(batch_size_list)):
            batch_size = batch_size_list[l]
            for r in range(len(lr2_list)):
                lr2 = lr2_list[r]
                with open(file_name, 'a') as file:
                    if write_into_file:
                        sys.stdout = file
                    print('*****************************************************')
                    print('num_train:%d, kappa:%d, batch_size:%d, lr2:%e' % (num_train_val, kappa_val, batch_size, lr2))

                # Up to 5 repeated trials for this configuration.
                for num in range(5):
                    # Read and prepare the pre-generated dataset for this case.
                    case_basename = 'simple_example_kappa_%d_N_%d.mat' % (kappa_val, num_train_val)
                    rev_filepath = os.path.join('../dataset', case_basename)
                    mat_file = io.loadmat(rev_filepath)

                    real_u = mat_file['real_y'].astype(np.float32)
                    real_f = mat_file['real_f'].astype(np.float32)
                    img_u = mat_file['img_y'].astype(np.float32)
                    img_f = mat_file['img_f'].astype(np.float32)
                    num_train = mat_file['num_train'][0, 0]
                    num_val = mat_file['num_val'][0, 0]
                    x_lim = mat_file['x_lim'].astype(np.float32)
                    norm_y = mat_file['norm_y']
                    kappa = mat_file['kappa']
                    K_coe = mat_file['K_coe']

                    # Real and imaginary parts stacked as two columns.
                    f = np.hstack([real_f, img_f])
                    u = np.hstack([real_u, img_u])

                    # Split the sample points into train / validation / test.
                    train_imgs = x_lim[0:num_train, :]
                    val_imgs = x_lim[num_train:num_train + num_val, :]
                    test_imgs = x_lim[num_val + num_train:, :]
                    train_targets = f[0:num_train, :]
                    val_targets = f[num_train:num_val + num_train, :]
                    test_targets = f[num_val + num_train:, :]

                    train_dataLoader = DataLoader(Mydataset(train_imgs, train_targets),
                                                  batch_size=batch_size, shuffle=True, drop_last=True)
                    val_dataLoader = DataLoader(Mydataset(val_imgs, val_targets),
                                                batch_size=batch_size, shuffle=True, drop_last=True)

                    # Layer widths of the deepest grade; each grade uses a prefix.
                    layers = [1, 256, 256, 128, 128, 64, 64, 32, 32, 2]
                    PINNs = PhysicsInformedNN(
                        train_imgs, layers, Case_ID, norm_y, kappa, K_coe, batch_size, num_epoch1,
                        num_epoch2, num_epoch3, lr2, file_name, write_into_file)

                    # Grade 1: fit the raw targets.
                    PINNs.train_dnn1(num_epoch1, train_dataLoader, val_dataLoader)

                    # Grade 2: fit the residual left by grade 1.
                    train2_targets = _residual_targets(PINNs, train_imgs, train_targets, batch_size)
                    val2_targets = _residual_targets(PINNs, val_imgs, val_targets, batch_size)
                    new_train_dataLoader = DataLoader(Mydataset(train_imgs, train2_targets),
                                                      batch_size=batch_size, shuffle=True, drop_last=True)
                    new_val_dataLoader = DataLoader(Mydataset(val_imgs, val2_targets),
                                                    batch_size=batch_size, shuffle=True, drop_last=True)
                    PINNs.train_dnn2(num_epoch2, new_train_dataLoader, new_val_dataLoader)

                    # Grade 3: fit the residual left by grade 2.
                    train3_targets = _residual_targets(PINNs, train_imgs, train2_targets, batch_size)
                    val3_targets = _residual_targets(PINNs, val_imgs, val2_targets, batch_size)
                    new_train_dataLoader = DataLoader(Mydataset(train_imgs, train3_targets),
                                                      batch_size=batch_size, shuffle=True, drop_last=True)
                    new_val_dataLoader = DataLoader(Mydataset(val_imgs, val3_targets),
                                                    batch_size=batch_size, shuffle=True, drop_last=True)
                    PINNs.train_dnn3(num_epoch3, new_train_dataLoader, new_val_dataLoader)

                    val_loss, u_loss = PINNs.eval_model(u)
                    if val_loss > 0.01:
                        # This configuration failed to converge; skip the remaining trials.
                        break
                    if val_loss < best_val_loss:
                        best_val_loss = val_loss
                        best_u_loss = u_loss
                        fin_lr = lr2
                        fin_batch_size = batch_size

                    PINNs.Writer.close()

        with open(file_name, 'a') as file:
            if write_into_file:
                sys.stdout = file
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('Fin_loss for kappa:%d, num_train:%d,  batch_size: %d, lr: %e, val_error: %e, RE: %e' % (kappa_val, num_train_val, fin_batch_size, fin_lr, best_val_loss, best_u_loss))
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('+++++++++++++++++++++++++++++++++++++++++++')
            # Bug fix: the original printed the literal string '/n' — the
            # intent was clearly the newline escape '\n'.
            print('\n')
            print('\n')





