# -*- coding: utf-8 -*-
# Jie Jiang

import os
from time import time
from collections import OrderedDict
from shutil import copyfile
import sys
import numpy as np
import scipy
import torch
from matplotlib import pyplot as plt
from scipy import io
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter

device = 'cuda' if torch.cuda.is_available() else 'cpu'


# Enable GPU support if available and set the floating point precision for MG
# set_up_backend("torch", data_type="float32")

# define a self activation function
# Sine activation wrapped as a module so it can sit inside nn.Sequential.
class Act_fun(nn.Module):
    def __init__(self):
        super(Act_fun, self).__init__()

    def forward(self, x):
        """Apply the element-wise sine non-linearity."""
        return torch.sin(x)


def weight_init(m):
    """Kaiming-normal (fan-in) init for Linear weights; biases are set to 1."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
    nn.init.constant_(m.bias, 1)


# the deep neural network
class DNN(torch.nn.Module):
    """Fully-connected network: Linear + sine activation on every hidden
    layer, and a plain Linear output layer.

    Args:
        layers: list of layer widths, e.g. [1, 256, ..., 2]; the network
            contains len(layers) - 1 Linear layers.
    """

    def __init__(self, layers):
        super(DNN, self).__init__()

        # number of Linear layers
        self.depth = len(layers) - 1

        # set up layer order dict
        self.activation1 = Act_fun
        self.activation2 = torch.nn.ReLU  # kept for interface compatibility; unused below
        layer_list = list()
        for i in range(self.depth - 1):
            layer_list.append(
                ('layer_%d' % i, torch.nn.Linear(layers[i], layers[i + 1]))
            )
            # Bug fix (dead code): the original if/else selected the SAME
            # activation in both branches, so every hidden layer simply gets
            # the sine activation.
            layer_list.append(('activation_%d' % i, self.activation1()))

        # final layer has no activation
        layer_list.append(
            ('layer_%d_fin' % (self.depth - 1), torch.nn.Linear(layers[-2], layers[-1]))
        )
        layerDict = OrderedDict(layer_list)

        # deploy layers
        self.layers = torch.nn.Sequential(layerDict)

    def forward(self, x):
        """Run x through the stacked layers."""
        return self.layers(x)


class Mydataset(Dataset):
    """Minimal Dataset pairing inputs (``imgs``) with targets, index-aligned."""

    def __init__(self, T, V):
        super(Mydataset, self).__init__()
        self.imgs = T
        self.targets = V

    def __getitem__(self, index):
        sample = self.imgs[index]
        label = self.targets[index]
        return sample, label

    def __len__(self):
        return len(self.imgs)


class PhysicsInformedNN:
    """Trainer for a DNN surrogate of a 1-D integral equation on [-1, 1].

    The network maps x -> (Re y, Im y); ``forward`` evaluates the residual
        y(x) - K_coe * trapz( exp(i*kappa*|x - s|) * y(s), s in [-1, 1] )
    with the trapezoidal rule, and training fits that residual to the
    supplied right-hand side f via MSE.
    """

    def __init__(self, grid, layers, case_ID: int, norm_sol, true_kappa, true_K_coe, batch_size, num_epoch,  lr1,  File_name, Write_into_file):
        # data
        self.true_K_coe = torch.Tensor(true_K_coe).to(device)  # kernel coefficient; only entry [0, 0] is used
        self.kappa = torch.Tensor(true_kappa/1.0).to(device)   # presumably the wavenumber -- TODO confirm; /1.0 forces float
        self.norm_sol = norm_sol        # reference-solution norm, denominator in cal_error
        self.case_ID = case_ID          # used to name the saved model checkpoint
        self.file_name = File_name      # log file; stdout is redirected here when Write_into_file
        self.Write_into_file = Write_into_file
        self.grid = torch.Tensor(grid).to(device)
        # quadrature resolution on [-1, 1] scales with kappa (6 nodes per unit kappa)
        self.point_num = 6 * int(true_kappa) +1

        ## fixed tensors reused in the forward pass
        # quadrature nodes, one row per batch element (forward assumes full batches)
        self.help1 = torch.linspace(-1, 1, self.point_num).repeat([batch_size, 1]).to(device)
        # quadrature nodes as a column vector (network input)
        self.help2 = torch.linspace(-1, 1, self.point_num).view([-1, 1]).to(device)
        # only slot 0 of each list is filled by this single-grade run; the
        # remaining slots look like placeholders for other grades -- TODO confirm
        self.time_list = [1., 1., 1., 1.]
        self.train_list = [1., 1., 1., 1.]
        self.val_list = [1., 1., 1., 1.]
        self.test_loss = [1., 1., 1., 1.]
        self.RE = [1., 1., 1., 1.]
        self.RE2 = [1., 1., 1., 1.]

        self.Writer = SummaryWriter('../logs')

        # deep learning
        self.dnn = DNN(layers)
        self.dnn.apply(weight_init)

        # NOTE: lr1 is the weight decay; the learning rate itself is fixed at 1e-2
        self.opt = torch.optim.Adam(self.dnn.parameters(), lr=1e-2, weight_decay=lr1)
        # decay the lr by a total factor of 1e-5 over num_epoch epochs
        self.lr = torch.optim.lr_scheduler.ExponentialLR(self.opt, gamma=1e-5 ** (1/num_epoch))
        self.cur_dnn = self.dnn
        self.cur_opt = self.opt
        self.cur_lr = self.lr

        self.loss_fn = torch.nn.MSELoss()

        # wall-clock / loss traces recorded during training
        self.time = []
        self.time_val_loss = []
        self.time_train_loss = []
        self.index = 0

    def yh(self, imgs):
        """Network prediction as a complex column vector (last-dim pair -> complex)."""
        return torch.view_as_complex(self.cur_dnn(imgs)).view([-1, 1])

    def forward(self, imgs):
        """Residual of the integral equation at the batch points ``imgs``.

        Returns a real (Re, Im) view of
        y(imgs) - K * trapz(exp(i*kappa*|x - s|) * y(s) ds).
        Requires len(imgs) == batch_size (uses the precomputed ``help1``).
        """
        temp1 = self.yh(imgs)
        # kappa * |x_i - s_j| for every batch point x_i and quadrature node s_j
        temp2 = self.kappa*torch.abs(imgs.repeat([1, self.point_num])-self.help1)
        temp2 = torch.exp(1j*temp2)
        # y(s_j) broadcast across the batch
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        temp2 = self.true_K_coe[0, 0] * torch.trapz(y=temp2*cha, dx=2/(self.point_num-1))
        return torch.view_as_real(temp1.view([-1]) - temp2)

    def left_forward(self, imgs):
        """Same residual as ``forward`` but for an arbitrary batch size
        (the node grid is rebuilt per call instead of using ``help1``)."""
        temp1 = self.yh(imgs)
        row = temp1.shape[0]
        temp2 = self.kappa*torch.abs(imgs.repeat([1, self.point_num])-torch.linspace(-1, 1, self.point_num).repeat([row, 1]).to(device))
        temp2 = torch.exp(1j*temp2)
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        # Bug fix: the spacing previously read 2/(self.ex*self.point_num-1),
        # but ``self.ex`` is never defined (AttributeError at first call) and
        # forward() uses 2/(point_num-1) for the identical quadrature grid.
        temp2 = self.true_K_coe[0, 0] * torch.trapz(y=temp2*cha, dx=2/(self.point_num-1))
        return torch.view_as_real(temp1.view([-1]) - temp2)

    def cal_error2(self, target, pred):
        """Relative L2 error with trapezoid endpoint weighting (rows = grid points)."""
        diff = target - pred
        temp = np.sum(diff ** 2, 1)
        temp2 = np.sum(target ** 2, 1)
        # Bug fix: the denominator's endpoint correction previously subtracted
        # temp[0]/temp[-1] (the NUMERATOR's endpoints) instead of temp2's own.
        return np.sqrt((2*np.sum(temp)-temp[0]-temp[-1]))/np.sqrt((2*np.sum(temp2)-temp2[0]-temp2[-1]))

    def cal_error(self, target, pred):
        """Trapezoid-weighted L2 error on the fixed 20481-point evaluation grid,
        normalised by the reference-solution norm."""
        diff = target - pred
        temp = np.sum(diff ** 2, 1)
        return np.sqrt((2*np.sum(temp)-temp[0]-temp[-1])/20481)/self.norm_sol

    def train_dnn(self, nEpochs, mydataloader, val_dataloader):
        """Adam training loop with periodic validation; saves the model at the end.

        Validation/logging time is subtracted from the recorded wall clock.
        """
        start_time = time()
        self.cur_dnn = self.dnn.to(device)
        self.cur_opt = self.opt
        self.cur_lr = self.lr
        self.cur_dnn.train()
        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file
            print('---------------------------- learn dnn ------------------------------')
        n_iter = 0  # renamed from ``iter`` to avoid shadowing the builtin
        show_gap = int(nEpochs / 8)  # report roughly 8 times over the run
        init_epoch = 1
        for epoch in range(nEpochs):
            for img, target in mydataloader:
                n_iter += 1
                img = img.to(device)
                target = target.to(device)
                output = self.forward(img)
                loss = self.loss_fn(target, output)

                self.cur_opt.zero_grad()
                loss.backward()
                self.cur_opt.step()
                # validate on the first batch of each reporting epoch
                if (epoch+1) == init_epoch:
                    init_epoch += show_gap
                    now_time = time()
                    total_val_loss = 0
                    with torch.no_grad():
                        for img, target in val_dataloader:
                            img = img.to(device)
                            target = target.to(device)
                            output = self.forward(img)
                            val_loss = self.loss_fn(target, output)
                            total_val_loss += val_loss.item()
                    with open(self.file_name, 'a') as file:
                        if self.Write_into_file == True:
                            sys.stdout = file
                        print("The epoch: %d, the iter: %d, the training loss: %e, the validation loss: %e, the lr: %e:" %
                          (epoch, n_iter, loss.item(), total_val_loss / len(val_dataloader),
                          self.cur_opt.param_groups[0]['lr']))
                    # shift start_time forward so validation/logging time is excluded
                    start_time += time()-now_time
                    self.time.append(time()-start_time)
                    self.time_val_loss.append(total_val_loss / len(val_dataloader))
                    self.time_train_loss.append(loss.item())
                    self.train_list[0] = loss.item()
                    self.val_list[0] = total_val_loss / len(val_dataloader)
            self.cur_lr.step()

        torch.save(self.cur_dnn.state_dict(), '../models/case%d_dnn.pth' % self.case_ID)
        end_time = time()
        self.time_list[0] = end_time-start_time

    def eval_model(self, u):
        """Evaluate the trained network on a uniform 20481-point grid.

        Returns:
            (last recorded validation loss, relative error RE[0]).
        """
        self.cur_dnn = self.dnn
        self.cur_dnn.eval()
        u_dnn = torch.view_as_real(self.yh(torch.linspace(-1, 1, 20481).view([-1, 1]).to(device)).view([-1])).detach().cpu().numpy()
        self.RE[0] = self.cal_error(u, u_dnn)

        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file

            print("------------------------------------time-------------------------------------")
            print(self.time_list)
            print(np.sum(self.time_list[1:]))
            print("------------------------------------train_loss-------------------------------------")
            print(self.train_list)
            print("------------------------------------val_loss-------------------------------------")
            print(self.val_list)
            print("------------------------------------RE-------------------------------------")
            print(self.RE)

        return self.val_list[0], self.RE[0]


if __name__ == '__main__':
    # Hyper-parameter sweep driver for the single-grade training run.
    best_u_loss = 100
    Case_ID = 1
    fin_batch_size = 0
    fin_lr = -1
    file_name = '../result/sgl_result.txt'
    write_into_file = True

    # train single grade
    num_epoch = 3500
    # batch_size_list = [64, 128, 256]
    # lr1_list = [0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
    batch_size_list = [256]
    lr1_list = [1e-4]

    # kappa_list = [100, 150, 200, 250, 300, 350, 400]
    kappa_list = [200]

    for kappa_val in kappa_list:
        num_train_val = 12 * kappa_val + 1
        best_val_loss = 100
        # NOTE(review): fin_batch_size / fin_lr / best_u_loss are not reset per
        # kappa, so with several kappas the summary can carry stale values.
        for batch_size in batch_size_list:
            for lr1 in lr1_list:
                with open(file_name, 'a') as file:
                    # NOTE(review): sys.stdout stays bound to the (closed) file
                    # after this with-block; kept as-is to match the logging
                    # scheme used throughout the file.
                    if write_into_file == True:
                        sys.stdout = file
                    print('*****************************************************')
                    print('num_train:%d, kappa:%d, batch_size:%d, lr1:%e' % (num_train_val, kappa_val, batch_size, lr1))

                # repeat each configuration 5 times
                for num in range(5):
                    ## read and prepare data
                    case_basename = 'simple_example_kappa_%d_N_%d.mat' % (kappa_val, num_train_val)
                    rev_filepath = os.path.join('../dataset', case_basename)

                    mat_file = io.loadmat(rev_filepath)

                    real_u = mat_file['real_y'].astype(np.float32)
                    real_f = mat_file['real_f'].astype(np.float32)
                    img_u = mat_file['img_y'].astype(np.float32)
                    img_f = mat_file['img_f'].astype(np.float32)
                    num_train = mat_file['num_train'][0, 0]
                    num_val = mat_file['num_val'][0, 0]
                    x_lim = mat_file['x_lim'].astype(np.float32)
                    norm_y = mat_file['norm_y']
                    kappa = mat_file['kappa']
                    K_coe = mat_file['K_coe']

                    # pack real/imaginary parts side by side: col 0 = Re, col 1 = Im
                    f = np.hstack([real_f, img_f])
                    u = np.hstack([real_u, img_u])

                    # first num_train rows train, next num_val rows validate
                    train_imgs = x_lim[0: num_train, :]
                    val_imgs = x_lim[num_train: num_train + num_val, :]
                    train_targets = f[0:num_train, :]
                    val_targets = f[num_train:num_val + num_train, :]
                    train_data = Mydataset(train_imgs, train_targets)
                    train_dataLoader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
                    val_data = Mydataset(val_imgs, val_targets)
                    val_dataLoader = DataLoader(val_data, batch_size=batch_size, shuffle=True, drop_last=True)

                    ## train model
                    layers = [1, 256, 256, 128, 128, 64, 64, 32, 32, 2]
                    PINNs = PhysicsInformedNN(
                        train_imgs, layers, Case_ID, norm_y, kappa, K_coe, batch_size, num_epoch, lr1, file_name, write_into_file)

                    PINNs.train_dnn(num_epoch, train_dataLoader, val_dataLoader)

                    val_loss, u_loss = PINNs.eval_model(u)
                    if val_loss > 1:
                        # diverged run: skip the remaining repeats for this config
                        break
                    if val_loss < best_val_loss:
                        best_val_loss = val_loss
                        best_u_loss = u_loss
                        fin_lr = lr1
                        fin_batch_size = batch_size

                    PINNs.Writer.close()
        with open(file_name, 'a') as file:
            if write_into_file == True:
                sys.stdout = file
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('Fin_loss for kappa:%d, num_train:%d,  batch_size: %d, lr: %e, val_error: %e, RE: %e' % (kappa_val, num_train_val, fin_batch_size, fin_lr, best_val_loss, best_u_loss))
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('+++++++++++++++++++++++++++++++++++++++++++')
            # Bug fix: '/n' was a typo for the newline escape '\n'
            print('\n')
            print('\n')





