# -*- coding: utf-8 -*-
# Jie Jiang

import os
from time import time
from collections import OrderedDict
from shutil import copyfile
import sys
import numpy as np
import scipy
import torch
from scipy import io
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter

# select GPU when available; all tensors/models below are moved to this device
device = 'cuda' if torch.cuda.is_available() else 'cpu'


# Enable GPU support if available and set the floating point precision for MG
# set_up_backend("torch", data_type="float32")

# define a self activation function
class Act_fun(nn.Module):
    """Sine activation module: applies ``torch.sin`` element-wise.

    Used instead of ReLU/tanh because the target solution is highly
    oscillatory; a sine activation represents such functions naturally.
    """

    def forward(self, x):
        return torch.sin(x)


def weight_init(m):
    """Initializer for ``nn.Module.apply``: Kaiming-normal weights and
    constant bias 1 on every ``nn.Linear``; all other modules untouched."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
    nn.init.constant_(m.bias, 1)


# the deep neural network
class DNN(torch.nn.Module):
    """Fully-connected network: ``Linear -> sin`` for every hidden layer,
    finished by a plain (un-activated) ``Linear`` output layer.

    Parameters
    ----------
    layers : sequence of int
        Layer widths, e.g. ``[1, 64, 64, 2]``; ``len(layers) - 1`` Linear
        layers are created.
    """

    def __init__(self, layers):
        super(DNN, self).__init__()

        # number of Linear layers
        self.depth = len(layers) - 1

        # activation classes (kept as attributes for compatibility; only
        # activation1 — the sine activation — is actually used)
        self.activation1 = Act_fun
        self.activation2 = torch.nn.ReLU

        # Fix: the original `if i < 1 / else` appended activation1() on BOTH
        # branches — the conditional was dead code. Build unconditionally.
        layer_list = list()
        for i in range(self.depth - 1):
            layer_list.append(
                ('layer_%d' % i, torch.nn.Linear(layers[i], layers[i + 1]))
            )
            layer_list.append(('activation_%d' % i, self.activation1()))

        # final output layer without activation
        layer_list.append(
            ('layer_%d_fin' % (self.depth - 1), torch.nn.Linear(layers[-2], layers[-1]))
        )
        layerDict = OrderedDict(layer_list)

        # deploy layers
        self.layers = torch.nn.Sequential(layerDict)

    def forward(self, x):
        """Apply the layer stack to ``x`` and return the raw output."""
        return self.layers(x)


class Mydataset(Dataset):
    """Minimal paired dataset: sample ``i`` is the tuple ``(T[i], V[i])``.

    ``T`` holds the inputs ("imgs") and ``V`` the matching targets; the two
    sequences are expected to have equal length.
    """

    def __init__(self, T, V):
        super(Mydataset, self).__init__()
        self.imgs = T
        self.targets = V

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        sample = self.imgs[index]
        label = self.targets[index]
        return sample, label


class PhysicsInformedNN:
    """Trains a DNN surrogate for a 1-D complex-valued integral equation.

    The network's two real output channels are reinterpreted as one complex
    value y(x). ``forward`` evaluates the residual
    ``y(x) - trapz( cos(x*(t+1)) * exp(1j*kappa*|x-t|) * y(t) dt )`` on
    quadrature nodes in [-1, 1] and the residual is fitted (MSE) against the
    data targets. Training/validation losses, timings and final errors are
    tracked in list attributes and written to a log file.

    NOTE(review): several epoch/lr parameters (num_epoch1..3, lr2, norm_sol,
    grid) are stored or ignored here — presumably used by a multi-grade
    variant elsewhere; verify before removing.
    """

    def __init__(self, grid, layers, case_ID: int, norm_sol, true_kappa, batch_size, num_epoch, num_epoch1,
                 num_epoch2, num_epoch3, lr1, lr2, File_name, Write_into_file, init_rate, num_test):
        # data
        self.kappa = torch.Tensor(true_kappa / 1.0).to(device)  # kernel wavenumber, as a tensor on `device`
        self.norm_sol = norm_sol
        self.case_ID = case_ID
        self.file_name = File_name  # path of the text log file
        self.Write_into_file = Write_into_file  # when True, prints are redirected into the log file
        # number of quadrature nodes on [-1, 1]; presumably ~8 nodes per unit
        # of kappa to resolve the oscillatory kernel — TODO confirm
        self.point_num = 8 * int(true_kappa) + 1
        self.num_test = num_test  # number of evaluation points used in eval_model

        ## using in the forward
        # quadrature nodes: one row per batch sample (batch_size, point_num)
        self.help1 = torch.linspace(-1, 1, self.point_num).repeat([batch_size, 1]).to(device)
        # the same nodes as a column vector (point_num, 1) — network input for y(t)
        self.help2 = torch.linspace(-1, 1, self.point_num).view([-1, 1]).to(device)
        # x, y = torch.meshgrid(self.grid.reshape(-1), self.grid.reshape([-1]))
        # grid2 = torch.stack((x, y), dim=-1)
        # self.K_val = self.K(grid2).detach()
        # per-stage bookkeeping (5 slots; only slot 0 is filled by this class)
        self.time_list = [1., 1., 1., 1., 1.]   # wall-clock training time per stage
        self.train_list = [1., 1., 1., 1., 1.]  # last training loss per stage
        self.val_list = [1., 1., 1., 1., 1.]    # last validation loss per stage
        self.u_loss = [1., 1., 1., 1., 1.]      # relative error of the learned solution per stage
        # self.error_single = np.zeros([70, 3])
        # self.error_multi = np.zeros([200, 3])

        self.Writer = SummaryWriter('../logs')

        # deep learning
        self.dnn = DNN(layers)
        self.dnn.apply(weight_init)

        # `lr1` is used as the Adam weight-decay coefficient here
        self.opt = torch.optim.Adam(self.dnn.parameters(), lr=init_rate, weight_decay=lr1)
        # geometric decay chosen so the lr reaches ~1e-7 after num_epoch epochs
        self.lr = torch.optim.lr_scheduler.ExponentialLR(self.opt, gamma=(1e-7 / init_rate) ** (1 / num_epoch))
        # self.lr = torch.optim.lr_scheduler.ReduceLROnPlateau(self.opt,  mode='min', factor=0.5, patience=2, min_lr=1e-9)
        # aliases for the currently-trained model/optimizer/scheduler
        self.cur_dnn = self.dnn
        self.cur_opt = self.opt
        self.cur_lr = self.lr

        self.loss_fn = torch.nn.MSELoss()

        self.time = []             # elapsed training time at each reporting epoch
        self.time_val_loss = []    # validation loss at each reporting epoch
        self.time_train_loss = []  # training loss at each reporting epoch
        self.index = 0

    def yh(self, imgs):
        """Evaluate the network as a complex-valued function.

        The DNN's two real output channels are reinterpreted as one complex
        number per input point; returns a column tensor of shape (N, 1).
        """
        # f = self.cur_dnn(imgs)
        # return torch.reshape(f[:, 0]*torch.exp(1j*f[:, 1]), [-1, 1])
        return torch.view_as_complex(self.cur_dnn(imgs)).view([-1, 1])
        # return imgs*torch.exp(1j*300*imgs)

    def forward(self, imgs):
        """Residual of the integral equation at the batch points ``imgs``.

        Computes y(x) minus the trapezoidal-rule approximation of
        ``∫ cos(x*(t+1)) * exp(1j*kappa*|x-t|) * y(t) dt`` over t in [-1, 1],
        and returns the complex residual as real pairs, shape (N, 2).
        """
        temp1 = self.yh(imgs)
        # oscillatory factor exp(1j*kappa*|x - t|) on the node grid
        temp2 = self.kappa * torch.abs(imgs.repeat([1, self.point_num]) - self.help1)
        temp2 = torch.exp(1j * temp2)
        # smooth kernel part cos(x*(t+1))
        K = torch.cos(imgs * (self.help1 + 1))
        # y(t) at the quadrature nodes, broadcast to every batch row
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        temp2 = torch.trapz(y=K * temp2 * cha, dx=2 / (self.point_num - 1))
        return torch.view_as_real(temp1.view([-1]) - temp2)

    def cal_error(self, target, pred):
        """Relative L2 error with trapezoid weighting (end rows half weight).

        Row sums of squared differences are combined as
        ``2*sum - first - last``, i.e. the composite trapezoidal rule, for
        both numerator and denominator.
        """
        diff = target - pred
        temp = np.sum(diff ** 2, 1)
        temp1 = np.sum(target ** 2, 1)
        return np.sqrt((2 * np.sum(temp) - temp[0] - temp[-1]) / (2 * np.sum(temp1) - temp1[0] - temp1[-1]))

    def train_dnn(self, nEpochs, mydataloader, val_dataloader):
        """Train the DNN for ``nEpochs`` epochs on the residual MSE loss.

        Validation is run at ~10 evenly spaced checkpoint epochs; losses and
        elapsed times are appended to self.time / self.time_*_loss.  Side
        effect: when ``Write_into_file`` is True, ``sys.stdout`` is rebound
        to the (closed-on-exit) log file. The model state dict is saved to
        ../models/ at the end.
        """
        start_time = time()
        self.cur_dnn = self.dnn.to(device)
        self.cur_opt = self.opt
        self.cur_lr = self.lr
        self.cur_dnn.train()
        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file
            print('---------------------------- learn dnn ------------------------------')
            print(self.cur_dnn)

            # print all trainable parameter names
            print('可训练参数')
            for name, param in self.cur_dnn.named_parameters():
                if param.requires_grad:
                    print(name)
        iter = 0
        # spacing between validation/reporting epochs (~10 checkpoints)
        step = nEpochs // 10

        # epochs at which to validate and report
        show_epoch = np.concatenate([np.array([0]), np.arange(step - 1, nEpochs + 1, step)])

        for epoch in range(nEpochs):
            for img, target in mydataloader:
                iter += 1
                img = img.to(device)
                target = target.to(device)
                output = self.forward(img)
                # output2 = self.os_int_forward(img)
                # print(torch.norm(output2-output)/torch.norm(output))
                loss = self.loss_fn(target, output)

                self.cur_opt.zero_grad()
                loss.backward()
                self.cur_opt.step()
            if epoch in show_epoch:
                now_time = time()
                total_val_loss = 0
                with torch.no_grad():
                    for img, target in val_dataloader:
                        img = img.to(device)
                        target = target.to(device)
                        output = self.forward(img)
                        val_loss = self.loss_fn(target, output)
                        total_val_loss += val_loss.item()
                with open(self.file_name, 'a') as file:
                    if self.Write_into_file == True:
                        sys.stdout = file
                    print("The epoch: %d, the iter: %d, the training loss: %e, the validation loss: %e, the lr: %e:" %
                          (epoch, iter, loss.item(), total_val_loss / len(val_dataloader),
                           self.cur_opt.param_groups[0]['lr']))
                # exclude validation/reporting time from the training clock
                start_time += time() - now_time
                self.time.append(time() - start_time)
                self.time_val_loss.append(total_val_loss / len(val_dataloader))
                self.time_train_loss.append(loss.item())
                self.train_list[0] = loss.item()
                self.val_list[0] = total_val_loss / len(val_dataloader)
                # self.Writer.add_scalar('dnn_train_loss', loss.item(), iter)
                # self.Writer.add_scalar('dnn_val_loss', total_val_loss / len(val_dataloader), iter)
            self.cur_lr.step()

        torch.save(self.cur_dnn.state_dict(), '../models/case%d_dnn.pth' % self.case_ID)
        end_time = time()
        self.time_list[0] = end_time - start_time

    def eval_model(self, u, repeated):
        """Evaluate the trained network against the reference solution ``u``.

        Samples the learned y on num_test uniform points of [-1, 1], computes
        the relative error via cal_error, prints the accumulated summaries to
        the log file and returns (val_loss, u_loss, train_loss) of stage 0.
        ``repeated`` is currently unused — presumably the repeat index of the
        run; verify against callers.
        """
        self.cur_dnn = self.dnn
        self.cur_dnn.eval()
        # f_dnn = self.forward(self.grid).detach().cpu().numpy()
        u_dnn = torch.view_as_real(
            self.yh(torch.linspace(-1, 1, self.num_test).to(device).view([-1, 1]).to(device)).view(
                [-1])).detach().cpu().numpy()
        self.u_loss[0] = self.cal_error(u, u_dnn)

        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file

            print("------------------------------------time-------------------------------------")
            print(self.time_list)
            print(np.sum(self.time_list[1:]))
            print("------------------------------------train_loss-------------------------------------")
            print(self.train_list)
            print("------------------------------------val_loss-------------------------------------")
            print(self.val_list)
            # print("------------------------------------test_loss-------------------------------------")
            # print(self.test_loss)
            print("------------------------------------u_loss-------------------------------------")
            print(self.u_loss)
        return self.val_list[0], self.u_loss[0], self.train_list[0]


if __name__ == '__main__':
    # --------------------- experiment configuration ---------------------
    Case_ID = 177
    kappa_val = 500
    fin_batch_size = 0
    fin_lr = -1
    file_name = '../result/sgl/no_singular_inc.txt'
    write_into_file = True  # when True, stdout is redirected into file_name

    # train single grade
    num_epoch = 4000

    # multi-grade epoch counts / lr (placeholders in this single-grade run)
    num_epoch1 = 1
    num_epoch2 = 1
    num_epoch3 = 1
    lr2 = 1

    # hyper-parameter grids swept below
    batch_size_list = [128, 256, 512]
    lr1_list = [0]  # weight-decay values
    layer_list = [10, 9, 8, 7, 6, 5, 4, 3, 2]  # prefix lengths of before_layers
    # lr1_list = [0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
    init_rate_list = [1e-2, 1e-1, 1e-3, 1e-4]  # initial learning rates

    # train multi grade
    # num_epoch = 1
    #
    # num_epoch1 = 500
    # num_epoch2 = 1000
    # num_epoch3 = 2000
    # lr1 = 0
    # lr2_list = [0, 1e-6]

    before_layers = [1, 300, 300, 400, 400, 500, 500, 600, 600, 700, 700, 800, 800, 2]
    noise_list = [0]  # all data is noise-free

    for idx in layer_list:
        # truncate the master width list to the chosen depth; the trailing
        # 2-unit layer yields (real, imag) of the complex-valued solution
        layers = before_layers[0:idx] + [2]

        # best scores and winning hyper-parameters for this depth
        best_u_loss = 100
        best_val_loss = 100
        fin_init_rate = 0
        fin_batch_size = 0
        fin_lr = -1
        for batch_size in batch_size_list:
            for i in range(len(noise_list)):
                noise_level = noise_list[i]
                for zz in range(len(init_rate_list)):
                    init_rate = init_rate_list[zz]
                    for r in range(len(lr1_list)):
                        lr1 = lr1_list[r]
                        with open(file_name, 'a') as file:
                            if write_into_file == True:
                                sys.stdout = file
                            print('*****************************************************')
                            print('noise_level: %e, init_rate:%e, kappa:%d, batch_size:%d, lr1:%e' % (noise_level,
                                                                                                      init_rate,
                                                                                                      kappa_val,
                                                                                                      batch_size, lr1))

                        for num in range(2):  # two repeated runs per configuration
                            ## read and prepare data
                            case_basename = 'exp1.mat'
                            rev_filepath = os.path.join('../dataset', case_basename)

                            # copyfile(filepath, rev_filepath)
                            mat_file = io.loadmat(rev_filepath)

                            real_u = mat_file['real_y'].astype(np.float32)
                            real_f = mat_file['real_f'].astype(np.float32)
                            img_u = mat_file['img_y'].astype(np.float32)
                            img_f = mat_file['img_f'].astype(np.float32)
                            num_train = mat_file['num_train'][0, 0]
                            num_val = mat_file['num_val'][0, 0]
                            num_test = mat_file['num_test'][0, 0]
                            x_lim = mat_file['x_lim'].astype(np.float32)
                            norm_y = mat_file['norm_y']
                            kappa = mat_file['kappa']
                            with open(file_name, 'a') as file:
                                if write_into_file == True:
                                    sys.stdout = file

                            # column 0 = real part, column 1 = imaginary part
                            f = np.hstack([real_f, img_f])
                            u = np.hstack([real_u, img_u])

                            # split inputs/targets into train / validation slices
                            train_imgs = x_lim[0: num_train, :]
                            val_imgs = x_lim[num_train: num_train + num_val, :]
                            # test_imgs = x_lim[num_val + num_train:, :]
                            train_targets = f[0:num_train, :]
                            val_targets = f[num_train:num_val + num_train, :]
                            # test_targets = f[num_val + num_train:, :]

                            train_data = Mydataset(train_imgs, train_targets)
                            train_dataLoader = DataLoader(train_data, batch_size=batch_size, shuffle=True,
                                                          drop_last=True)
                            val_data = Mydataset(val_imgs, val_targets)
                            val_dataLoader = DataLoader(val_data, batch_size=batch_size, shuffle=True, drop_last=True)
                            # test_data = Mydataset(test_imgs, test_targets)
                            # test_dataLoader = DataLoader(test_data, batch_size=batch_size, shuffle=True, drop_last=True)

                            ## train model
                            # layers = [1, 256, 256, 2]
                            # layers = [1, 512, 512, 256, 256, 128, 128,  2]
                            # layers = [1, 256, 256,  128, 128, 64, 64, 32, 32,  2]

                            PINNs = PhysicsInformedNN(
                                train_imgs, layers, Case_ID, norm_y, kappa, batch_size, num_epoch, num_epoch1,
                                num_epoch2, num_epoch3, lr1, lr2, file_name, write_into_file, init_rate, 20001)
                            beg_time = time()
                            PINNs.train_dnn(num_epoch, train_dataLoader, val_dataLoader)
                            end_time = time()
                            with open(file_name, 'a') as file:
                                if write_into_file == True:
                                    sys.stdout = file
                                print('+++++++++++++++++++++++++++++++++++++++++++')
                                print(f"执行耗时: {end_time - beg_time:.4f} 秒")
                                print('+++++++++++++++++++++++++++++++++++++++++++')

                            val_loss, u_loss, train_loss = PINNs.eval_model(u, num)
                            if val_loss < best_val_loss:
                                # record the best configuration seen so far
                                fin_init_rate = init_rate
                                best_val_loss = val_loss
                                best_u_loss = u_loss
                                fin_lr = lr1
                                fin_batch_size = batch_size
                            if val_loss > best_val_loss * 100 or val_loss > 100 * train_loss:
                                # diverged (or badly over-fitting) run: skip the second repeat
                                break

                            # persist the loss-vs-time curves for later plotting
                            scipy.io.savemat('../tinumme.mat', {'time': PINNs.time, 'val': PINNs.time_val_loss,
                                                                'test': PINNs.time_train_loss})

                            x = np.array(PINNs.time)

                            PINNs.Writer.close()
        end_time = time()
        with open(file_name, 'a') as file:
            if write_into_file == True:
                sys.stdout = file
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('Fin_loss for noise level:%e, init_rate:%e,  batch_size: %d, lr: %e, val_error: %e, u_error: %e' % (
                noise_level, fin_init_rate, fin_batch_size, fin_lr, best_val_loss, best_u_loss))
            print('+++++++++++++++++++++++++++++++++++++++++++')
            print('+++++++++++++++++++++++++++++++++++++++++++')
            # fix: the original used '/n', which printed the literal text "/n";
            # '\n' emits the intended blank separator lines
            print('\n')
            print('\n')





