# -*- coding: utf-8 -*-
# Jie Jiang


import os
from time import time
from collections import OrderedDict
from shutil import copyfile
import sys
import numpy as np
import scipy
import torch
from matplotlib import pyplot as plt
from scipy import io
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter

device = 'cuda' if torch.cuda.is_available() else 'cpu'


# Enable GPU support if available and set the floating point precision for MG
# set_up_backend("torch", data_type="float32")

# define a self activation function
class Act_fun(nn.Module):
    """Element-wise sine activation: ``forward(x) == sin(x)``.

    Used instead of ReLU so the network can represent the oscillatory
    solutions this script targets.
    """

    def __init__(self):
        super(Act_fun, self).__init__()

    def forward(self, x):
        return torch.sin(x)


def weight_init(m):
    """Initializer callback for ``nn.Module.apply``.

    Linear layers get Kaiming-normal (fan-in) weights and all-ones biases;
    every other module type is left untouched.
    """
    if not isinstance(m, nn.Linear):
        return
    nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
    nn.init.constant_(m.bias, 1)


# the deep neural network
# the deep neural network
class DNN(torch.nn.Module):
    """Fully connected network with sine activations for one grade of
    multi-grade learning.

    Parameters
    ----------
    layers : sequence of int
        Widths of all layers, input through output; ``len(layers) - 1``
        Linear layers are created.
    init_rate : float
        Initial Adam learning rate.
    lr : float
        Adam ``weight_decay`` coefficient (despite the name).
    num_epoch : int
        Planned epoch count; the exponential LR schedule is sized so the
        rate decays from ``init_rate`` to 1e-7 over this many steps.
    l : int
        Grade index; the first ``2 * length_mod * l`` parameter tensors
        (weights and biases inherited from earlier grades) are frozen.
    length_mod : int
        Number of layers added per grade.
    """

    def __init__(self, layers, init_rate, lr, num_epoch, l, length_mod):
        super(DNN, self).__init__()

        # number of Linear layers
        self.depth = len(layers) - 1

        # set up layer order dict
        self.activation1 = Act_fun
        self.activation2 = torch.nn.ReLU  # kept for interface compatibility; unused below
        layer_list = list()
        for i in range(self.depth - 1):
            layer_list.append(
                ('layer_%d' % i, torch.nn.Linear(layers[i], layers[i + 1]))
            )
            # Original code had `if i < 1: ... else: ...` with identical
            # branches; every hidden layer gets the sine activation.
            layer_list.append(('activation_%d' % i, self.activation1()))

        # final layer has no activation
        layer_list.append(
            ('layer_%d_fin' % (self.depth - 1), torch.nn.Linear(layers[-2], layers[-1]))
        )
        layerDict = OrderedDict(layer_list)

        # deploy layers
        self.layers = torch.nn.Sequential(layerDict)

        # Freeze the first 2 * length_mod * l parameter tensors (one weight
        # and one bias per inherited layer) so only the new grade trains.
        i = -2 * length_mod * l
        for p in self.parameters():
            i += 1
            if i <= 0:
                p.requires_grad = False
        self.opt = torch.optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=init_rate,
                                    weight_decay=lr)
        # Exponential decay from init_rate down to ~1e-7 after num_epoch steps.
        self.lr = torch.optim.lr_scheduler.ExponentialLR(self.opt, gamma=(1e-7 / init_rate) ** (1 / num_epoch))

        self.apply(weight_init)

    def forward(self, x):
        out1 = self.layers(x)
        return out1


class NetworkSequence:
    """A growable, indexable collection of networks (one per grade)."""

    def __init__(self, networks):
        # Backing list is shared with the caller on purpose.
        self.networks = networks

    def add_network(self, network):
        """Append one more network to the sequence."""
        self.networks.append(network)

    def __getitem__(self, index):
        return self.networks[index]

    def __iter__(self):
        return iter(self.networks)


class Mydataset(Dataset):
    """Minimal map-style dataset pairing inputs ``T`` with targets ``V``."""

    def __init__(self, T, V):
        super(Mydataset, self).__init__()
        self.imgs = T
        self.targets = V

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        return self.imgs[index], self.targets[index]


class PhysicsInformedNN:
    """Driver for multi-grade training of DNNs on a 1-D integral equation.

    Holds a sequence of grade networks (``dnn_sequence``), trains them one
    grade at a time on the residual targets prepared by the caller, and
    accumulates per-grade timing/loss statistics.  ``forward`` evaluates the
    residual ``u(x) - trapz(K * exp(i*kappa*|x-t|) * u(t) dt)`` via the
    trapezoid rule on ``point_num`` nodes in [-1, 1].
    """

    def __init__(self, layers, case_ID: int, norm_sol, batch_size, init_num_epoch, lr2, File_name, Write_into_file,
                 init_rate, max_L, true_kappa, length_mod, num_test):
        # data
        # wavenumber of the kernel exp(i*kappa*|x-t|)
        self.kappa = torch.Tensor(true_kappa / 1.0).to(device)
        self.norm_sol = norm_sol
        self.case_ID = case_ID
        self.file_name = File_name
        self.Write_into_file = Write_into_file
        # quadrature-grid multiplier used only by left_forward (leftover batch)
        self.ex = 1
        self.num_test = num_test
        self.max_L = max_L
        self.cur_L = 0

        # per-grade statistics, one slot per grade
        self.time_list = np.ones((max_L))
        self.train_list = np.ones((max_L))
        self.val_list = np.ones((max_L))
        # self.test_loss = np.ones(1, max_L)
        self.u_loss = np.ones((max_L))
        # quadrature nodes per period-ish resolution: 8 per unit of kappa, +1 endpoint
        self.point_num = 8 * int(true_kappa) + 1

        ## precomputed quadrature grids used in forward
        # help1: (batch_size, point_num) grid of nodes t in [-1, 1], one row per sample
        self.help1 = torch.linspace(-1, 1, self.point_num).repeat([batch_size, 1]).to(device)
        # help2: (point_num, 1) column of the same nodes, fed through the net as inputs
        self.help2 = torch.linspace(-1, 1, self.point_num).view([-1, 1]).to(device)

        self.Writer = SummaryWriter('../logs')

        # one DNN per grade; grade l reuses (frozen) layers of earlier grades
        self.dnn_sequence = NetworkSequence([])
        for l in range(max_L):
            layer = np.hstack((layers[0:1 + length_mod * (l + 1)], layers[-1]))
            self.dnn_sequence.add_network(DNN(layer, init_rate, lr2, init_num_epoch, l, length_mod))

        self.cur_dnn = self.dnn_sequence[0]
        self.cur_opt = self.cur_dnn.opt
        self.cur_lr = self.cur_dnn.lr

        self.loss_fn = torch.nn.MSELoss()

        # wall-clock / loss traces sampled at reporting epochs
        self.time = []
        self.time_val_loss = []
        self.time_train_loss = []
        self.index = 0

    def yh(self, imgs):
        """Evaluate the current grade network and view its 2-channel real
        output as one complex column vector of shape (-1, 1)."""
        return torch.view_as_complex(self.cur_dnn(imgs)).view([-1, 1])

    def forward(self, imgs):
        """Residual of the integral equation at the batch points ``imgs``.

        Returns the real view (last dim 2: re/im) of
        ``u(x) - trapz_t[ cos(x*(t+1)) * exp(i*kappa*|x-t|) * u(t) ]``.
        Assumes ``imgs`` has exactly ``batch_size`` rows so the precomputed
        ``help1`` grid lines up; leftover batches go through left_forward.
        """
        temp1 = self.yh(imgs)
        # |x - t| distances on the fixed quadrature grid
        temp2 = self.kappa * torch.abs(imgs.repeat([1, self.point_num]) - self.help1)
        temp2 = torch.exp(1j * temp2)
        K = torch.cos(imgs * (self.help1 + 1))
        # network values at the quadrature nodes, broadcast across the batch
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        temp2 = torch.trapz(y=K * temp2 * cha, dx=2 / (self.point_num - 1))
        return torch.view_as_real(temp1.view([-1]) - temp2)

    def left_forward(self, imgs):
        """Same residual as ``forward`` but for a leftover batch of arbitrary
        size: the quadrature grids are rebuilt per call instead of using the
        batch_size-shaped cached ``help1``."""
        temp1 = self.yh(imgs)
        row = temp1.shape[0]
        temp2 = self.kappa * torch.abs(
            imgs.repeat([1, self.ex * self.point_num]) - torch.linspace(-1, 1, self.ex * self.point_num).repeat(
                [row, 1]).to(device))
        temp2 = torch.exp(1j * temp2)
        K = torch.cos(imgs * (torch.linspace(-1, 1, self.point_num).repeat([row, 1]).to(device) + 1))
        cha = self.yh(self.help2).view([-1]).repeat([len(imgs), 1])
        temp2 = torch.trapz(y=K*temp2 * cha, dx=2 / (self.ex * self.point_num - 1))
        return torch.view_as_real(temp1.view([-1]) - temp2)


    def cal_error(self, target, pred):
        """Relative L2-type error between ``pred`` and ``target``.

        The ``2*sum - first - last`` weighting halves the endpoint
        contributions (trapezoid-style weights on a uniform grid).
        Both arguments are (N, 2) real arrays (re/im channels).
        """
        diff = target - pred
        temp = np.sum(diff ** 2, 1)
        temp1 = np.sum(target ** 2, 1)
        return np.sqrt((2 * np.sum(temp) - temp[0] - temp[-1]) / (2 * np.sum(temp1) - temp1[0] - temp1[-1]))

    def train_dnn(self, nEpochs, mydataloader, val_dataloader, cur_L):
        """Train grade ``cur_L`` for ``nEpochs`` epochs and return its final
        validation loss.

        Grades > 0 warm-start from the previous grade's checkpoint
        (``strict=False`` because the new grade has extra layers).  Progress
        is printed ~10 times over the run; validation time is subtracted from
        the recorded training time by shifting ``start_time`` forward.
        """
        self.cur_L = cur_L
        start_time = time()
        self.index = 0
        self.cur_dnn = self.dnn_sequence[cur_L]
        if cur_L > 0:
            self.cur_dnn.load_state_dict(torch.load('../models/case%d_dnn%d.pth' % (self.case_ID, cur_L - 1)),
                                         strict=False)
        self.cur_dnn = self.cur_dnn.to(device)
        self.cur_opt = self.cur_dnn.opt
        self.cur_lr = self.cur_dnn.lr
        self.cur_dnn.train()
        with open(self.file_name, 'a') as file:
            # NOTE(review): sys.stdout keeps pointing at this file object after
            # the `with` closes it; later prints rely on it being reopened the
            # same way — confirm this redirection scheme is intended.
            if self.Write_into_file == True:
                sys.stdout = file
            print('--------------------------- learn dnn%d ----------------------------' % (cur_L + 1))
            print(self.cur_dnn)

            # print all trainable parameters
            print('可训练参数')
            for name, param in self.cur_dnn.named_parameters():
                if param.requires_grad:
                    print(name)
        iter = 0  # NOTE: shadows the builtin `iter`; counts optimizer steps
        # reporting stride: show progress roughly 10 times over the run
        step = nEpochs // 10

        # epochs at which to evaluate and log
        show_epoch = np.concatenate([np.array([0]), np.arange(step - 1, nEpochs + 1, step)])
        for epoch in range(nEpochs):
            for img, target in mydataloader:
                iter += 1
                img = img.to(device)
                target = target.to(device)
                output = self.forward(img)
                loss = self.loss_fn(target, output)

                self.cur_opt.zero_grad()
                loss.backward()
                self.cur_opt.step()
            if epoch in show_epoch:
                now_time = time()
                total_val_loss = 0
                with torch.no_grad():
                    for img, target in val_dataloader:
                        img = img.to(device)
                        target = target.to(device)
                        output = self.forward(img)
                        val_loss = self.loss_fn(target, output)
                        total_val_loss += val_loss.item()
                with open(self.file_name, 'a') as file:
                    if self.Write_into_file == True:
                        sys.stdout = file
                    print(
                        "The epoch: %d, the iter: %d, the training loss: %e, the validation loss: %e, the lr: %e:" % (
                            epoch, iter, loss.item(), total_val_loss / len(val_dataloader),
                            self.cur_opt.param_groups[0]['lr']))
                # exclude validation/logging time from the measured training time
                start_time += time() - now_time
                self.time.append(time() - start_time)
                self.time_val_loss.append(total_val_loss / len(val_dataloader))
                self.time_train_loss.append(loss.item())
                # keep only the latest (final) losses for this grade
                self.train_list[cur_L] = loss.item()
                self.val_list[cur_L] = total_val_loss / len(val_dataloader)
                # self.Writer.add_scalar('dnn_train_loss', loss.item(), iter)
                # self.Writer.add_scalar('dnn_val_loss', total_val_loss / len(val_dataloader), iter)
            self.cur_lr.step()

        # checkpoint this grade so the next grade can warm-start from it
        torch.save(self.cur_dnn.state_dict(), '../models/case%d_dnn%d.pth' % (self.case_ID, cur_L))
        end_time = time()
        self.time_list[cur_L] = end_time - start_time
        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file
            print(
                'Grade: %d, train_error: %e, val_error: %e' % (cur_L + 1, self.train_list[cur_L], self.val_list[cur_L]))
        return self.val_list[cur_L]

    def eval_model(self, u, repeated):
        """Evaluate the cumulative multi-grade prediction against ``u``.

        Sums the grade networks' outputs on a uniform test grid of
        ``num_test`` points, records the relative error after each grade in
        ``u_loss``, saves the per-grade complex predictions to disk, and
        prints a summary.  Returns (val, u, train) losses of the last grade.
        """
        # presumably the training loop left cur_L one past the last trained
        # grade, so step back — TODO confirm against the caller
        self.cur_L -= 1
        multi_iter_freq = np.zeros((self.max_L, self.num_test), dtype=complex)
        for l in range(self.cur_L + 1):
            self.cur_dnn = self.dnn_sequence[l]
            self.cur_dnn.eval()
            if l == 0:
                u_dnn = torch.view_as_real(
                    self.yh(torch.linspace(-1, 1, self.num_test).to(device).view([-1, 1]).to(device)).view(
                        [-1])).detach().cpu().numpy()
                self.u_loss[l] = self.cal_error(u, u_dnn)
                his = u_dnn
            else:
                # later grades are additive corrections on top of earlier ones
                u_dnn = torch.view_as_real(
                    self.yh(torch.linspace(-1, 1, self.num_test).to(device).view([-1, 1]).to(device)).view(
                        [-1])).detach().cpu().numpy() + his
                self.u_loss[l] = self.cal_error(u, u_dnn)
                his = u_dnn
            complex_vector = u_dnn[:, 0] + 1j * u_dnn[:, 1]
            multi_iter_freq[l, :] = complex_vector.reshape(1, -1)
        np.save('../mat_file/try_fin_%d.npy' % repeated, multi_iter_freq)

        with open(self.file_name, 'a') as file:
            if self.Write_into_file == True:
                sys.stdout = file

            print("------------------------------------time-------------------------------------")
            print(self.time_list)
            print(np.sum(self.time_list[1:]))
            print("------------------------------------train_loss-------------------------------------")
            print(self.train_list)
            print("------------------------------------val_loss-------------------------------------")
            print(self.val_list)
            print("------------------------------------u_loss-------------------------------------")
            print(self.u_loss)
        return self.val_list[self.cur_L], self.u_loss[self.cur_L], self.train_list[self.cur_L]


if __name__ == '__main__':
    # Hyper-parameter sweep over batch size, initial learning rate and weight
    # decay; the best configuration (by validation loss) is reported at the end.
    fin_init_rate = 0
    best_u_loss = 100
    best_val_loss = 100
    Case_ID = 899013
    fin_batch_size = 0
    fin_lr = -1
    file_name = '../result/mgl/multi_exp1_inc.txt'
    write_into_file = True

    max_L = 12        # maximum number of grades
    min_L = 3         # always train at least this many grades
    tolerance = 1e-11

    # train multi grade

    init_num_epoch = 4000

    noise_list = [0]
    batch_size_list = [256, 128, 512]
    lr2_list = [0]
    # lr2_list = [0, 1e-6, 1e-5, 1e-4]
    init_rate_list = [1e-2, 1e-1, 1e-3, 1e-4]

    for l in range(len(batch_size_list)):
        batch_size = batch_size_list[l]
        for i in range(len(noise_list)):
            noise_level = noise_list[i]
            for zz in range(len(init_rate_list)):
                init_rate = init_rate_list[zz]
                for r in range(len(lr2_list)):
                    lr2 = lr2_list[r]
                    with open(file_name, 'a') as file:
                        if write_into_file == True:
                            sys.stdout = file
                        print('*****************************************************')
                        print('noise_level: %e, init_rate:%e,  batch_size:%d, lr2:%e' % (
                            noise_level, init_rate, batch_size, lr2))

                    # three repeated runs per configuration
                    for num in range(3):
                        ## read and prepare data
                        cur_L = 0
                        case_basename = 'exp1.mat'
                        rev_filepath = os.path.join('../dataset', case_basename)

                        # copyfile(filepath, rev_filepath)
                        mat_file = io.loadmat(rev_filepath)

                        real_u = mat_file['real_y'].astype(np.float32)
                        real_f = mat_file['real_f'].astype(np.float32)
                        img_u = mat_file['img_y'].astype(np.float32)
                        img_f = mat_file['img_f'].astype(np.float32)
                        num_train = mat_file['num_train'][0, 0]
                        num_val = mat_file['num_val'][0, 0]
                        num_test = mat_file['num_test'][0, 0]
                        x_lim = mat_file['x_lim'].astype(np.float32)
                        norm_y = mat_file['norm_y']
                        kappa = mat_file['kappa']

                        # stack real/imag parts into 2-channel arrays
                        f = np.hstack([real_f, img_f])
                        u = np.hstack([real_u, img_u])

                        # split x_lim rows into train / val / test partitions
                        train_imgs = x_lim[0: num_train, :]
                        val_imgs = x_lim[num_train: num_train + num_val, :]
                        test_imgs = x_lim[num_val + num_train:, :]
                        train_targets = f[0:num_train, :]
                        val_targets = f[num_train:num_val + num_train, :]
                        test_targets = f[num_val + num_train:, :]

                        train_data = Mydataset(train_imgs, train_targets)
                        train_dataLoader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
                        val_data = Mydataset(val_imgs, val_targets)
                        val_dataLoader = DataLoader(val_data, batch_size=batch_size, shuffle=True, drop_last=True)

                        ## train model
                        mod = [256]
                        # layers = [1, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 2]
                        layers = [1, 300, 300, 400, 400, 500, 500, 600, 600, 700, 700, 800, 800,  2]

                        PINNs = PhysicsInformedNN(
                            layers, Case_ID, norm_y, batch_size, init_num_epoch, lr2, file_name, write_into_file,
                            init_rate, max_L, kappa, len(mod), 20001)

                        # train dnn1 (grade 0)
                        before_error = 100
                        beg_time = time()
                        now_val_error = PINNs.train_dnn(init_num_epoch, train_dataLoader, val_dataLoader, cur_L)
                        end_time = time()
                        with open(file_name, 'a') as file:
                            if write_into_file == True:
                                sys.stdout = file
                            print('+++++++++++++++++++++++++++++++++++++++++++')
                            print(f"Grade: {cur_L:d}, 执行耗时: {end_time - beg_time:.4f} 秒")
                            print('+++++++++++++++++++++++++++++++++++++++++++')

                        temp_train = np.zeros_like(train_targets)
                        temp_val = np.zeros_like(val_targets)

                        renew_train_targets = train_targets
                        renew_val_targets = val_targets
                        # keep adding grades while validation error improves
                        # (and always train at least min_L grades)
                        while cur_L < min_L or (now_val_error < before_error and cur_L < max_L - 1):
                            cur_L += 1
                            # create the residual dataset for the next grade:
                            # targets become (previous targets - current model output)
                            with torch.no_grad():
                                # full batches go through forward (cached grids)
                                for b in range(int(num_train / batch_size)):
                                    temp_train[b * batch_size:(b + 1) * batch_size, :] = PINNs.forward(
                                        torch.Tensor(train_imgs[b * batch_size:(b + 1) * batch_size, :]).to(
                                            device)).detach().cpu().numpy()

                                # leftover partial batch goes through left_forward
                                if int(num_train / batch_size) != num_train / batch_size:
                                    temp_train[int(num_train / batch_size) * batch_size:, :] = PINNs.left_forward(
                                        torch.Tensor(train_imgs[int(num_train / batch_size) * batch_size:, :]).to(
                                            device)).detach().cpu().numpy()

                                for b in range(int(num_val / batch_size)):
                                    temp_val[b * batch_size:(b + 1) * batch_size, :] = PINNs.forward(
                                        torch.Tensor(val_imgs[b * batch_size:(b + 1) * batch_size, :]).to(
                                            device)).detach().cpu().numpy()

                                if int(num_val / batch_size) != num_val / batch_size:
                                    temp_val[int(num_val / batch_size) * batch_size:, :] = PINNs.left_forward(
                                        torch.Tensor(val_imgs[int(num_val / batch_size) * batch_size:,
                                                     :]).to(device)).detach().cpu().numpy()

                                renew_train_targets = renew_train_targets - temp_train
                                renew_val_targets = renew_val_targets - temp_val
                                new_train_data = Mydataset(train_imgs, renew_train_targets)
                                new_train_dataLoader = DataLoader(new_train_data, batch_size=batch_size, shuffle=True,
                                                                  drop_last=True)
                                new_val_data = Mydataset(val_imgs, renew_val_targets)
                                new_val_dataLoader = DataLoader(new_val_data, batch_size=batch_size, shuffle=True,
                                                                drop_last=True)

                            # train the next grade on the residual targets
                            before_error = now_val_error
                            beg_time = time()
                            now_val_error = PINNs.train_dnn(init_num_epoch, new_train_dataLoader, new_val_dataLoader,
                                                            cur_L)
                            end_time = time()
                            with open(file_name, 'a') as file:
                                if write_into_file == True:
                                    sys.stdout = file
                                print('+++++++++++++++++++++++++++++++++++++++++++')
                                print(f"Grade: {cur_L:d}, 执行耗时: {end_time - beg_time:.4f} 秒")
                                print('+++++++++++++++++++++++++++++++++++++++++++')

                        val_loss, u_loss, train_loss = PINNs.eval_model(u, num)
                        # track the best configuration by validation loss
                        if val_loss < best_val_loss:
                            fin_init_rate = init_rate
                            best_val_loss = val_loss
                            best_u_loss = u_loss
                            fin_lr = lr2
                            fin_batch_size = batch_size
                        # abandon clearly diverging/overfitting configurations
                        if val_loss > best_val_loss * 100 or val_loss > 100 * train_loss:
                            break
                        PINNs.Writer.close()
    with open(file_name, 'a') as file:
        if write_into_file == True:
            sys.stdout = file
        print('+++++++++++++++++++++++++++++++++++++++++++')
        print('+++++++++++++++++++++++++++++++++++++++++++')
        print(layers)
        print('Fin_loss for noise_level:%e, init_rate:%e,  batch_size: %d, lr: %e, val_error: %e, u_error: %e' % (
            noise_level, fin_init_rate, fin_batch_size, fin_lr, best_val_loss, best_u_loss))
        print('+++++++++++++++++++++++++++++++++++++++++++')
        print('+++++++++++++++++++++++++++++++++++++++++++')
        # bug fix: the original printed the literal text '/n' — a newline
        # ('\n') was clearly intended
        print('\n')
        print('\n')





