# https://discuss.pytorch.org/t/pytorch-adam-vs-tensorflow-adam/74471/5 — this issue thread may be useful
import os

import matplotlib.pyplot as plt
import numpy as np
from scipy import io
import torch
from torch import nn
from pyDOE import lhs

# Fix RNG seeds for reproducible sampling and weight initialization.
seed = 9527
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # no-op on CPU-only machines

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Domain bounds: x in [-5, 5], t in [0, pi/2].
low_bound = np.array([-5.0, 0.0])
up_bound = np.array([5.0, np.pi / 2])
# torch.tensor copies the NumPy data directly; the original
# torch.as_tensor(torch.from_numpy(...), device=torch.device(device))
# double-wrapped both the tensor and the (already-constructed) device.
low_bound_tensor = torch.tensor(low_bound, dtype=torch.float32, device=device)
up_bound_tensor = torch.tensor(up_bound, dtype=torch.float32, device=device)

# Number of sampled points: initial condition, boundary, interior collocation.
N_init = 50
N_bound = 50
N_f = 20000

'''
处理得到所有点坐标及对应测量值
'''
# 读取数据集文件
data_origin = io.loadmat('./Data/NLS.mat')

# 读取原始数据
x = data_origin['x'].flatten()[:, None]  # (261, 1)
t = data_origin['tt'].flatten()[:, None]  # (201, 1)
h = data_origin['uu']  # (256, 201)

# 处理h，分为虚部u和实部v
u = np.real(h)
v = np.imag(h)
h_mod = np.sqrt(u ** 2 + v ** 2)

'''
真实点集
'''
# 对原始数据排列组合，得到完整各坐标点
coord_true = torch.tensor(np.array(np.meshgrid(x, t)).T.reshape((-1, 2), order='A'), dtype=torch.float32,
                          device=torch.device(device))
u_true = torch.tensor(u.T.flatten(), dtype=torch.float32, device=torch.device(device))
v_true = torch.tensor(v.T.flatten(), dtype=torch.float32, device=torch.device(device))
h_mod_true = torch.tensor(h_mod.T.flatten(), dtype=torch.float32, device=torch.device(device))

'''
初值点集
'''
# 随机抽取 N_init 个初值坐标点
idx_init = np.random.choice(x.shape[0], N_init, replace=False)
x_init = x[idx_init, 0:1]
u_init = torch.tensor(u[idx_init, 0:1], dtype=torch.float32, device=torch.device(device))
v_init = torch.tensor(v[idx_init, 0:1], dtype=torch.float32, device=torch.device(device))
coord_init = torch.tensor((np.concatenate((x_init, 0 * x_init), 1)), dtype=torch.float32, device=torch.device(device))

'''
边界点集
'''
# 随机抽取 N_bound 个边界坐标点
idx_bound = np.random.choice(t.shape[0], N_bound, replace=False)
t_bound = torch.tensor(t[idx_bound, 0:1], dtype=torch.float32, device=torch.device(device))
# 为了便于求偏导，把 x_bound 提出来
x_bound_low = (0 * t_bound + low_bound[0]).clone().detach().requires_grad_(True)
x_bound_up = (0 * t_bound + up_bound[0]).clone().detach().requires_grad_(True)
# 再组合回去
coord_bound_low = torch.cat((x_bound_low, t_bound), 1)
coord_bound_up = torch.cat((x_bound_up, t_bound), 1)

'''
域内点集
'''
# 随机抽取 N_f 个域内坐标点
coord_f = torch.tensor((low_bound + (up_bound - low_bound) * lhs(2, N_f)).astype(np.float32), dtype=torch.float32,
                       device=torch.device(device))
# 同理，把 x_f 和 t_f 提出来
x_f = coord_f[:, 0:1]
t_f = coord_f[:, 1:2]
x_f.requires_grad_()
t_f.requires_grad_()
# 再组合回去
coord_f = torch.cat((x_f, t_f), 1)

'''
均方误差函数
'''
mse_fn = torch.nn.MSELoss(reduction='mean')


class LinearModel(nn.Module):
    """Fully-connected PINN backbone mapping (x, t) -> (u, v).

    Four candidate MLPs with different activations were built for
    experimentation; only the Tanh stack is used in ``forward``.
    Observations from the original author's runs:
      * Tanh      — reaches ~1e-3 around 15k epochs, ~1e-4 by 60-70k.
      * Tanhshrink — fine down to ~1e-3, then loss rebounds to ~1e0 after 15k.
      * LeakyReLU — trains fast but stalls around 1e-1.
      * ELU       — stalls around 1e-2.
    """

    # Layer widths shared by every experimental stack: 2 -> ... -> 2.
    _WIDTHS = (2, 64, 128, 128, 64, 2)

    def __init__(self):
        super(LinearModel, self).__init__()
        # All four stacks are constructed (and therefore initialized in a
        # fixed order) to keep the seeded weight initialization stable.
        self.linear_tanh_stack = self._build_stack(nn.Tanh)
        self.linear_tanh_shrink_stack = self._build_stack(nn.Tanhshrink)
        self.linear_leaky_relu_stack = self._build_stack(nn.LeakyReLU)
        self.linear_elu_stack = self._build_stack(nn.ELU)

    @staticmethod
    def _build_stack(activation):
        """Build an MLP over ``_WIDTHS`` with ``activation`` between layers."""
        widths = LinearModel._WIDTHS
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(activation())
        layers.pop()  # no activation after the output layer
        return nn.Sequential(*layers)

    def forward(self, coord_input):
        # Rescale inputs to [-1, 1] over the problem domain before the MLP.
        span = up_bound_tensor - low_bound_tensor
        scaled = 2.0 * (coord_input - low_bound_tensor) / span - 1.0
        uv_pred = self.linear_tanh_stack(scaled)
        # Channel 0 is u (real part), channel 1 is v (imaginary part).
        return uv_pred[:, 0:1], uv_pred[:, 1:2]


def train(model, loss_fn, optimizer):
    """Compute the total PINN loss for one optimization step.

    Despite the name, this only builds the loss; the caller performs
    ``optimizer.zero_grad()`` / ``loss.backward()`` / ``optimizer.step()``.

    NOTE: ``optimizer`` is kept for signature compatibility but is unused
    here. The original interleaved ``optimizer.zero_grad()`` calls were
    no-ops: ``torch.autograd.grad`` returns gradients directly and never
    accumulates into parameter ``.grad`` fields, and the training loop
    already zeroes gradients right before ``backward()``.

    :param model: network mapping (x, t) -> (u, v)
    :param loss_fn: callable combining all residual terms into one scalar
    :param optimizer: unused (see note above)
    :return: scalar loss tensor with the autograd graph attached
    """
    # --- Initial-condition points: only the predicted values are needed
    #     (no derivatives of the initial condition enter the loss).
    u_init_pred, v_init_pred = model(coord_init)

    # --- Boundary points: periodic conditions compare values and
    #     x-derivatives at the lower and upper spatial boundary.
    u_bound_low_pred, v_bound_low_pred = model(coord_bound_low)
    u_bound_low_pred_x = torch.autograd.grad(outputs=u_bound_low_pred, inputs=x_bound_low,
                                             grad_outputs=torch.ones_like(u_bound_low_pred), create_graph=True)[0]
    v_bound_low_pred_x = torch.autograd.grad(outputs=v_bound_low_pred, inputs=x_bound_low,
                                             grad_outputs=torch.ones_like(v_bound_low_pred), create_graph=True)[0]

    u_bound_up_pred, v_bound_up_pred = model(coord_bound_up)
    u_bound_up_pred_x = torch.autograd.grad(outputs=u_bound_up_pred, inputs=x_bound_up,
                                            grad_outputs=torch.ones_like(u_bound_up_pred), create_graph=True)[0]
    v_bound_up_pred_x = torch.autograd.grad(outputs=v_bound_up_pred, inputs=x_bound_up,
                                            grad_outputs=torch.ones_like(v_bound_up_pred), create_graph=True)[0]

    # --- Collocation points: residual of i*h_t + 0.5*h_xx + |h|^2 * h = 0
    #     with h = u + i*v, split into imaginary and real parts.
    u_f_pred, v_f_pred = model(coord_f)

    # First-order derivatives with respect to x and t.
    u_f_pred_x = \
        torch.autograd.grad(outputs=u_f_pred, inputs=x_f, grad_outputs=torch.ones_like(u_f_pred), create_graph=True)[0]
    u_f_pred_t = \
        torch.autograd.grad(outputs=u_f_pred, inputs=t_f, grad_outputs=torch.ones_like(u_f_pred), create_graph=True)[0]
    v_f_pred_x = \
        torch.autograd.grad(outputs=v_f_pred, inputs=x_f, grad_outputs=torch.ones_like(v_f_pred), create_graph=True)[0]
    v_f_pred_t = \
        torch.autograd.grad(outputs=v_f_pred, inputs=t_f, grad_outputs=torch.ones_like(v_f_pred), create_graph=True)[0]

    # Second-order derivatives: only d2/dx2 appears in the PDE.
    u_f_pred_xx = \
        torch.autograd.grad(outputs=u_f_pred_x, inputs=x_f, grad_outputs=torch.ones_like(u_f_pred_x),
                            create_graph=True)[0]
    v_f_pred_xx = \
        torch.autograd.grad(outputs=v_f_pred_x, inputs=x_f, grad_outputs=torch.ones_like(v_f_pred_x),
                            create_graph=True)[0]

    # PDE residuals (imaginary part f_u, real part f_v):
    #   f_u = u_t + 0.5 * v_xx + (u^2 + v^2) * v
    #   f_v = v_t - 0.5 * u_xx - (u^2 + v^2) * u
    u_f_calc = u_f_pred_t + 0.5 * v_f_pred_xx + (u_f_pred ** 2 + v_f_pred ** 2) * v_f_pred
    v_f_calc = v_f_pred_t - 0.5 * u_f_pred_xx - (u_f_pred ** 2 + v_f_pred ** 2) * u_f_pred

    # Combine every residual term into the scalar training loss.
    loss = loss_fn(u_init, u_init_pred, v_init, v_init_pred, u_bound_low_pred, u_bound_up_pred, v_bound_low_pred,
                   v_bound_up_pred, u_bound_low_pred_x, u_bound_up_pred_x, v_bound_low_pred_x, v_bound_up_pred_x,
                   u_f_calc, v_f_calc)

    return loss


def loss_fn(u_init, u_init_pred, v_init, v_init_pred, u_bound_low_pred, u_bound_up_pred, v_bound_low_pred,
            v_bound_up_pred, u_bound_low_pred_x, u_bound_up_pred_x, v_bound_low_pred_x, v_bound_up_pred_x,
            u_f_calc, v_f_calc):
    """Total PINN loss: initial-condition fit + periodic boundary + PDE residual.

    All terms are mean-squared errors via the module-level ``mse_fn``.
    """
    # Initial condition: predictions must match the measured u, v at t = 0.
    loss_init = mse_fn(u_init, u_init_pred) + mse_fn(v_init, v_init_pred)
    # Periodic boundary: values and x-derivatives agree at both boundaries.
    loss_bound = (mse_fn(u_bound_low_pred, u_bound_up_pred)
                  + mse_fn(v_bound_low_pred, v_bound_up_pred)
                  + mse_fn(u_bound_low_pred_x, u_bound_up_pred_x)
                  + mse_fn(v_bound_low_pred_x, v_bound_up_pred_x))
    # PDE residuals must vanish. torch.zeros_like inherits shape, dtype and
    # device from its argument, replacing the manual
    # torch.zeros(x.shape, device=torch.device(device)) construction.
    loss_pde = mse_fn(u_f_calc, torch.zeros_like(u_f_calc)) + mse_fn(v_f_calc, torch.zeros_like(v_f_calc))
    return loss_init + loss_bound + loss_pde


if __name__ == '__main__':

    model = LinearModel()
    # model = torch.load('./Model/Schrodinger_epoch.pt')
    model.to(device)

    '''
    Optimizer
    '''
    adamOptimizer = torch.optim.Adam(model.parameters())
    optimizer = adamOptimizer

    '''
    Learning-rate schedulers (candidates; ReduceLROnPlateau is the active one)
    '''
    stepScheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10000, gamma=0.8, last_epoch=-1)
    plateauScheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=0.9, patience=80,
                                                                  cooldown=0, min_lr=1e-5, verbose=True)
    cosScheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=1000, eta_min=1e-5,
                                                              verbose=False)
    cosWarmScheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=2500, T_mult=1, eta_min=1e-6,
                                                                            verbose=True)
    scheduler = plateauScheduler

    '''
    Initial bookkeeping state
    '''
    loss_min = 100.0  # best loss so far, kept as a plain float (not a tensor)
    start_epoch = -1
    total_epoch = 50000
    loss_list = []
    save_checkpoint = False
    save_model = False
    reach_2 = False  # milestone flags; labels follow the author's e-2..e-5 naming
    reach_3 = False
    reach_4 = False
    reach_5 = False
    milestones = []

    # '''
    # Restore from a checkpoint
    # '''
    # path_checkpoint = "./CheckPoint/Schrodinger_95000.pth"  # checkpoint path
    # checkpoint = torch.load(path_checkpoint)  # load the checkpoint
    # model.load_state_dict(checkpoint['net'])  # restore learnable parameters
    # optimizer.load_state_dict(checkpoint['optimizer'])  # restore optimizer state
    # start_epoch = checkpoint['epoch']  # resume from the saved epoch

    '''
    Training loop
    '''
    for epoch in range(start_epoch + 1, total_epoch):
        loss = train(model, loss_fn, optimizer)
        # Detach to a plain float once per epoch: comparing/storing the loss
        # tensor itself would keep each epoch's autograd graph alive
        # (a memory leak) and force a device sync on every comparison.
        loss_val = loss.item()
        loss_list.append(loss_val)
        if loss_val < loss_min:
            loss_min = loss_val
            if epoch >= total_epoch - 100:
                # Stop once we are within the last 100 epochs and still
                # improving (original early-exit behavior preserved).
                print('Finish! Loss_min: %.3e.' % loss_min)
                break

            '''
            Record milestones
            '''
            # NOTE(review): this elif chain can skip a milestone when the
            # loss drops more than one decade in a single improvement; kept
            # as-is to preserve the original reporting behavior.
            if loss_val <= 1e-1 and not reach_2:
                reach_2 = True
                print('Reach e-2 at epoch %d.' % epoch)
                milestones.append(epoch)
            elif loss_val <= 1e-2 and not reach_3:
                reach_3 = True
                print('Reach e-3 at epoch %d.' % epoch)
                milestones.append(epoch)
            elif loss_val <= 1e-3 and not reach_4:
                reach_4 = True
                print('Reach e-4 at epoch %d.' % epoch)
                milestones.append(epoch)
            elif loss_val <= 1e-4 and not reach_5:
                reach_5 = True
                print('Reach e-5 at epoch %d.' % epoch)
                milestones.append(epoch)
        if epoch % 10 == 0:
            print('Epoch: %d, Loss: %.3e' % (epoch, loss_val))
        if epoch % 5000 == 0 and epoch > 0:
            if save_checkpoint:
                checkpoint = {
                    "net": model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    "epoch": epoch
                }
                if not os.path.isdir("./CheckPoint"):
                    os.mkdir("./CheckPoint")
                torch.save(checkpoint, './CheckPoint/Schrodinger_%d.pth' % epoch)

            '''
            Visualize the last 5000 epochs of training
            '''
            start = epoch - 5000
            end = epoch
            plt.figure(dpi=150)
            plt.title('Loss Function Curve(%d~%d epoch)' % (start, end))
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.plot(loss_list[start:end], label='$Loss$')
            plt.legend()
            if not os.path.isdir("./Pic"):
                os.mkdir("./Pic")
            plt.savefig('./Pic/Schrodinger_(%d-%d epoch).jpg' % (start, end))
            plt.show()

        optimizer.zero_grad()
        loss.backward()

        optimizer.step()

        # scheduler.step()
        scheduler.step(loss_val)

    '''
    Evaluate the trained model on the full grid
    '''
    # No gradients are needed at evaluation time; no_grad avoids building
    # an autograd graph over the full grid.
    with torch.no_grad():
        u_pred, v_pred = model(coord_true)
    u_pred = u_pred.reshape(-1)
    v_pred = v_pred.reshape(-1)
    h_mod_pred = (u_pred ** 2 + v_pred ** 2).sqrt()

    mse_u = mse_fn(u_true, u_pred)
    mse_v = mse_fn(v_true, v_pred)
    mse_h_mod = mse_fn(h_mod_true, h_mod_pred)

    print('mse_u: %e' % mse_u)
    print('mse_v: %e' % mse_v)
    print('mse_h_mod: %e' % mse_h_mod)

    if save_model:
        # Ensure the target directory exists (torch.save does not create it).
        if not os.path.isdir("./Model"):
            os.mkdir("./Model")
        torch.save(model, './Model/Schrodinger_%d.pt' % epoch)
        print('Saved model as ./Model/Schrodinger_%d.pt' % epoch)

    '''
    Visualize the whole training run
    '''
    plt.figure(dpi=150)
    plt.title('Loss Function Curve(0~%d epoch)' % len(loss_list))
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.plot(loss_list, label='$Loss$')
    plt.legend()
    # Original wrote to './pic/' (case mismatch with the './Pic' directory
    # created above) and omitted the extension; fixed to match.
    if not os.path.isdir("./Pic"):
        os.mkdir("./Pic")
    plt.savefig('./Pic/Schrodinger_(0~%d epoch).jpg' % len(loss_list))
    plt.show()

    '''
    Print milestones
    '''
    for index, milestone_epoch in enumerate(milestones):
        # Labels start at e-2 to match the messages printed during training
        # (the original printed e-0, e-1, ... here).
        print('Reached e-%d at epoch %d.' % (index + 2, milestone_epoch))
