# https://discuss.pytorch.org/t/pytorch-adam-vs-tensorflow-adam/74471/5 — this thread on PyTorch-vs-TensorFlow Adam differences may be useful
import os

import matplotlib.pyplot as plt
import numpy as np
from scipy import io
import torch
from torch import nn

# Seed every RNG in use (NumPy + torch CPU/GPU) for reproducibility
seed = 9527
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

# Prefer the first GPU when available, otherwise fall back to CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Number of (x, y, t) samples drawn for training
N_train = 5000

'''
Assemble all point coordinates and the corresponding measured values.
'''
# Load the dataset file
data_origin = io.loadmat('./Data/cylinder_nektar_wake.mat')

# Raw arrays (shapes as stored in the .mat file)
Psi_star = data_origin['U_star']  # N x 2 x T, velocity components (u, v) at (x, y, t)
P_star = data_origin['p_star']  # N x T, pressure p at (x, y, t)
t_star = data_origin['t']  # T x 1, time stamps t
coord_star = data_origin['X_star']  # N x 2, spatial coordinates (x, y)

# Flatten every (point, time) combination into NT x 1 column vectors
N = coord_star.shape[0]  # 5000
T = t_star.shape[0]  # 200
# origin -> N x T -> NT x 1
x = np.tile(coord_star[:, 0:1], (1, T)).flatten()[:, None]
y = np.tile(coord_star[:, 1:2], (1, T)).flatten()[:, None]
t = np.tile(t_star, (1, N)).T.flatten()[:, None]
u = Psi_star[:, 0, :].flatten()[:, None]
v = Psi_star[:, 1, :].flatten()[:, None]
p = P_star.flatten()[:, None]

# Noise-free training subset: N_train randomly chosen rows of the NT samples.
# The coordinate tensors require grad because the model later differentiates
# its outputs w.r.t. these exact leaf tensors to build the PDE residuals.
idx = np.random.choice(N * T, N_train, replace=False)
x_train = torch.tensor(x[idx, :], dtype=torch.float32, device=torch.device(device)).requires_grad_(True)
y_train = torch.tensor(y[idx, :], dtype=torch.float32, device=torch.device(device)).requires_grad_(True)
t_train = torch.tensor(t[idx, :], dtype=torch.float32, device=torch.device(device)).requires_grad_(True)
u_train = torch.tensor(u[idx, :], dtype=torch.float32, device=torch.device(device))
v_train = torch.tensor(v[idx, :], dtype=torch.float32, device=torch.device(device))

coord_train = torch.cat([x_train, y_train, t_train], 1)  # full (x, y, t) network input
low_bound_tensor = coord_train.min(0)[0]  # per-dimension lower bounds, used to scale inputs
up_bound_tensor = coord_train.max(0)[0]  # per-dimension upper bounds

# Mean-squared-error criterion; mean reduction is the nn.MSELoss default.
mse_fn = nn.MSELoss()


class LinearModel(nn.Module):
    """PINN for the 2-D incompressible Navier-Stokes inverse problem.

    The network maps (x, y, t) to (psi, p).  psi acts as a stream
    function: u = dpsi/dy and v = -dpsi/dx, which makes the predicted
    velocity field divergence-free by construction.  lambda_1_pred and
    lambda_2_pred are learnable scalars multiplying the convective and
    viscous terms of the momentum-equation residuals f_u, f_v.
    """

    def __init__(self):
        super(LinearModel, self).__init__()

        # Learnable PDE coefficients, initialized at 0
        # (reference values in __main__ are lambda_1 = 1.0, lambda_2 = 0.01)
        self.lambda_1_pred = nn.Parameter(torch.tensor(0.0, dtype=torch.float32, device=torch.device(device)))
        self.lambda_2_pred = nn.Parameter(torch.tensor(0.0, dtype=torch.float32, device=torch.device(device)))

        # MLP: 3 inputs (x, y, t) -> 9 hidden tanh layers of width 20 -> 2 outputs (psi, p)
        self.linear_tanh_stack = nn.Sequential(
            nn.Linear(3, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 2)
        )

    def forward(self, coord_input):
        """Return (u, v, p) predictions and the PDE residuals (f_u, f_v).

        NOTE(review): every autograd.grad call below differentiates w.r.t.
        the module-level leaf tensors x_train / y_train / t_train, not
        w.r.t. coord_input.  This forward pass is therefore only valid
        when coord_input is the coord_train tensor concatenated from
        exactly those leaves.
        """
        coord_input = (2.0 * (coord_input - low_bound_tensor) / (
                up_bound_tensor - low_bound_tensor) - 1.0)  # rescale each input dimension to [-1, 1]
        psi_p_pred = self.linear_tanh_stack(coord_input)
        psi_pred = psi_p_pred[:, 0:1]  # stream-function channel
        p_pred = psi_p_pred[:, 1:2]  # pressure channel

        # Velocities from the stream function: u = dpsi/dy, v = -dpsi/dx.
        # create_graph=True keeps these derivatives differentiable so the
        # second-order terms below (and backprop through the loss) work.
        u_pred = torch.autograd.grad(outputs=psi_pred, inputs=y_train,
                                     grad_outputs=torch.ones_like(psi_pred), create_graph=True)[0]
        v_pred = -torch.autograd.grad(outputs=psi_pred, inputs=x_train,
                                      grad_outputs=torch.ones_like(psi_pred), create_graph=True)[0]

        # First- and second-order derivatives of u for the x-momentum residual
        u_pred_t = torch.autograd.grad(outputs=u_pred, inputs=t_train,
                                       grad_outputs=torch.ones_like(u_pred), create_graph=True)[0]
        u_pred_x = torch.autograd.grad(outputs=u_pred, inputs=x_train,
                                       grad_outputs=torch.ones_like(u_pred), create_graph=True)[0]
        u_pred_y = torch.autograd.grad(outputs=u_pred, inputs=y_train,
                                       grad_outputs=torch.ones_like(u_pred), create_graph=True)[0]
        u_pred_xx = torch.autograd.grad(outputs=u_pred_x, inputs=x_train,
                                        grad_outputs=torch.ones_like(u_pred_x), create_graph=True)[0]
        u_pred_yy = torch.autograd.grad(outputs=u_pred_y, inputs=y_train,
                                        grad_outputs=torch.ones_like(u_pred_y), create_graph=True)[0]

        # First- and second-order derivatives of v for the y-momentum residual
        v_pred_t = torch.autograd.grad(outputs=v_pred, inputs=t_train,
                                       grad_outputs=torch.ones_like(v_pred), create_graph=True)[0]
        v_pred_x = torch.autograd.grad(outputs=v_pred, inputs=x_train,
                                       grad_outputs=torch.ones_like(v_pred), create_graph=True)[0]
        v_pred_y = torch.autograd.grad(outputs=v_pred, inputs=y_train,
                                       grad_outputs=torch.ones_like(v_pred), create_graph=True)[0]
        v_pred_xx = torch.autograd.grad(outputs=v_pred_x, inputs=x_train,
                                        grad_outputs=torch.ones_like(v_pred_x), create_graph=True)[0]
        v_pred_yy = torch.autograd.grad(outputs=v_pred_y, inputs=y_train,
                                        grad_outputs=torch.ones_like(v_pred_y), create_graph=True)[0]

        # Pressure gradient components
        p_pred_x = torch.autograd.grad(outputs=p_pred, inputs=x_train,
                                       grad_outputs=torch.ones_like(p_pred), create_graph=True)[0]
        p_pred_y = torch.autograd.grad(outputs=p_pred, inputs=y_train,
                                       grad_outputs=torch.ones_like(p_pred), create_graph=True)[0]

        # Momentum-equation residuals; they vanish when (u, v, p) satisfy
        # the PDE with coefficients lambda_1 (convection) and lambda_2 (viscosity)
        f_u_pred = u_pred_t + self.lambda_1_pred * (u_pred * u_pred_x + v_pred * u_pred_y) + \
                   p_pred_x - self.lambda_2_pred * (u_pred_xx + u_pred_yy)
        f_v_pred = v_pred_t + self.lambda_1_pred * (u_pred * v_pred_x + v_pred * v_pred_y) + \
                   p_pred_y - self.lambda_2_pred * (v_pred_xx + v_pred_yy)

        return u_pred, v_pred, p_pred, f_u_pred, f_v_pred


def train(model):
    """Run one forward pass over the training set and return the total PINN loss.

    Loss = data-fit MSE on (u, v) + PDE-residual MSE (mean-squared
    deviation of f_u and f_v from zero).
    """
    u_pred, v_pred, p_pred, f_u_pred, f_v_pred = model(coord_train)

    # Consistent (prediction, target) argument order throughout;
    # zeros_like keeps device/dtype in sync with the residuals for free.
    loss = mse_fn(u_pred, u_train) + mse_fn(v_pred, v_train) + \
           mse_fn(f_u_pred, torch.zeros_like(f_u_pred)) + \
           mse_fn(f_v_pred, torch.zeros_like(f_v_pred))

    return loss


if __name__ == '__main__':
    # Ground-truth PDE coefficients, for reference when judging the fit
    lambda_1_true = 1.0
    lambda_2_true = 0.01

    model = LinearModel()
    # model = torch.load('./Model/NavierStokes_epoch.pt')
    model.to(device)

    '''
    Optimizer
    '''
    adamOptimizer = torch.optim.Adam(model.parameters())
    optimizer = adamOptimizer

    '''
    Learning-rate schedules (several candidates; `scheduler` selects the active one)
    '''
    stepScheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10000, gamma=0.8, last_epoch=-1)
    plateauScheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, factor=0.9, patience=80,
                                                                  cooldown=0, min_lr=1e-5, verbose=True)
    cosScheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=1000, eta_min=1e-5,
                                                              verbose=False)
    cosWarmScheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=2500, T_mult=1, eta_min=1e-6,
                                                                            verbose=True)
    scheduler = plateauScheduler

    '''
    Training state
    '''
    # Best loss so far, kept as a plain float: storing the loss *tensor*
    # here would retain its autograd graph every new-best epoch (a leak).
    loss_min = float('inf')
    start_epoch = -1
    total_epoch = 50000
    loss_list = []
    save_checkpoint = False
    save_model = False
    # First crossings to report: loss <= 1e-1 is logged as "e-2", etc.
    milestone_thresholds = [(1e-1, 2), (1e-2, 3), (1e-3, 4), (1e-4, 5)]
    reached_exponents = set()
    milestones = []  # (exponent, epoch) pairs in the order they were reached

    # '''
    # Restore from checkpoint
    # '''
    # path_checkpoint = "./CheckPoint/NavierStokes_95000.pth"  # checkpoint path
    # checkpoint = torch.load(path_checkpoint)  # load checkpoint
    # model.load_state_dict(checkpoint['net'])  # restore learnable parameters
    # optimizer.load_state_dict(checkpoint['optimizer'])  # restore optimizer state
    # start_epoch = checkpoint['epoch']  # resume epoch counter

    '''
    Training loop
    '''
    for epoch in range(start_epoch + 1, total_epoch):
        loss = train(model)
        loss_value = loss.item()  # detach once; reuse for logging/comparisons
        loss_list.append(loss_value)
        if loss_value < loss_min:
            loss_min = loss_value
            if epoch >= total_epoch - 100:
                print('Finish! Loss_min: %.3e.' % loss_min)
                break

            '''
            Record milestones
            '''
            # Check every threshold (not an elif chain): a sharp loss drop
            # that skips an order of magnitude still records each milestone.
            for threshold, exponent in milestone_thresholds:
                if loss_value <= threshold and exponent not in reached_exponents:
                    reached_exponents.add(exponent)
                    print('Reach e-%d at epoch %d.' % (exponent, epoch))
                    milestones.append((exponent, epoch))
        if epoch % 10 == 0:
            print('Epoch: %d, Loss: %.3e' % (epoch, loss_value))
        if epoch % 5000 == 0 and epoch > 0:
            if save_checkpoint:
                checkpoint = {
                    "net": model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    "epoch": epoch
                }
                os.makedirs("./CheckPoint", exist_ok=True)
                torch.save(checkpoint, './CheckPoint/NavierStokes_%d.pth' % epoch)

            '''
            Visualize the last 5000 epochs of training
            '''
            start = epoch - 5000
            end = epoch
            plt.figure(dpi=150)
            plt.title('Loss Function Curve(%d~%d epoch)' % (start, end))
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.plot(loss_list[start:end], label='$Loss$')
            plt.legend()
            os.makedirs("./Pic", exist_ok=True)
            plt.savefig('./Pic/NavierStokes_(%d-%d epoch).jpg' % (start, end))
            plt.show()

        optimizer.zero_grad()
        loss.backward()

        optimizer.step()

        # ReduceLROnPlateau.step() needs the metric it monitors
        scheduler.step(loss_value)

    '''
    Save the trained model
    '''
    if save_model:
        os.makedirs("./Model", exist_ok=True)
        torch.save(model, './Model/NavierStokes_%d.pt' % epoch)
        print('Saved model as ./Model/NavierStokes_%d.pt' % epoch)

    '''
    Visualize the full training history
    '''
    plt.figure(dpi=150)
    plt.title('Loss Function Curve(0~%d epoch)' % len(loss_list))
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.plot(loss_list, label='$Loss$')
    plt.legend()
    os.makedirs("./Pic", exist_ok=True)
    plt.savefig('./Pic/NavierStokes_(0~%d epoch).jpg' % len(loss_list))
    plt.show()

    '''
    Report milestones
    '''
    for exponent, milestone_epoch in milestones:
        print('Reached e-%d at epoch %d.' % (exponent, milestone_epoch))