import numpy as np
from scipy import io
import torch
from torch import nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from pyDOE import lhs

# Domain bounds: x in [-5, 5], t in [0, pi/2]
low_bound = np.array([-5.0, 0.0])
up_bound = np.array([5.0, np.pi / 2])

# Number of sampled points
N_init = 50    # initial-condition points (t = 0)
N_bound = 50   # boundary points (x = -5 and x = 5)
N_f = 20000    # collocation points for the PDE residual

# Build all point coordinates and their measured reference values.
'''
处理得到所有点坐标及对应测量值
'''
# Load the reference dataset (nonlinear Schrodinger solution)
data_origin = io.loadmat('./Data/NLS.mat')

# Raw grids and complex solution
# NOTE(review): original comment claimed x is (261, 1) while h is (256, 201);
# presumably x is (256, 1) to match h's rows — confirm against NLS.mat.
x = data_origin['x'].flatten()[:, None]  # spatial grid, column vector
t = data_origin['tt'].flatten()[:, None]  # time grid, (201, 1)
h = data_origin['uu']  # complex field h(x, t), (256, 201)

# Split h into its real part u and imaginary part v
# (original comment had these swapped), plus the modulus |h|.
u = np.real(h)
v = np.imag(h)
h_mod = np.sqrt(u ** 2 + v ** 2)


class TrueDataset(Dataset):
    """Full reference solution sampled on the whole (x, t) grid."""

    def __init__(self):
        # Cartesian product of the x and t grids: one (x, t) row per sample.
        # (Kept verbatim: the transpose + order='A' reshape fixes the
        # pairing order of the flattened fields below.)
        self.coord = np.array(np.meshgrid(x, t)).T.reshape((-1, 2), order='A').astype(np.float32)
        # Flatten each field with the same ordering as the coordinates.
        self.u, self.v, self.h_mod = (
            field.T.flatten().astype(np.float32) for field in (u, v, h_mod)
        )
        self.len = self.coord.shape[0]

    def __getitem__(self, index):
        # One sample: coordinates plus the three reference values there.
        return self.coord[index], self.u[index], self.v[index], self.h_mod[index]

    def __len__(self):
        return self.len


class InitDataset(Dataset):
    """Initial-condition samples: N_init random spatial points on the t = 0 slice."""

    def __init__(self):
        # Draw N_init distinct spatial indices.
        chosen = np.random.choice(x.shape[0], N_init, replace=False)
        x_init = x[chosen, 0:1]
        # Reference u and v at t = 0 for the chosen points.
        self.u_init = u[chosen, 0:1].astype(np.float32)
        self.v_init = v[chosen, 0:1].astype(np.float32)
        # Pair each chosen x with t = 0.
        self.coord_init = np.concatenate((x_init, np.zeros_like(x_init)), 1).astype(np.float32)
        self.len = N_init

    def __getitem__(self, index):
        return self.coord_init[index], self.u_init[index], self.v_init[index]

    def __len__(self):
        return self.len


class BoundDataset(Dataset):
    """Boundary samples: N_bound random times paired with each spatial boundary."""

    def __init__(self):
        # Draw N_bound distinct time indices.
        chosen = np.random.choice(t.shape[0], N_bound, replace=False)
        t_bound = t[chosen, 0:1]
        # Same times placed at the lower (x = low_bound[0]) and upper
        # (x = up_bound[0]) spatial boundaries.
        ones_col = np.ones_like(t_bound)
        self.coord_bound_low = np.concatenate((ones_col * low_bound[0], t_bound), 1).astype(np.float32)
        self.coord_bound_up = np.concatenate((ones_col * up_bound[0], t_bound), 1).astype(np.float32)
        self.len = N_bound

    def __getitem__(self, index):
        return self.coord_bound_low[index], self.coord_bound_up[index]

    def __len__(self):
        return self.len


class FDataset(Dataset):
    """Collocation points for the PDE residual (the original comment
    mislabeled these as boundary points)."""

    def __init__(self):
        # Latin-hypercube sample of N_f points, scaled from the unit square
        # to the [low_bound, up_bound] domain.
        span = up_bound - low_bound
        self.coord_f = (low_bound + span * lhs(2, N_f)).astype(np.float32)
        self.len = N_f

    def __getitem__(self, index):
        return self.coord_f[index]

    def __len__(self):
        return self.len


class LinearModel(nn.Module):
    """Fully connected tanh network mapping coordinates (x, t) -> (u, v)."""

    def __init__(self):
        super(LinearModel, self).__init__()
        # Architecture 2 -> 100 -> 100 -> 100 -> 100 -> 2 with Tanh between
        # the hidden linear layers (identical to the original stack).
        widths = (2, 100, 100, 100, 100)
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.Tanh())
        layers.append(nn.Linear(widths[-1], 2))
        self.linear_tanh_stack = nn.Sequential(*layers)

    def forward(self, coord_input):
        # Enable grad on the coordinates in place so PDE derivatives can be
        # taken w.r.t. them by the caller.
        coord_input.requires_grad_()
        out = self.linear_tanh_stack(coord_input)
        # Column 0 is u (real part), column 1 is v (imaginary part).
        return out[:, 0:1], out[:, 1:2]


def _d_dx(output, coord):
    """Return d(output)/dx (column 0 of `coord`), kept on the autograd graph
    (create_graph=True) so derivative-based loss terms still produce
    parameter gradients when loss.backward() runs."""
    return torch.autograd.grad(
        outputs=output, inputs=coord,
        grad_outputs=torch.ones_like(output),
        retain_graph=True, create_graph=True)[0][:, 0:1]


def train(dataloader_init, dataloader_bound, dataloader_f, model, loss_fn, optimizer, epoch):
    """Run one full-batch optimization step of the Schrodinger PINN.

    Each dataloader is expected to yield exactly one batch covering its whole
    dataset (as set up in __main__); the tensors bound in the last iteration
    of each loop are then used to assemble the composite loss.
    """
    # --- initial condition: predictions on the t = 0 samples ---
    for coord_init, u_init, v_init in dataloader_init:
        u_init_pred, v_init_pred = model(coord_init)

    # --- periodic boundary: values and x-derivatives at both spatial ends ---
    for coord_bound_low, coord_bound_up in dataloader_bound:
        # lower boundary
        u_bound_low_pred, v_bound_low_pred = model(coord_bound_low)
        u_bound_low_pred_x = _d_dx(u_bound_low_pred, coord_bound_low)
        v_bound_low_pred_x = _d_dx(v_bound_low_pred, coord_bound_low)

        # upper boundary
        u_bound_up_pred, v_bound_up_pred = model(coord_bound_up)
        u_bound_up_pred_x = _d_dx(u_bound_up_pred, coord_bound_up)
        v_bound_up_pred_x = _d_dx(v_bound_up_pred, coord_bound_up)

    # --- collocation points: residual of i*h_t + 0.5*h_xx + |h|^2 * h = 0 ---
    for coord_f in dataloader_f:
        u_f_pred, v_f_pred = model(coord_f)

        # First derivatives: column 0 is d/dx, column 1 is d/dt.
        u_f_pred_xt = torch.autograd.grad(
            outputs=u_f_pred, inputs=coord_f,
            grad_outputs=torch.ones_like(u_f_pred),
            retain_graph=True, create_graph=True)[0]
        v_f_pred_xt = torch.autograd.grad(
            outputs=v_f_pred, inputs=coord_f,
            grad_outputs=torch.ones_like(v_f_pred),
            retain_graph=True, create_graph=True)[0]

        # Second derivative w.r.t. x, taken of the d/dx component only.
        # (The old code differentiated the *summed* gradient u_x + u_t, so
        # its "u_xx" column was really u_xx + u_tx; it also omitted
        # create_graph, detaching the residual from the parameters.)
        u_f_pred_xx = _d_dx(u_f_pred_xt[:, 0:1], coord_f)
        v_f_pred_xx = _d_dx(v_f_pred_xt[:, 0:1], coord_f)

        # Real and imaginary parts of the PDE residual (driven to zero).
        u_f_calc = u_f_pred_xt[:, 1:2] + 0.5 * v_f_pred_xx + (u_f_pred ** 2 + v_f_pred ** 2) * v_f_pred
        v_f_calc = v_f_pred_xt[:, 1:2] - 0.5 * u_f_pred_xx - (u_f_pred ** 2 + v_f_pred ** 2) * u_f_pred

    loss = loss_fn(u_init, u_init_pred, v_init, v_init_pred, u_bound_low_pred, u_bound_up_pred, v_bound_low_pred,
                   v_bound_up_pred, u_bound_low_pred_x, u_bound_up_pred_x, v_bound_low_pred_x, v_bound_up_pred_x,
                   u_f_calc, v_f_calc)

    if epoch % 10 == 0:
        print('Epoch: %d, Loss: %.3e' % (epoch, loss))

    # Single zero_grad (the old code called it twice, redundantly).
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()


# Mean-squared-error reducer shared by every term of the composite loss.
mse_fn = torch.nn.MSELoss(reduction='mean')


def loss_fn(u_init, u_init_pred, v_init, v_init_pred, u_bound_low_pred, u_bound_up_pred, v_bound_low_pred,
            v_bound_up_pred, u_bound_low_pred_x, u_bound_up_pred_x, v_bound_low_pred_x, v_bound_up_pred_x,
            u_f_calc, v_f_calc):
    """Composite PINN loss: initial-condition fit + periodic-boundary
    matching (values and x-derivatives) + PDE residual driven to zero.

    Uses torch.zeros_like so the residual targets inherit the device and
    dtype of the predictions — the previous torch.zeros(shape) always
    allocated on CPU and would fail for tensors on another device.
    """
    loss_init = mse_fn(u_init, u_init_pred) + mse_fn(v_init, v_init_pred)
    loss_bound = (mse_fn(u_bound_low_pred, u_bound_up_pred)
                  + mse_fn(v_bound_low_pred, v_bound_up_pred)
                  + mse_fn(u_bound_low_pred_x, u_bound_up_pred_x)
                  + mse_fn(v_bound_low_pred_x, v_bound_up_pred_x))
    loss_f = (mse_fn(u_f_calc, torch.zeros_like(u_f_calc))
              + mse_fn(v_f_calc, torch.zeros_like(v_f_calc)))
    return loss_init + loss_bound + loss_f


if __name__ == '__main__':
    # Datasets and loaders.  Each training loader uses batch_size equal to
    # the dataset length, so it yields the whole dataset as a single batch
    # per epoch — train() relies on this full-batch behavior.
    dataset_true = TrueDataset()
    dataloader_true = DataLoader(dataset=dataset_true)

    dataset_init = InitDataset()
    dataloader_init = DataLoader(dataset=dataset_init, batch_size=dataset_init.len, shuffle=False, num_workers=2)

    dataset_bound = BoundDataset()
    dataloader_bound = DataLoader(dataset=dataset_bound, batch_size=dataset_bound.len, shuffle=False, num_workers=2)

    dataset_f = FDataset()
    dataloader_f = DataLoader(dataset=dataset_f, batch_size=dataset_f.len, shuffle=False, num_workers=2)

    model = LinearModel()

    # Adam over all network weights.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # Full-batch training loop; train() prints the loss every 10 epochs.
    for epoch in range(50000):
        train(dataloader_init, dataloader_bound, dataloader_f, model, loss_fn, optimizer, epoch)
