import os

import matplotlib.pyplot as plt
import numpy as np
from scipy import io
import torch
from torch import nn
from pyDOE import lhs

# Fix the random seeds (NumPy, PyTorch CPU and all CUDA devices) for reproducibility.
seed = 9527
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

mse_fn = torch.nn.MSELoss(reduction='mean')

# Number of (x, t) training points sampled from the full space-time grid.
N_train = 2000

# Reference solution of Burgers' equation; presumably keys 't', 'x', 'usol'
# hold the time grid, spatial grid, and solution field -- confirm file schema.
data_origin = io.loadmat('./Data/burgers_shock.mat')

t = data_origin['t'].flatten()[:, None]  # time coordinates as a column vector
x = data_origin['x'].flatten()[:, None]  # spatial coordinates as a column vector
Exact = np.real(data_origin['usol']).T   # solution values, transposed to (len(t), len(x))

X, T = np.meshgrid(x, t)

# Flatten the grid into rows of (x, t) pairs with matching solution values.
X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
u_star = Exact.flatten()[:, None]

# Per-column lower/upper bounds of the (x, t) domain, used to normalize
# network inputs to [-1, 1] in LinearModel.forward.
lb = torch.tensor(X_star.min(0), dtype=torch.float32, device=torch.device(device))
ub = torch.tensor(X_star.max(0), dtype=torch.float32, device=torch.device(device))

# Randomly sample N_train grid points without replacement. x_train and t_train
# require gradients because the PDE residual differentiates the network output
# with respect to them via torch.autograd.grad.
idx = np.random.choice(X_star.shape[0], N_train, replace=False)
X_train = X_star[idx, :]
x_train = torch.tensor(X_train[:, 0:1], dtype=torch.float32, device=torch.device(device)).requires_grad_(True)
t_train = torch.tensor(X_train[:, 1:2], dtype=torch.float32, device=torch.device(device)).requires_grad_(True)
u_train = torch.tensor(u_star[idx, :], dtype=torch.float32, device=torch.device(device))

class LinearModel(nn.Module):
    """Physics-informed network for identifying Burgers' equation parameters.

    A fully-connected tanh network approximates u(x, t) and is trained jointly
    with two scalar PDE parameters so that the residual

        f = u_t + lambda_1 * u * u_x - lambda_2 * u_xx

    vanishes at the training points. ``lambda_2`` is stored in log space (the
    raw parameter is exponentiated in ``forward``) so the effective viscosity
    stays strictly positive during optimization.

    NOTE: ``forward`` differentiates with respect to the module-level tensors
    ``x_train`` / ``t_train`` and normalizes with the module-level bounds
    ``lb`` / ``ub``; its ``coord_input`` argument must therefore be built from
    those same tensors, e.g. ``torch.cat([x_train, t_train], 1)``.
    """

    def __init__(self):
        super(LinearModel, self).__init__()

        # Unknown PDE coefficients to be identified during training.
        self.lambda_1 = nn.Parameter(torch.tensor(0.0, dtype=torch.float32, device=torch.device(device)))
        # log(lambda_2); initialized so lambda_2 = exp(-6) ≈ 2.5e-3.
        self.lambda_2 = nn.Parameter(torch.tensor(-6.0, dtype=torch.float32, device=torch.device(device)))

        # (x, t) -> u: input layer + 8 hidden tanh layers of width 20 + linear output.
        layers = [nn.Linear(2, 20), nn.Tanh()]
        for _ in range(8):
            layers.extend([nn.Linear(20, 20), nn.Tanh()])
        layers.append(nn.Linear(20, 1))
        self.linear_tanh_stack = nn.Sequential(*layers)

    def forward(self, coord_input):
        """Evaluate the network and the PDE residual at the training points.

        Parameters
        ----------
        coord_input : torch.Tensor
            (N, 2) tensor of (x, t) pairs concatenated from ``x_train`` and
            ``t_train`` (gradients are taken w.r.t. those global tensors).

        Returns
        -------
        tuple
            ``(u_pred, f_pred, lambda1, lambda2)`` — predicted solution,
            PDE residual, and the current parameter estimates.
        """
        lambda1 = self.lambda_1
        lambda2 = torch.exp(self.lambda_2)  # recover positive viscosity from log space

        # Normalize inputs to [-1, 1] over the problem domain.
        coord_input = (2.0 * (coord_input - lb) / (ub - lb) - 1.0)
        u_pred = self.linear_tanh_stack(coord_input)
        # First derivatives via autograd; create_graph=True so the residual
        # itself remains differentiable for loss.backward().
        u_pred_t = torch.autograd.grad(outputs=u_pred, inputs=t_train,
                                       grad_outputs=torch.ones_like(u_pred), create_graph=True)[0]
        u_pred_x = torch.autograd.grad(outputs=u_pred, inputs=x_train,
                                       grad_outputs=torch.ones_like(u_pred), create_graph=True)[0]
        # BUG FIX: the second spatial derivative must differentiate u_x w.r.t.
        # x. The original differentiated w.r.t. t_train, producing u_xt instead
        # of u_xx and corrupting the diffusion term of the residual.
        u_pred_xx = torch.autograd.grad(outputs=u_pred_x, inputs=x_train,
                                        grad_outputs=torch.ones_like(u_pred_x), create_graph=True)[0]
        # Burgers residual: u_t + lambda_1 * u * u_x - lambda_2 * u_xx.
        f_pred = u_pred_t + lambda1 * u_pred * u_pred_x - lambda2 * u_pred_xx

        return u_pred, f_pred, lambda1, lambda2


if __name__ == '__main__':
    model = LinearModel()
    model.to(device)

    # Adam drives the training loop below; an L-BFGS optimizer is prepared so
    # it can be swapped in (via ``optimizer = lbfgsOptimizer``) for fine-tuning.
    adamOptimizer = torch.optim.Adam(model.parameters())
    lbfgsOptimizer = torch.optim.LBFGS(model.parameters(), max_iter=50000, max_eval=50000, history_size=50,
                                       tolerance_change=1.0 * np.finfo(float).eps)
    optimizer = adamOptimizer

    total_epoch = 50000
    epoch = 0

    def closure():
        """Compute the combined data + PDE-residual loss and its gradients.

        Written in closure form so the same code drives both Adam (one
        evaluation per step) and L-BFGS (multiple re-evaluations per step).
        """
        # Single zero_grad per evaluation (the original called it twice).
        optimizer.zero_grad()
        u_pred, f_pred, lambda1, lambda2 = model(torch.cat([x_train, t_train], 1))
        # Data-misfit term plus PDE residual driven toward zero.
        loss = mse_fn(u_train, u_pred) + mse_fn(f_pred, torch.zeros(f_pred.shape, device=torch.device(device)))
        loss.backward()

        global epoch
        epoch += 1
        if epoch % 10 == 0:
            # .item() detaches the scalars for printing instead of formatting
            # live tensors.
            print('Epoch: %d, Loss: %.3e, Lambda_1: %.6f, Lambda_2: %.6f'
                  % (epoch, loss.item(), lambda1.item(), lambda2.item()))

        return loss

    # Run exactly total_epoch closure evaluations; the original ``<=`` bound
    # was an off-by-one that performed one extra epoch.
    while epoch < total_epoch:
        optimizer.step(closure)
