# Script setup: imports, backend selection, RNG seed, device choice, PDE constant.
import sys
sys.path.insert(0, '../Utilities/')  # make project-local utilities importable

from collections import OrderedDict
from locale import T_FMT  # NOTE(review): unused in this file — confirm before removing
import warnings

import matplotlib as mpl
# The backend must be chosen BEFORE pyplot is imported; in the original order
# (pyplot first, figure created, then mpl.use) the call had no effect.
mpl.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#from mpl_toolkits.axes_grid1 import make_axes_locatable

import numpy as np
import mkl
import oneflow as flow
from pyDOE import lhs

mkl.get_max_threads()
warnings.filterwarnings('ignore')

np.random.seed(1234)  # reproducible sampling of training points

# Figure is created here and filled by the plotting section at the end of the file.
fig = plt.figure(figsize=(9, 5))

# CUDA support
if flow.cuda.is_available():
    device = flow.device('cuda')
else:
    device = flow.device('cpu')

a = 0.4  # diffusion coefficient used in the PDE residual u_t - a * u_xx


class DNN(flow.nn.Module):
    """Fully connected feed-forward network with tanh activations.

    `layers` lists the width of every layer, e.g. [2, 20, ..., 1]; every
    hidden Linear is followed by Tanh, the final Linear has no activation.
    """

    def __init__(self, layers):
        super(DNN, self).__init__()

        # number of Linear layers in the stack
        self.depth = len(layers) - 1
        self.activation = flow.nn.Tanh

        modules = OrderedDict()
        # hidden layers: Linear + Tanh pairs
        for idx in range(self.depth - 1):
            modules['layer_%d' % idx] = flow.nn.Linear(layers[idx], layers[idx + 1])
            modules['activation_%d' % idx] = self.activation()
        # output layer: Linear only
        modules['layer_%d' % (self.depth - 1)] = flow.nn.Linear(layers[-2], layers[-1])

        # deploy layers
        self.layers = flow.nn.Sequential(modules)

    def forward(self, x):
        """Map an (N, layers[0]) batch to an (N, layers[-1]) prediction."""
        return self.layers(x)


class Net_U_F(flow.nn.Module):
    """Wraps a DNN and produces (u at data points, PDE residual at collocation points)."""

    def __init__(self, layers):
        super(Net_U_F, self).__init__()
        self.dnn = DNN(layers).to(device)

    @staticmethod
    def _grad(outputs, inputs):
        # First-order derivative d(outputs)/d(inputs); the graph is kept so the
        # result can itself be differentiated (needed for u_xx and for training).
        return flow.autograd.grad(
            outputs, inputs,
            grad_outputs=flow.ones_like(outputs),
            retain_graph=True,
            create_graph=True,
        )[0]

    def forward(self, x_u, t_u, x_f, t_f):
        """Return (u_pred at (x_u, t_u), residual f_pred at (x_f, t_f))."""
        u_pred = self.dnn(flow.cat([x_u, t_u], dim=1))

        u = self.dnn(flow.cat([x_f, t_f], dim=1))
        u_t = self._grad(u, t_f)
        u_x = self._grad(u, x_f)
        u_xx = self._grad(u_x, x_f)

        # residual of the heat equation u_t = a * u_xx (module-level constant `a`)
        f_pred = u_t - a * u_xx
        return (u_pred, f_pred)

class PhysicsInformedNN(flow.nn.Module):
    """Physics-informed model: supervised data loss at given points plus PDE
    residual loss at a fixed collocation set stored at construction time."""

    def __init__(self, u, X_f,layers):
        """
        u:      supervised target values (numpy array, one column expected)
        X_f:    collocation points, columns (x, t) (numpy array, shape (M, 2))
        layers: layer widths forwarded to the underlying DNN
        """
        super(PhysicsInformedNN, self).__init__()
        self.net_u_f = Net_U_F(layers)
        # Targets and collocation coordinates are moved to `device` once.
        # x_f / t_f require grad so the residual can differentiate w.r.t. them.
        self.u = flow.tensor(u).float().to(device)
        self.x_f = flow.tensor(X_f[:, 0:1], requires_grad=True).float().to(device)
        self.t_f = flow.tensor(X_f[:, 1:2], requires_grad=True).float().to(device)


    def forward(self, X):
        """Return the scalar training loss for supervised inputs X (numpy (N, 2)).

        Loss = MSE(self.u, u_pred at X) + MSE(residual at the stored collocation set).
        NOTE(review): assumes rows of X align row-for-row with `self.u` — confirm at call site.
        """

        x_u = flow.tensor(X[:, 0:1], requires_grad=True).float().to(device)
        t_u = flow.tensor(X[:, 1:2], requires_grad=True).float().to(device)
        self.net_u_f.train()  # re-enable training mode (eval() below may have cleared it)
   
        u_pred, f_pred = self.net_u_f(x_u, t_u, self.x_f, self.t_f)

        loss = flow.mean((self.u - u_pred) ** 2) + flow.mean(f_pred ** 2)
        return loss

    def eval(self, X):
        """Predict u at X and return (u, f) as numpy arrays.

        NOTE(review): this overrides flow.nn.Module.eval(), which normally takes
        no arguments — renaming to e.g. `predict` would be safer.
        NOTE(review): the returned residual `f` is evaluated at the stored
        collocation points (self.x_f / self.t_f), NOT at X — confirm intent.
        """
        self.net_u_f.eval()

        x = flow.tensor(X[:, 0:1], requires_grad=True).float().to(device)
        
        t = flow.tensor(X[:, 1:2], requires_grad=True).float().to(device)
        u, f = self.net_u_f(x, t, self.x_f, self.t_f)
        u = u.detach().cpu().numpy()
        f = f.detach().cpu().numpy()
        return u, f
    

nu = 0.01/np.pi  # NOTE(review): unused below — the diffusivity actually used is `a` (0.4)
N_u = 2000  # NOTE(review): unused below; boundary/initial point counts are hard-coded in the generators
layers = [2, 20, 20, 20, 20, 20, 20, 20, 20, 1]  # (x, t) in -> u out; 8 hidden layers of width 20


L=1  # spatial domain length; also appears in the initial condition sin(pi*x/L)

#边界
#边界 (boundary)
def gen_boundary_train_point():
    """Sample training data on the spatial boundary x ∈ {0, 1}.

    Returns (X_b, usol): X_b is a (2000, 2) array of (x, t) pairs built from
    40 random endpoint choices crossed with 50 uniform time values; usol is
    the (2000, 1) zero target enforcing u = 0 on the boundary.
    """
    x_num, t_num = (40, 50)
    x_min, t_min = (0, 0.0)
    x_max, t_max = (1, 1.0)

    # time axis and a random selection of the two spatial endpoints
    t = np.linspace(t_min, t_max, num=t_num).reshape(t_num, 1)
    x = np.random.choice([x_min, x_max], x_num)[:, None]

    # full cartesian product of the sampled x values with the time axis
    grid_x, grid_t = np.meshgrid(x, t)
    X_b = np.column_stack((grid_x.ravel(), grid_t.ravel()))
    usol = np.zeros((x_num * t_num, 1))
    return X_b, usol

def on_boundary(x, x_min, x_max):
    """Elementwise: True where the last axis of `x` holds a value ≈ x_min or x_max."""
    endpoints = np.asarray([x_min, x_max])
    # broadcast x against both endpoints, then collapse the comparison axis
    return np.isclose(x, endpoints).any(axis=-1)

#初始
#初始 (initial condition)
def gen_initial_train_point(num=3000, x_min=0, x_max=None):
    """Build initial-condition training data at t = 0.

    Generalized from hard-coded values: `num` grid points on [x_min, x_max];
    the defaults reproduce the original behavior (3000 points on [0, L]).
    Endpoint rows are dropped because the boundary generator already covers them.

    Returns (X_i, usol): X_i has columns (x, 0); usol = sin(pi * x / x_max).
    """
    if x_max is None:
        x_max = L  # module-level domain length
    x = np.linspace(x_min, x_max, num=num).reshape(num, 1)
    x = x[~on_boundary(x, x_min, x_max)]  # strip the x_min / x_max rows
    t = np.zeros(x.shape[0]).reshape(x.shape[0], 1)
    X_i = np.hstack((x, t))
    usol = np.sin(np.pi * X_i[:, 0:1] / x_max)
    return X_i, usol

N_f = 5000  # number of Latin-hypercube collocation points
x_min, t_min = (0, 0.0)
x_max, t_max = (1, 1.0)

def gen_domain_train_point(x_min, x_max, t_min, t_max, n_f=N_f):
    """Draw `n_f` collocation points in [x_min, x_max] x [t_min, t_max] via Latin hypercube sampling.

    `n_f` defaults to the module-level N_f, so existing call sites behave
    unchanged; previously the function silently read the global despite taking
    every other bound as a parameter.
    """
    X_min = np.array([x_min, t_min], dtype='float32')
    X_max = np.array([x_max, t_max], dtype='float32')
    # lhs(2, n) returns n samples in the unit square; rescale to the requested box
    return X_min + (X_max - X_min) * lhs(2, n_f)


# Assemble the supervised set (boundary + initial data) and the collocation set.
X_u_train_b, usol_b = gen_boundary_train_point()
X_u_train_i, usol_i = gen_initial_train_point()

u_train = np.concatenate((usol_b, usol_i), axis=0)
X_u_train = np.concatenate((X_u_train_b, X_u_train_i), axis=0)

X_f_train = gen_domain_train_point(x_min, x_max, t_min, t_max)

# Evaluation grid: every supervised point followed by every collocation point.
x_test = np.concatenate((X_u_train, X_f_train), axis=0)

my_model = PhysicsInformedNN(u_train, x_test, layers)

optimizer = flow.optim.Adam(my_model.parameters(), lr=1e-4)


# Adam optimisation: every step evaluates the physics-informed loss on the
# supervised points plus the collocation set held inside the model.
# (The dead commented-out logging and the never-read u_pred_test list were removed.)
# NOTE(review): range(1, 5000) performs 4999 steps, not 5000 — confirm intent.
for i in range(1, 5000):
    l = my_model(X_u_train)  # scalar loss tensor
    optimizer.zero_grad()    # clear accumulated gradients
    l.backward()             # back-propagate through the net and the PDE residual
    optimizer.step()         # Adam parameter update


# Final prediction over all training/collocation points (single eval call; the
# original evaluated the model twice for identical results).
u_test, f_test = my_model.eval(x_test)

#######plt#########

# 3-D surface of the predicted u(x, t). The unused np.meshgrid of the two
# ~10k-element coordinate vectors was removed: it allocated two ~10k x 10k
# arrays (~1.6 GB) that were never read.
# matplotlib >= 3.6 removed fig.gca(projection=...); add_subplot is the
# supported replacement.
ax = fig.add_subplot(projection='3d')
ax.plot_trisurf(x_test[:, 0], x_test[:, 1], u_test[:, 0],
                linewidth=0.3, antialiased=True, cmap='rainbow', alpha=0.8)

ax.set_xlabel("$x$")
ax.set_ylabel("$t$")
# NOTE(review): `i` leaks from the training loop, so this renders "$u_{5000}$";
# a plain "$u$" label was probably intended — confirm.
ax.set_zlabel("$u_{}$".format(i + 1))
plt.show()

