from cProfile import label
from re import L
import sys
from turtle import color, forward
sys.path.insert(0, '../Utilities/')

import oneflow as flow
from collections import OrderedDict

import numpy as np
import matplotlib.pyplot as plt
#import scipy.io
#from scipy.interpolate import griddata
#from plotting import newfig, savefig
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.gridspec as gridspec
#import time

# Fix the RNG seed so sampling is reproducible across runs.
np.random.seed(1234)

# CUDA support: run on GPU when one is available, otherwise fall back to CPU.
device = flow.device('cuda' if flow.cuda.is_available() else 'cpu')

# the deep neural network
# the deep neural network
class DNN(flow.nn.Module):
    """Plain fully-connected network.

    `layers` lists the width of every layer (input first, output last).
    Each hidden Linear layer is followed by a Tanh activation; the final
    Linear layer is left un-activated.
    """

    def __init__(self, layers):
        super(DNN, self).__init__()

        # Number of Linear layers in the stack.
        self.depth = len(layers) - 1

        # Activation class used after every hidden layer.
        self.activation = flow.nn.Tanh

        # Assemble named modules directly into an ordered mapping:
        # layer_0, activation_0, layer_1, activation_1, ..., layer_{depth-1}.
        modules = OrderedDict()
        for idx in range(self.depth - 1):
            modules['layer_%d' % idx] = flow.nn.Linear(layers[idx], layers[idx + 1])
            modules['activation_%d' % idx] = self.activation()
        modules['layer_%d' % (self.depth - 1)] = flow.nn.Linear(layers[-2], layers[-1])

        # deploy layers
        self.layers = flow.nn.Sequential(modules)

    def forward(self, x):
        """Apply the full Linear/Tanh stack to `x` and return the output."""
        return self.layers(x)


class Net_U_F(flow.nn.Module):
    """autograd version of calculating residual.

    Wraps a DNN approximating u(x) and, in `forward`, differentiates it
    twice w.r.t. x to form the residual of -u'' = pi^2 sin(pi x), which
    is zero at the exact solution u(x) = sin(pi x).
    """

    def __init__(self, layers):
        super(Net_U_F, self).__init__()

        # create net
        self.dnn = DNN(layers).to(device)

    def forward(self, x):
        """Return (u_pred, f_pred): network output and PDE residual at x."""
        u_pred = self.dnn(x)

        def _ddx(outputs):
            # d(outputs)/dx with the graph kept alive so a second
            # derivative can be taken through this result.
            return flow.autograd.grad(
                outputs, x,
                grad_outputs=flow.ones_like(outputs),
                retain_graph=True,
                create_graph=True,
            )[0]

        u_x = _ddx(u_pred)
        u_xx = _ddx(u_x)

        # condition -> 0: residual of -u'' - pi^2 sin(pi x)
        f_pred = -u_xx - np.pi ** 2 * flow.sin(np.pi * x)
        return (u_pred, f_pred)


# the physics-guided neural network
# the physics-guided neural network
class PhysicsInformedNN(flow.nn.Module):
    """Physics-informed model: data-fit loss on u plus PDE-residual loss."""

    def __init__(self, u, layers):
        super(PhysicsInformedNN, self).__init__()
        self.net_u_f = Net_U_F(layers)
        # Target u values at the training points, held on the compute device.
        self.u = flow.tensor(u).float().to(device)

    def forward(self, X):
        """Compute the composite training loss at the points X."""
        self.net_u_f.train()
        x = flow.tensor(X, requires_grad=True).float().to(device)
        u_pred, f_pred = self.net_u_f(x)
        # MSE against the data targets plus MSE of the PDE residual.
        data_term = flow.mean((self.u - u_pred) ** 2)
        residual_term = flow.mean(f_pred ** 2)
        return data_term + residual_term

    def eval(self, X):
        """Predict (u, f) at X as numpy arrays.

        NOTE(review): this shadows nn.Module.eval() with a different
        signature; callers in this script only ever use it for prediction.
        """
        self.net_u_f.eval()
        x = flow.tensor(X, requires_grad=True).float().to(device)
        u, f = self.net_u_f(x)
        return u.detach().cpu().numpy(), f.detach().cpu().numpy()
     

def func(x):
    """Exact solution u(x) = sin(pi * x) used to generate train/test data."""
    scaled = np.pi * x
    return np.sin(scaled)

# Problem domain is the interval [-1, 1].
l, r = -1.0, 1.0

## generate data
# sample train data: 16 interior points plus the 2 boundary points.
num_domain = 16
num_boundary = 2
# linspace with endpoint=False yields num_domain+1 points starting at l;
# dropping the first leaves the interior points only (no boundary).
x_domain = np.linspace(l, r, num_domain + 1, endpoint=False)[1:, None]
x_bd = np.array([[l], [r]])
u_domain = func(x_domain)
u_bd = func(x_bd)
# Stack interior and boundary samples into single (N, 1) training arrays.
x_train = np.concatenate((x_domain, x_bd), axis=0)
u_train = np.concatenate((u_domain, u_bd), axis=0)

# sample test data, without boundary points
num_test = 100
test_x = np.linspace(l, r, num_test + 1, endpoint=False)[1:, None]
test_y = func(test_x)


# Network architecture: 1 input, three hidden layers of width 50, 1 output.
layers = [1, 50, 50, 50, 1]

# train net
model = PhysicsInformedNN(u_train, layers)
optimizer = flow.optim.Adam(model.parameters(), lr=1e-4)

# NOTE: range(1, 10000) performs 9999 optimization steps.
for i in range(1, 10000):
    # Forward pass computes the composite data + residual loss.
    loss = model(x_train)

    # BUGFIX: removed dead `weights = model.state_dict()` that was
    # recomputed and discarded every iteration, and the always-true
    # `if i % 1 == 0` guard around this print (output is unchanged:
    # the loss is still reported every step).
    print(
        '========step: %d, Loss: %e' %
        (
            i,
            loss
        ), flush=True
    )
    if i % 1000 == 0:
        # Periodic check: relative L2 error of u on the test grid.
        u_pred, f_pred = model.eval(test_x)
        error_u = np.linalg.norm(test_y - u_pred, 2) / np.linalg.norm(test_y, 2)
        print('Residual u: %e' % (error_u))

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# predict
fig = plt.figure(figsize=(8, 5))

# Dense grid of the exact solution for reference.
xline = np.linspace(-1, 1, 1000)
yline = func(xline)
plt.plot(xline, yline, color='gray', label='Exact')
# BUGFIX: the original passed cmap='red'; `cmap` names a colormap and is
# ignored when no `c` array is supplied, so the markers were not red.
# `color='red'` draws the intended red training markers.
plt.scatter(x_train, u_train, color='red', label='Train')

predict, _ = model.eval(test_x)

plt.plot(test_x, predict, color='Green', linestyle='dashed', label='Prediction')
plt.xlabel('x')
plt.ylabel('u(x)')
plt.legend(loc='upper left')

plt.show()


