import numpy as np
from random import uniform

import torch
from torch import nn
import torch.optim as optim


# Pick GPU when available. NOTE(review): the PINN class below still calls
# .cuda() directly, so as written a CUDA device is effectively required.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

N_u=10000  # number of boundary / initial-condition training points
N_f=100000 # number of interior collocation points for the PDE residual
constant= 0.01 / np.pi  # viscosity coefficient used in the residual u_t + u*u_x - c*u_xx
layers = np.array([2,30,30,30,30,30,30,30,30,1]) # layer widths: input (x,t) -> scalar u

def generate_data():
    """Assemble PINN training points on (x, t) in [-1, 1] x [0, 1].

    Returns:
        (N_f + N_u, 2) float ndarray of (x, t) rows: the first N_f rows
        are uniformly sampled interior collocation points, followed by
        the N_u shuffled boundary/initial points (x = +-1 with t in
        [0, 1), and t = 0 with x in [-1, 1)).
    """
    # Boundary points: x = +1 and x = -1, each with random t in [0, 1).
    x_upper = np.ones((N_u // 4, 1), dtype=float)
    x_lower = -np.ones((N_u // 4, 1), dtype=float)
    t_upper = np.random.rand(N_u // 4, 1)
    t_lower = np.random.rand(N_u // 4, 1)

    # Initial-condition points: t = 0, random x in [-1, 1).
    t_zero = np.zeros((N_u // 2, 1), dtype=float)
    x_zero = -1 + 2 * np.random.rand(N_u // 2, 1)

    X_u_train = np.vstack((
        np.hstack((x_upper, t_upper)),
        np.hstack((x_lower, t_lower)),
        np.hstack((x_zero, t_zero)),
    ))

    # Shuffle the N_u boundary/initial points.
    X_u_train = X_u_train[np.random.permutation(N_u), :]

    # Collocation points: vectorized uniform sampling replaces the original
    # per-row `random.uniform` Python loop (same distribution, far faster).
    X_f_train = np.column_stack((
        np.random.uniform(-1, 1, size=N_f),  # x in [-1, 1)
        np.random.uniform(0, 1, size=N_f),   # t in [0, 1)
    ))

    return np.vstack((X_f_train, X_u_train))

def f_ic(x):
  """Initial condition u(x, 0) = -sin(pi * x) for the Burgers problem.

  Uses the plain Python float ``np.pi`` instead of wrapping it in
  ``torch.tensor`` so no throwaway CPU tensor is allocated per call and
  the result keeps x's device and dtype.
  """
  return -torch.sin(np.pi * x)

class NeuralNetwork(nn.Module):
  """Fully connected tanh MLP mapping a concatenated (x, t) pair to u.

  Args:
      layers: sequence of layer widths, e.g. [2, 30, ..., 1].
  """

  def __init__(self, layers):
    super().__init__()

    # One Linear module per consecutive pair of widths.
    self.linears = nn.ModuleList(
        [nn.Linear(layers[i], layers[i+1]) for i in range(len(layers)-1)])
    self.activation = nn.Tanh()

    # Xavier-normal weights, zero biases.
    for linear in self.linears:
      nn.init.xavier_normal_(linear.weight.data, gain=1.0)
      nn.init.zeros_(linear.bias.data)

  def forward(self, x, t):
    """Forward pass: concatenate x and t column vectors, return u."""
    # Bug fix: iterate over this module's own layer list instead of the
    # module-level global `layers`, so the network works for whatever
    # architecture was passed to the constructor.
    out = torch.cat((x, t), dim=1)
    for linear in self.linears[:-1]:
      out = self.activation(linear(out))
    return self.linears[-1](out)  # final layer is linear (no activation)

class PINN():
    """Physics-informed network for the viscous Burgers equation.

    The initial and boundary conditions are imposed exactly through the
    ansatz in ``u_hat``, so training only minimizes the mean squared PDE
    residual over the collocation points with L-BFGS.

    Args:
        X_f: (N, 2) tensor whose columns are x and t collocation values.
    """

    def __init__(self, X_f):
        # Split into x and t columns; both need gradients so the PDE
        # residual can be formed with autograd.
        self.x_f = X_f[:, 0].reshape(-1, 1).clone().detach().requires_grad_(True)
        self.t_f = X_f[:, 1].reshape(-1, 1).clone().detach().requires_grad_(True)

        # Zero target for the residual loss.
        # Bug fix: use the module-level `device` instead of hard-coded
        # .cuda() so the code also runs on CPU-only machines.
        self.null = torch.zeros((self.x_f.shape[0], 1), device=device)

        self.net = NeuralNetwork(layers).to(device)

        self.optimizer = torch.optim.LBFGS(self.net.parameters(),
                                           lr=1,
                                           max_iter=50000,
                                           max_eval=50000,
                                           history_size=50,
                                           tolerance_grad=1e-08,
                                           tolerance_change=0.5 * np.finfo(float).eps,
                                           line_search_fn="strong_wolfe")

        self.ls = 0      # latest loss value
        self.iter = 0    # closure evaluation counter

    def u_hat(self, x, t):
        """Trial solution that satisfies the IC/BCs by construction.

        (1 - x^2) vanishes at x = +-1 and t vanishes at t = 0, so the
        network term cannot violate the constraints; exp(-t) * f_ic(x)
        supplies the exact initial condition at t = 0.
        """
        u_pred = self.net(x, t)
        psi = ( (1 - (x ** 2)) * (t * u_pred) ) + ( (torch.exp(-t)) * f_ic(x) )
        return psi

    def f_hat(self, x, t):
        """Burgers residual f = u_t + u*u_x - c*u_xx via autograd."""
        u = self.u_hat(x, t)

        u_t = torch.autograd.grad(
            u, t,
            grad_outputs=torch.ones_like(u),
            retain_graph=True,
            create_graph=True)[0]

        u_x = torch.autograd.grad(
            u, x,
            grad_outputs=torch.ones_like(u),
            retain_graph=True,
            create_graph=True)[0]

        u_xx = torch.autograd.grad(
            u_x, x,
            grad_outputs=torch.ones_like(u_x),
            retain_graph=True,
            create_graph=True)[0]

        return u_t + (u * u_x) - (constant * u_xx)

    def closure(self):
        """L-BFGS closure: compute the residual MSE and backpropagate."""
        self.optimizer.zero_grad()

        f_prediction = self.f_hat(self.x_f, self.t_f)

        f_loss = loss_function(f_prediction, self.null)
        self.ls = f_loss

        self.ls.backward()

        self.iter += 1
        if not self.iter % 100:
            # float(...) extracts the scalar so str.format works everywhere.
            print('Epoch: {0:}, Loss: {1:6.9f}'.format(self.iter, float(self.ls)))

        return self.ls

    def train(self):
        """Run the full L-BFGS optimization (step calls closure repeatedly)."""
        self.net.train()
        self.optimizer.step(self.closure)
        
# Mean-squared-error loss; read globally by PINN.closure().
loss_function = nn.MSELoss()

# Build the training set and move it straight onto the target device.
X_f_train = torch.tensor(generate_data(), dtype=torch.float32, device=device)

# Train the physics-informed network with L-BFGS.
pin = PINN(X_f_train)
pin.train()



import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_solution():
    """Evaluate the trained PINN on a grid and save a u(x, t) heat map to test.png."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    x = torch.linspace(-1, 1, 200).to(device)
    t = torch.linspace(0, 1, 100).to(device)

    # Full (x, t) grid. Fix: pass indexing='ij' explicitly — it matches
    # the reshape below (x varies along dim 0, t along dim 1) and
    # silences torch.meshgrid's default-indexing deprecation warning.
    X, T = torch.meshgrid(x, t, indexing='ij')

    # Flatten to column vectors and evaluate the trained network.
    xcol = X.reshape(-1, 1)
    tcol = T.reshape(-1, 1)
    usol = pin.u_hat(xcol, tcol)

    # Reshape the flat predictions back onto the grid.
    U = usol.reshape(x.numel(), t.numel())

    # Transfer to NumPy for matplotlib.
    xnp = x.cpu().numpy()
    tnp = t.cpu().numpy()
    Unp = U.detach().cpu().numpy()

    fig = plt.figure(figsize=(9, 4.5))
    ax = fig.add_subplot(111)

    h = ax.imshow(Unp,
                  interpolation='nearest',
                  cmap='rainbow',
                  extent=[tnp.min(), tnp.max(), xnp.min(), xnp.max()],
                  origin='lower', aspect='auto')

    plt.xlabel('t')
    plt.ylabel('x')
    plt.title('u(x,t)', fontsize=10)

    # Colorbar in its own axis so it does not squash the image.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.10)
    cbar = fig.colorbar(h, cax=cax)
    cbar.ax.tick_params(labelsize=10)
    fig.savefig("test.png")


plot_solution()