import paddle
import paddle.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# Neural network model: a plain fully-connected MLP mapping (x, y, z) -> (u, v, w, p).
class DNN_single(nn.Layer):
    def __init__(self, depth=5, width=50, active=nn.Sigmoid()):
        """Build the network.

        Args:
            depth: controls the number of hidden layers. NOTE(review): the loop
                below runs ``depth + 1`` times, so the network gets depth + 2
                hidden blocks in total (including the input layer) — TODO
                confirm this off-by-one is intentional; kept as-is.
            width: neurons per hidden layer.
            active: activation layer instance, shared by every hidden layer.
        """
        super(DNN_single, self).__init__()
        self.depth = depth
        self.width = width
        self.active = active

        layers = nn.LayerList()
        # Input layer: 3 coordinates (x, y, z) -> width, Xavier-initialized.
        layers.append(nn.Linear(3, self.width, weight_attr=nn.initializer.XavierNormal()))
        layers.append(self.active)
        for _ in range(depth + 1):
            layers.append(nn.Linear(self.width, self.width, weight_attr=nn.initializer.XavierNormal()))
            layers.append(self.active)
        # Output layer: width -> 4 fields (u, v, w, p); no activation so the
        # outputs are unbounded.
        layers.append(nn.Linear(self.width, 4, weight_attr=nn.initializer.XavierNormal()))

        self.layers = nn.Sequential(*layers)

    def forward(self, inn_var):
        """Forward pass. ``inn_var`` is (N, 3); returns (N, 4)."""
        return self.layers(inn_var)

    def loadmodel(self, File):
        """Load a checkpoint saved by ``train`` and return its stored epoch.

        Returns 0 when loading fails, so the result is always usable as the
        starting epoch. (Bug fix: the failure branch used to return the tuple
        ``(0, [])`` while the success branch returned an int, so a caller
        doing ``start = net.loadmodel(f)`` got a tuple on failure.)
        """
        try:
            checkpoint = paddle.load(File)
            self.set_state_dict(checkpoint['model'])
            start_epoch = checkpoint['epoch']
            print("load start epoch at " + str(start_epoch))
            return start_epoch
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate.
            print("load model failed! start a new model.")
            return 0

# Auto Differentiation
def gradient(y, x, order=1, create=True):
    """Return dy/dx via paddle autograd.

    Args:
        y: tensor to differentiate. For ``order != 1`` it must be 2-D and the
           gradient of each column is taken separately.
        x: tensor to differentiate with respect to (must require grad).
        order: 1 -> single ``paddle.grad`` call; anything else -> column-wise
           gradients stacked on the last axis. (Despite the name this does not
           compute higher-order derivatives; callers obtain second derivatives
           by calling ``gradient`` twice.)
        create: whether to build the graph of the gradient itself (needed for
           further differentiation).

    Bug fix: the multi-column branch previously hard-coded
    ``create_graph=True`` and ignored ``create``.
    """
    if order == 1:
        return paddle.grad(y, x, create_graph=create, retain_graph=True)[0]
    # One grad call per column of y; stack the per-column gradients on a new
    # trailing axis.
    return paddle.stack(
        [paddle.autograd.grad([y[:, i].sum()], [x], create_graph=create, retain_graph=True)[0]
         for i in range(y.shape[1])], axis=-1)

# Residual of the governing PDEs (steady incompressible Navier-Stokes).
def loss_pde(Net, inn, rho, nu, criterion):
    """PDE residual loss at the collocation points ``inn`` (N, 3).

    Args:
        Net: network mapping (x, y, z) -> (u, v, w, p).
        inn: collocation coordinates; columns are x, y, z.
        rho: fluid density.
        nu: kinematic viscosity.
        criterion: reduction criterion (e.g. ``nn.MSELoss``) applied between
            each residual and zero.

    Returns:
        Sum of the continuity and the three momentum residual losses.

    Bug fix: the y- and z-momentum losses previously evaluated
    ``criterion(x_momentum, ...)``, so those two equations were never
    enforced.
    """
    # Split the coordinates into leaf tensors so each can be differentiated
    # against independently.
    x = inn[:, 0].reshape((-1, 1))
    y = inn[:, 1].reshape((-1, 1))
    z = inn[:, 2].reshape((-1, 1))
    out = Net(paddle.concat((x, y, z), axis=-1))  # columns: u, v, w, p

    u = out[:, 0:1]
    v = out[:, 1:2]
    w = out[:, 2:3]
    p = out[:, 3:4]

    # First derivatives of every field w.r.t. every coordinate.
    du_x = gradient(u, x)
    du_y = gradient(u, y)
    du_z = gradient(u, z)

    dv_x = gradient(v, x)
    dv_y = gradient(v, y)
    dv_z = gradient(v, z)

    dw_x = gradient(w, x)
    dw_y = gradient(w, y)
    dw_z = gradient(w, z)

    dp_x = gradient(p, x)
    dp_y = gradient(p, y)
    dp_z = gradient(p, z)

    # Unmixed second derivatives for the viscous (Laplacian) terms.
    du_x_x = gradient(du_x, x)
    du_y_y = gradient(du_y, y)
    du_z_z = gradient(du_z, z)

    dv_x_x = gradient(dv_x, x)
    dv_y_y = gradient(dv_y, y)
    dv_z_z = gradient(dv_z, z)

    dw_x_x = gradient(dw_x, x)
    dw_y_y = gradient(dw_y, y)
    dw_z_z = gradient(dw_z, z)

    # div(V) = 0 and the three momentum equations.
    continuity = du_x + dv_y + dw_z
    x_momentum = u * du_x + v * du_y + w * du_z + 1./rho * dp_x - nu * (du_x_x + du_y_y + du_z_z)
    y_momentum = u * dv_x + v * dv_y + w * dv_z + 1./rho * dp_y - nu * (dv_x_x + dv_y_y + dv_z_z)
    z_momentum = u * dw_x + v * dw_y + w * dw_z + 1./rho * dp_z - nu * (dw_x_x + dw_y_y + dw_z_z)

    loss_con = criterion(continuity, paddle.zeros_like(continuity))
    loss_x = criterion(x_momentum, paddle.zeros_like(x_momentum))
    loss_y = criterion(y_momentum, paddle.zeros_like(y_momentum))
    loss_z = criterion(z_momentum, paddle.zeros_like(z_momentum))
    return loss_con + loss_x + loss_y + loss_z

# Residual against the labeled data.
def loss_data(Net, inn, label, criterion):
    """Supervised loss between the network prediction and CFD labels.

    Args:
        Net: network mapping (x, y, z) -> (u, v, w, p).
        inn: coordinates, shape (N, 3).
        label: dict with keys 'u', 'v', 'w', 'p' holding the reference
            values for each output column.
        criterion: loss criterion applied per field.

    Returns:
        Sum of the four per-field losses (u + v + w + p, in that order).
    """
    coords = [inn[:, i].reshape((-1, 1)) for i in range(3)]
    pred = Net(paddle.concat(coords, axis=-1))

    total = None
    for col, key in enumerate(('u', 'v', 'w', 'p')):
        term = criterion(pred[:, col:col + 1], label[key])
        total = term if total is None else total + term
    return total

# Per-point PDE residual: each row of ``inn`` is one sample; returns the
# pointwise absolute residual (no reduction), useful for residual maps or
# adaptive sampling.
def loss_pde_forward(Net, inn):
    """Pointwise |residual| of the steady incompressible Navier-Stokes
    equations at the coordinates ``inn`` (N, 3).

    Uses fixed air-like properties rho = 1.2, nu = 1.5e-5.

    Returns:
        Tensor of shape (N, 1): |continuity| + |x-mom| + |y-mom| + |z-mom|.
    """
    rho = 1.2
    nu = 1.5e-5

    # Leaf coordinate tensors so each direction can be differentiated against.
    x = inn[:, 0].reshape((-1, 1))
    y = inn[:, 1].reshape((-1, 1))
    z = inn[:, 2].reshape((-1, 1))
    out = Net(paddle.concat((x, y, z), axis=-1))  # columns: u, v, w, p

    u = out[:, 0:1]
    v = out[:, 1:2]
    w = out[:, 2:3]
    p = out[:, 3:4]

    # First derivatives: d['ux'] == du/dx, d['py'] == dp/dy, etc.
    coords = (('x', x), ('y', y), ('z', z))
    d = {}
    for fname, field in (('u', u), ('v', v), ('w', w), ('p', p)):
        for cname, coord in coords:
            d[fname + cname] = gradient(field, coord)

    # Unmixed second derivatives summed into the Laplacian of each velocity
    # component (the viscous term).
    lap = {}
    for fname in ('u', 'v', 'w'):
        lap[fname] = (gradient(d[fname + 'x'], x)
                      + gradient(d[fname + 'y'], y)
                      + gradient(d[fname + 'z'], z))

    continuity = d['ux'] + d['vy'] + d['wz']
    x_momentum = u * d['ux'] + v * d['uy'] + w * d['uz'] + 1. / rho * d['px'] - nu * lap['u']
    y_momentum = u * d['vx'] + v * d['vy'] + w * d['vz'] + 1. / rho * d['py'] - nu * lap['v']
    z_momentum = u * d['wx'] + v * d['wy'] + w * d['wz'] + 1. / rho * d['pz'] - nu * lap['w']

    return (paddle.abs(continuity) + paddle.abs(x_momentum)
            + paddle.abs(y_momentum) + paddle.abs(z_momentum))

# Convert a tensor to a numpy array (pass numpy arrays through unchanged).
def to_numpy(inputs):
    """Return ``inputs`` as a numpy array.

    Bug fix: the original tested and returned the *builtin* ``input`` instead
    of the ``inputs`` argument, so ndarray arguments fell through to
    ``.numpy()`` and raised AttributeError.
    """
    if isinstance(inputs, np.ndarray):
        return inputs
    return inputs.numpy()

# Training: x_pde_train holds the vertex coordinates, label the corresponding
# CFD solution; the default optimizer is Adam with a cosine-annealed lr.
def train(Net, x_pde_train, label, best_loss, epoches = 20000, opt='Adam', lr = 1e-3, print_every = 100, step = 1, progress_path = 'work/progress/level_01',pdparas_name = 'ns.pdparams'):
    """Fit ``Net`` against the labeled data and checkpoint the best model.

    Args:
        Net: the model; must expose ``parameters()`` / ``state_dict()``.
        x_pde_train: training coordinates, shape (N, 3).
        label: dict of reference fields consumed by ``loss_data``.
        best_loss: checkpoint threshold; a checkpoint is written whenever the
            current loss is <= the best seen so far.
        epoches: number of epochs.
        opt: 'LBFGS' selects LBFGS; anything else uses Adam + scheduler.
        lr: initial learning rate for the cosine schedule (Adam path only).
            Bug fix: this used to be ignored — the scheduler hard-coded 1e-3.
        print_every: console-logging period in epochs.
        step: suffix for the loss history file.
        progress_path / pdparas_name: checkpoint and loss-history locations.
    """
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=lr, T_max=300000, verbose=False)
    optimizer = paddle.optimizer.Adam(parameters=Net.parameters(), learning_rate=scheduler)
    if opt in ['LBFGS']:
        optimizer = paddle.optimizer.LBFGS(parameters=Net.parameters())

    criterion = nn.MSELoss()

    loss_history = []

    for epoch in range(1, 1+epoches):

        if opt in ['LBFGS']:
            def closure():
                # LBFGS may evaluate the closure several times per step, so
                # it only computes the loss; logging happens once per epoch
                # below (bug fix: the history used to get one entry per
                # closure call).
                optimizer.clear_grad()
                LOSS = loss_data(Net, x_pde_train, label, criterion)
                LOSS.backward()
                return LOSS
            LOSS = optimizer.step(closure)
            if epoch % print_every == 0 or epoch == epoches:
                print(f'epoch{epoch}, total loss : {LOSS.item():.8f}')
            loss_history.append([epoch, LOSS.item()])
        else:
            optimizer.clear_grad()
            LOSS = loss_data(Net, x_pde_train, label, criterion)
            # Bug fix: the last-epoch condition was ``epoch == 1+epoches``,
            # which the loop never reaches.
            if epoch % print_every == 0 or epoch == epoches:
                print(f'epoch{epoch}, total loss : {LOSS.item():.8f}')
            loss_history.append([epoch, LOSS.item()])
            LOSS.backward()
            optimizer.step()
            scheduler.step()

        # Checkpoint whenever we match or beat the best loss seen so far.
        if LOSS.item() <= best_loss:
            obj = {'model':Net.state_dict(), 'opt':optimizer.state_dict(), 'epoch':epoch}
            path = progress_path + '/model_paras/' + pdparas_name
            paddle.save(obj, path)
            best_loss = LOSS.item()

    np.savetxt(progress_path + f'/loss/loss{step}.txt', loss_history)

# Visualize the result at the training points: velocity quiver + streamlines.
def show_quiver_stream(Net, xyz_and_labels):
    """Plot a velocity quiver and a streamline plot on the z = 1.5 slice.

    xyz_and_labels: array whose column-wise mean/std are used to standardize
    the query grid — assumes its first three columns are the training x/y/z
    coordinates, matching how the training inputs were standardized (TODO
    confirm against the data-loading code).
    """
    # 201x201 uniform grid over [0, 3] x [0, 3].
    x = np.linspace(0, 3, 201)
    y = np.linspace(0, 3, 201)
    xx, yy = np.meshgrid(x, y)

    x_p = xx.reshape(-1,1)
    y_p = yy.reshape(-1,1)

    xy_points = np.concatenate((x_p, y_p), axis=1)

    # Fixed slice height z = 1.5 for every grid point.
    z_points = 1.5 * np.ones((201*201, 1))
    xyz_points = np.concatenate((xy_points, z_points), axis=1)

    # test_data = xyz_points
    # Standardize the query points with the training-data statistics.
    mean = np.mean(xyz_and_labels, axis=0)  
    std = np.std(xyz_and_labels, axis=0) 
    sta_xyz_points = (xyz_points - mean[:3]) / std[:3]


    sta_xyz_points = paddle.to_tensor(sta_xyz_points, dtype='float32')

    # Predict (u, v, w, p) on the slice.
    result = to_numpy(Net(sta_xyz_points))
    u = result[:, 0]
    v = result[:, 1]
    w = result[:, 2]
    p = result[:, 3]
    vel = np.sqrt(u**2 + v**2 + w**2)

    # Velocity magnitude reshaped back to the grid (recomputed; overwrites
    # the flat ``vel`` above).
    U=(u**2+v**2+w**2)**0.5
    vel = U.reshape(201,201)
    fig, ax = plt.subplots(figsize=(20,8), num=1,dpi=100,edgecolor=None,frameon=True)
    vxnew = u.reshape(201,201)
    vynew = v.reshape(201,201)
    # Subsample every 3rd grid point so the arrows stay readable.
    num_per = 3
    ctf = plt.quiver(xx[::num_per,::num_per], yy[::num_per,::num_per], vxnew[::num_per,::num_per], vynew[::num_per,::num_per], vel[::num_per,::num_per], pivot='tip',cmap='RdBu_r')
    plt.colorbar(ctf, location='right')
    ax.set_aspect('equal', adjustable='box')
    ax.set_title('Velocity quiver')
    plt.show()

    # NOTE(review): this second figure reuses num=1, so it replaces the quiver
    # figure's slot after plt.show().
    from pylab import streamplot
    fig, ax=plt.subplots(figsize=(20,8), num=1,dpi=100,edgecolor=None,frameon=True)
    ax.set_aspect('equal', adjustable='box')
    ax.set_title('Stream line')
    streamplot(xx, yy, vxnew, vynew, density=1,broken_streamlines=False) # streamline plot
    plt.ylabel(r'Y')
    plt.xlabel(r'X')

def show_velocity(Net, xyz_and_labels):
    """Render the predicted velocity magnitude on the z = 1.5 slice as a 3-D
    surface (left panel) and a 3-D contour (right panel).

    xyz_and_labels: array whose column-wise mean/std standardize the query
    grid; its first three columns are taken as the x/y/z coordinates.
    """
    # 201x201 uniform grid over [0, 3] x [0, 3] at fixed height z = 1.5.
    axis_vals = np.linspace(0, 3, 201)
    grid_x, grid_y = np.meshgrid(axis_vals, axis_vals)
    flat_xy = np.concatenate((grid_x.reshape(-1, 1), grid_y.reshape(-1, 1)), axis=1)
    query = np.concatenate((flat_xy, 1.5 * np.ones((201 * 201, 1))), axis=1)

    # Standardize the query points with the training-data statistics.
    col_mean = np.mean(xyz_and_labels, axis=0)
    col_std = np.std(xyz_and_labels, axis=0)
    sta_query = paddle.to_tensor((query - col_mean[:3]) / col_std[:3], dtype='float32')

    # Predict and reduce to the velocity magnitude on the grid.
    pred = to_numpy(Net(sta_query))
    u, v, w = pred[:, 0], pred[:, 1], pred[:, 2]
    vel = np.sqrt(u ** 2 + v ** 2 + w ** 2).reshape(201, 201)

    # Two 3-D panels: surface on the left, dense contour on the right.
    fig = plt.figure(figsize=(20, 10), dpi=150)
    ax = fig.add_subplot(121, projection='3d')
    ctf = ax.plot_surface(grid_x, grid_y, vel, cmap="RdBu_r")
    ax2 = fig.add_subplot(122, projection='3d')
    ctf2 = ax2.contour3D(grid_x, grid_y, vel, levels=1000, cmap="RdBu_r")
    ax.set_aspect('equal')
    ax2.set_aspect('equal')
    plt.colorbar(ctf2, shrink=0.5, aspect=10)
    plt.colorbar(ctf, shrink=0.5, aspect=10)
    plt.xlabel(r'$x$')
    plt.ylabel(r'$y$')
    ax.set_title('Velocity surface')
    ax2.set_title('Velocity contour')
    plt.show()
    
def show_pressure(Net, xyz_and_labels):
    """Render the predicted pressure on the z = 1.5 slice as a 3-D surface
    (left panel) and a 3-D contour (right panel).

    xyz_and_labels: array whose column-wise mean/std standardize the query
    grid; its first three columns are taken as the x/y/z coordinates.
    """
    # 201x201 uniform grid over [0, 3] x [0, 3] at fixed height z = 1.5.
    axis_vals = np.linspace(0, 3, 201)
    grid_x, grid_y = np.meshgrid(axis_vals, axis_vals)
    flat_xy = np.concatenate((grid_x.reshape(-1, 1), grid_y.reshape(-1, 1)), axis=1)
    query = np.concatenate((flat_xy, 1.5 * np.ones((201 * 201, 1))), axis=1)

    # Standardize the query points with the training-data statistics.
    col_mean = np.mean(xyz_and_labels, axis=0)
    col_std = np.std(xyz_and_labels, axis=0)
    sta_query = paddle.to_tensor((query - col_mean[:3]) / col_std[:3], dtype='float32')

    # Predict and pull out the pressure column on the grid.
    pred = to_numpy(Net(sta_query))
    pre = pred[:, 3].reshape(201, 201)

    # Two 3-D panels: surface on the left, dense contour on the right.
    fig = plt.figure(figsize=(20, 10), dpi=150)
    ax = fig.add_subplot(121, projection='3d')
    ctf = ax.plot_surface(grid_x, grid_y, pre, cmap="RdBu_r")
    ax2 = fig.add_subplot(122, projection='3d')
    ctf2 = ax2.contour3D(grid_x, grid_y, pre, levels=1000, cmap="RdBu_r")
    ax.set_aspect('equal')
    ax2.set_aspect('equal')
    plt.colorbar(ctf2, shrink=0.5, aspect=10)
    plt.colorbar(ctf, shrink=0.5, aspect=10)
    plt.xlabel(r'$x$')
    plt.ylabel(r'$y$')
    ax.set_title('Pressure surface')
    ax2.set_title('Pressure contour')
    plt.show()
    

def show_slice_uvwp(Net, xyz_and_labels):
    """Draw a 2x2 grid of filled contours on the z = 1.5 slice:
    |V| (top-left), u (top-right), v (bottom-left) and p (bottom-right).

    xyz_and_labels: array whose column-wise mean/std standardize the query
    grid; its first three columns are taken as the x/y/z coordinates.
    """
    # 201x201 uniform grid over [0, 3] x [0, 3] at fixed height z = 1.5.
    axis_vals = np.linspace(0, 3, 201)
    grid_x, grid_y = np.meshgrid(axis_vals, axis_vals)
    query = np.concatenate(
        (grid_x.reshape(-1, 1), grid_y.reshape(-1, 1), 1.5 * np.ones((201 * 201, 1))),
        axis=1)

    # Standardize the query points with the training-data statistics.
    col_mean = np.mean(xyz_and_labels, axis=0)
    col_std = np.std(xyz_and_labels, axis=0)
    sta_query = paddle.to_tensor((query - col_mean[:3]) / col_std[:3], dtype='float32')

    # Predict (u, v, w, p) and reshape each plotted field onto the grid.
    pred = to_numpy(Net(sta_query))
    u, v, w, p = pred[:, 0], pred[:, 1], pred[:, 2], pred[:, 3]
    vel = np.sqrt(u ** 2 + v ** 2 + w ** 2).reshape(201, 201)
    field_u = u.reshape(201, 201)
    field_v = v.reshape(201, 201)
    field_p = p.reshape(201, 201)

    fig, ax = plt.subplots(2, 2, figsize=(10, 8), dpi=100, sharex=True, sharey=True)

    # One filled contour per panel, in reading order.
    panels = []
    for axes, field in ((ax[0][0], vel), (ax[0][1], field_u),
                        (ax[1][0], field_v), (ax[1][1], field_p)):
        panels.append(axes.contourf(grid_x, grid_y, field, levels=500, cmap="RdBu_r"))
        axes.set_aspect('equal')

    for panel in panels:
        plt.colorbar(panel)

    ax[1][0].set_xlabel(r'$X$')
    ax[1][1].set_xlabel(r'$X$')
    ax[0][0].set_ylabel(r'$Y$')
    ax[1][0].set_ylabel(r'$Y$')
    ax[0][0].set_title(r'$V$')
    ax[0][1].set_title(r'$V_x$')
    ax[1][0].set_title(r'$V_y$')
    ax[1][1].set_title(r'$Pressure$')
    plt.show()

