import paddle
import paddle.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# Neural network model
class DNN_single(nn.Layer):
    """Fully connected network mapping coordinates (x, y) -> (u, v, p, rho).

    Architecture: Linear(2 -> width) + activation, then `depth + 1` hidden
    Linear(width -> width) + activation blocks, then Linear(width -> 4).
    All linear layers use Xavier-normal weight initialization.

    Args:
        depth:  number controlling the hidden-layer count (the loop below
                builds depth + 1 hidden blocks).
        width:  hidden-layer width.
        active: activation layer instance shared by every hidden block.
    """

    def __init__(self, depth=5, width=50, active=nn.Sigmoid()):
        super(DNN_single, self).__init__()
        self.depth = depth
        self.width = width
        self.active = active

        layers = nn.LayerList()
        layers.append(nn.Linear(2, self.width, weight_attr=nn.initializer.XavierNormal()))
        layers.append(self.active)
        # NOTE(review): this creates depth + 1 hidden blocks, not depth —
        # preserved as-is so existing checkpoints keep loading; confirm intent.
        for _ in range(depth + 1):
            layers.append(nn.Linear(self.width, self.width, weight_attr=nn.initializer.XavierNormal()))
            layers.append(self.active)
        layers.append(nn.Linear(self.width, 4, weight_attr=nn.initializer.XavierNormal()))

        self.layers = nn.Sequential(*layers)

    def forward(self, inn_var):
        """Forward pass: (N, 2) coordinates -> (N, 4) field predictions."""
        return self.layers(inn_var)

    def loadmodel(self, File):
        """Restore weights from checkpoint `File`.

        Returns:
            The epoch stored in the checkpoint, or 0 when loading fails
            (a fresh model is then trained from scratch).
        """
        try:
            checkpoint = paddle.load(File)
            self.set_state_dict(checkpoint['model'])
            start_epoch = checkpoint['epoch']
            print("load start epoch at " + str(start_epoch))
            return start_epoch
        except Exception:
            # BUG FIX: the failure path previously returned a tuple (0, [])
            # while the success path returned a plain int; return 0 so callers
            # always receive an int. Bare `except:` narrowed to Exception.
            print("load model failed! start a new model.")
            return 0

# Auto Differentiation
def gradient(y, x, order=1, create=True):
    """Automatic differentiation of `y` with respect to `x`.

    Args:
        y: tensor to differentiate. For order != 1 it must be 2-D; each
           column is differentiated separately and the results stacked.
        x: tensor to differentiate against.
        order: 1 for a plain gradient; any other value triggers the
               per-column stacked variant (the name suggests higher-order
               derivatives, but the code differentiates each column once).
        create: whether to build the graph of the gradient itself, enabling
                further differentiation.

    Returns:
        The gradient tensor; for the multi-column branch, the per-column
        gradients stacked along the last axis.
    """
    if order == 1:
        return paddle.grad(y, x, create_graph=create, retain_graph=True)[0]
    # BUG FIX: honor `create` here as well — it was hardcoded to True, so
    # callers could not disable graph creation in this branch. Also use
    # paddle.grad consistently with the branch above.
    cols = [paddle.grad([y[:, i].sum()], [x], create_graph=create, retain_graph=True)[0]
            for i in range(y.shape[1])]
    return paddle.stack(cols, axis=-1)

# Residual loss of the partial differential equations
def loss_pde(Net, inn, criterion):
    """PDE residual loss for the steady 2D compressible Euler equations.

    Args:
        Net: network mapping (x, y) -> (u, v, p, rho).
        inn: (N, 2) coordinate tensor; columns are x and y.
        criterion: elementwise loss (e.g. MSELoss) applied residual-vs-zero.

    Returns:
        Scalar: sum of the continuity, x/y-momentum and energy residual losses.
    """
    x = inn[:, 0].reshape((-1, 1))
    y = inn[:, 1].reshape((-1, 1))
    out = Net(paddle.concat((x, y), axis=-1))

    gamma = 1.4  # ratio of specific heats
    u = out[:, 0:1]
    v = out[:, 1:2]
    p = out[:, 2:3]
    rho = out[:, 3:4]

    drhou_x = gradient(rho*u, x)
    drhov_y = gradient(rho*v, y)

    drhouu_x = gradient(rho*u*u, x)
    drhouv_y = gradient(rho*u*v, y)
    drhouv_x = gradient(rho*u*v, x)
    drhovv_y = gradient(rho*v*v, y)

    dp_x = gradient(p, x)
    dp_y = gradient(p, y)

    # Total energy per unit volume
    E = p/(gamma-1) + 0.5*rho*(u**2 + v**2)
    duE_x = gradient(u*E, x)
    dvE_y = gradient(v*E, y)

    continuity = drhou_x + drhov_y
    # NOTE(review): the gamma factor on the convective momentum terms is
    # non-standard for the Euler equations — presumably a chosen
    # non-dimensionalization; confirm against the problem setup.
    x_momentum = gamma * (drhouu_x + drhouv_y) + dp_x
    y_momentum = gamma * (drhouv_x + drhovv_y) + dp_y
    energy = duE_x + dvE_y

    loss_con = criterion(continuity, paddle.zeros_like(continuity))
    loss_x = criterion(x_momentum, paddle.zeros_like(x_momentum))
    # BUG FIX: this previously computed criterion(x_momentum, ...), so the
    # y-momentum residual was never penalized.
    loss_y = criterion(y_momentum, paddle.zeros_like(y_momentum))
    loss_energy = criterion(energy, paddle.zeros_like(energy))
    return loss_con + loss_x + loss_y + loss_energy

# Residual loss against labeled data
def loss_data(Net, inn, label, criterion):
    """Supervised loss: network predictions vs. reference CFD fields.

    Args:
        Net: network mapping (x, y) -> (u, v, p, rho).
        inn: (N, 2) coordinate tensor.
        label: dict with keys 'u', 'v', 'p', 'rho' holding reference values.
        criterion: elementwise loss (e.g. MSELoss).

    Returns:
        Scalar: sum of the per-field losses.
    """
    coords = paddle.concat((inn[:, 0].reshape((-1, 1)),
                            inn[:, 1].reshape((-1, 1))), axis=-1)
    pred = Net(coords)

    # Output columns in order: u, v, p, rho — matched to the label keys.
    total = None
    for col, key in enumerate(('u', 'v', 'p', 'rho')):
        term = criterion(pred[:, col:col + 1], label[key])
        total = term if total is None else total + term
    return total

# Compute the PDE residual pointwise; every point is one sample
def loss_pde_forward(Net, inn):
    """Pointwise PDE residual magnitude for the steady 2D Euler equations.

    Unlike loss_pde, no reduction is applied: the result keeps one value per
    input point, namely |continuity| + |x-momentum| + |y-momentum| + |energy|.

    Args:
        Net: network mapping (x, y) -> (u, v, p, rho).
        inn: (N, 2) coordinate tensor.

    Returns:
        (N, 1) tensor of summed absolute residuals.
    """
    xc = inn[:, 0].reshape((-1, 1))
    yc = inn[:, 1].reshape((-1, 1))
    fields = Net(paddle.concat((xc, yc), axis=-1))

    gamma = 1.4  # ratio of specific heats
    vel_x = fields[:, 0:1]
    vel_y = fields[:, 1:2]
    press = fields[:, 2:3]
    dens = fields[:, 3:4]

    # Mass conservation: d(rho u)/dx + d(rho v)/dy
    mass_res = gradient(dens*vel_x, xc) + gradient(dens*vel_y, yc)

    # Momentum balance (gamma-scaled convective terms, as in loss_pde)
    mom_x_res = gamma * (gradient(dens*vel_x*vel_x, xc)
                         + gradient(dens*vel_x*vel_y, yc)) + gradient(press, xc)
    mom_y_res = gamma * (gradient(dens*vel_x*vel_y, xc)
                         + gradient(dens*vel_y*vel_y, yc)) + gradient(press, yc)

    # Energy transport using total energy per unit volume
    total_E = press/(gamma-1) + 0.5*dens*(vel_x**2 + vel_y**2)
    energy_res = gradient(vel_x*total_E, xc) + gradient(vel_y*total_E, yc)

    return (paddle.abs(mass_res) + paddle.abs(mom_x_res)
            + paddle.abs(mom_y_res) + paddle.abs(energy_res))

# Convert a tensor to a numpy array
def to_numpy(inputs):
    """Return `inputs` as a numpy array.

    Ndarrays pass through unchanged; anything else is assumed to expose a
    `.numpy()` method (e.g. a paddle tensor).

    BUG FIX: the original tested `isinstance(input, np.ndarray)` against the
    `input` *builtin* rather than the `inputs` parameter, so ndarray
    arguments always fell through to `.numpy()` and crashed.
    """
    if isinstance(inputs, np.ndarray):
        return inputs
    return inputs.numpy()

# Training: x_pde_train holds vertex coordinates, label the corresponding CFD solution; default optimizer is Adam
def train(Net, x_pde_train, label, best_loss, epoches=20000, opt='Adam', lr=1e-3,
          print_every=100, step=1, progress_path='work/progress/level_01',
          pdparas_name='eular.pdparams'):
    """Train the PINN on data + PDE residual losses.

    Args:
        Net: the network to train (updated in place).
        x_pde_train: (N, 2) training coordinates.
        label: dict of reference fields for loss_data.
        best_loss: checkpoint threshold; a checkpoint is saved whenever the
            total loss drops to or below the best seen so far.
        epoches: number of training epochs.
        opt: 'LBFGS' selects LBFGS; anything else uses Adam with `lr`.
        print_every: progress-print interval in epochs.
        step: suffix for the loss-history output file.
        progress_path: base directory for checkpoints and loss logs
            (subdirectories must already exist).
        pdparas_name: checkpoint file name.

    Side effects: prints progress, saves checkpoints, writes
    `progress_path/loss/loss{step}.txt`.
    """
    # BUG FIX: Adam was always constructed and then discarded when LBFGS was
    # requested; build only the optimizer actually used.
    if opt in ['LBFGS']:
        # optimizer = paddle.incubate.optimizer.LBFGS(parameters=Net.parameters(), max_iter=100)
        optimizer = paddle.optimizer.LBFGS(parameters=Net.parameters())
    else:
        optimizer = paddle.optimizer.Adam(parameters=Net.parameters(), learning_rate=lr)

    criterion = nn.MSELoss()

    loss_history = []

    for epoch in range(1, 1 + epoches):

        if opt in ['LBFGS']:
            def closure():
                optimizer.clear_grad()
                loss_DATA = loss_data(Net, x_pde_train, label, criterion)
                loss_PDE = loss_pde(Net, x_pde_train, criterion)
                LOSS = loss_DATA + loss_PDE
                # BUG FIX: the old condition `epoch == 1 + epoches` was
                # unreachable (the loop tops out at epoches); report on the
                # final epoch instead.
                if epoch % print_every == 0 or epoch == epoches:
                    print(f'epoch{epoch}, total loss : {LOSS.item():.8f}, loss pde : {loss_PDE.item():.8f}, loss data : {loss_DATA.item():.8f}')
                # NOTE(review): LBFGS may invoke the closure several times per
                # step, so multiple history rows per epoch are recorded here —
                # preserved behavior; confirm this is intended.
                loss_history.append([epoch, LOSS.item(), loss_PDE.item(), loss_DATA.item()])
                LOSS.backward()
                return LOSS
            LOSS = optimizer.step(closure)
        else:
            optimizer.clear_grad()
            loss_DATA = loss_data(Net, x_pde_train, label, criterion)
            loss_PDE = loss_pde(Net, x_pde_train, criterion)
            LOSS = loss_DATA + loss_PDE
            # BUG FIX: same unreachable final-epoch condition as above.
            if epoch % print_every == 0 or epoch == epoches:
                print(f'epoch{epoch}, total loss : {LOSS.item():.8f}, loss pde : {loss_PDE.item():.8f}, loss data : {loss_DATA.item():.8f}')
            loss_history.append([epoch, LOSS.item(), loss_PDE.item(), loss_DATA.item()])
            LOSS.backward()
            optimizer.step()

        # Checkpoint whenever we match or beat the best loss so far.
        if LOSS.item() <= best_loss:
            obj = {'model': Net.state_dict(), 'opt': optimizer.state_dict(), 'epoch': epoch}
            path = progress_path + '/model_paras/' + pdparas_name
            paddle.save(obj, path)
            best_loss = LOSS.item()

    np.savetxt(progress_path + f'/loss/loss{step}.txt', loss_history)

# Visualize the results at the training points
def _plot_field(coords, values, title):
    """Render one scalar field as a colored scatter over the given coordinates."""
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 3), dpi=150)
    ax.set_aspect('equal')
    cf = ax.scatter(coords[:, 0], coords[:, 1], c=values, alpha=0.8,
                    edgecolors='none', cmap='rainbow', marker='o', s=2)
    ax.set_title(title)
    fig.colorbar(cf, ax=ax, fraction=0.046, pad=0.04)


def show_results(Net, x_pde_train):
    """Plot the four predicted fields (u, v, p, rho) at the training points.

    Creates one figure per field; nothing is returned or saved — the caller
    is expected to show or save the active matplotlib figures.
    """
    # IMPROVEMENT: the plotting code was copy-pasted four times; it now lives
    # in the _plot_field helper, called once per output column.
    result = to_numpy(Net(x_pde_train))
    titles = (r'$U_x$', r'$U_y$', r'$Pressure$', r'$\rho$')
    for column, title in enumerate(titles):
        _plot_field(x_pde_train, result[:, column], title)
