
import numpy as np
from random import uniform

import torch
from torch import nn
import torch.optim as optim
from PPINN.function import f_ic
from PPINN.model import NeuralNetwork

# Network architecture: 2 inputs -> 8 hidden layers of width 30 -> 1 output.
# NOTE: mutable module-level state — PINN3D/PINN2D overwrite the first and
# last entries in their constructors, so construction order matters.
layers = np.array([2,30,30,30,30,30,30,30,30,1]) 
# Viscosity coefficient nu = 0.01/pi used in the Burgers residual (see PINN.f_hat).
constant= 0.01 / np.pi  
# Shared MSE loss used by every PINN variant below.
loss_function = nn.MSELoss()

# Legacy version. Fully abandoned.
class PINN():
    """Physics-informed neural network for the 1D viscous Burgers equation.

    The trial solution built in ``u_hat`` satisfies the initial and boundary
    conditions by construction, so training only needs to minimize the PDE
    residual produced by ``f_hat``.
    """

    def __init__(self, X_f, device):
        """Prepare collocation tensors, the network, and the L-BFGS optimizer.

        X_f: (N, 2) tensor of collocation points; column 0 is x, column 1 is t.
        device: torch device used for the zero residual target and the net.
        """
        # Split the collocation coordinates into leaf tensors that track
        # gradients, so the PDE residual can differentiate w.r.t. x and t.
        self.x_f = X_f[:, 0].reshape(-1, 1).clone().detach().requires_grad_(True)
        self.t_f = X_f[:, 1].reshape(-1, 1).clone().detach().requires_grad_(True)

        # Zero target for the residual MSE loss.
        self.null = torch.zeros((self.x_f.shape[0], 1), device=device)

        self.net = NeuralNetwork(layers, device)

        self.optimizer = torch.optim.LBFGS(
            self.net.parameters(),
            lr=1,
            max_iter=50000,
            max_eval=50000,
            history_size=50,
            tolerance_grad=1e-08,
            tolerance_change=0.5 * np.finfo(float).eps,
            line_search_fn="strong_wolfe")

        self.ls = 0    # latest loss value
        self.iter = 0  # closure invocation counter

    def u_hat(self, x, t):
        """Trial solution that meets the IC/BC by construction."""
        raw = self.net(x, t)
        # (1 - x^2) * t vanishes on the boundary and at t = 0, where the
        # exp(-t) * f_ic(x) term supplies the initial condition.
        return (1 - x ** 2) * (t * raw) + torch.exp(-t) * f_ic(x)

    def f_hat(self, x, t):
        """PDE residual u_t + u*u_x - nu*u_xx of the trial solution."""

        def d(out, wrt):
            # First derivative via autograd; graph kept for higher orders.
            return torch.autograd.grad(
                out, wrt,
                grad_outputs=torch.ones_like(out),
                retain_graph=True,
                create_graph=True)[0]

        u = self.u_hat(x, t)
        u_t = d(u, t)
        u_x = d(u, x)
        u_xx = d(u_x, x)
        return u_t + u * u_x - constant * u_xx

    def closure(self):
        """One L-BFGS evaluation: residual loss, backward, bookkeeping."""
        self.optimizer.zero_grad()

        residual = self.f_hat(self.x_f, self.t_f)
        self.ls = loss_function(residual, self.null)
        self.ls.backward()

        self.iter += 1
        if self.iter % 100 == 0:
            print('Epoch: {0:}, Loss: {1:6.9f}'.format(self.iter, self.ls))

        return self.ls

    def train(self):
        """Run L-BFGS until its internal stopping criteria are met."""
        self.net.train()
        self.optimizer.step(self.closure)

# The 3D version is deprecated for now. When needed later, update it following the 2D version.
class PINN3D():
    """3D incompressible Navier-Stokes PINN in velocity-pressure form.

    Network output channels are interpreted as (p, ux, uy, uz).  Training
    drives the stacked PDE residuals on interior points and the concatenated
    boundary residuals to zero with a single L-BFGS run.
    """

    def __init__(self, X_f, N_u, N_f, device):
        # Re-wrap every point set as a leaf tensor that tracks gradients.
        self.x_f = {}
        for key, value in X_f.items():
            self.x_f[key] = X_f[key].clone().detach().requires_grad_(True)

        # Mutates the module-level `layers` array shared with the other PINN
        # classes: input width follows the interior point dimensionality,
        # output is fixed at 4 channels.
        layers[0] = X_f['interior'].shape[1]
        layers[-1] = 4

        # Zero targets: 4 PDE residual rows over N_f interior points and
        # 3 boundary residual rows over N_u boundary points.
        self.fluid_null = torch.zeros((4, N_f), device=device)
        self.BC_null = torch.zeros((3, N_u), device=device)

        self.net = NeuralNetwork(layers, device)

        self.optimizer = torch.optim.LBFGS(self.net.parameters(),
                                           lr=1,
                                           max_iter=50000,
                                           max_eval=50000,
                                           history_size=50,
                                           tolerance_grad=1e-08,
                                           tolerance_change=0.5 * np.finfo(float).eps,
                                           line_search_fn="strong_wolfe")

        self.ls = 0    # latest loss value
        self.iter = 0  # closure invocation counter
        self.rho = 1   # fluid density used in the momentum residuals
        self.mu = 1    # viscosity coefficient used in the momentum residuals

    def grad(self, numer, denom):
        # First derivative d(numer)/d(denom); graph kept for higher orders.
        return torch.autograd.grad(
            numer, denom,
            grad_outputs=torch.ones_like(numer),
            retain_graph=True,
            create_graph=True)[0]
    def f_hat(self, x):
        """Return (f, bc): PDE residuals on `x` plus boundary residuals.

        NOTE(review): `bc` concatenates four boundary groups along dim=1, so
        BC_null's N_u must equal the combined boundary point count — verify
        against the caller.
        """
        field = self.net(x, None)
        bc_wall = self.net(self.x_f['wall_0'], None)
        bc_v_inlet_0 = self.net(self.x_f['velocity_inlet_0'], None)
        bc_v_inlet_1 = self.net(self.x_f['velocity_inlet_1'], None)
        bc_p_outlet_0 = self.net(self.x_f['pressure_outlet_0'], None)

        # Output channels: 0 = pressure, 1..3 = velocity components.
        p = field[:, 0]
        ux = field[:, 1]
        uy = field[:, 2]
        uz = field[:, 3]

        # Gradient columns are (d/dx, d/dy, d/dz).  Each *_z name first holds
        # the full gradient and is then overwritten by its own z column.
        p_z = self.grad(p, x)
        p_x = p_z[:, 0]
        p_y = p_z[:, 1]
        p_z = p_z[:, 2]

        ux_z = self.grad(ux, x)
        ux_x = ux_z[:, 0]
        ux_y = ux_z[:, 1]
        ux_z = ux_z[:, 2]


        uy_z = self.grad(uy, x)
        uy_x = uy_z[:, 0]
        uy_y = uy_z[:, 1]
        uy_z = uy_z[:, 2]


        uz_z = self.grad(uz, x)
        uz_x = uz_z[:, 0]
        uz_y = uz_z[:, 1]
        uz_z = uz_z[:, 2]

        # Second derivatives (Hessian diagonal) for the viscous terms.
        ux_xx = self.grad(ux_x, x)[:, 0]
        ux_yy = self.grad(ux_y, x)[:, 1]
        ux_zz = self.grad(ux_z, x)[:, 2]

        uy_xx = self.grad(uy_x, x)[:, 0]
        uy_yy = self.grad(uy_y, x)[:, 1]
        uy_zz = self.grad(uy_z, x)[:, 2]

        uz_xx = self.grad(uz_x, x)[:, 0]
        uz_yy = self.grad(uz_y, x)[:, 1]
        uz_zz = self.grad(uz_z, x)[:, 2]

        # Branch velocity inlet.  The constant offsets encode prescribed
        # velocities: the residual vanishes when the network output cancels
        # the offset (e.g. uy == -0.2 here).
        bc1_ux = bc_v_inlet_0[:, 1]
        bc1_uy = bc_v_inlet_0[:, 2] + 0.2
        bc1_uz = bc_v_inlet_0[:, 3]
        bc1 = torch.stack([bc1_ux, bc1_uy, bc1_uz])

        # Main velocity inlet.
        bc2_ux = bc_v_inlet_1[:, 1]
        bc2_uy = bc_v_inlet_1[:, 2]
        bc2_uz = bc_v_inlet_1[:, 3] + 0.1
        bc2 = torch.stack([bc2_ux, bc2_uy, bc2_uz])

        # Main pressure outlet: pressure and two velocity components
        # driven to zero.
        bc3_p = bc_p_outlet_0[:, 0]
        bc3_ux = bc_p_outlet_0[:, 1]
        bc3_uz = bc_p_outlet_0[:, 3]
        bc3 = torch.stack([bc3_p, bc3_ux, bc3_uz])

        # No-slip wall: all velocity components driven to zero.
        bc4_ux = bc_wall[:, 1]
        bc4_uy = bc_wall[:, 2]
        bc4_uz = bc_wall[:, 3]
        bc4 = torch.stack([bc4_ux, bc4_uy, bc4_uz])

        bc = torch.cat([bc1, bc2, bc3, bc4], dim=1)

        # Fluid domain: continuity (f1) and steady momentum residuals
        # (f2-f4) in x, y, z.
        f1 = ux_x + uy_y + uz_z
        f2 = ux * ux_x + uy * ux_y + uz * ux_z + 1. / self.rho * p_x - self.mu * (ux_xx + ux_yy + ux_zz)
        f3 = ux * uy_x + uy * uy_y + uz * uy_z + 1. / self.rho * p_y - self.mu * (uy_xx + uy_yy + uy_zz)
        f4 = ux * uz_x + uy * uz_y + uz * uz_z + 1. / self.rho * p_z - self.mu * (uz_xx + uz_yy + uz_zz)

        f = torch.stack([f1, f2, f3, f4])

        return f, bc

    def closure(self):
        """One L-BFGS evaluation: loss, backward, progress bookkeeping."""
        self.optimizer.zero_grad()

        f_pred, bc_pred = self.f_hat(self.x_f['interior'])

        # Equal-weight sum of the PDE and boundary MSE losses.
        f_loss = loss_function(f_pred, self.fluid_null) + loss_function(bc_pred, self.BC_null)
        self.ls = f_loss

        self.ls.backward()

        self.iter += 1

        if not self.iter % 100:
            print('Epoch: {0:}, Loss: {1:6.9f}'.format(self.iter, self.ls))

        return self.ls

    def train(self):
        """Run L-BFGS until its internal stopping criteria are met."""
        self.net.train()
        self.optimizer.step(self.closure)
        
class PINN2D():
    """2D steady Navier-Stokes PINN in stream-function / stress form.

    The network maps 2D coordinates to 6 output channels; channel 0 is the
    pressure p, channel 1 the stream function psi, channels 2-4 the stress
    components sxx, sxy, syy (channel 5 is unused by this class).  Velocities
    are derived from psi in ``u_hat``, so continuity holds analytically.
    Training is an Adam warm-up followed by one L-BFGS run.
    """

    def __init__(self, X_f, N_u, N_f, device):
        """Prepare point tensors, the network, and both optimizers.

        X_f: dict of point sets keyed by region name ('interior', 'wall_0',
             'velocity_inlet_0', 'velocity_inlet_1', 'pressure_outlet_0').
        N_u: column count of the boundary residual target.
        N_f: number of interior collocation points.
        device: torch device for the zero targets and the network.
        """
        # Re-wrap every point set as a leaf tensor that tracks gradients.
        self.x_f = {}
        for key in X_f:
            self.x_f[key] = X_f[key].clone().detach().requires_grad_(True)

        # NOTE(review): mutates the module-level `layers` array shared with
        # the other PINN classes; kept for backward compatibility.
        layers[0] = X_f['interior'].shape[1]
        layers[-1] = 6

        # Zero targets: 7 PDE residual rows, 2 boundary residual rows.
        self.fluid_null = torch.zeros((7, N_f), device=device)
        self.BC_null = torch.zeros((2, N_u), device=device)

        self.net = NeuralNetwork(layers, device)
        self.optimizer_LBFGS = torch.optim.LBFGS(self.net.parameters(),
                                           max_iter=100000,
                                           max_eval=100000,
                                           history_size=50,
                                           tolerance_change=1 * np.finfo(float).eps,
                                           line_search_fn="strong_wolfe")
        self.optimizer_Adam = torch.optim.Adam(self.net.parameters(), lr=1e-5)
        self.ls = 0      # latest loss value
        self.iter = 0    # closure invocation counter (reset between phases)
        self.rho = 1     # fluid density
        self.mu = 0.02   # dynamic viscosity

    def grad(self, numer, denom):
        """First derivative d(numer)/d(denom) via autograd (graph retained)."""
        return torch.autograd.grad(
            numer, denom,
            grad_outputs=torch.ones_like(numer),
            retain_graph=True,
            create_graph=True)[0]

    def u_hat(self, field, x):
        """Unpack network output and derive velocities from the stream function.

        Returns (p, ux, uy, sxx, sxy, syy) with ux = dpsi/dy, uy = -dpsi/dx,
        so the continuity equation ux_x + uy_y = 0 is satisfied exactly.
        """
        p = field[:, 0]
        psi = field[:, 1]
        sxx = field[:, 2]
        sxy = field[:, 3]
        syy = field[:, 4]

        # Gradient columns are (d/dx, d/dy).
        dpsi = self.grad(psi, x)
        ux = dpsi[:, 1]
        uy = -dpsi[:, 0]
        return p, ux, uy, sxx, sxy, syy

    def f_hat(self, x):
        """Compute interior PDE residuals and boundary residuals.

        Returns (f, bc): f stacks 7 field-equation residual rows; bc
        concatenates the active boundary residual groups along dim=1.  Both
        are driven toward zero targets by the loss.
        """
        field = self.net(x, None)
        bc_wall = self.net(self.x_f['wall_0'], None)
        bc_v_inlet_0 = self.net(self.x_f['velocity_inlet_0'], None)
        # Evaluated but unused while the bc2 block below stays disabled.
        bc_v_inlet_1 = self.net(self.x_f['velocity_inlet_1'], None)
        bc_p_outlet_0 = self.net(self.x_f['pressure_outlet_0'], None)

        p, ux, uy, sxx, sxy, syy = self.u_hat(field, x)

        # First derivatives; column 0 is d/dx, column 1 is d/dy.
        dp = self.grad(p, x)
        p_x, p_y = dp[:, 0], dp[:, 1]

        dux = self.grad(ux, x)
        ux_x, ux_y = dux[:, 0], dux[:, 1]

        duy = self.grad(uy, x)
        uy_x, uy_y = duy[:, 0], duy[:, 1]

        dsxx = self.grad(sxx, x)
        sxx_x, sxx_y = dsxx[:, 0], dsxx[:, 1]

        dsxy = self.grad(sxy, x)
        sxy_x, sxy_y = dsxy[:, 0], dsxy[:, 1]

        dsyy = self.grad(syy, x)
        syy_x, syy_y = dsyy[:, 0], dsyy[:, 1]

        # NOTE(review): boundary residuals below read raw network channels
        # 1 and 2 as velocities, while interior velocities are derivatives
        # of channel 1 (psi) — confirm this mismatch is intentional.

        # Velocity inlet: residual vanishes when the network output cancels
        # the offset (channel 1 == -1.0 here).
        bc1_ux = bc_v_inlet_0[:, 1] + 1.0
        bc1_uy = bc_v_inlet_0[:, 2]
        bc1 = torch.stack([bc1_ux, bc1_uy])

        # # Second velocity inlet — currently disabled.
        # bc2_ux = bc_v_inlet_1[:, 1]
        # bc2_uy = bc_v_inlet_1[:, 2] + 0.2
        # bc2 = torch.stack([bc2_ux, bc2_uy])

        # Pressure outlet: channels 0 and 1 driven to zero.
        bc3_p = bc_p_outlet_0[:, 0]
        bc3_uy = bc_p_outlet_0[:, 1]
        bc3 = torch.stack([bc3_p, bc3_uy])

        # No-slip wall: both velocity channels driven to zero.
        bc4_ux = bc_wall[:, 1]
        bc4_uy = bc_wall[:, 2]
        bc4 = torch.stack([bc4_ux, bc4_uy])

        bc = torch.cat([bc1, bc3, bc4], dim=1)

        # Fluid domain: continuity (f1), momentum in stress form (f2, f3),
        # constitutive relations tying stresses to velocity gradients
        # (f4-f6), and the trace condition linking p to the stress trace (f7).
        f1 = ux_x + uy_y
        f2 = self.rho * (ux * ux_x + uy * ux_y) - sxx_x - sxy_y
        f3 = self.rho * (ux * uy_x + uy * uy_y) - sxy_x - syy_y
        f4 = self.mu * (ux_x + ux_x) - p - sxx
        f5 = self.mu * (uy_y + uy_y) - p - syy
        f6 = self.mu * (ux_y + uy_x) - sxy
        f7 = p + 0.5 * (sxx + syy)

        f = torch.stack([f1, f2, f3, f4, f5, f6, f7])

        return f, bc

    def _closure(self):
        """Loss closure shared by the Adam warm-up and the L-BFGS phase."""
        # Both optimizers wrap the same parameters, so zeroing through
        # either one clears the same gradients.
        self.optimizer_LBFGS.zero_grad()
        f_pred, bc_pred = self.f_hat(self.x_f['interior'])
        # Boundary loss weighted 2x to balance it against the PDE loss.
        f_loss = loss_function(f_pred, self.fluid_null) + 2 * loss_function(bc_pred, self.BC_null)
        self.ls = f_loss

        self.ls.backward()

        self.iter += 1
        if not self.iter % 100:
            print('Epoch: {0:}, Loss: {1:6.9f}'.format(self.iter, self.ls))

        return self.ls

    def train(self):
        """Adam warm-up (10000 steps) followed by one L-BFGS run."""
        self.net.train()
        # BUG FIX: the closure used to be redefined inside the loop and was
        # referenced after it only through Python's leaked loop-local name;
        # it is now a method defined once and shared by both phases.  The
        # former per-iteration optimizer_Adam.zero_grad() was redundant (the
        # closure already zeroes the shared parameter gradients) and is gone.
        for _ in range(10000):
            self.optimizer_Adam.step(self._closure)
        self.iter = 0  # restart the epoch counter for the L-BFGS phase
        self.optimizer_LBFGS.step(self._closure)