import numpy as np
import time
from pyDOE import lhs
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import shutil
import random
import torch
import torch.nn as nn
import os
from PIL import Image
# Fix all random number generators for reproducibility across runs.
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)

# Train on the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Dimension:
    """Reference scales used to non-dimensionalize the problem.

    t0 -- time scale, D0 -- length scale, U0 -- velocity scale,
    re -- Reynolds number built from the physical properties.
    """

    def __init__(self, t0, D0, U0, re):
        self.t0, self.D0, self.U0, self.re = t0, D0, U0, re

class Particle:
    """Geometry, sampling and material parameters of the tracked circular particle."""

    def __init__(self, r, xc, yc, num_r, num_refine, rhop):
        # radius r, initial centre (xc, yc), density ratio rhop,
        # num_r circumferential sample points, num_refine refinement samples
        self.r, self.xc, self.yc = r, xc, yc
        self.num_r, self.num_refine = num_r, num_refine
        self.rhop = rhop

class PINN_laminar_flow:
    """PINN for 2D unsteady laminar flow coupled with a moving circular particle.

    Two fully-connected networks are trained jointly:

    * ``uv_net_flow``: (x, y, t) -> (psi, p, s11, s22, s12).  Velocities are
      recovered from the stream function psi (u = dpsi/dy, v = -dpsi/dx), so
      mass conservation is satisfied by construction; s11/s22/s12 are the
      auxiliary stress components used in the first-order PDE residuals.
    * ``uv_net_particle``: t -> (up, vp), the particle velocity, driven by a
      Stokes-drag equation of motion.

    All quantities are non-dimensional; ``re`` is the Reynolds number.
    NOTE(review): ``train`` reads the module-level global ``U_max`` (inlet and
    initial velocity) -- it must be defined before training starts.
    """

    def __init__(self, rho, mu, re, particle, dimension, particle_t_c, Collo, IC, INLET, OUTLET, WALL, uv_layers, particle_layers, ExistModel=0, uvDir='model/'):
        self.rho = rho
        self.mu = mu
        self.re = re
        self.particle = particle
        self.dimension = dimension

        def col(a):
            # Column data -> float32 tensor on the training device, autograd-tracked.
            return torch.tensor(a, dtype=torch.float32).to(device).requires_grad_(True)

        # Particle time samples as an (N, 1) column.  Reshape BEFORE enabling
        # gradients so the stored tensor is a proper autograd leaf (the original
        # sliced after requires_grad_, yielding a non-leaf view).
        self.particle_t_c = torch.tensor(particle_t_c, dtype=torch.float32).reshape(-1, 1).to(device).requires_grad_(True)

        # PDE collocation points.
        self.x_c = col(Collo[:, 0:1])
        self.y_c = col(Collo[:, 1:2])
        self.t_c = col(Collo[:, 2:3])

        # Initial-condition points.
        self.x_IC = col(IC[:, 0:1])
        self.y_IC = col(IC[:, 1:2])
        self.t_IC = col(IC[:, 2:3])

        # Inlet points with their prescribed velocity components.
        self.x_INLET = col(INLET[:, 0:1])
        self.y_INLET = col(INLET[:, 1:2])
        self.t_INLET = col(INLET[:, 2:3])
        self.u_INLET = col(INLET[:, 3:4])
        self.v_INLET = col(INLET[:, 4:5])

        # Outlet points (zero pressure is imposed in ``train``).
        self.x_OUTLET = col(OUTLET[:, 0:1])
        self.y_OUTLET = col(OUTLET[:, 1:2])
        self.t_OUTLET = col(OUTLET[:, 2:3])

        # No-slip wall points.
        self.x_WALL = col(WALL[:, 0:1])
        self.y_WALL = col(WALL[:, 1:2])
        self.t_WALL = col(WALL[:, 2:3])

        self.uv_layers_flow = uv_layers
        self.uv_layers_particle = particle_layers
        if ExistModel == 0:
            self.uv_net_flow = self.initialize_NN(self.uv_layers_flow).to(device).requires_grad_(True)
            self.uv_net_particle = self.initialize_NN(self.uv_layers_particle).to(device).requires_grad_(True)
        else:
            print("Loading uv NN ...")
            self.uv_net_flow, self.uv_net_particle = self.load_NN(uvDir, self.uv_layers_flow, self.uv_layers_particle)

        self.optimizer_Adam_flow = torch.optim.Adam(self.uv_net_flow.parameters(), lr=0.001)
        self.optimizer_Adam_particle = torch.optim.Adam(self.uv_net_particle.parameters(), lr=0.001)

    def initialize_NN(self, layers):
        """Build an MLP with SiLU activations between all but the last layer.

        ``layers`` lists the widths, e.g. [3, 50, 50, 5].
        """
        num_layers = len(layers)
        modules = []
        for l in range(0, num_layers - 1):
            modules.append(nn.Linear(layers[l], layers[l + 1]))
            if l < num_layers - 2:
                modules.append(nn.SiLU())
        return nn.Sequential(*modules)

    def save_NN(self, fileDir):
        """Save both networks' weights into ``fileDir`` as flow.pth / particle.pth."""
        torch.save(self.uv_net_flow.state_dict(), os.path.join(fileDir, 'flow.pth'))
        torch.save(self.uv_net_particle.state_dict(), os.path.join(fileDir, 'particle.pth'))
        print("Save uv NN parameters successfully...")

    def load_NN(self, fileDir, layers_flow, layers_particle):
        """Rebuild both networks and load their weights from ``fileDir``."""
        net_flow = self.initialize_NN(layers_flow)
        net_particle = self.initialize_NN(layers_particle)
        # map_location lets checkpoints saved on another device load cleanly.
        net_flow.load_state_dict(torch.load(os.path.join(fileDir, 'flow.pth'), map_location=device))
        net_particle.load_state_dict(torch.load(os.path.join(fileDir, 'particle.pth'), map_location=device))
        net_flow.to(device).requires_grad_(True)
        net_particle.to(device).requires_grad_(True)
        print(" - Load NN parameters successfully...")
        return net_flow, net_particle

    def net_uv_particle(self, t):
        """Particle velocity (up, vp) at the times ``t`` (column tensor)."""
        psips = self.uv_net_particle(t)
        up = psips[:, 0:1]
        vp = psips[:, 1:2]
        return up, vp

    def net_uv_flow(self, x, y, t):
        """Flow state at (x, y, t): stream-function velocities plus p and stresses."""
        X = torch.cat([x, y, t], dim=1)
        psips = self.uv_net_flow(X)
        psi = psips[:, 0:1]
        p = psips[:, 1:2]
        s11 = psips[:, 2:3]
        s22 = psips[:, 3:4]
        s12 = psips[:, 4:5]
        # u = dpsi/dy, v = -dpsi/dx makes the velocity field divergence-free.
        u = torch.autograd.grad(psi, y, grad_outputs=torch.ones_like(psi), create_graph=True)[0]
        v = -torch.autograd.grad(psi, x, grad_outputs=torch.ones_like(psi), create_graph=True)[0]
        return u, v, p, s11, s22, s12

    def generate_xp(self, up, vp, t):
        """Integrate the particle velocity (trapezoidal rule) to its trajectory.

        Returns column tensors (xp, yp): the particle centre at every time in
        ``t``, starting from the initial centre stored in ``self.particle``.
        """
        n = len(up)
        # Allocate on the same device/dtype as the network output; the original
        # created CPU tensors, which breaks when training on CUDA.
        xp = torch.zeros(n, 1, dtype=up.dtype, device=up.device)
        yp = torch.zeros(n, 1, dtype=up.dtype, device=up.device)
        xp[1:] = 0.5 * (up[1:] + up[:-1]) * (t[1:] - t[:-1])
        yp[1:] = 0.5 * (vp[1:] + vp[:-1]) * (t[1:] - t[:-1])
        xp = torch.cumsum(xp, dim=0)
        yp = torch.cumsum(yp, dim=0)
        return xp + self.particle.xc, yp + self.particle.yc

    def get_particle_nearby_coords(self, xp, yp, t, nearby_factor):
        """Sample a ring of radius ``r * nearby_factor`` around the particle centre.

        The output is time-major: for each time step, (num_r - 1) ring points
        are appended, so consecutive blocks of (num_r - 1) rows share one time.
        Returns numpy columns (x, y, t) plus ``num_r`` for later reshaping.
        """
        xp = xp.detach().cpu().numpy()
        yp = yp.detach().cpu().numpy()
        t = t.detach().cpu().numpy()

        x_per_t = self.particle.num_r
        # Drop the duplicate ring point at theta = 2*pi.
        theta = np.linspace(0.0, np.pi * 2.0, self.particle.num_r)[0:-1]
        x_near = np.array([])
        y_near = np.array([])
        t_near = np.array([])
        for i_t in range(t.shape[0]):
            x_near = np.concatenate((x_near, np.multiply(self.particle.r * nearby_factor, np.cos(theta)) + xp[i_t]))
            y_near = np.concatenate((y_near, np.multiply(self.particle.r * nearby_factor, np.sin(theta)) + yp[i_t]))
            t_near = np.concatenate((t_near, t[i_t] * np.ones_like(theta)))
        x_near = x_near.flatten()[:, None]
        y_near = y_near.flatten()[:, None]
        t_near = t_near.flatten()[:, None]
        return x_near, y_near, t_near, x_per_t

    def get_particle_refine_coords(self, xp, yp, t, refine_layers):
        """Random subset of collocation points on shells just outside the particle.

        Builds ``refine_layers`` concentric rings (factors 1.002, 1.004, ...)
        and keeps at most ``self.particle.num_refine`` of the sampled points.
        """
        x_near = np.array([])[:, None]
        y_near = np.array([])[:, None]
        t_near = np.array([])[:, None]
        for i in range(1, 1 + refine_layers):
            # Use the caller-supplied times ``t``; the original ignored this
            # parameter and always read self.particle_t_c.
            x_near_i, y_near_i, t_near_i, _ = self.get_particle_nearby_coords(xp, yp, t, 1 + i * 0.002)
            x_near = np.concatenate((x_near, x_near_i))
            y_near = np.concatenate((y_near, y_near_i))
            t_near = np.concatenate((t_near, t_near_i))
        num_refine = self.particle.num_refine if (self.particle.num_refine < x_near.shape[0]) else x_near.shape[0]
        indices = np.random.choice(x_near.shape[0], num_refine, replace=False)
        return x_near[indices], y_near[indices], t_near[indices]

    def net_f_particle(self, t):
        """Residual of the particle equation of motion under Stokes drag."""
        up, vp = self.net_uv_particle(t)
        xp, yp = self.generate_xp(up, vp, t)
        # Fluid velocity sampled on a ring 20% outside the particle surface.
        x_near, y_near, t_near, x_per_t = self.get_particle_nearby_coords(xp, yp, t, 1.2)
        x_near = torch.tensor(x_near, dtype=torch.float32).to(device).requires_grad_(True)
        y_near = torch.tensor(y_near, dtype=torch.float32).to(device).requires_grad_(True)
        t_near = torch.tensor(t_near, dtype=torch.float32).to(device).requires_grad_(True)

        u, v, _, _, _, _ = self.net_uv_flow(x_near, y_near, t_near)
        # Ring points are time-major ((x_per_t - 1) points per time step), so
        # average each ring via a (T, x_per_t - 1) view over dim=1.  The
        # original view(x_per_t - 1, T).mean(dim=0) mixed different times.
        u = u.view(t.shape[0], x_per_t - 1).mean(dim=1, keepdim=True)
        v = v.view(t.shape[0], x_per_t - 1).mean(dim=1, keepdim=True)

        Dp = 2 * self.particle.r  # particle diameter (original read the module-level global ``particle``)
        rhop = self.particle.rhop
        up_t = torch.autograd.grad(up, t, grad_outputs=torch.ones_like(up), create_graph=True)[0]
        vp_t = torch.autograd.grad(vp, t, grad_outputs=torch.ones_like(vp), create_graph=True)[0]
        # Stokes-drag ODE residual: du_p/dt = (u - u_p) * 24 / (Re * rho_p * 4/3 * Dp^2)
        f_up = up_t - (u - up) * 24 / self.re / rhop / (4.0 / 3.0 * Dp ** 2)
        f_vp = vp_t - (v - vp) * 24 / self.re / rhop / (4.0 / 3.0 * Dp ** 2)
        return f_up, f_vp

    def net_f_flow(self, x, y, t):
        """Navier-Stokes residuals in first-order (velocity/stress) form.

        Returns momentum residuals (f_u, f_v), the constitutive residuals for
        the three stress components, and a pressure-consistency residual.
        """
        rho = self.rho
        mu = self.mu
        u, v, p, s11, s22, s12 = self.net_uv_flow(x, y, t)

        s11_1 = torch.autograd.grad(s11, x, grad_outputs=torch.ones_like(s11), create_graph=True)[0]
        s12_2 = torch.autograd.grad(s12, y, grad_outputs=torch.ones_like(s12), create_graph=True)[0]
        s22_2 = torch.autograd.grad(s22, y, grad_outputs=torch.ones_like(s22), create_graph=True)[0]
        s12_1 = torch.autograd.grad(s12, x, grad_outputs=torch.ones_like(s12), create_graph=True)[0]

        u_x = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u), create_graph=True)[0]
        u_y = torch.autograd.grad(u, y, grad_outputs=torch.ones_like(u), create_graph=True)[0]

        v_x = torch.autograd.grad(v, x, grad_outputs=torch.ones_like(v), create_graph=True)[0]
        v_y = torch.autograd.grad(v, y, grad_outputs=torch.ones_like(v), create_graph=True)[0]

        u_t = torch.autograd.grad(u, t, grad_outputs=torch.ones_like(u), create_graph=True)[0]
        v_t = torch.autograd.grad(v, t, grad_outputs=torch.ones_like(v), create_graph=True)[0]

        # Momentum: du/dt + (u.grad)u = div(sigma)
        f_u = u_t + (u * u_x + v * u_y) - (s11_1 + s12_2)
        f_v = v_t + (u * v_x + v * v_y) - (s12_1 + s22_2)

        # Constitutive relation sigma = -p I + 2/Re * strain rate.
        f_s11 = -p + 2 / self.re * u_x - s11
        f_s22 = -p + 2 / self.re * v_y - s22
        f_s12 = 1 / self.re * (u_y + v_x) - s12

        # Pressure consistency: p = -(s11 + s22) / 2.
        f_p = p + (s11 + s22) / 2

        return f_u, f_v, f_s11, f_s22, f_s12, f_p

    def train(self, iter, learning_rate):
        """Run ``iter`` Adam steps on both networks at the given learning rate.

        (The parameter name ``iter`` shadows the builtin but is kept so
        existing keyword callers keep working.)
        """
        self.optimizer_Adam_particle.param_groups[0]['lr'] = learning_rate
        self.optimizer_Adam_flow.param_groups[0]['lr'] = learning_rate
        for it in range(iter):
            self.optimizer_Adam_particle.zero_grad()
            self.optimizer_Adam_flow.zero_grad()

            # ---- particle losses: ODE residual + zero initial velocity ----
            f_pred_up, f_pred_vp = self.net_f_particle(self.particle_t_c)
            up_IC_pred, vp_IC_pred = self.net_uv_particle(self.particle_t_c[0:1])
            loss_f_particle = torch.mean(f_pred_up ** 2) + torch.mean(f_pred_vp ** 2)
            loss_IC_particle = torch.mean((up_IC_pred - 0.0) ** 2) + torch.mean(vp_IC_pred ** 2)

            loss_particle = loss_f_particle + 5 * loss_IC_particle

            # ---- flow losses ----
            up, vp = self.net_uv_particle(self.particle_t_c)
            xp, yp = self.generate_xp(up, vp, self.particle_t_c)
            # Points exactly on the particle surface (factor 1.0) for no-slip.
            x_surf, y_surf, t_surf, _ = self.get_particle_nearby_coords(xp, yp, self.particle_t_c, 1.0)
            x_surf = torch.tensor(x_surf, dtype=torch.float32).to(device).requires_grad_(True)
            y_surf = torch.tensor(y_surf, dtype=torch.float32).to(device).requires_grad_(True)
            t_surf = torch.tensor(t_surf, dtype=torch.float32).to(device).requires_grad_(True)

            x_refine, y_refine, t_refine = self.get_particle_refine_coords(xp, yp, self.particle_t_c, 5)

            # Repeat particle velocities to line up with the time-major surface points.
            up = torch.repeat_interleave(up, self.particle.num_r - 1, dim=0)
            vp = torch.repeat_interleave(vp, self.particle.num_r - 1, dim=0)
            x_refine = torch.tensor(x_refine, dtype=torch.float32).to(device).requires_grad_(True)
            y_refine = torch.tensor(y_refine, dtype=torch.float32).to(device).requires_grad_(True)
            t_refine = torch.tensor(t_refine, dtype=torch.float32).to(device).requires_grad_(True)

            f_pred_u, f_pred_v, f_pred_s11, f_pred_s22, f_pred_s12, f_pred_p = self.net_f_flow(self.x_c, self.y_c, self.t_c)
            f_near_u, f_near_v, f_near_s11, f_near_s22, f_near_s12, f_near_p = self.net_f_flow(x_refine, y_refine, t_refine)
            u_IC_pred, v_IC_pred, p_IC_pred, _, _, _ = self.net_uv_flow(self.x_IC, self.y_IC, self.t_IC)
            u_WALL_pred, v_WALL_pred, _, _, _, _ = self.net_uv_flow(self.x_WALL, self.y_WALL, self.t_WALL)
            u_particle_pred, v_particle_pred, _, _, _, _ = self.net_uv_flow(x_surf, y_surf, t_surf)
            u_INLET_pred, v_INLET_pred, _, _, _, _ = self.net_uv_flow(self.x_INLET, self.y_INLET, self.t_INLET)
            _, _, p_OUTLET_pred, _, _, _ = self.net_uv_flow(self.x_OUTLET, self.y_OUTLET, self.t_OUTLET)
            loss_f_flow = torch.mean(f_pred_u ** 2) + torch.mean(f_pred_v ** 2) + torch.mean(f_pred_s11 ** 2) + \
                         torch.mean(f_pred_s22 ** 2) + torch.mean(f_pred_s12 ** 2) + torch.mean(f_pred_p ** 2)
            loss_f_near = torch.mean(f_near_u ** 2) + torch.mean(f_near_v ** 2) + torch.mean(f_near_s11 ** 2) + \
                         torch.mean(f_near_s22 ** 2) + torch.mean(f_near_s12 ** 2) + torch.mean(f_near_p ** 2)
            # NOTE(review): U_max is a module-level global set in __main__.
            loss_IC_flow = torch.mean((u_IC_pred - U_max) ** 2) + torch.mean(v_IC_pred ** 2) + torch.mean(p_IC_pred ** 2)
            loss_WALL_flow = torch.mean(u_WALL_pred ** 2) + torch.mean(v_WALL_pred ** 2)
            loss_INLET_flow = torch.mean((u_INLET_pred - U_max) ** 2) + torch.mean(v_INLET_pred ** 2)
            loss_OUTLET_flow = torch.mean((p_OUTLET_pred - 0.0) ** 2)

            # No-slip on the particle surface: fluid velocity equals particle velocity.
            loss_HOLE_flow = torch.mean((u_particle_pred - up) ** 2) + torch.mean((v_particle_pred - vp) ** 2)

            loss_flow = loss_f_flow + loss_f_near + 5 * loss_HOLE_flow + 2 * (loss_WALL_flow + loss_INLET_flow + loss_OUTLET_flow + loss_IC_flow)

            loss_particle.backward()
            loss_flow.backward()
            self.optimizer_Adam_particle.step()
            self.optimizer_Adam_flow.step()

            if it % 100 == 0:
                print('It: %d, Particle Loss: %.3e, Flow Loss: %.3e' % (it, loss_particle.item(), loss_flow.item()))

    def predict_flow(self, x_star, y_star, t_star):
        """Evaluate (u, v, p) at the given points; returns numpy arrays."""
        x_star = torch.tensor(x_star, dtype=torch.float32).to(device).requires_grad_(True)
        y_star = torch.tensor(y_star, dtype=torch.float32).to(device).requires_grad_(True)
        t_star = torch.tensor(t_star, dtype=torch.float32).to(device).requires_grad_(True)
        u_star, v_star, p_star, _, _, _ = self.net_uv_flow(x_star, y_star, t_star)
        return u_star.cpu().detach().numpy(), v_star.cpu().detach().numpy(), p_star.cpu().detach().numpy()

    def predict_particle(self, t_star):
        """Evaluate the particle velocity at ``t_star``; returns torch tensors."""
        t_star = torch.tensor(t_star, dtype=torch.float32).to(device).requires_grad_(True)
        u_star, v_star = self.net_uv_particle(t_star)
        return u_star, v_star

def create_gif(image_folder, output_file, duration=100):
    """Assemble every .png in *image_folder* (sorted by filename) into an
    animated GIF saved as *output_file* inside the same folder.

    ``duration`` is the per-frame display time in milliseconds.
    """
    png_names = sorted(name for name in os.listdir(image_folder) if name.endswith('.png'))

    frames = []
    for name in png_names:
        file_path = os.path.join(image_folder, name)
        try:
            frames.append(Image.open(file_path))
        except Exception as e:
            print(f"无法打开图片 {file_path}: {e}")

    if not frames:
        print("未找到有效的 PNG 图片。")
        return
    # First frame carries the save; the rest are appended, looping forever.
    frames[0].save(os.path.join(image_folder, output_file), save_all=True,
                   append_images=frames[1:], duration=duration, loop=0)
    print(f"GIF 动图已保存为 {output_file}")

def postProcess(xmin, xmax, ymin, ymax, field, s=2, num=0):
    """Scatter-plot u, v and p over the domain and save one animation frame.

    field -- [x, y, t, u, v, p] column arrays (t is constant within a frame)
    s     -- marker size for the scatter plots
    num   -- frame index used in the output file name
    """
    [x_pred, y_pred, t_pred, u_pred, v_pred, p_pred] = field
    fig, ax = plt.subplots(nrows=3, figsize=(6, 8))
    # The original repeated this stanza verbatim for each field; one loop
    # renders all three panels identically.
    panels = [(u_pred, 'u predict'), (v_pred, 'v predict'), (p_pred, 'p predict')]
    for axis, (values, title) in zip(ax, panels):
        cf = axis.scatter(x_pred, y_pred, c=values, alpha=0.7, edgecolors='none',
                          cmap='rainbow', marker='o', s=s)
        axis.axis('square')
        axis.set_xlim([xmin, xmax])
        axis.set_ylim([ymin, ymax])
        axis.set_title(title)
        fig.colorbar(cf, ax=axis, fraction=0.046, pad=0.04)
    # t is uniform across the frame; row 1 is read as in the original.
    # NOTE(review): assumes at least two sample points per frame.
    t_str = t_pred[1][0]
    plt.suptitle('Time: ' + str(t_str) + 's', fontsize=16)
    plt.savefig(f'./output/uvp_comparison_{num:03d}.png', dpi=150)
    plt.close('all')

def CartGrid(xmin, xmax, ymin, ymax, tmin, tmax, num_x, num_y, num_t):
    """Cartesian (x, y, t) grid flattened into three (num_x*num_y*num_t, 1) columns.

    Uses numpy's default 'xy' meshgrid indexing, so the flattened order runs
    y-major, then x, then t.
    """
    axes = (np.linspace(xmin, xmax, num=num_x),
            np.linspace(ymin, ymax, num=num_y),
            np.linspace(tmin, tmax, num=num_t))
    xg, yg, tg = np.meshgrid(*axes)
    return xg.reshape(-1, 1), yg.reshape(-1, 1), tg.reshape(-1, 1)

if __name__ == "__main__":
    # -------- master parameter control: begin --------
    # Reference scales used for non-dimensionalization
    U0 = 0.0001          # velocity scale [m/s]
    D0 = 0.02            # length scale [m]
    t0 = D0 / U0         # time scale [s]
    rho0 = 1000          # density scale [kg/m^3]
    mu0 = 0.001          # viscosity scale [Pa*s]
    re = rho0 * U0 * D0 / mu0 
    # Physical (dimensional) parameters
        # space
    xmin_phy = -0.01
    xmax_phy = 0.04
    ymin_phy = -0.01
    ymax_phy = 0.01

        # time
    tmin_phy = 0
    tmax_phy = 0.1

        # material properties
    U_max_phy = 0.0001
    rho_phy = 1000
    mu_phy = 0.001

    # Non-dimensional parameters
        # space
    xmin = xmin_phy / D0
    xmax = xmax_phy / D0
    ymin = ymin_phy / D0
    ymax = ymax_phy / D0

        # time
    tmin = tmin_phy / t0
    tmax = tmax_phy / t0

        # material properties
    U_max = U_max_phy / U0
    rho = rho_phy / rho0
    mu = mu_phy / mu0

    # Training sample counts
    num_x = 161
    num_y = 81
    num_t = 81

    num_content = 160000
    num_refine = 6000
    refine_factor = 0.002 # wall-refinement thickness factor

    # Prediction sample counts
    num_x_p = 401
    num_y_p = 161
    num_t_p = 51

    # Network architecture
    uv_layers = [3] + 8 * [50] + [5]
    particle_layers = [1] + 8 * [50] + [2]
    # NOTE(review): ``iter`` shadows the builtin; kept as-is because
    # model.train() is called with iter=iter below.
    iter = 5000
    learning_rate = 2e-4
    useExitingModel = False

    # Paths
    output_folder = './output'
    model_folder = './model'

    # Switches
    tracking_particle_p = True # whether to track the particle

    dimension = Dimension(t0, D0, U0, re)
    if tracking_particle_p:
        # Physical (dimensional) particle parameters
        r_phy = 0.0055
        xc_phy = 0.0
        yc_phy = 0.0
        rhop_phy = 5000

        # Non-dimensional particle parameters
        r = r_phy / D0
        xc = xc_phy / D0
        yc = yc_phy / D0
        rhop = rhop_phy / rho0

        # Sampling
        num_r = 161 # number of circumferential sample points
        num_particle_refine = 40000
        particle_refine_factor = 0.05
        num_particle_t = 1000

    # -------- master parameter control: end --------

    lb = np.array([xmin, ymin, tmin])
    ub = np.array([xmax, ymax, tmax])

    # Initial-condition points (t = tmin only)
    x_IC, y_IC, t_IC = CartGrid(xmin=xmin, xmax=xmax,
                                ymin=ymin, ymax=ymax,
                                tmin=tmin, tmax=tmin,
                                num_x=num_x, num_y=num_y, num_t=1)
    IC = np.concatenate((x_IC, y_IC, t_IC), 1)
    # Upper wall boundary
    x_upb, y_upb, t_upb = CartGrid(xmin=xmin, xmax=xmax,
                                   ymin=ymax, ymax=ymax,
                                   tmin=tmin, tmax=tmax,
                                   num_x=num_x, num_y=1, num_t=num_t)
    # Lower wall boundary
    x_lwb, y_lwb, t_lwb = CartGrid(xmin=xmin, xmax=xmax,
                                   ymin=ymin, ymax=ymax,
                                   tmin=tmin, tmax=tmax,
                                   num_x=num_x, num_y=1, num_t=num_t)
    wall_up = np.concatenate((x_upb, y_upb, t_upb), 1)
    wall_lw = np.concatenate((x_lwb, y_lwb, t_lwb), 1)
    WALL = np.concatenate((wall_up, wall_lw), 0)
    # Inlet boundary (uniform velocity profile)
    x_inb, y_inb, t_inb = CartGrid(xmin=xmin, xmax=xmin,
                                   ymin=ymin, ymax=ymax,
                                   tmin=tmin, tmax=tmax,
                                   num_x=1, num_y=num_y, num_t=num_t)
    # u_inb = 4 * U_max * y_inb * (ymax - y_inb) / (ymax ** 2) * (np.sin(3.1416 * t_inb / tmax + 3 * 3.1416 / 2) + 1.0)
    u_inb = U_max + 0 * y_inb
    v_inb = np.zeros_like(x_inb)
    INB = np.concatenate((x_inb, y_inb, t_inb, u_inb, v_inb), 1)

    # Outlet boundary
    x_outb, y_outb, t_outb = CartGrid(xmin=xmax, xmax=xmax,
                                      ymin=ymin, ymax=ymax,
                                      tmin=tmin, tmax=tmax,
                                      num_x=1, num_y=num_y, num_t=num_t)
    OUTB = np.concatenate((x_outb, y_outb, t_outb), 1)

    # Collocation points: Latin-hypercube samples over the whole domain
    XY_c = lb + (ub - lb) * lhs(3, num_content)
        # extra refinement near the upper/lower walls
    refine_thickness = (ymax - ymin) * refine_factor
    XY_c_lw = [xmin, ymin, tmin] + [xmax - xmin, refine_thickness, tmax - tmin] * lhs(3, num_refine)
    XY_c_up = [xmin, ymax, tmin] + [xmax - xmin, -refine_thickness, tmax - tmin] * lhs(3, num_refine)
    XY_c = np.concatenate((XY_c, XY_c_lw, XY_c_up), 0)
    XY_c = np.concatenate((XY_c, WALL, OUTB, INB[:, 0:3]), 0)

    # Particle time samples.
    # NOTE(review): the span should arguably be tmin + (tmax - tmin) * lhs(...);
    # equivalent here only because tmin == 0.
    t_c = tmin + tmax * lhs(1, num_particle_t)
    # np.insert without an axis flattens to 1-D; the model reshapes it back
    # to a column.  Prepends t = 0 so integration starts at the initial time.
    t_c = np.insert(t_c, 0, 0)
    t_c = np.sort(t_c)

    particle = Particle(r, xc, yc, num_r, num_particle_refine, rhop)
    model = PINN_laminar_flow(rho, mu, re, particle, dimension, t_c, XY_c, IC, INB, OUTB, WALL, uv_layers, particle_layers, ExistModel = useExitingModel)

    start_time = time.time()
    model.train(iter=iter, learning_rate=learning_rate)
    print("--- %s seconds ---" % (time.time() - start_time))
    # Save the trained model
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    model.save_NN(model_folder)

    shutil.rmtree(output_folder, ignore_errors=True)
    os.makedirs(output_folder)
    if tracking_particle_p:
        # Integrate the predicted particle velocity to recover its trajectory
        # (trapezoidal rule, mirroring PINN_laminar_flow.generate_xp).
        t1 = np.linspace(tmin, tmax, num_t_p)
        up_pred = np.zeros_like(t1)
        vp_pred = np.zeros_like(t1)
        x1 = np.zeros_like(t1)
        y1 = np.zeros_like(t1)
        x1[0] = 0
        y1[0] = 0
        for i in range(num_t_p):
            # predict_particle returns 1-element torch tensors; numpy scalar
            # assignment converts them implicitly via float().
            up_pred[i], vp_pred[i] = model.predict_particle(t1[i:i+1, None])
        x1[1:] = 0.5 * (up_pred[1:] + up_pred[:-1]) * (t1[1:] - t1[:-1])
        y1[1:] = 0.5 * (vp_pred[1:] + vp_pred[:-1]) * (t1[1:] - t1[:-1])
        x1 = np.cumsum(x1)
        y1 = np.cumsum(y1)
        x1 = x1 + xc
        y1 = y1 + yc

    for i in range(num_t_p):
        x_star = np.linspace(xmin, xmax, num_x_p)
        y_star = np.linspace(ymin, ymax, num_y_p)
        x_star, y_star = np.meshgrid(x_star, y_star)
        x_star = x_star.flatten()[:, None]
        y_star = y_star.flatten()[:, None]
        # Mask out prediction points inside the particle
        if tracking_particle_p:
            dst = ((x_star - x1[i]) ** 2 + (y_star - y1[i]) ** 2) ** 0.5
            x_star = x_star[dst >= r]
            y_star = y_star[dst >= r]
            x_star = x_star.flatten()[:, None]
            y_star = y_star.flatten()[:, None]

        t_star = np.ones_like(x_star) * (i * (tmax - tmin) / (num_t_p - 1))
        u_pred, v_pred, p_pred = model.predict_flow(x_star, y_star, t_star)
        # Re-dimensionalize: pressure scales with rho0 * U0^2, velocities with
        # U0, lengths with D0 and time with t0.
        p_pred = p_pred * rho0 * U0 **2
        u_pred = u_pred * U0
        v_pred = v_pred * U0
        x_star = x_star * D0
        y_star = y_star * D0
        t_star = t_star * t0
        field = [x_star, y_star, t_star, u_pred, v_pred, p_pred]
        # NOTE(review): amp_pred is computed but never used.
        amp_pred = (u_pred ** 2 + v_pred ** 2) ** 0.5

        postProcess(xmin=xmin*D0, xmax=xmax*D0, ymin=ymin*D0, ymax=ymax*D0, field=field, s=2, num=i)
    create_gif('./output', 'uvp.gif', duration=100)