#include <map>
#include <vector>
#include <string>
#include "smartpipe.h"

using namespace std;
using namespace sp;

/**
 * @brief Registers every pipeline Function type with the framework.
 *
 * Each type's static registe() call presumably adds the type to a factory
 * registry inside smartpipe.h -- confirm there. Must run before any of the
 * test functions below construct these Function objects.
 *
 * NOTE(review): "registe" looks like a typo for "register", which is a
 * reserved C++ keyword; the name is kept as-is for caller compatibility.
 */
void registe(){
    Image::Crop::cropWithInput::registe();
    Image::Crop::cropWithParas::registe();
    Image::Gen::genFromDisk::registe();
    Image::Gen::genFromMemory::registe();
    Image::Resize::letterBoxResize::registe();
    Image::Resize::resize::registe();
    Image::Save::markAndSave::registe();
    Image::Save::save::registe();
    Image::Trans::trans::registe();
    Model::Group::groupByBatch::registe();
    Model::Split::splitByShape::registe();
    Model::Trans::transferToDeviceMemory::registe();
    Model::Trans::transferToHostMemory::registe();
    Model::Yolo::yolo_complete::registe();
    Model::Yolo::yolo_preprocess::registe();
    Model::Yolo::yolo_inference::registe();
    Model::Yolo::yolo_postprocess::registe();
    Model::Retinanet::retinanet_preprocess::registe();
    Model::Retinanet::retinanet_inference::registe();
    Model::Retinanet::retinanet_postprocess::registe();
    Model::LPRnet::lprnet_preprocess::registe();
    Model::LPRnet::lprnet_inference::registe();
    Model::LPRnet::lprnet_postprocess::registe();
    Model::Openpose::openpose_preprocess::registe();
    Model::Openpose::openpose_inference::registe();
    Model::Openpose::openpose_postprocess::registe();
    Tool::Group::groupByRequestId::registe();
    Tool::Split::splitByFlowId::registe();
}

// Raw data locations -- set elsewhere (not in this file); TODO confirm who initializes them.
char* DATA_SOURCE;
char* DATA_TARGET;

// In-memory video buffers: input consumed by Image::Gen::genFromMemory,
// output written by Image::Save::markAndSave (see the test functions below).
char* input_video_data_ptr;
char* output_video_data_ptr;

// Shared parameters
string video_path = "/data/lx/SmartPipe/data_source/videos/0123.mp4";                   // video path
string save_path = "/data/lx/SmartPipe/apps/car_license_plate_recognition/output.avi";  // output path
int video_channels = 4;                // number of video channels
long single_cnt = 500;                 // frame count per channel
int single_fps = 30;                   // frame rate per channel
long cnt = single_cnt*video_channels;  // total number of frames to process

/**
 * @brief Single-channel video
 * 
 */
// 1. No core binding & single instance_number & single batch_size
void test1_1(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; the last argument selects core binding (off here).
    App app(0, Fs_map, &smm, &gpu_smm, false);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 2. Core binding & single instance_number & single batch_size
void test1_2(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; core-binding flag omitted -- App's default applies.
    App app(0, Fs_map, &smm, &gpu_smm);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 3. Core binding & adaptive instance_number & adaptive batch_size, throughput-first & bin-packing
void test1_3(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 4. Core binding & adaptive instance_number & adaptive batch_size, latency-first & bin-packing
void test1_4(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 5. Core binding & adaptive instance_number & adaptive batch_size, scale = 0.5 & bin-packing
void test1_5(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

/**
 * @brief Dual-channel video
 * 
 */
// 1. No core binding & single instance_number & single batch_size
void test2_1(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; the last argument selects core binding (off here).
    App app(0, Fs_map, &smm, &gpu_smm, false);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 2. Core binding & single instance_number & single batch_size
void test2_2(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    // Each model's transfer/inference stages get their own GPU here.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost}}, // GPU:0
        {5,  {yoloInfer}},             // GPU:0
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retToDev, retToHost}},   // GPU:1
        {13, {retInfer}},              // GPU:1
        {14, {retSplit}},
        {15, {retPost}},
        {16, {cropAfterRet}},
        {17, {resizeLpr}},
        {18, {lprPre}},
        {19, {lprBatch}},
        {20, {lprToDev, lprToHost}},   // GPU:2
        {21, {lprInfer}},              // GPU:2
        {22, {lprSplit}},
        {23, {lprPost}},
        {24, {regroup}},
        {25, {saver}}
    };
    // Build and run the app; core-binding flag omitted -- App's default applies.
    App app(0, Fs_map, &smm, &gpu_smm);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 3. Core binding & adaptive instance_number & adaptive batch_size, throughput-first & bin-packing
void test2_3(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 4. Core binding & adaptive instance_number & adaptive batch_size, latency-first & bin-packing
void test2_4(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 5. Core binding & adaptive instance_number & adaptive batch_size, scale = 0.5 & bin-packing
void test2_5(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

/**
 * @brief Four-channel video
 * 
 */
// 1. No core binding & single instance_number & single batch_size
void test4_1(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; the last argument selects core binding (off here).
    App app(0, Fs_map, &smm, &gpu_smm, false);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 2. Core binding & single instance_number & single batch_size
void test4_2(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    // Each model's transfer/inference stages get their own GPU here.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost}}, // GPU:0
        {5,  {yoloInfer}},             // GPU:0
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retToDev, retToHost}},   // GPU:1
        {13, {retInfer}},              // GPU:1
        {14, {retSplit}},
        {15, {retPost}},
        {16, {cropAfterRet}},
        {17, {resizeLpr}},
        {18, {lprPre}},
        {19, {lprBatch}},
        {20, {lprToDev, lprToHost}},   // GPU:2
        {21, {lprInfer}},              // GPU:2
        {22, {lprSplit}},
        {23, {lprPost}},
        {24, {regroup}},
        {25, {saver}}
    };
    // Build and run the app; core-binding flag omitted -- App's default applies.
    App app(0, Fs_map, &smm, &gpu_smm);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 3. Core binding & adaptive instance_number & adaptive batch_size, throughput-first & bin-packing
void test4_3(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 4. Core binding & adaptive instance_number & adaptive batch_size, latency-first & bin-packing
void test4_4(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 5. Core binding & adaptive instance_number & adaptive batch_size, scale = 0.5 & bin-packing
void test4_5(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

/**
 * @brief Eight-channel video
 * 
 */
// 1. No core binding & single instance_number & single batch_size
void test8_1(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; the last argument selects core binding (off here).
    App app(0, Fs_map, &smm, &gpu_smm, false);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 2. Core binding & single instance_number & single batch_size
void test8_2(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; core-binding flag omitted -- App's default applies.
    App app(0, Fs_map, &smm, &gpu_smm);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 3. Core binding & adaptive instance_number & adaptive batch_size, throughput-first & bin-packing
void test8_3(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 4. Core binding & adaptive instance_number & adaptive batch_size, latency-first & bin-packing
void test8_4(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}

// 5. Core binding & adaptive instance_number & adaptive batch_size, scale = 0.5 & bin-packing
void test8_5(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // TODO: not implemented yet.
}
/**
 * @brief Sixteen-channel video
 * 
 */
// 1. No core binding & single instance_number & single batch_size
void test16_1(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Pipeline stages: YOLO detection -> crop -> RetinaNet -> crop -> LPRNet -> regroup -> save.
    Function* gen           = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* resizeYolo    = new Image::Resize::resize(640, 384);
    Function* yoloPre       = new Model::Yolo::yolo_preprocess(640, 384);
    Function* yoloBatch     = new Model::Group::groupByBatch(1);
    Function* yoloToDev     = new Model::Trans::transferToDeviceMemory();
    Function* yoloInfer     = new Model::Yolo::yolo_inference(640, 384);
    Function* yoloToHost    = new Model::Trans::transferToHostMemory();
    Function* yoloSplit     = new Model::Split::splitByShape();
    Function* yoloPost      = new Model::Yolo::yolo_postprocess(640, 384);
    Function* cropAfterYolo = new Image::Crop::cropWithInput(0);
    Function* resizeRet     = new Image::Resize::resize(320, 320);
    Function* retPre        = new Model::Retinanet::retinanet_preprocess();
    Function* retBatch      = new Model::Group::groupByBatch(1);
    Function* retToDev      = new Model::Trans::transferToDeviceMemory();
    Function* retInfer      = new Model::Retinanet::retinanet_inference();
    Function* retToHost     = new Model::Trans::transferToHostMemory();
    Function* retSplit      = new Model::Split::splitByShape();
    Function* retPost       = new Model::Retinanet::retinanet_postprocess();
    Function* cropAfterRet  = new Image::Crop::cropWithInput(1);
    Function* resizeLpr     = new Image::Resize::resize(94, 24);
    Function* lprPre        = new Model::LPRnet::lprnet_preprocess();
    Function* lprBatch      = new Model::Group::groupByBatch(1);
    Function* lprToDev      = new Model::Trans::transferToDeviceMemory();
    Function* lprInfer      = new Model::LPRnet::lprnet_inference();
    Function* lprToHost     = new Model::Trans::transferToHostMemory();
    Function* lprSplit      = new Model::Split::splitByShape();
    Function* lprPost       = new Model::LPRnet::lprnet_postprocess();
    Function* regroup       = new Tool::Group::groupByRequestId(cnt);
    Function* saver         = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Logical topology: the source also feeds both the first crop stage and the
    // saver; the RetinaNet-input resize additionally feeds the second crop.
    connectOneToMany(3, gen, cropAfterYolo, saver);
    connect(resizeRet, cropAfterRet);
    connectOneByOne(29, gen, resizeYolo, yoloPre, yoloBatch, yoloToDev, yoloInfer, yoloToHost, yoloSplit, yoloPost, cropAfterYolo, resizeRet, retPre, retBatch, retToDev, retInfer, retToHost, retSplit, retPost, cropAfterRet, resizeLpr, lprPre, lprBatch, lprToDev, lprInfer, lprToHost, lprSplit, lprPost, regroup, saver);
    // Deployment table: slot id -> functions co-deployed on that slot.
    map<short, vector<Function*>> Fs_map{
        {0,  {gen}},
        {1,  {resizeYolo}},
        {2,  {yoloPre}},
        {3,  {yoloBatch}},
        {4,  {yoloToDev, yoloToHost, retToDev, retToHost, lprToDev, lprToHost}},
        {5,  {yoloInfer, retInfer, lprInfer}},
        {6,  {yoloSplit}},
        {7,  {yoloPost}},
        {8,  {cropAfterYolo}},
        {9,  {resizeRet}},
        {10, {retPre}},
        {11, {retBatch}},
        {12, {retSplit}},
        {13, {retPost}},
        {14, {cropAfterRet}},
        {15, {resizeLpr}},
        {16, {lprPre}},
        {17, {lprBatch}},
        {18, {lprSplit}},
        {19, {lprPost}},
        {20, {regroup}},
        {21, {saver}}
    };
    // Build and run the app; the last argument selects core binding (off here).
    App app(0, Fs_map, &smm, &gpu_smm, false);
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 2. Core binding & single instance_number & single batch_size
void test16_2(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Build the pipeline stages. Stage chain:
    //   frame source -> YOLO (vehicle detect) -> crop -> RetinaNet (plate detect)
    //   -> crop -> LPRNet (plate recognize) -> regroup by request -> mark & save.
    Function* p0  = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* p1  = new Image::Resize::resize(640, 384);
    Function* p2  = new Model::Yolo::yolo_preprocess(640, 384);
    Function* p3  = new Model::Group::groupByBatch(1);
    Function* p4  = new Model::Trans::transferToDeviceMemory();
    Function* p5  = new Model::Yolo::yolo_inference(640, 384);
    Function* p6  = new Model::Trans::transferToHostMemory();
    Function* p7  = new Model::Split::splitByShape();
    Function* p8  = new Model::Yolo::yolo_postprocess(640, 384);
    Function* p9  = new Image::Crop::cropWithInput(0);
    Function* p10 = new Image::Resize::resize(320, 320);
    Function* p11 = new Model::Retinanet::retinanet_preprocess();
    Function* p12 = new Model::Group::groupByBatch(1);
    Function* p13 = new Model::Trans::transferToDeviceMemory();
    Function* p14 = new Model::Retinanet::retinanet_inference();
    Function* p15 = new Model::Trans::transferToHostMemory();
    Function* p16 = new Model::Split::splitByShape();
    Function* p17 = new Model::Retinanet::retinanet_postprocess();
    Function* p18 = new Image::Crop::cropWithInput(1);
    Function* p19 = new Image::Resize::resize(94, 24);
    Function* p20 = new Model::LPRnet::lprnet_preprocess();
    Function* p21 = new Model::Group::groupByBatch(1);
    Function* p22 = new Model::Trans::transferToDeviceMemory();
    Function* p23 = new Model::LPRnet::lprnet_inference();
    Function* p24 = new Model::Trans::transferToHostMemory();
    Function* p25 = new Model::Split::splitByShape();
    Function* p26 = new Model::LPRnet::lprnet_postprocess();
    Function* p27 = new Tool::Group::groupByRequestId(cnt);
    Function* p28 = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Wire the logical graph. The source additionally connects to the crop and
    // save stages (presumably feeding them the original full frames — confirm
    // against connectOneToMany's semantics), and the 320x320 resize output also
    // feeds the second crop stage.
    connectOneToMany(3, p0, p9, p28);
    connect(p10, p18);
    connectOneByOne(29, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22, p23, p24, p25, p26, p27, p28);
    // Deployment table: executor id -> functions hosted on that executor.
    // All host<->device transfer stages share executor 4; all inference stages
    // share executor 5; everything else gets its own executor.
    map<short, vector<Function*>> Fs_map{
        {0,  {p0}},
        {1,  {p1}},
        {2,  {p2}},
        {3,  {p3}},
        {4,  {p4, p6, p13, p15, p22, p24}},
        {5,  {p5, p14, p23}},
        {6,  {p7}},
        {7,  {p8}},
        {8,  {p9}},
        {9,  {p10}},
        {10, {p11}},
        {11, {p12}},
        {12, {p16}},
        {13, {p17}},
        {14, {p18}},
        {15, {p19}},
        {16, {p20}},
        {17, {p21}},
        {18, {p25}},
        {19, {p26}},
        {20, {p27}},
        {21, {p28}},
    };
    // Construct the app; the (defaulted) last parameter controls core binding.
    App app(0, Fs_map, &smm, &gpu_smm);
    // Run the app to completion.
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

// 3. Core binding & adaptive instance_number & adaptive batch_size, throughput-first & bin packing
// NOTE(review): placeholder — not implemented yet.
void test16_3(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){

}

// 4. Core binding & adaptive instance_number & adaptive batch_size, latency-first & bin packing
// NOTE(review): placeholder — not implemented yet.
void test16_4(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){

}

// 5. Core binding & adaptive instance_number & adaptive batch_size, scale = 0.5 & bin packing
// NOTE(review): placeholder — not implemented yet.
void test16_5(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){

}

/**
 * @brief Cores bound; SmartPipe does the scheduling; no operator grouping —
 *        every function gets its own executor, plus one extra resize instance.
 * 
 * @param smm host shared-memory manager
 * @param gpu_smm GPU shared-memory manager
 */
void test2(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Construct the pipeline functions:
    //   frame source -> YOLO (vehicle detect) -> crop -> RetinaNet (plate detect)
    //   -> crop -> LPRNet (plate recognize) -> regroup by request -> mark & save.
    Function* f0 = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* f1 = new Image::Resize::resize(640, 384);
    Function* f2 = new Model::Yolo::yolo_preprocess(640, 384);
    Function* f3 = new Model::Group::groupByBatch(1);
    Function* f4 = new Model::Trans::transferToDeviceMemory();
    Function* f5 = new Model::Yolo::yolo_inference(640, 384);
    Function* f6 = new Model::Trans::transferToHostMemory();
    Function* f7 = new Model::Split::splitByShape();
    Function* f8 = new Model::Yolo::yolo_postprocess(640, 384);
    Function* f9 = new Image::Crop::cropWithInput(0);
    Function* f10 = new Image::Resize::resize(320, 320);
    Function* f11 = new Model::Retinanet::retinanet_preprocess();
    Function* f12 = new Model::Group::groupByBatch(1);
    Function* f13 = new Model::Trans::transferToDeviceMemory();
    Function* f14 = new Model::Retinanet::retinanet_inference();
    Function* f15 = new Model::Trans::transferToHostMemory();
    Function* f16 = new Model::Split::splitByShape();
    Function* f17 = new Model::Retinanet::retinanet_postprocess();
    Function* f18 = new Image::Crop::cropWithInput(1);
    Function* f19 = new Image::Resize::resize(94, 24);
    Function* f20 = new Model::LPRnet::lprnet_preprocess();
    Function* f21 = new Model::Group::groupByBatch(1);
    Function* f22 = new Model::Trans::transferToDeviceMemory();
    Function* f23 = new Model::LPRnet::lprnet_inference();
    Function* f24 = new Model::Trans::transferToHostMemory();
    Function* f25 = new Model::Split::splitByShape();
    Function* f26 = new Model::LPRnet::lprnet_postprocess();
    Function* f27 = new Tool::Group::groupByRequestId(cnt);
    Function* f28 = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    cout << "functions build complete." << endl;
    // Wire the logical connections. f0 additionally connects to f9 and f28
    // (presumably feeding them the original full frames — confirm against
    // connectOneToMany's semantics); the 320x320 resize also feeds f18.
    connectOneToMany(3, f0, f9, f28);
    connect(f10, f18);
    connectOneByOne(29, f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
    cout << "functions connect complete." << endl;
    // Expand f1 (the 640x384 resize) with one additional instance; the new
    // instance pointer is returned through f1_0 and deployed on executor 22.
    Function* f1_0;
    expand(f1, 1, &f1_0);
    // Function* f28_0;
    // expand(f28, 1, &f28_0);
    // cout << "functions expand complete." << endl;
    // Deployment table. GPU bin packing is done before CPU bin packing, so in
    // the CPU packing result each Executor maps to at most one GPU, and there
    // are at most as many such executors as there are GPUs.
    map<short, vector<Function*>> Fs_map;
    Fs_map[0] = {f0};
    Fs_map[1] = {f1};
    Fs_map[2] = {f2};
    Fs_map[3] = {f3};
    Fs_map[4] = {f4, f6, f13, f15, f22, f24};
    Fs_map[5] = {f5, f14, f23};
    Fs_map[6] = {f7};
    Fs_map[7] = {f8};
    Fs_map[8] = {f9};
    Fs_map[9] = {f10};
    Fs_map[10] = {f11};
    Fs_map[11] = {f12};
    Fs_map[12] = {f16};
    Fs_map[13] = {f17};
    Fs_map[14] = {f18};
    Fs_map[15] = {f19};
    Fs_map[16] = {f20};
    Fs_map[17] = {f21};
    Fs_map[18] = {f25};
    Fs_map[19] = {f26};
    Fs_map[20] = {f27};
    Fs_map[21] = {f28};
    Fs_map[22] = {f1_0};
    // Fs_map[22] = {f28_0};
    // Construct the app; the (defaulted) last parameter controls core binding.
    App app(0, Fs_map, &smm, &gpu_smm);
    // Run the app to completion.
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

/**
 * @brief Cores bound; SmartPipe does the scheduling; functions are combined
 *        onto a few executors with a hand-tuned load balance.
 * 
 * @param smm host shared-memory manager
 * @param gpu_smm GPU shared-memory manager
 */
void test3(SharedMemoryManager& smm, Gpu_SharedMemoryManager& gpu_smm){
    // Build the pipeline stages. Stage chain:
    //   frame source -> YOLO (vehicle detect) -> crop -> RetinaNet (plate detect)
    //   -> crop -> LPRNet (plate recognize) -> regroup by request -> mark & save.
    Function* n0  = new Image::Gen::genFromMemory(input_video_data_ptr, cnt, single_fps*video_channels, 3840, 2160);
    Function* n1  = new Image::Resize::resize(640, 384);
    Function* n2  = new Model::Yolo::yolo_preprocess(640, 384);
    Function* n3  = new Model::Group::groupByBatch(1);
    Function* n4  = new Model::Trans::transferToDeviceMemory();
    Function* n5  = new Model::Yolo::yolo_inference(640, 384);
    Function* n6  = new Model::Trans::transferToHostMemory();
    Function* n7  = new Model::Split::splitByShape();
    Function* n8  = new Model::Yolo::yolo_postprocess(640, 384);
    Function* n9  = new Image::Crop::cropWithInput(0);
    Function* n10 = new Image::Resize::resize(320, 320);
    Function* n11 = new Model::Retinanet::retinanet_preprocess();
    Function* n12 = new Model::Group::groupByBatch(1);
    Function* n13 = new Model::Trans::transferToDeviceMemory();
    Function* n14 = new Model::Retinanet::retinanet_inference();
    Function* n15 = new Model::Trans::transferToHostMemory();
    Function* n16 = new Model::Split::splitByShape();
    Function* n17 = new Model::Retinanet::retinanet_postprocess();
    Function* n18 = new Image::Crop::cropWithInput(1);
    Function* n19 = new Image::Resize::resize(94, 24);
    Function* n20 = new Model::LPRnet::lprnet_preprocess();
    Function* n21 = new Model::Group::groupByBatch(1);
    Function* n22 = new Model::Trans::transferToDeviceMemory();
    Function* n23 = new Model::LPRnet::lprnet_inference();
    Function* n24 = new Model::Trans::transferToHostMemory();
    Function* n25 = new Model::Split::splitByShape();
    Function* n26 = new Model::LPRnet::lprnet_postprocess();
    Function* n27 = new Tool::Group::groupByRequestId(cnt);
    Function* n28 = new Image::Save::markAndSave(output_video_data_ptr, 30, 3840, 2160);
    // Wire the logical graph. The source additionally connects to the crop and
    // save stages (presumably feeding them the original full frames — confirm
    // against connectOneToMany's semantics), and the 320x320 resize output also
    // feeds the second crop stage.
    connectOneToMany(3, n0, n9, n28);
    connect(n10, n18);
    connectOneByOne(29, n0, n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15, n16, n17, n18, n19, n20, n21, n22, n23, n24, n25, n26, n27, n28);
    // Deployment table: a hand-balanced grouping onto five executors
    // (source / CPU pre-stages / mixed transfer+post group / inference-heavy
    // group / sink).
    map<short, vector<Function*>> Fs_map{
        {0, {n0}},
        {1, {n1, n2, n3}},
        {2, {n4, n6, n13, n15, n22, n24, n8, n11, n16, n17, n18, n20, n21, n25, n26, n27}},
        {3, {n5, n14, n23, n7, n9, n10, n12, n19}},
        {4, {n28}},
    };
    // Construct the app; the (defaulted) last parameter controls core binding.
    App app(0, Fs_map, &smm, &gpu_smm);
    // Run the app to completion.
    app.check();
    app.printMsg();
    app.init();
    app.run();
    app.waitForComplete();
}

int main(){
    // 注册
    registe();
    // 声明共享内存管理对象
    SharedMemoryManager smm;
    Gpu_SharedMemoryManager gpu_smm;
    // 将视频加载到内存中
    SharedMemoryPool input_video_pool = smm.createSharedMemoryPool("input_video", Cv_Mat_Data_3840_2160_Memory, cnt);
    SharedMemoryPool output_video_pool = smm.createSharedMemoryPool("output_video", Cv_Mat_Data_3840_2160_Memory, cnt);
    input_video_data_ptr = input_video_pool.getDataPtr();
    output_video_data_ptr = output_video_pool.getDataPtr();
    Image::Gen::genFromDisk f_start(video_path, input_video_data_ptr, cnt, 30, 3840, 2160);
    f_start.run();
    // 进行测试
    pid_t pid = fork();
    if(pid){
        int res = system("monitor lab1");
    }else{
        test2_2(smm, gpu_smm);
        // 检查是否存在内存泄露
        smm.check();
        // 将输出视频保存到磁盘
        // Image::Save::save f_end(save_path, output_video_data_ptr, cnt, 30, 3840, 2160);
        // f_end.run();
    }
}