/**
 * @file FaceRecognitionChannel.hpp
 * @author SDK_TEAM
 * @brief
 * @version 0.1
 * @date 2022-12-01
 *
 * Copyright:
 * © 2018 北京灵汐科技有限公司 版权所有。
 * 注意：以下内容均为北京灵汐科技有限公司原创，未经本公司允许，不得转载，否则将视为侵权；对于不遵守此声明或者其他违法使用以下内容者，本公司依法保留追究权。
 * © 2018 Lynxi Technologies Co., Ltd. All rights reserved.
 * NOTICE: All information contained here is, and remains the property of Lynxi.
 * This file can not be copied or distributed without the permission of Lynxi
 * Technologies Co., Ltd.
 *
 */

#pragma once

#include <lyn_api.h>
#include <lyn_blas.h>
#include <lyn_plugin.h>
#include <unistd.h>
#include <iostream>
#include "batchMem.hpp"
#include "blockQueue.hpp"
#include "boxInfoEncode.h"
#include "bufferPool.hpp"
#include "faceRecognitionParams.hpp"
#include "face_post_process.h"
#include "framePool.hpp"
#include "ipeParamFaceRecognition.h"
#include "nlohmann/json.hpp"
#include "opencvWindow.hpp"
#include "osd_plugin.h"
#include "statsInfo.h"
#include "sys/time.h"
#include "typeConv.hpp"
#include "util.hpp"
#include "videoDecoder.h"
#include "videoEncoder.h"

// Statistics entry IDs (keys into the per-channel StatsInfo table)
#define FACE_RECOGNITION_STATS_FRAME_RATE 0
#define FACE_RECOGNITION_STATS_DECODE_TIME 1
#define FACE_RECOGNITION_STATS_DETECT_PREPROCESS_TIME 2
#define FACE_RECOGNITION_STATS_DETECT_INFER_TIME 3
#define FACE_RECOGNITION_STATS_DETECT_POSTPROCESS_TIME 4
#define FACE_RECOGNITION_STATS_FEATURE_PREPROCESS_TIME 5
#define FACE_RECOGNITION_STATS_FEATURE_INFER_TIME 6
#define FACE_RECOGNITION_STATS_FEATURE_POSTPROCESS_TIME 7

// Per-device context record: bundles the resources associated with one device.
typedef struct {
  lynContext_t ctx;        // device context handle
  const char *pModelPath;  // path of the model loaded on this device
} DEVICE_CONTEXT_T;

// Configuration for one face-recognition channel (one input stream).
struct FaceRecognitionChannelInfo {
  std::string inputPath;               // input video file or stream source
  std::string outputPath;              // base path for the channel's output
  std::string channelName;             // name used for display and statistics
  std::string faceDetectModelPath;     // face-detection model file
  std::string faceDetectPostPluginPath;  // device plugin for detect postprocess
  std::string faceFeatureModelPath;    // face-feature (recognition) model file
  int deviceID;                        // device index (usage not shown here)
  lynContext_t *context;               // device context the channel runs on
  InputType inputType;                 // kind of input source (see InputType)
  int maxFps;                          // decoder frame-rate cap
};

namespace FaceAttributes {
// Axis-aligned face rectangle in pixel coordinates (top-left origin).
struct FaceRect {
 private:
  // Round n up to the nearest even value; NV12 crops require even
  // offsets and sizes.
  size_t EnsureEven(size_t n) {
    if (n % 2 != 0) {
      return n + 1;
    }
    return n;
  }

 public:
  size_t x, y, w, h;

  // Force every coordinate and size to an even value (rounding up).
  void MakeEven() {
    x = EnsureEven(x);
    y = EnsureEven(y);
    w = EnsureEven(w);
    h = EnsureEven(h);
  }
};

// Owns a float feature buffer and provides the similarity-transform helpers
// used to align a face crop onto the canonical 112x112 template.
class FeatureData {
 public:
  FeatureData() : numFeature(0), features(nullptr) {}

  // Allocates storage for `num` floats; the custom deleter matches the
  // array new (shared_ptr<float> would otherwise call scalar delete).
  FeatureData(int num)
      : numFeature(num), features(new float[num], [](float *p) { delete[] p; }) {}

  int numFeature;
  std::shared_ptr<float> features;

  // Least-squares fit of a 4-DOF similarity transform (uniform scale,
  // rotation, translation) mapping `points_from` onto `points_to`
  // (num_point interleaved x/y pairs). The solution mm = {a, b, tx, ty}
  // is written both as the flat array `tm` = {a, -b, tx, b, a, ty} and as
  // the row-major 2x3 matrix `tmatrix`.
  static void getAffineTransform(const float *points_from,
                                 const float *points_to, int num_point,
                                 float *tm, float (*tmatrix)[3]) {
    float ma[4][4] = {{0.f}};
    float mb[4] = {0.f};
    float mm[4];

    // Accumulate the normal equations of the least-squares problem.
    for (int i = 0; i < num_point; i++) {
      ma[0][0] +=
          points_from[0] * points_from[0] + points_from[1] * points_from[1];
      ma[0][2] += points_from[0];
      ma[0][3] += points_from[1];

      mb[0] += points_from[0] * points_to[0] + points_from[1] * points_to[1];
      mb[1] += points_from[0] * points_to[1] - points_from[1] * points_to[0];
      mb[2] += points_to[0];
      mb[3] += points_to[1];

      points_from += 2;
      points_to += 2;
    }

    // Fill in the remaining (symmetric/antisymmetric) entries of A.
    ma[1][1] = ma[0][0];
    ma[2][1] = ma[1][2] = -ma[0][3];
    ma[3][1] = ma[1][3] = ma[2][0] = ma[0][2];
    ma[2][2] = ma[3][3] = (float)num_point;
    ma[3][0] = ma[0][3];

    // MM = inv(A) * B
    // matrix 4x4 invert by https://github.com/willnode/N-Matrix-Programmer
    // suppose the user provide valid points combination
    // I have not taken det == zero into account here   :>  --- nihui
    float mai[4][4];
    float det;
    // clang-format off
    // *INDENT-OFF*
    {
        float A2323 = ma[2][2] * ma[3][3] - ma[2][3] * ma[3][2];
        float A1323 = ma[2][1] * ma[3][3] - ma[2][3] * ma[3][1];
        float A1223 = ma[2][1] * ma[3][2] - ma[2][2] * ma[3][1];
        float A0323 = ma[2][0] * ma[3][3] - ma[2][3] * ma[3][0];
        float A0223 = ma[2][0] * ma[3][2] - ma[2][2] * ma[3][0];
        float A0123 = ma[2][0] * ma[3][1] - ma[2][1] * ma[3][0];
        float A2313 = ma[1][2] * ma[3][3] - ma[1][3] * ma[3][2];
        float A1313 = ma[1][1] * ma[3][3] - ma[1][3] * ma[3][1];
        float A1213 = ma[1][1] * ma[3][2] - ma[1][2] * ma[3][1];
        float A2312 = ma[1][2] * ma[2][3] - ma[1][3] * ma[2][2];
        float A1312 = ma[1][1] * ma[2][3] - ma[1][3] * ma[2][1];
        float A1212 = ma[1][1] * ma[2][2] - ma[1][2] * ma[2][1];
        float A0313 = ma[1][0] * ma[3][3] - ma[1][3] * ma[3][0];
        float A0213 = ma[1][0] * ma[3][2] - ma[1][2] * ma[3][0];
        float A0312 = ma[1][0] * ma[2][3] - ma[1][3] * ma[2][0];
        float A0212 = ma[1][0] * ma[2][2] - ma[1][2] * ma[2][0];
        float A0113 = ma[1][0] * ma[3][1] - ma[1][1] * ma[3][0];
        float A0112 = ma[1][0] * ma[2][1] - ma[1][1] * ma[2][0];

        det = ma[0][0] * (ma[1][1] * A2323 - ma[1][2] * A1323 + ma[1][3] * A1223)
            - ma[0][1] * (ma[1][0] * A2323 - ma[1][2] * A0323 + ma[1][3] * A0223)
            + ma[0][2] * (ma[1][0] * A1323 - ma[1][1] * A0323 + ma[1][3] * A0123)
            - ma[0][3] * (ma[1][0] * A1223 - ma[1][1] * A0223 + ma[1][2] * A0123);

        det = 1.f / det;

        mai[0][0] =   (ma[1][1] * A2323 - ma[1][2] * A1323 + ma[1][3] * A1223);
        mai[0][1] = - (ma[0][1] * A2323 - ma[0][2] * A1323 + ma[0][3] * A1223);
        mai[0][2] =   (ma[0][1] * A2313 - ma[0][2] * A1313 + ma[0][3] * A1213);
        mai[0][3] = - (ma[0][1] * A2312 - ma[0][2] * A1312 + ma[0][3] * A1212);
        mai[1][0] = - (ma[1][0] * A2323 - ma[1][2] * A0323 + ma[1][3] * A0223);
        mai[1][1] =   (ma[0][0] * A2323 - ma[0][2] * A0323 + ma[0][3] * A0223);
        mai[1][2] = - (ma[0][0] * A2313 - ma[0][2] * A0313 + ma[0][3] * A0213);
        mai[1][3] =   (ma[0][0] * A2312 - ma[0][2] * A0312 + ma[0][3] * A0212);
        mai[2][0] =   (ma[1][0] * A1323 - ma[1][1] * A0323 + ma[1][3] * A0123);
        mai[2][1] = - (ma[0][0] * A1323 - ma[0][1] * A0323 + ma[0][3] * A0123);
        mai[2][2] =   (ma[0][0] * A1313 - ma[0][1] * A0313 + ma[0][3] * A0113);
        mai[2][3] = - (ma[0][0] * A1312 - ma[0][1] * A0312 + ma[0][3] * A0112);
        mai[3][0] = - (ma[1][0] * A1223 - ma[1][1] * A0223 + ma[1][2] * A0123);
        mai[3][1] =   (ma[0][0] * A1223 - ma[0][1] * A0223 + ma[0][2] * A0123);
        mai[3][2] = - (ma[0][0] * A1213 - ma[0][1] * A0213 + ma[0][2] * A0113);
        mai[3][3] =   (ma[0][0] * A1212 - ma[0][1] * A0212 + ma[0][2] * A0112);
    }
    // *INDENT-ON*
    // clang-format on

    mm[0] = det * (mai[0][0] * mb[0] + mai[0][1] * mb[1] + mai[0][2] * mb[2] +
                   mai[0][3] * mb[3]);
    mm[1] = det * (mai[1][0] * mb[0] + mai[1][1] * mb[1] + mai[1][2] * mb[2] +
                   mai[1][3] * mb[3]);
    mm[2] = det * (mai[2][0] * mb[0] + mai[2][1] * mb[1] + mai[2][2] * mb[2] +
                   mai[2][3] * mb[3]);
    mm[3] = det * (mai[3][0] * mb[0] + mai[3][1] * mb[1] + mai[3][2] * mb[2] +
                   mai[3][3] * mb[3]);

    tm[0] = tm[4] = mm[0];
    tm[1] = -mm[1];
    tm[3] = mm[1];
    tm[2] = mm[2];
    tm[5] = mm[3];

    tmatrix[0][0] = tmatrix[1][1] = mm[0];
    tmatrix[0][1] = -mm[1];
    tmatrix[1][0] = mm[1];
    tmatrix[0][2] = mm[2];
    tmatrix[1][2] = mm[3];
  }

  // Computes the 2x3 affine matrix that maps the face crop `rect` onto the
  // canonical 112x112 aligned-face template, using the five detected
  // landmarks (given in full-image coordinates as x/y pairs).
  static void getTransformMat(const float (&landmark)[10], const FaceRect &rect,
                              float (&matrix)[2][3]) {
    const float point_landmark[10] = {
        // +8 是因为我们处理112*112的图
        // (+8 shifts the original 96x112 template to the 112x112 layout)
        30.2946f + 8.0f,   51.6963f, 65.5318f + 8.0f, 51.5014f,
        48.0252f + 8.0f,   71.7366f, 33.5493f + 8.0f, 92.3655f,
        62.729904f + 8.0f, 92.2041f,
    };
    const size_t NUM_LANDMARKS = 5;

    // Translate the landmarks into the crop's local coordinate system and
    // clamp at zero (a landmark may fall slightly outside the crop).
    // Note: the original code round-tripped the landmarks through a
    // heap-allocated FeatureData for no effect; copy directly instead.
    std::vector<float> landmarks(NUM_LANDMARKS * 2);
    for (size_t i = 0; i < NUM_LANDMARKS; i++) {
      const float lx = landmark[2 * i] - rect.x;
      const float ly = landmark[2 * i + 1] - rect.y;
      landmarks[2 * i] = lx < 0 ? 0 : lx;
      landmarks[2 * i + 1] = ly < 0 ? 0 : ly;
    }
    float tm[6];  // flat copy of the same transform; unused by callers here
    getAffineTransform(landmarks.data(), point_landmark, NUM_LANDMARKS, tm,
                       matrix);
  }
};
}  // namespace FaceAttributes

// Clamps a detected face box to the image bounds and converts it into an
// even-aligned FaceRect suitable for NV12 cropping.
// NOTE(review): despite the name, the box is only clamped, never enlarged;
// also assumes the box overlaps the image (clamped xmax >= xmin), otherwise
// the size_t subtraction underflows — confirm upstream detection guarantees.
static FaceAttributes::FaceRect ExtendFaceBox(const FaceDetectBox &faceBox,
                                              size_t imagePixelW,
                                              size_t imagePixelH) {
  // The original code also computed the raw box width/height here, but the
  // values were never used.
  size_t xmin = max(int(faceBox.xmin), (int)0);
  size_t ymin = max(int(faceBox.ymin), (int)0);
  size_t xmax = min(int(faceBox.xmax), int(imagePixelW - 1));
  size_t ymax = min(int(faceBox.ymax), int(imagePixelH - 1));

  auto rect = FaceAttributes::FaceRect{
      xmin,
      ymin,
      xmax - xmin,
      ymax - ymin,
  };

  rect.MakeEven();
  return rect;
}

// Accepts a detection only when the box is strictly larger than 20x20
// pixels and none of the ten landmark coordinates (5 x/y pairs) is
// negative.
inline bool CheckFaceBox(const FaceDetectBox &faceBox) {
  const float boxWidth = faceBox.xmax - faceBox.xmin;
  const float boxHeight = faceBox.ymax - faceBox.ymin;

  if (boxWidth <= 20 || boxHeight <= 20) {
    return false;
  }

  int idx = 0;
  while (idx < 10) {
    if (faceBox.landmark[idx] < 0) {
      return false;
    }
    ++idx;
  }

  return true;
}

struct FaceRecognitionChannel {
  std::string m_path = "";
  VideoDecoder m_decoder;
  BlockQueue<lynFrame_t *> m_queue;  //专门给显示用的队列
  FramePool *m_framePool;            //三种模式下公用的对象池
  lynContext_t *m_context;
  lynVdecOutInfo_t m_videoInfo;
  std::thread *m_thread;
  std::string m_channelName;
  StatsInfo *m_statsInfo;

  // Display queue holds up to 5 frames. All pointer members start null:
  // the destructor and Close() test m_framePool / m_thread / m_statsInfo,
  // so leaving them uninitialized (as before) was undefined behavior when
  // the channel was destroyed without a successful Init().
  FaceRecognitionChannel()
      : m_queue(5),
        m_framePool(nullptr),
        m_context(nullptr),
        m_thread(nullptr),
        m_statsInfo(nullptr) {}

  // Releases the frame pool on the owning device context.
  // NOTE(review): dereferences m_context whenever m_framePool is non-null —
  // assumes Init()/ThreadFunc set both before destruction; confirm callers.
  ~FaceRecognitionChannel() {
    if (m_framePool) {
      // The pool holds device buffers, so the channel's context must be
      // current before they are freed.
      CHECK_ERR(lynSetCurrentContext(*m_context));
      delete m_framePool;
    }
  }

  // Records the channel settings, builds the statistics label table and
  // launches the worker thread that drives the whole pipeline.
  // Always returns true.
  bool Init(FaceRecognitionChannelInfo &channelInfo) {
    m_channelName = channelInfo.channelName;
    m_context = channelInfo.context;

    std::map<uint32_t, std::string> statsLabels;
    statsLabels.emplace(FACE_RECOGNITION_STATS_FRAME_RATE, "frame rate(fps)");
    if (argPrintStats != 0) {
      // Per-stage timings are only tracked when stats printing is enabled.
      statsLabels.emplace(FACE_RECOGNITION_STATS_DECODE_TIME,
                          "decode cost time(ms)");
      statsLabels.emplace(FACE_RECOGNITION_STATS_DETECT_PREPROCESS_TIME,
                          "detect preprocess cost time(ms)");
      statsLabels.emplace(FACE_RECOGNITION_STATS_DETECT_INFER_TIME,
                          "detect infer cost time(ms)");
      statsLabels.emplace(FACE_RECOGNITION_STATS_DETECT_POSTPROCESS_TIME,
                          "detect postprocess cost time(ms)");
      statsLabels.emplace(FACE_RECOGNITION_STATS_FEATURE_PREPROCESS_TIME,
                          "feature preprocess cost time(ms)");
      statsLabels.emplace(FACE_RECOGNITION_STATS_FEATURE_INFER_TIME,
                          "feature infer cost time(ms)");
      statsLabels.emplace(FACE_RECOGNITION_STATS_FEATURE_POSTPROCESS_TIME,
                          "feature postprocess cost time(ms)");
    }
    m_statsInfo = new StatsInfo(m_channelName, statsLabels);

    // The worker receives its own copy of channelInfo.
    m_thread =
        new std::thread(&FaceRecognitionChannel::ThreadFunc, this, channelInfo);
    return true;
  }

  // Joins and destroys the worker thread, then stops and releases the
  // statistics printer. Safe to call more than once.
  void Close() {
    if (m_thread != nullptr) {
      m_thread->join();
      delete m_thread;
      m_thread = nullptr;
    }
    if (m_statsInfo) {
      m_statsInfo->StopPrint();
      delete m_statsInfo;
      // Null the pointer (as done for m_thread above) so a second Close()
      // does not double-delete.
      m_statsInfo = nullptr;
    }
  }

  // Returns a display frame to the shared frame pool for reuse.
  void putFrame(lynFrame_t *frame) { m_framePool->Push(frame); }

  // Takes the next frame from the display queue, waiting up to `timeout`
  // milliseconds. Returns false if no frame arrived within the timeout.
  // (The previous empty failure branch held only commented-out logging.)
  bool getFrame(lynFrame_t **frame, int timeout = 1000) {
    return m_queue.take(*frame, timeout);
  }

  // Copies the most recently observed decoder output info (resolution etc.).
  void getVideoInfo(lynVdecOutInfo_t &videoInfo) { videoInfo = m_videoInfo; }

  void ThreadFunc(FaceRecognitionChannelInfo channelInfo) {
    CHECK_ERR(lynSetCurrentContext(*channelInfo.context));

    ifstream faces_config_file(argFaceLibConfig);
    if (!faces_config_file.is_open()) {
      const string err = "cannot open facelib config file: " + argFaceLibConfig;
      throw invalid_argument(err);
    }
    auto faces_config = nlohmann::json::parse(faces_config_file);
    vector<string> face_lib_labels =
        faces_config["names"].get<vector<string>>();
    const string features_path = faces_config["features"].get<string>();
    ifstream features_bin(features_path);
    if (!features_bin.is_open()) {
      const string err = "cannot open facelib bin file: " + features_path;
      throw invalid_argument(err);
    }

    // 1. 加载模型
    CHECK_ERR(lynCosineInit());

    ModelInfo faceDetectModelInfo;
    faceDetectModelInfo.LoadModelByPath(
        channelInfo.faceDetectModelPath.c_str());
    ModelInfo faceFeatureModelInfo;
    faceFeatureModelInfo.LoadModelByPath(
        channelInfo.faceFeatureModelPath.c_str());

    // 读取人脸库信息
    uint8_t *face_lib_features_host =
        new uint8_t[faceFeatureModelInfo.outputSize * face_lib_labels.size()];
    void *face_lib_features = nullptr;
    CHECK_ERR(
        lynMalloc((void **)&face_lib_features,
                  faceFeatureModelInfo.outputSize * face_lib_labels.size()));
    features_bin.read((char *)face_lib_features_host,
                      faceFeatureModelInfo.outputSize * face_lib_labels.size());
    CHECK_ERR(
        lynMemcpy(face_lib_features, face_lib_features_host,
                  faceFeatureModelInfo.outputSize * face_lib_labels.size(),
                  ClientToServer));
    delete[] face_lib_features_host;

    // 2. 根据传入的ShowType，生成输出文件路径，并删除上次运行输出文件
    std::string outPath = channelInfo.outputPath;
    if (argShowType == ShowType::SaveFile) {
      outPath += ".264";
      std::remove(outPath.c_str());
    } else if (argShowType == ShowType::OnlyApu) {
      outPath += "_BoxInfo.json";
      std::remove(outPath.c_str());
    }

    // 3. 初始化视频解码类，并获取输出信息
    VideoDecoder videoDecoder;
    videoDecoder.Init(channelInfo.inputPath, channelInfo.inputType,
                      channelInfo.maxFps);
    lynVdecOutInfo_t vdecOutInfo;
    videoDecoder.GetVdecOutInfo(&vdecOutInfo);
    m_videoInfo = vdecOutInfo;
    uint32_t nVdecOutSize = vdecOutInfo.predictBufSize;

    // 4. 创建 stream 与 event
    lynStream_t ipeFaceDetectStream = nullptr;
    lynStream_t apuFaceDetectSendStream = nullptr;
    lynStream_t apuFaceDetectRecvStream = nullptr;
    lynStream_t postFaceDetectStream = nullptr;
    CHECK_ERR(lynCreateStream(&ipeFaceDetectStream));
    CHECK_ERR(lynCreateStream(&apuFaceDetectSendStream));
    CHECK_ERR(lynCreateStream(&apuFaceDetectRecvStream));
    CHECK_ERR(lynCreateStream(&postFaceDetectStream));

    lynEvent_t ipeFaceDetectEvent = nullptr;
    lynEvent_t apuFaceDetectEvent = nullptr;
    CHECK_ERR(lynCreateEvent(&ipeFaceDetectEvent));
    CHECK_ERR(lynCreateEvent(&apuFaceDetectEvent));

    lynStream_t ipeFaceFeatureStream = nullptr;
    lynStream_t apuFaceFeatureSendStream = nullptr;
    lynStream_t apuFaceFeatureRecvStream = nullptr;
    lynStream_t postFaceFeatureStream = nullptr;
    CHECK_ERR(lynCreateStream(&ipeFaceFeatureStream));
    CHECK_ERR(lynCreateStream(&apuFaceFeatureSendStream));
    CHECK_ERR(lynCreateStream(&apuFaceFeatureRecvStream));
    CHECK_ERR(lynCreateStream(&postFaceFeatureStream));

    lynEvent_t ipeFaceFeatureEvent = nullptr;
    lynEvent_t apuFaceFeatureEvent = nullptr;
    CHECK_ERR(lynCreateEvent(&ipeFaceFeatureEvent));
    CHECK_ERR(lynCreateEvent(&apuFaceFeatureEvent));

    lynStream_t osdStream = nullptr;
    CHECK_ERR(lynCreateStream(&osdStream));

    // 5. 创建各个资源池，避免重复申请与释放
    void *apuFaceDetectBuffer;
    BufferPool apuFaceDetectBufferPool(
        faceDetectModelInfo.outputSize * faceDetectModelInfo.batchSize, 5);
    BatchMem *pIpeFaceDetectBuf = nullptr;
    BatchMemPool oIpeFaceDetectBufPool(false, faceDetectModelInfo.inputSize,
                                       faceDetectModelInfo.batchSize, 5);
    BlockQueue<lynFrame_t *> blockQueue(5);
    m_framePool = new FramePool(nVdecOutSize, 5);
    FramePool vencRecvFramePool(nVdecOutSize, 5);

    // 6. 开启解码线程
    videoDecoder.Start(*channelInfo.context, std::ref(blockQueue),
                       std::ref(*m_framePool));

    // 7. 加载 Plugin, 并设置后处理参数
    lynPlugin_t faceDetectPostPlugin;
    CHECK_ERR(lynPluginRegister(&faceDetectPostPlugin,
                                channelInfo.faceDetectPostPluginPath.c_str()));

    lynPlugin_t osdPlugin;
    if (argShowType != OnlyApu) {
      CHECK_ERR(lynPluginRegister(&osdPlugin, argOsdPluginPath.c_str()));
    }

    FacePostProcessInfo_t post_info;
    memset(&post_info, 0, sizeof(FacePostProcessInfo_t));
    post_info.score_threshold = 0.25;
    post_info.nms_threshold = 0.45;
    post_info.width = faceDetectModelInfo.width;
    post_info.height = faceDetectModelInfo.height;
    post_info.ori_width = vdecOutInfo.width;
    post_info.ori_height = vdecOutInfo.height;
    FaceDetectInfo *pDevDetectInfo;
    CHECK_ERR(lynMalloc((void **)&pDevDetectInfo, sizeof(FaceDetectInfo)));
    post_info.detectInfo = (FaceDetectInfo *)pDevDetectInfo;

    // 8. 初始化 IPE 处理类
    IpeParamScrfd ipeScrfd(faceDetectModelInfo.width,
                           faceDetectModelInfo.height);
    ipeScrfd.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                        LYN_PIX_FMT_NV12);

    // 9. 根据传入的 ShowType 决定是使用显示窗口类, 视频编码类，BoxInfo 编码类
    VideoEncoder *videoEncoder = nullptr;
    BoxInfoEncode *boxInfoEncode = nullptr;
    if (argShowType == ShowType::SaveFile) {
      videoEncoder = new VideoEncoder;
      videoEncoder->Init(vdecOutInfo, outPath, m_framePool, &vencRecvFramePool);
    } else if (argShowType == ShowType::DirectShow) {
      // OpencvWindowManager::GetInstance()->SetWindowSize(strWindowName,
      // vdecOutInfo.height,
      //                                                   vdecOutInfo.width);
    } else if (argShowType == ShowType::OnlyApu) {
      boxInfoEncode = new BoxInfoEncode;
      boxInfoEncode->Init(outPath);
    }

    lynBoxesInfo *pDevBoxesInfo;
    CHECK_ERR(lynMalloc((void **)&pDevBoxesInfo, sizeof(lynBoxesInfo)));
    CHECK_ERR(lynMemset(pDevBoxesInfo, 0, sizeof(lynBoxesInfo)));

    bool bEos = false;
    int iBatchIndex = 0;
    bool resetEncoder = false;
    m_statsInfo->StartPrint();
    while (!bEos) {
      // 10. 取出一个解码 Frame
      lynFrame_t *pFrameTmp;
      int ret = blockQueue.take(pFrameTmp);
      if (!ret) {
        continue;
      }

      bEos = pFrameTmp->eos;

      if (!bEos) {
        m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_DECODE_TIME, 1,
                                 (uint64_t)pFrameTmp->userPtr);
      }
      //分辨率发生变化，更新ipe参数
      if (bEos && videoDecoder.m_resolutionchange) {
        if (argShowType == ShowType::SaveFile) {
          videoEncoder->EncodeImage(pFrameTmp);
          videoEncoder->UnInit();
          resetEncoder = true;
        } else {
          m_framePool->Push(pFrameTmp);
        }
        bEos = false;
        videoDecoder.m_resolutionchange = false;
        continue;  //
      }

      videoDecoder.GetVdecOutInfo(&vdecOutInfo);
      ipeScrfd.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                          LYN_PIX_FMT_NV12);
      m_videoInfo = vdecOutInfo;

      if (resetEncoder) {
        vencRecvFramePool.ResetSize(vdecOutInfo.predictBufSize);
        videoEncoder->Init(vdecOutInfo, outPath, m_framePool,
                           &vencRecvFramePool);
        resetEncoder = false;
      }

      if (iBatchIndex == 0) {
        pIpeFaceDetectBuf = oIpeFaceDetectBufPool.GetBatchMem();
      }

      // 11. IPE 处理
      lynEvent_t ipeFaceDetectBeginEvt =
          getDeviceTimePoint(ipeFaceDetectStream);
      ipeScrfd.CalcParam(ipeFaceDetectStream, pFrameTmp->data,
                         pIpeFaceDetectBuf->GetElement());
      lynEvent_t ipeFaceDetectEndEvt = getDeviceTimePoint(ipeFaceDetectStream);

      auto ipeFaceDetectCallback = [this, ipeFaceDetectBeginEvt,
                                    ipeFaceDetectEndEvt]() {
        float elapsedTime =
            getEventElapsedTime(ipeFaceDetectBeginEvt, ipeFaceDetectEndEvt);
        m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_DETECT_PREPROCESS_TIME,
                                 1, static_cast<uint64_t>(elapsedTime * 1000));
        CHECK_ERR(lynDestroyEvent(ipeFaceDetectBeginEvt));
        CHECK_ERR(lynDestroyEvent(ipeFaceDetectEndEvt));
      };
      SetCallback(ipeFaceDetectStream, ipeFaceDetectCallback);

      ++iBatchIndex;

      // 12. IPE输出满了或是最后一个包，进行apu处理
      if (iBatchIndex == faceDetectModelInfo.batchSize || bEos) {
        CHECK_ERR(lynRecordEvent(ipeFaceDetectStream, ipeFaceDetectEvent));
        CHECK_ERR(
            lynStreamWaitEvent(apuFaceDetectSendStream, ipeFaceDetectEvent));
        apuFaceDetectBuffer = apuFaceDetectBufferPool.Pop();
        // 13. 调用 APU 推理接口
        CHECK_ERR(lynModelSendInputAsync(
            apuFaceDetectSendStream, faceDetectModelInfo.model,
            pIpeFaceDetectBuf->Buffer(), apuFaceDetectBuffer,
            faceDetectModelInfo.batchSize));

        lynEvent_t apuFaceDetectBeginEvt =
            getDeviceTimePoint(apuFaceDetectRecvStream);
        CHECK_ERR(lynModelRecvOutputAsync(apuFaceDetectRecvStream,
                                          faceDetectModelInfo.model));
        lynEvent_t apuFaceDetectEndEvt =
            getDeviceTimePoint(apuFaceDetectRecvStream);

        auto inferFaceDetectCallback = [this, apuFaceDetectBeginEvt,
                                        apuFaceDetectEndEvt, iBatchIndex]() {
          float elapsedTime =
              getEventElapsedTime(apuFaceDetectBeginEvt, apuFaceDetectEndEvt);
          m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_DETECT_INFER_TIME,
                                   iBatchIndex,
                                   static_cast<uint64_t>(elapsedTime * 1000));
          CHECK_ERR(lynDestroyEvent(apuFaceDetectBeginEvt));
          CHECK_ERR(lynDestroyEvent(apuFaceDetectEndEvt));
        };
        SetCallback(apuFaceDetectRecvStream, inferFaceDetectCallback);

        // 14. 在 APU 推理完之后，调用 Plugin 进行后处理
        CHECK_ERR(lynRecordEvent(apuFaceDetectRecvStream, apuFaceDetectEvent));
        CHECK_ERR(lynStreamWaitEvent(postFaceDetectStream, apuFaceDetectEvent));

        post_info.output_tensor = apuFaceDetectBuffer;
        lynEvent_t postFaceDetectBeginEvt =
            getDeviceTimePoint(postFaceDetectStream);
        CHECK_ERR(lynPluginRunAsync(postFaceDetectStream, faceDetectPostPlugin,
                                    "lynFacePostProcess", &post_info,
                                    sizeof(post_info)));
        lynEvent_t postFaceDetectEndEvt =
            getDeviceTimePoint(postFaceDetectStream);

        auto postFaceDetectCallback =
            [pIpeFaceDetectBuf, &oIpeFaceDetectBufPool, apuFaceDetectBuffer,
             &apuFaceDetectBufferPool, this, postFaceDetectBeginEvt,
             postFaceDetectEndEvt]() {
              float elapsedTime = getEventElapsedTime(postFaceDetectBeginEvt,
                                                      postFaceDetectEndEvt);
              m_statsInfo->UpdateStats(
                  FACE_RECOGNITION_STATS_DETECT_POSTPROCESS_TIME, 1,
                  static_cast<uint64_t>(elapsedTime * 1000));
              apuFaceDetectBufferPool.Push(apuFaceDetectBuffer);
              oIpeFaceDetectBufPool.PutBatchMem(pIpeFaceDetectBuf);
              CHECK_ERR(lynDestroyEvent(postFaceDetectBeginEvt));
              CHECK_ERR(lynDestroyEvent(postFaceDetectEndEvt));
            };
        SetCallback(postFaceDetectStream, postFaceDetectCallback);

        FaceDetectInfo *pHostDetectInfo = new FaceDetectInfo;
        CHECK_ERR(lynMemcpyAsync(postFaceDetectStream, pHostDetectInfo,
                                 pDevDetectInfo, sizeof(FaceDetectInfo),
                                 ServerToClient));

        CHECK_ERR(lynSynchronizeStream(postFaceDetectStream));

        lynBoxesInfo *pHostBoxesInfo = new lynBoxesInfo;
        memset(pHostBoxesInfo, 0, sizeof(lynBoxesInfo));

        // 分析人脸特征
        void *pDevFeatureApuBuf = nullptr;
        vector<int> faceIndexVec;
        if (pHostDetectInfo->boxesNum) {
          CHECK_ERR(lynMalloc(
              (void **)&pDevFeatureApuBuf,
              faceFeatureModelInfo.outputSize * pHostDetectInfo->boxesNum));
        }
        pHostBoxesInfo->boxesNum = pHostDetectInfo->boxesNum;

        for (int faceIndex = 0; faceIndex < pHostDetectInfo->boxesNum;
             faceIndex++) {
          const auto box = pHostDetectInfo->boxes[faceIndex];
          auto faceRect =
              ExtendFaceBox(box, vdecOutInfo.width, vdecOutInfo.height);
          pHostBoxesInfo->boxes[faceIndex].xmin = faceRect.x;
          pHostBoxesInfo->boxes[faceIndex].ymin = faceRect.y;
          pHostBoxesInfo->boxes[faceIndex].xmax = faceRect.x + faceRect.w;
          pHostBoxesInfo->boxes[faceIndex].ymax = faceRect.y + faceRect.h;
          strcpy(pHostBoxesInfo->boxes[faceIndex].label, "unknown");

          if (CheckFaceBox(box)) {
            lynEvent_t ipeFaceFeatureBeginEvt =
                getDeviceTimePoint(ipeFaceFeatureStream);
            // 人脸裁剪
            IpeParamCrop *ipeCrop = new IpeParamCrop(faceRect.x, faceRect.y,
                                                     faceRect.w, faceRect.h);
            uint8_t *pFaceCropIpeBufOut = nullptr;
            CHECK_ERR(lynMalloc((void **)&pFaceCropIpeBufOut,
                                faceRect.w * faceRect.h * 3));
            ipeCrop->SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                                LYN_PIX_FMT_NV12);
            ipeCrop->CalcParam(ipeFaceFeatureStream, pFrameTmp->data,
                               pFaceCropIpeBufOut);

            // 特征模型预处理
            float transformMat[2][3];
            FaceAttributes::FeatureData::getTransformMat(box.landmark, faceRect,
                                                         transformMat);

            float (*pMat)[3];
            pMat = new float[2][3];
            memcpy(pMat, transformMat, sizeof(transformMat));
            IpeParamAffine *ipeAffine = new IpeParamAffine(
                faceFeatureModelInfo.width, faceFeatureModelInfo.height,
                pMat, 0, 0, 0);
            uint8_t *pFeatureIpeBuf = nullptr;
            CHECK_ERR(lynMalloc((void **)&pFeatureIpeBuf,
                                faceFeatureModelInfo.inputSize));
            ipeAffine->SetImgInfo(faceRect.w, faceRect.h, LYN_PIX_FMT_RGB24);
            try {
              ipeAffine->CalcParam(ipeFaceFeatureStream, pFaceCropIpeBufOut,
                                   pFeatureIpeBuf);
            } catch (const std::runtime_error &e) {
              cout << "cannot affine face" << endl;
              CHECK_ERR(lynDestroyEvent(ipeFaceFeatureBeginEvt));
              CHECK_ERR(lynFree(pFaceCropIpeBufOut));
              CHECK_ERR(lynFree(pFeatureIpeBuf));
              delete ipeCrop;
              delete ipeAffine;
              delete[] pMat;
              continue;
            }
            lynEvent_t ipeFaceFeatureEndEvt =
                getDeviceTimePoint(ipeFaceFeatureStream);
            auto ipeFaceFeatureCallback = [this, ipeCrop, pFaceCropIpeBufOut,
                                           ipeAffine, pMat, ipeFaceFeatureBeginEvt,
                                           ipeFaceFeatureEndEvt]() {
              float elapsedTime = getEventElapsedTime(ipeFaceFeatureBeginEvt,
                                                      ipeFaceFeatureEndEvt);
              m_statsInfo->UpdateStats(
                  FACE_RECOGNITION_STATS_FEATURE_PREPROCESS_TIME, 1,
                  static_cast<uint64_t>(elapsedTime * 1000));
              CHECK_ERR(lynDestroyEvent(ipeFaceFeatureBeginEvt));
              CHECK_ERR(lynDestroyEvent(ipeFaceFeatureEndEvt));
              CHECK_ERR(lynFree(pFaceCropIpeBufOut));
              delete ipeCrop;
              delete ipeAffine;
              delete[] pMat;
            };
            SetCallback(ipeFaceFeatureStream, ipeFaceFeatureCallback);

            CHECK_ERR(
                lynRecordEvent(ipeFaceFeatureStream, ipeFaceFeatureEvent));
            CHECK_ERR(lynStreamWaitEvent(apuFaceFeatureSendStream,
                                         ipeFaceFeatureEvent));

            CHECK_ERR(lynModelSendInputAsync(
                apuFaceFeatureSendStream, faceFeatureModelInfo.model,
                pFeatureIpeBuf,
                (uint8_t *)pDevFeatureApuBuf +
                    faceFeatureModelInfo.outputSize * faceIndexVec.size(),
                1));

            lynEvent_t apuFaceFeatureBeginEvt =
                getDeviceTimePoint(apuFaceFeatureRecvStream);
            CHECK_ERR(lynModelRecvOutputAsync(apuFaceFeatureRecvStream,
                                              faceFeatureModelInfo.model));
            lynEvent_t apuFaceFeatureEndEvt =
                getDeviceTimePoint(apuFaceFeatureRecvStream);

            faceIndexVec.push_back(faceIndex);
            SetCallback(apuFaceFeatureRecvStream,
                        [this, pFeatureIpeBuf, apuFaceFeatureBeginEvt,
                         apuFaceFeatureEndEvt] {
                          float elapsedTime = getEventElapsedTime(
                              apuFaceFeatureBeginEvt, apuFaceFeatureEndEvt);
                          m_statsInfo->UpdateStats(
                              FACE_RECOGNITION_STATS_FEATURE_INFER_TIME, 1,
                              static_cast<uint64_t>(elapsedTime * 1000));
                          CHECK_ERR(lynDestroyEvent(apuFaceFeatureBeginEvt));
                          CHECK_ERR(lynDestroyEvent(apuFaceFeatureEndEvt));

                          CHECK_ERR(lynFree(pFeatureIpeBuf));
                        });
          }
        }

        // Record an event on the feature-inference receive stream and make the
        // feature post-process stream wait on it, so the comparison below only
        // runs after the APU has produced the feature output.
        CHECK_ERR(
            lynRecordEvent(apuFaceFeatureRecvStream, apuFaceFeatureEvent));
        CHECK_ERR(
            lynStreamWaitEvent(postFaceFeatureStream, apuFaceFeatureEvent));

        // Face comparison: match the extracted features against the face
        // library and label each detected box with the best match.
        lynEvent_t postFaceFeatureBeginEvt =
            getDeviceTimePoint(postFaceFeatureStream);
        if (faceIndexVec.size() > 0) {
          // Device buffer for the cosine-match result. Layout (established by
          // the reads in the callback below): faceIndexVec.size() fp16 scores
          // followed by faceIndexVec.size() float best-match indices.
          void *pDevCosineBuf;
          CHECK_ERR(lynMalloc(
              (void **)&pDevCosineBuf,
              faceIndexVec.size() * (sizeof(int16_t) + sizeof(float))));
          CHECK_ERR(lynExecuteCosineAsync(
              postFaceFeatureStream, LYN_TRANS, faceIndexVec.size(),
              face_lib_labels.size(), 1, pDevFeatureApuBuf, DT_FLOAT16,
              face_lib_features, DT_FLOAT16, pDevCosineBuf));
          uint8_t *pHostCosineBuf =
              new uint8_t[faceIndexVec.size() *
                          (sizeof(int16_t) + sizeof(float))];
          CHECK_ERR(lynMemcpyAsync(
              postFaceFeatureStream, pHostCosineBuf, pDevCosineBuf,
              faceIndexVec.size() * (sizeof(int16_t) + sizeof(float)),
              ServerToClient));
          // Runs after the async cosine + device-to-host copy complete:
          // writes labels into pHostBoxesInfo, then releases the buffers
          // captured by this lambda (owned by this callback from here on).
          SetCallback(postFaceFeatureStream, [pDevCosineBuf, pHostCosineBuf,
                                              faceIndexVec, pHostDetectInfo,
                                              pDevFeatureApuBuf, pHostBoxesInfo,
                                              face_lib_labels] {
            int faceCount = faceIndexVec.size();
            for (int i = 0; i < faceCount; i++) {
              auto face_index = faceIndexVec[i];
              // Best-match library index for face i, stored as a float in the
              // second half of the result buffer (after faceCount fp16 scores).
              auto idx =
                  *(float *)(pHostCosineBuf + faceCount * sizeof(int16_t) +
                             i * sizeof(float));
              // fp16 similarity score for face i, converted to float.
              auto score = COMMON::half2float(
                  *(int16_t *)(pHostCosineBuf + i * sizeof(int16_t)));
              // NOTE(review): idx is a float used directly as a container
              // index (implicit float->integer truncation in the [] below) —
              // prefer an explicit cast. The threshold 5 is a magic number
              // whose scale comes from lynExecuteCosineAsync — confirm its
              // units against the SDK documentation.
              if (idx >= 0 && idx < face_lib_labels.size()) {
                if (score > 5) {
                  auto name = face_lib_labels[idx];
                  // NOTE(review): unbounded strcpy into a fixed-size label
                  // field — overflows if a library label exceeds the field
                  // size; verify label length limits (or use strncpy/snprintf).
                  strcpy(pHostBoxesInfo->boxes[face_index].label, name.c_str());
                }
              }
            }
            CHECK_ERR(lynFree(pDevCosineBuf));
            CHECK_ERR(lynFree(pDevFeatureApuBuf));
            delete[] pHostCosineBuf;
            delete pHostDetectInfo;
          });
        } else {
          // No faces in this batch: only the device feature buffer is freed.
          // NOTE(review): pHostDetectInfo is deleted only in the branch above;
          // if it is also allocated when no faces were detected, this path
          // leaks it — confirm against the allocation site (not visible here).
          CHECK_ERR(lynFree(pDevFeatureApuBuf));
        }
        lynEvent_t postFaceFeatureEndEvt =
            getDeviceTimePoint(postFaceFeatureStream);
        // Final per-frame callback: record post-process stats, optionally draw
        // boxes on the frame, then dispatch according to the ShowType.
        SetCallback(postFaceFeatureStream, [osdStream, osdPlugin, vdecOutInfo,
                                            pHostBoxesInfo, pDevBoxesInfo,
                                            pFrameTmp, bEos, videoEncoder,
                                            boxInfoEncode, this,
                                            postFaceFeatureBeginEvt,
                                            postFaceFeatureEndEvt] {
          // Duration of the feature post-processing stage, measured between
          // the two device time points recorded around the comparison above.
          float elapsedTime = getEventElapsedTime(postFaceFeatureBeginEvt,
                                                  postFaceFeatureEndEvt);
          m_statsInfo->UpdateStats(
              FACE_RECOGNITION_STATS_FEATURE_POSTPROCESS_TIME, 1,
              static_cast<uint64_t>(elapsedTime * 1000));
          m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_FRAME_RATE, 1, 0);
          CHECK_ERR(lynDestroyEvent(postFaceFeatureBeginEvt));
          CHECK_ERR(lynDestroyEvent(postFaceFeatureEndEvt));

          // When OnlyApu is selected, skip drawing boxes on the frame.
          if (argShowType != ShowType::OnlyApu) {
            // Upload the (now labeled) box info to the device and draw boxes
            // and text onto the decoded NV12 frame via the OSD plugin.
            CHECK_ERR(lynMemcpyAsync(osdStream, pDevBoxesInfo, pHostBoxesInfo,
                                     sizeof(lynBoxesInfo), ClientToServer));
            lynDrawBoxAndTextPara para;
            para.imgData = pFrameTmp->data;
            para.imgFmt = LYN_PIX_FMT_NV12;
            para.imgW = vdecOutInfo.width;
            para.imgH = vdecOutInfo.height;
            para.boxesInfo = pDevBoxesInfo;
            para.boxColor = DRAW_COLOR_BLUE;
            para.boxThick = DRAW_THICK_2;
            para.fontSize = FONT_SIZE_24;
            para.fontColor = DRAW_COLOR_BLUE;
            CHECK_ERR(lynPluginRunAsync(osdStream, osdPlugin,
                                        "lynDrawBoxAndText", &para,
                                        sizeof(para)));
          }
          // 15. Based on the configured ShowType, either encode the frame to
          // video, queue it for direct display, or emit box info as JSON.
          // pHostBoxesInfo ownership passes to whichever callback runs here.
          if (argShowType == ShowType::SaveFile) {
            // Video-encoding path.
            pFrameTmp->eos = bEos;
            videoEncoder->WaitForStream(osdStream);
            videoEncoder->EncodeImage(pFrameTmp);
            auto freeCallback = [pHostBoxesInfo]() { delete pHostBoxesInfo; };
            SetCallback(osdStream, freeCallback);
          } else if (argShowType == ShowType::DirectShow) {
            auto showCallback = [this, pFrameTmp, pHostBoxesInfo]() {
              m_queue.put(pFrameTmp);
              delete pHostBoxesInfo;
            };
            SetCallback(osdStream, showCallback);
          } else if (argShowType == ShowType::OnlyApu) {
            // APU-only path: serialize box info to JSON and recycle the frame.
            auto saveCallback = [this, pFrameTmp, pHostBoxesInfo,
                                 boxInfoEncode]() {
              boxInfoEncode->EncodeToJson(*pHostBoxesInfo, pFrameTmp->eos);
              delete pHostBoxesInfo;
              m_framePool->Push(pFrameTmp);
            };
            SetCallback(osdStream, saveCallback);
          }
        });

        iBatchIndex = 0;
      }
    }

    m_statsInfo->StopPrint();
    // 16. Wait for all in-flight work on every stream to finish, then destroy
    // the streams, events, plugins and device buffers in that order.
    CHECK_ERR(lynSynchronizeStream(ipeFaceDetectStream));
    CHECK_ERR(lynSynchronizeStream(apuFaceDetectSendStream));
    CHECK_ERR(lynSynchronizeStream(apuFaceDetectRecvStream));
    CHECK_ERR(lynSynchronizeStream(postFaceDetectStream));
    CHECK_ERR(lynSynchronizeStream(ipeFaceFeatureStream));
    CHECK_ERR(lynSynchronizeStream(apuFaceFeatureSendStream));
    CHECK_ERR(lynSynchronizeStream(apuFaceFeatureRecvStream));
    CHECK_ERR(lynSynchronizeStream(postFaceFeatureStream));
    CHECK_ERR(lynDestroyStream(ipeFaceDetectStream));
    CHECK_ERR(lynDestroyStream(apuFaceDetectSendStream));
    CHECK_ERR(lynDestroyStream(apuFaceDetectRecvStream));
    CHECK_ERR(lynDestroyStream(postFaceDetectStream));
    CHECK_ERR(lynDestroyStream(ipeFaceFeatureStream));
    CHECK_ERR(lynDestroyStream(apuFaceFeatureSendStream));
    CHECK_ERR(lynDestroyStream(apuFaceFeatureRecvStream));
    CHECK_ERR(lynDestroyStream(postFaceFeatureStream));
    CHECK_ERR(lynDestroyEvent(ipeFaceDetectEvent));
    CHECK_ERR(lynDestroyEvent(apuFaceDetectEvent));
    CHECK_ERR(lynDestroyEvent(ipeFaceFeatureEvent));
    CHECK_ERR(lynDestroyEvent(apuFaceFeatureEvent));
    CHECK_ERR(lynPluginUnregister(faceDetectPostPlugin));  // Unregister plugin
    // NOTE(review): unqualified `OnlyApu` here vs `ShowType::OnlyApu`
    // everywhere else in this file — this compiles only if ShowType is an
    // unscoped enum; prefer the qualified form for consistency.
    if (argShowType != OnlyApu) {
      CHECK_ERR(lynPluginUnregister(osdPlugin));             // Unregister plugin
    }
    CHECK_ERR(lynFree(pDevBoxesInfo));
    CHECK_ERR(lynFree(pDevDetectInfo));
    CHECK_ERR(lynFree(face_lib_features));

    // Per-ShowType teardown of the output sink.
    if (argShowType == ShowType::DirectShow) {
      // OpencvWindowManager::GetInstance()->Close(strWindowName);
    } else if (argShowType == ShowType::SaveFile) {
      videoEncoder->UnInit();
      delete videoEncoder;
      videoEncoder = nullptr;
    } else if (argShowType == ShowType::OnlyApu) {
      boxInfoEncode->UnInit();
      delete boxInfoEncode;
      boxInfoEncode = nullptr;
    }
    // Stop and release the decoder, unload both models, and shut down the
    // cosine (BLAS) module used for face comparison.
    videoDecoder.Stop();
    videoDecoder.UnInit();
    faceDetectModelInfo.UnLoadModel();
    faceFeatureModelInfo.UnLoadModel();
    CHECK_ERR(lynCosineUninit());
  }
};
