/**
 * @file FaceDetectionChannel.hpp
 * @author SDK_TEAM
 * @brief
 * @version 0.1
 * @date 2022-12-01
 *
 * Copyright:
 * © 2018 北京灵汐科技有限公司 版权所有。
 * 注意：以下内容均为北京灵汐科技有限公司原创，未经本公司允许，不得转载，否则将视为侵权；对于不遵守此声明或者其他违法使用以下内容者，本公司依法保留追究权。
 * © 2018 Lynxi Technologies Co., Ltd. All rights reserved.
 * NOTICE: All information contained here is, and remains the property of Lynxi.
 * This file can not be copied or distributed without the permission of Lynxi
 * Technologies Co., Ltd.
 *
 */

#pragma once

#include <lyn_api.h>
#include <lyn_blas.h>
#include <lyn_plugin.h>
#include <unistd.h>
#include <iostream>
#include "batchMem.hpp"
#include "blockQueue.hpp"
#include "boxInfoEncode.h"
#include "bufferPool.hpp"
#include "faceDetectionParams.hpp"
#include "face_post_process.h"
#include "framePool.hpp"
#include "ipeParamEx.h"
#include "nlohmann/json.hpp"
#include "opencvWindow.hpp"
#include "osd_plugin.h"
#include "statsInfo.h"
#include "sys/time.h"
#include "typeConv.hpp"
#include "util.hpp"
#include "videoDecoder.h"
#include "videoEncoder.h"

// Statistics item IDs.
// Used as keys into the per-channel StatsInfo map built in
// FaceDetectionChannel::Init and updated from ThreadFunc.
#define FACE_RECOGNITION_STATS_FRAME_RATE 0               // end-to-end frames per second
#define FACE_RECOGNITION_STATS_DECODE_TIME 1              // decode cost (ms)
#define FACE_RECOGNITION_STATS_DETECT_PREPROCESS_TIME 2   // IPE preprocess cost (ms)
#define FACE_RECOGNITION_STATS_DETECT_INFER_TIME 3        // APU inference cost (ms)
#define FACE_RECOGNITION_STATS_DETECT_POSTPROCESS_TIME 4  // post-process plugin cost (ms)

// Per-device context: bundles the resources associated with one device.
// Plain struct instead of C-style `typedef struct`; members are
// value-initialized so a default-constructed instance is in a known state.
struct DEVICE_CONTEXT_T {
  lynContext_t ctx{};                // context handle
  const char *pModelPath = nullptr;  // path to the model file for this device
};

// Configuration handed to FaceDetectionChannel::Init. It is copied by value
// into the worker thread, so it only needs to outlive the Init call.
// Non-class members carry default initializers so an instance is never read
// with indeterminate values.
struct FaceDetectionChannelInfo {
  std::string inputPath;                 // video / stream source path
  std::string outputPath;                // output base path (suffix appended per ShowType)
  std::string channelName;               // channel tag used for stats and logs
  std::string faceDetectModelPath;       // APU face-detection model file
  std::string faceDetectPostPluginPath;  // post-process plugin library path
  int deviceID = -1;                     // -1 = not assigned yet
  lynContext_t *context = nullptr;       // device context the channel runs on (not owned)
  InputType inputType{};                 // decoder input type; passed to VideoDecoder::Init
  int maxFps = 0;                        // frame-rate cap passed to VideoDecoder::Init
                                         // (semantics of 0 defined by VideoDecoder — TODO confirm)
};

// One face-detection pipeline channel:
//   decode -> IPE preprocess -> batched APU inference -> post-process plugin
//   -> optional OSD box drawing -> encode to file / display queue / JSON dump.
// Init() launches a worker thread running ThreadFunc; Close() joins it.
// Not copyable in practice (owns raw thread/pool pointers) — do not copy.
struct FaceDetectionChannel {
  std::string m_path = "";
  VideoDecoder m_decoder;
  BlockQueue<lynFrame_t *> m_queue;   // queue dedicated to the display path
  FramePool *m_framePool = nullptr;   // frame pool shared by the three show modes;
                                      // allocated inside ThreadFunc
  lynContext_t *m_context = nullptr;  // device context (not owned), set in Init
  lynVdecOutInfo_t m_videoInfo;       // latest decoder output info
                                      // NOTE(review): written by the worker thread and read
                                      // via getVideoInfo without synchronization — confirm callers tolerate this
  std::thread *m_thread = nullptr;    // worker thread created in Init, joined in Close
  std::string m_channelName;          // channel tag used for stats and logs
  StatsInfo *m_statsInfo;             // per-channel statistics printer (owned)

  FaceDetectionChannel() : m_queue(5), m_statsInfo(nullptr) {}

  ~FaceDetectionChannel() {
    // m_framePool holds device-side buffers, so the owning context must be
    // made current before the pool is destroyed. m_framePool stays nullptr
    // until ThreadFunc allocates it, so this check is safe even when Init
    // was never called.
    if (m_framePool) {
      CHECK_ERR(lynSetCurrentContext(*m_context));
      delete m_framePool;
    }
  }

  // Stores the channel configuration, builds the stats table and launches the
  // worker thread. Always returns true; thread construction throws on failure.
  bool Init(FaceDetectionChannelInfo &channelInfo) {
    m_channelName = channelInfo.channelName;
    m_context = channelInfo.context;
    map<uint32_t, std::string> face_recognition_Stats;
    face_recognition_Stats[FACE_RECOGNITION_STATS_FRAME_RATE] =
        "frame rate(fps)";
    // Per-stage timings are only collected when stats printing is enabled.
    if (argPrintStats != 0) {
      face_recognition_Stats[FACE_RECOGNITION_STATS_DECODE_TIME] =
          "decode cost time(ms)";
      face_recognition_Stats[FACE_RECOGNITION_STATS_DETECT_PREPROCESS_TIME] =
          "detect preprocess cost time(ms)";
      face_recognition_Stats[FACE_RECOGNITION_STATS_DETECT_INFER_TIME] =
          "detect infer cost time(ms)";
      face_recognition_Stats[FACE_RECOGNITION_STATS_DETECT_POSTPROCESS_TIME] =
          "detect postprocess cost time(ms)";
    }
    m_statsInfo = new StatsInfo(m_channelName, face_recognition_Stats);
    // channelInfo is passed by value into the thread so it may go out of
    // scope in the caller immediately after Init returns.
    m_thread =
        new std::thread(&FaceDetectionChannel::ThreadFunc, this, channelInfo);
    return true;
  }

  // Joins the worker thread and releases the stats printer. Safe to call
  // more than once: both pointers are nulled after deletion.
  void Close() {
    if (m_thread != nullptr) {
      m_thread->join();
      delete m_thread;
      m_thread = nullptr;
    }
    if (m_statsInfo) {
      m_statsInfo->StopPrint();
      delete m_statsInfo;
      m_statsInfo = nullptr;
    }
  }

  // Returns a display frame to the shared frame pool once the consumer is
  // done with it.
  void putFrame(lynFrame_t *frame) { m_framePool->Push(frame); }

  // Blocking fetch of an OSD-annotated frame for display.
  // Returns false when no frame arrives within `timeout` milliseconds.
  bool getFrame(lynFrame_t **frame, int timeout = 1000) {
    return m_queue.take(*frame, timeout);
  }

  // Copies out the most recent decoder output description.
  void getVideoInfo(lynVdecOutInfo_t &videoInfo) { videoInfo = m_videoInfo; }

  // Worker thread body: runs the full pipeline for this channel until the
  // decoder signals EOS. channelInfo is received by value (see Init).
  void ThreadFunc(FaceDetectionChannelInfo channelInfo) {
    CHECK_ERR(lynSetCurrentContext(*channelInfo.context));

    // 1. Load the face-detection model.
    ModelInfo faceDetectModelInfo;
    faceDetectModelInfo.LoadModelByPath(
        channelInfo.faceDetectModelPath.c_str());

    // 2. Build the output file path from the configured ShowType and delete
    //    any stale output left over from a previous run.
    std::string outPath = channelInfo.outputPath;
    if (argShowType == ShowType::SaveFile) {
      outPath += ".264";
      std::remove(outPath.c_str());
    } else if (argShowType == ShowType::OnlyApu) {
      outPath += "_BoxInfo.json";
      std::remove(outPath.c_str());
    }

    // 3. Initialize the video decoder and fetch its output description.
    VideoDecoder videoDecoder;
    videoDecoder.Init(channelInfo.inputPath, channelInfo.inputType,
                      channelInfo.maxFps);
    lynVdecOutInfo_t vdecOutInfo;
    videoDecoder.GetVdecOutInfo(&vdecOutInfo);
    m_videoInfo = vdecOutInfo;
    uint32_t nVdecOutSize = vdecOutInfo.predictBufSize;

    // 4. Create the streams and events that order the pipeline stages:
    //    IPE -> (event) -> APU send / recv -> (event) -> post-process.
    lynStream_t ipeFaceDetectStream = nullptr;
    lynStream_t apuFaceDetectSendStream = nullptr;
    lynStream_t apuFaceDetectRecvStream = nullptr;
    lynStream_t postFaceDetectStream = nullptr;
    CHECK_ERR(lynCreateStream(&ipeFaceDetectStream));
    CHECK_ERR(lynCreateStream(&apuFaceDetectSendStream));
    CHECK_ERR(lynCreateStream(&apuFaceDetectRecvStream));
    CHECK_ERR(lynCreateStream(&postFaceDetectStream));

    lynEvent_t ipeFaceDetectEvent = nullptr;
    lynEvent_t apuFaceDetectEvent = nullptr;
    CHECK_ERR(lynCreateEvent(&ipeFaceDetectEvent));
    CHECK_ERR(lynCreateEvent(&apuFaceDetectEvent));

    lynStream_t osdStream = nullptr;
    CHECK_ERR(lynCreateStream(&osdStream));

    // 5. Pre-allocate the buffer pools so the steady-state loop never
    //    allocates or frees device memory.
    void *apuFaceDetectBuffer;
    BufferPool apuFaceDetectBufferPool(
        faceDetectModelInfo.outputSize * faceDetectModelInfo.batchSize, 5);
    BatchMem *pIpeFaceDetectBuf = nullptr;
    BatchMemPool oIpeFaceDetectBufPool(false, faceDetectModelInfo.inputSize,
                                       faceDetectModelInfo.batchSize, 5);
    BlockQueue<lynFrame_t *> blockQueue(5);
    m_framePool = new FramePool(nVdecOutSize, 5);
    FramePool vencRecvFramePool(nVdecOutSize, 5);

    // 6. Start the decode thread; decoded frames arrive on blockQueue.
    videoDecoder.Start(*channelInfo.context, std::ref(blockQueue),
                       std::ref(*m_framePool));

    // 7. Register plugins and fill in the post-process parameters.
    lynPlugin_t faceDetectPostPlugin;
    CHECK_ERR(lynPluginRegister(&faceDetectPostPlugin,
                                channelInfo.faceDetectPostPluginPath.c_str()));

    // The OSD plugin is only needed when boxes are actually drawn.
    lynPlugin_t osdPlugin{};
    if (argShowType != ShowType::OnlyApu) {
      CHECK_ERR(lynPluginRegister(&osdPlugin, argOsdPluginPath.c_str()));
    }

    FacePostProcessInfo_t post_info;
    memset(&post_info, 0, sizeof(FacePostProcessInfo_t));
    post_info.score_threshold = 0.25;
    post_info.nms_threshold = 0.45;
    post_info.width = faceDetectModelInfo.width;
    post_info.height = faceDetectModelInfo.height;
    post_info.ori_width = vdecOutInfo.width;
    post_info.ori_height = vdecOutInfo.height;
    // Device-side buffer the plugin writes its detections into.
    FaceDetectInfo *pDevDetectInfo = nullptr;
    CHECK_ERR(lynMalloc((void **)&pDevDetectInfo, sizeof(FaceDetectInfo)));
    post_info.detectInfo = pDevDetectInfo;

    // 8. Initialize the IPE preprocessing helper (resize/convert to the
    //    model's input geometry).
    IpeParamScrfd ipeScrfd(faceDetectModelInfo.width,
                           faceDetectModelInfo.height);
    ipeScrfd.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                        LYN_PIX_FMT_NV12);

    // 9. Per-ShowType output sinks: video encoder, display window, or
    //    BoxInfo JSON encoder.
    VideoEncoder *videoEncoder = nullptr;
    BoxInfoEncode *boxInfoEncode = nullptr;
    if (argShowType == ShowType::SaveFile) {
      videoEncoder = new VideoEncoder;
      videoEncoder->Init(vdecOutInfo, outPath, m_framePool, &vencRecvFramePool);
    } else if (argShowType == ShowType::DirectShow) {
      // OpencvWindowManager::GetInstance()->SetWindowSize(strWindowName,
      // vdecOutInfo.height,
      //                                                   vdecOutInfo.width);
    } else if (argShowType == ShowType::OnlyApu) {
      boxInfoEncode = new BoxInfoEncode;
      boxInfoEncode->Init(outPath);
    }

    lynBoxesInfo *pDevBoxesInfo;
    CHECK_ERR(lynMalloc((void **)&pDevBoxesInfo, sizeof(lynBoxesInfo)));
    CHECK_ERR(lynMemset(pDevBoxesInfo, 0, sizeof(lynBoxesInfo)));

    bool bEos = false;
    int iBatchIndex = 0;
    bool resetEncoder = false;
    m_statsInfo->StartPrint();
    while (!bEos) {
      // 10. Take one decoded frame.
      lynFrame_t *pFrameTmp;
      bool ret = blockQueue.take(pFrameTmp);
      if (!ret) {
        continue;
      }

      bEos = pFrameTmp->eos;

      if (!bEos) {
        // The decoder stores its per-frame cost in userPtr.
        m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_DECODE_TIME, 1,
                                 (uint64_t)pFrameTmp->userPtr);
      }
      // Resolution change: flush/reset the encoder and refresh IPE params on
      // the next frame instead of treating this as a real EOS.
      if (bEos && videoDecoder.m_resolutionchange) {
        if (argShowType == ShowType::SaveFile) {
          videoEncoder->EncodeImage(pFrameTmp);
          videoEncoder->UnInit();
          resetEncoder = true;
        } else {
          m_framePool->Push(pFrameTmp);
        }
        bEos = false;
        videoDecoder.m_resolutionchange = false;
        continue;
      }

      videoDecoder.GetVdecOutInfo(&vdecOutInfo);
      ipeScrfd.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                          LYN_PIX_FMT_NV12);
      m_videoInfo = vdecOutInfo;

      if (resetEncoder) {
        vencRecvFramePool.ResetSize(vdecOutInfo.predictBufSize);
        videoEncoder->Init(vdecOutInfo, outPath, m_framePool,
                           &vencRecvFramePool);
        resetEncoder = false;
      }

      // A fresh batch buffer is fetched at the start of each batch.
      if (iBatchIndex == 0) {
        pIpeFaceDetectBuf = oIpeFaceDetectBufPool.GetBatchMem();
      }

      // 11. IPE preprocessing, bracketed by timing events.
      lynEvent_t ipeFaceDetectBeginEvt =
          getDeviceTimePoint(ipeFaceDetectStream);
      ipeScrfd.CalcParam(ipeFaceDetectStream, pFrameTmp->data,
                         pIpeFaceDetectBuf->GetElement());
      lynEvent_t ipeFaceDetectEndEvt = getDeviceTimePoint(ipeFaceDetectStream);

      auto ipeFaceDetectCallback = [this, ipeFaceDetectBeginEvt,
                                    ipeFaceDetectEndEvt]() {
        float elapsedTime =
            getEventElapsedTime(ipeFaceDetectBeginEvt, ipeFaceDetectEndEvt);
        m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_DETECT_PREPROCESS_TIME,
                                 1, static_cast<uint64_t>(elapsedTime * 1000));
        CHECK_ERR(lynDestroyEvent(ipeFaceDetectBeginEvt));
        CHECK_ERR(lynDestroyEvent(ipeFaceDetectEndEvt));
      };
      SetCallback(ipeFaceDetectStream, ipeFaceDetectCallback);

      ++iBatchIndex;

      // 12. When the batch is full (or this is the last packet), run the APU.
      // NOTE(review): only the batch's final pFrameTmp is drawn on and
      // returned downstream; confirm earlier frames of a multi-frame batch
      // are recycled elsewhere (batchSize may be 1 in practice).
      if (iBatchIndex == faceDetectModelInfo.batchSize || bEos) {
        CHECK_ERR(lynRecordEvent(ipeFaceDetectStream, ipeFaceDetectEvent));
        CHECK_ERR(
            lynStreamWaitEvent(apuFaceDetectSendStream, ipeFaceDetectEvent));
        apuFaceDetectBuffer = apuFaceDetectBufferPool.Pop();
        // 13. Submit inference: send input, then receive output, timed.
        CHECK_ERR(lynModelSendInputAsync(
            apuFaceDetectSendStream, faceDetectModelInfo.model,
            pIpeFaceDetectBuf->Buffer(), apuFaceDetectBuffer,
            faceDetectModelInfo.batchSize));

        lynEvent_t apuFaceDetectBeginEvt =
            getDeviceTimePoint(apuFaceDetectRecvStream);
        CHECK_ERR(lynModelRecvOutputAsync(apuFaceDetectRecvStream,
                                          faceDetectModelInfo.model));
        lynEvent_t apuFaceDetectEndEvt =
            getDeviceTimePoint(apuFaceDetectRecvStream);

        auto inferFaceDetectCallback = [this, apuFaceDetectBeginEvt,
                                        apuFaceDetectEndEvt, iBatchIndex]() {
          float elapsedTime =
              getEventElapsedTime(apuFaceDetectBeginEvt, apuFaceDetectEndEvt);
          m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_DETECT_INFER_TIME,
                                   iBatchIndex,
                                   static_cast<uint64_t>(elapsedTime * 1000));
          CHECK_ERR(lynDestroyEvent(apuFaceDetectBeginEvt));
          CHECK_ERR(lynDestroyEvent(apuFaceDetectEndEvt));
        };
        SetCallback(apuFaceDetectRecvStream, inferFaceDetectCallback);

        // 14. Run the post-process plugin after inference completes.
        CHECK_ERR(lynRecordEvent(apuFaceDetectRecvStream, apuFaceDetectEvent));
        CHECK_ERR(lynStreamWaitEvent(postFaceDetectStream, apuFaceDetectEvent));

        post_info.output_tensor = apuFaceDetectBuffer;
        lynEvent_t postFaceDetectBeginEvt =
            getDeviceTimePoint(postFaceDetectStream);
        CHECK_ERR(lynPluginRunAsync(postFaceDetectStream, faceDetectPostPlugin,
                                    "lynFacePostProcess", &post_info,
                                    sizeof(post_info)));
        lynEvent_t postFaceDetectEndEvt =
            getDeviceTimePoint(postFaceDetectStream);

        // Returns the batch buffers to their pools once post-processing is
        // done with them.
        auto postFaceDetectCallback =
            [pIpeFaceDetectBuf, &oIpeFaceDetectBufPool, apuFaceDetectBuffer,
             &apuFaceDetectBufferPool, this, postFaceDetectBeginEvt,
             postFaceDetectEndEvt]() {
              float elapsedTime = getEventElapsedTime(postFaceDetectBeginEvt,
                                                      postFaceDetectEndEvt);
              m_statsInfo->UpdateStats(
                  FACE_RECOGNITION_STATS_DETECT_POSTPROCESS_TIME, 1,
                  static_cast<uint64_t>(elapsedTime * 1000));
              apuFaceDetectBufferPool.Push(apuFaceDetectBuffer);
              oIpeFaceDetectBufPool.PutBatchMem(pIpeFaceDetectBuf);
              CHECK_ERR(lynDestroyEvent(postFaceDetectBeginEvt));
              CHECK_ERR(lynDestroyEvent(postFaceDetectEndEvt));
            };
        SetCallback(postFaceDetectStream, postFaceDetectCallback);

        // Copy the detections back to the host; the stream synchronize below
        // guarantees the copy has landed before pHostDetectInfo is read.
        FaceDetectInfo *pHostDetectInfo = new FaceDetectInfo;
        CHECK_ERR(lynMemcpyAsync(postFaceDetectStream, pHostDetectInfo,
                                 pDevDetectInfo, sizeof(FaceDetectInfo),
                                 ServerToClient));

        CHECK_ERR(lynSynchronizeStream(postFaceDetectStream));

        lynBoxesInfo *pHostBoxesInfo = new lynBoxesInfo;
        memset(pHostBoxesInfo, 0, sizeof(lynBoxesInfo));
        pHostBoxesInfo->boxesNum = pHostDetectInfo->boxesNum;

        for (int faceIndex = 0; faceIndex < pHostDetectInfo->boxesNum;
             faceIndex++) {
          const auto box = pHostDetectInfo->boxes[faceIndex];
          pHostBoxesInfo->boxes[faceIndex].xmin = box.xmin;
          pHostBoxesInfo->boxes[faceIndex].ymin = box.ymin;
          pHostBoxesInfo->boxes[faceIndex].xmax = box.xmax;
          pHostBoxesInfo->boxes[faceIndex].ymax = box.ymax;
        }
        // Fully consumed above; free it here to avoid a per-batch leak.
        delete pHostDetectInfo;

        m_statsInfo->UpdateStats(FACE_RECOGNITION_STATS_FRAME_RATE, 1, 0);

        // OnlyApu skips box drawing entirely.
        if (argShowType != ShowType::OnlyApu) {
          CHECK_ERR(lynMemcpyAsync(osdStream, pDevBoxesInfo, pHostBoxesInfo,
                                   sizeof(lynBoxesInfo), ClientToServer));
          lynDrawBoxAndTextPara para;
          para.imgData = pFrameTmp->data;
          para.imgFmt = LYN_PIX_FMT_NV12;
          para.imgW = vdecOutInfo.width;
          para.imgH = vdecOutInfo.height;
          para.boxesInfo = pDevBoxesInfo;
          para.boxColor = DRAW_COLOR_BLUE;
          para.boxThick = DRAW_THICK_2;
          para.fontSize = FONT_SIZE_24;
          para.fontColor = DRAW_COLOR_BLUE;
          CHECK_ERR(lynPluginRunAsync(osdStream, osdPlugin, "lynDrawBoxAndText",
                                      &para, sizeof(para)));
        }
        // 15. Route the annotated frame per ShowType: encode, display queue,
        //     or JSON dump. Each callback owns pHostBoxesInfo and frees it.
        if (argShowType == ShowType::SaveFile) {
          pFrameTmp->eos = bEos;
          videoEncoder->WaitForStream(osdStream);
          videoEncoder->EncodeImage(pFrameTmp);
          auto freeCallback = [pHostBoxesInfo]() { delete pHostBoxesInfo; };
          SetCallback(osdStream, freeCallback);
        } else if (argShowType == ShowType::DirectShow) {
          auto showCallback = [this, pFrameTmp, pHostBoxesInfo]() {
            m_queue.put(pFrameTmp);
            delete pHostBoxesInfo;
          };
          SetCallback(osdStream, showCallback);
        } else if (argShowType == ShowType::OnlyApu) {
          auto saveCallback = [this, pFrameTmp, pHostBoxesInfo,
                               boxInfoEncode]() {
            boxInfoEncode->EncodeToJson(*pHostBoxesInfo, pFrameTmp->eos);
            delete pHostBoxesInfo;
            m_framePool->Push(pFrameTmp);
          };
          SetCallback(osdStream, saveCallback);
        }

        iBatchIndex = 0;
      }
    }

    m_statsInfo->StopPrint();
    // 16. Drain the streams, then destroy all device resources.
    CHECK_ERR(lynSynchronizeStream(ipeFaceDetectStream));
    CHECK_ERR(lynSynchronizeStream(apuFaceDetectSendStream));
    CHECK_ERR(lynSynchronizeStream(apuFaceDetectRecvStream));
    CHECK_ERR(lynSynchronizeStream(postFaceDetectStream));
    CHECK_ERR(lynDestroyStream(ipeFaceDetectStream));
    CHECK_ERR(lynDestroyStream(apuFaceDetectSendStream));
    CHECK_ERR(lynDestroyStream(apuFaceDetectRecvStream));
    CHECK_ERR(lynDestroyStream(postFaceDetectStream));
    CHECK_ERR(lynDestroyEvent(ipeFaceDetectEvent));
    CHECK_ERR(lynDestroyEvent(apuFaceDetectEvent));
    CHECK_ERR(lynPluginUnregister(faceDetectPostPlugin));
    if (argShowType != ShowType::OnlyApu) {
      // Only registered when drawing boxes (see step 7).
      CHECK_ERR(lynPluginUnregister(osdPlugin));
    }
    CHECK_ERR(lynFree(pDevBoxesInfo));
    CHECK_ERR(lynFree(pDevDetectInfo));

    if (argShowType == ShowType::DirectShow) {
      // OpencvWindowManager::GetInstance()->Close(strWindowName);
    } else if (argShowType == ShowType::SaveFile) {
      videoEncoder->UnInit();
      delete videoEncoder;
      videoEncoder = nullptr;
    } else if (argShowType == ShowType::OnlyApu) {
      boxInfoEncode->UnInit();
      delete boxInfoEncode;
      boxInfoEncode = nullptr;
    }
    videoDecoder.Stop();
    videoDecoder.UnInit();
    faceDetectModelInfo.UnLoadModel();
  }
};
