/**
 *@file PlateRecognition.hpp
 *@author lynxi
 *@version v1.0
 *@date 2023-03-07
 *@par Copyright:
 *© 2022 北京灵汐科技有限公司 版权所有。
 * 注意：以下内容均为北京灵汐科技有限公司原创，未经本公司允许，不得转载，否则将视为侵权；对于不遵守此声明或者其他违法使用以下内容者，本公司依法保留追究权。\n
 *© 2022 Lynxi Technologies Co., Ltd. All rights reserved.
 * NOTICE: All information contained here is, and remains the property of Lynxi.
 *This file can not be copied or distributed without the permission of Lynxi
 *Technologies Co., Ltd.
 *@brief 车牌检测流程定义
 */

#ifndef PLATE_RECOGNITION_H
#define PLATE_RECOGNITION_H
#include <lyn_api.h>
#include <lyn_event.h>
#include <lyn_memory.h>
#include <lyn_plugin.h>
#include <lyn_stream.h>

#include <sys/time.h>
#include <unistd.h>
#include <algorithm>
#include <cstdint>
#include <cstdio>    // std::remove (file deletion)
#include <cstring>
#include <functional>
#include <iostream>  // std::cout status logging
#include <memory>    // std::unique_ptr in stream callbacks
#include <string>
#include <thread>    // ObjectTrack::m_thread and stage threads
#include "batchMem.hpp"
#include "blockQueue.hpp"
#include "bufferPool.hpp"
#include "ipeParamEx.h"
#include "deepsort.h"
// #include "detect.h"
#include "framePool.hpp"
#include "frameRater.h"
#include "osd_plugin.h"
#include "videoDecoder.h"
#include "videoEncoder.h"

// NOTE(review): non-inline globals defined in a header — if this header is
// ever included from more than one translation unit these become multiple
// definitions at link time (ODR); confirm single-TU usage or mark `inline`.
InputType g_inputType = File;  // input source type shared by all channels
int getFrameTimeout = 1000;    // ms; poll timeout used by carDetect() on the decode queue
int POOL_SIZE(5);              // default depth for frame/buffer pools
const uint32_t MAX_BATCH = 20; // max boxes cropped per frame in featureExtract()

// One decoded frame paired with its detection results, handed from the
// detection stage (carDetect) to the recognition stage (featureExtract).
struct DetectionFrame {
    lynBoxesInfo* hostBoxInfo;    // host-side copy of the box info (copied from deviceBoxInfo)
    lynBoxesInfo* deviceBoxInfo;  // box info living in device memory
    lynFrame_t* frame;            // decoded frame these boxes belong to
};

// A frame together with the draw parameters the OSD stage applies to it.
struct OsdFrame {
    lynDrawBoxAndTextPara *osdParam;  // boxes/text to render onto the frame
    lynFrame_t *frame;                // target frame
};

/**
 * @brief Restrict `value` to the closed interval [minValue, maxValue].
 *
 * Equivalent to max(minValue, min(value, maxValue)): the upper bound is
 * applied first, then the lower bound, so minValue wins if the bounds are
 * inverted. Requires only `operator<` on T.
 */
template <typename T>
const T &clamp(const T &value, const T &minValue, const T &maxValue) {
  // Clamp from above first, then from below — same order as the
  // max(min(...)) composition.
  const T &capped = (maxValue < value) ? maxValue : value;
  return (capped < minValue) ? minValue : capped;
}

/**
 * @brief Queue an asynchronous (fire-and-forget) callback on a stream.
 *
 * The std::function is copied to the heap so it outlives this call; the
 * trampoline invoked by the runtime owns and frees it.
 *
 * @param stream stream to attach the callback to
 * @param cb     callable to run when the stream reaches this point
 */
void addAsyncCallback(lynStream_t stream, std::function<void()> cb) {
  CHECK_ERR(lynStreamAddCallback(
      stream,
      [](void *_ud) -> lynError_t {
        // BUGFIX: take ownership immediately so the functor is freed even
        // when cb throws (previously `delete ud` was skipped on exception).
        std::unique_ptr<std::function<void()>> ud(
            static_cast<std::function<void()> *>(_ud));
        try {
          (*ud)();
        } catch (const std::exception &e) {
          return -1;  // signal failure to the runtime
        }
        return 0;
      },
      new std::function<void()>(std::move(cb))));
}

/**
 * @brief Queue a callback on a stream (same mechanism as addAsyncCallback;
 *        the two are kept separate so call sites document intent).
 *
 * @param stream stream to attach the callback to
 * @param cb     callable to run when the stream reaches this point
 */
void addCallback(lynStream_t stream, std::function<void()> cb) {
  CHECK_ERR(lynStreamAddCallback(
      stream,
      [](void *_ud) -> lynError_t {
        // BUGFIX: take ownership immediately so the functor is freed even
        // when cb throws (previously `delete ud` was skipped on exception).
        std::unique_ptr<std::function<void()>> ud(
            static_cast<std::function<void()> *>(_ud));
        try {
          (*ud)();
        } catch (const std::exception &e) {
          return -1;  // signal failure to the runtime
        }
        return 0;
      },
      new std::function<void()>(std::move(cb))));
}

/**
 * @brief Order two streams through an event: work already queued on
 *        `producer` completes before anything queued on `consumer` after
 *        this call may run.
 *
 * @param producer stream whose queued commands must finish first
 * @param marker   event used as the rendezvous point
 * @param consumer stream that waits on the event
 */
void syncBetweenStreams(lynStream_t producer, lynEvent_t marker,
                        lynStream_t consumer) {
  // Record the producer's current position, then block the consumer on it.
  CHECK_ERR(lynRecordEvent(producer, marker));
  CHECK_ERR(lynStreamWaitEvent(consumer, marker));
}

/**
 * @brief Build the YOLOX post-processing descriptor for one inference batch.
 *
 * @param modelInfo     detection model metadata (input size, class count, ...);
 *                      taken by const reference to avoid copying the model info
 * @param vdecOutInfo   decoder output info supplying the original image size
 * @param apuBuffer     device buffer holding the raw model output tensor
 * @param deviceBoxInfo device-side structure the post-process plugin fills
 * @return fully initialized YoloxPostProcessInfo_t (copied by value)
 */
YoloxPostProcessInfo_t getPostInfo(const ModelInfo &modelInfo,
                               lynVdecOutInfo_t vdecOutInfo, void *apuBuffer,
                               lynBoxesInfo *deviceBoxInfo) {
  // Detection tuning knobs, named so they are easy to find and adjust.
  constexpr float kScoreThreshold = 0.25f;  // min confidence to keep a box
  constexpr float kNmsThreshold = 0.45f;    // IoU threshold for NMS
  constexpr int kNmsTopK = 500;             // max boxes entering NMS

  YoloxPostProcessInfo_t post_info;
  memset(&post_info, 0, sizeof(YoloxPostProcessInfo_t));
  post_info.is_pad_resize = 1;  // input was letterbox-resized by the IPE
  post_info.score_threshold = kScoreThreshold;
  post_info.nms_threshold = kNmsThreshold;
  post_info.nms_top_k = kNmsTopK;
  post_info.width = modelInfo.width;
  post_info.height = modelInfo.height;
  post_info.ori_width = vdecOutInfo.width;
  post_info.ori_height = vdecOutInfo.height;
  post_info.output_tensor = apuBuffer;
  post_info.boxesInfo = deviceBoxInfo;
  post_info.class_num = modelInfo.classNum;
  // First output tensor's dim[1] carries the anchor count for this model.
  post_info.anchorSize =
      modelInfo.modelDesc->outputTensorAttrArray->dims[1];

  return post_info;
}

/**
 * @brief Per-channel tracking pipeline: decode -> detect -> feature
 *        extraction + deepsort tracking -> OSD drawing -> encode/display.
 *
 * Each stage runs on its own thread (launched by start()) and hands work to
 * the next stage through the bounded BlockQueues below. stop() joins the
 * main worker thread and frees the frame pools.
 */
class ObjectTrack {
 public:
  void stop();               // join worker thread, release frame pools
  void start();              // run the whole pipeline (blocking until eos)
  void createStartThread();  // run start() on m_thread
  void carDetect();          // stage: vehicle detection + YOLOX post-process
  void featureExtract();     // stage: crop boxes, extract features, deepsort
  void osdProcess();         // stage: draw boxes/text onto frames
  void encodeProcess();      // stage: encode to file or hand off for display
  void putFrame(lynFrame_t *frame);  // return a frame buffer to the pool
  bool getFrame(lynFrame_t **frame, int timeout = 1000);  // take display frame
  void getVideoInfo(lynVdecOutInfo_t &videoInfo) const;   // copy decoder info

 public:
  std::thread m_thread;    // thread running start()
  int64_t m_trackerID;     // per-channel tracker handle (set from `this` in start())
  lynContext_t *context;   // device context shared by all stages
  // NOTE(review): encodeProcess() reads the global ArgsParse::argShowType and
  // start() reads the global g_inputType instead of these members — confirm
  // which configuration source is authoritative.
  ShowType showType = SaveFile;
  InputType inputType = File;
  lynPlugin_t yoloPlugin;      // YOLOX post-process plugin handle
  lynPlugin_t osdPlugin;       // OSD drawing plugin handle
  lynPlugin_t deepsortPlugin;  // deepsort tracking plugin handle
  std::string carDetectModelPath;
  std::string featureExtractModelPath;
  std::string yoloPluginPath;
  std::string deepsortPluginPath;
  std::string argOsdPluginPath;
  std::string strVideoInputPath;
  std::string strVideoOutputPath;
  std::string channelName;

  VideoDecoder videoDecoder;
  lynVdecOutInfo_t vdecOutInfo;  // filled in start() after decoder init
  BlockQueue<lynFrame_t *> videoDecodeResult{5};   // decoded frames
  BlockQueue<DetectionFrame> detectFrameQueue{5};  // detection results
  BlockQueue<OsdFrame> osdFrameQueue{5};           // frames awaiting OSD drawing
  BlockQueue<lynFrame_t *> trackFrameQueue{5};     // tracked/drawn frames
  BlockQueue<lynFrame_t *> m_opencvQueue;          // frames for direct display
  FramePool *vdecFramePool = nullptr;      // owned; created in start(), freed in stop()
  FramePool *vencRecvFramePool = nullptr;  // owned; created in start(), freed in stop()
};

// Return a frame buffer to the decoder's frame pool for reuse.
void ObjectTrack::putFrame(lynFrame_t *frame) { vdecFramePool->Push(frame); }

/**
 * @brief Take the next processed frame for display (DirectShow path).
 *
 * Best-effort: on timeout the caller is expected to simply retry.
 *
 * @param frame   out parameter receiving the frame on success
 * @param timeout wait in milliseconds before giving up
 * @return true if a frame was taken, false on timeout
 */
bool ObjectTrack::getFrame(lynFrame_t **frame, int timeout) {
  // Previously wrapped in an `if (!ret)` holding only a commented-out log;
  // the dead branch is removed and the result returned directly.
  return m_opencvQueue.take(*frame, timeout);
}

// Copy out the decoder output parameters. Only meaningful after start()
// has initialized the decoder and filled vdecOutInfo.
void ObjectTrack::getVideoInfo(lynVdecOutInfo_t &videoInfo) const {
  videoInfo = vdecOutInfo;
}

// Launch the pipeline on a background thread; pair with stop() to join it.
void ObjectTrack::createStartThread() {
  m_thread = std::thread(&ObjectTrack::start, this);
}

void ObjectTrack::stop() {
  lynSetCurrentContext(*context);
  m_thread.join();
  delete vdecFramePool;
  delete vencRecvFramePool;
}

/**
 * @brief 启动车牌号码识别流程
 *
 */
void ObjectTrack::start() {
  m_trackerID = reinterpret_cast<int64_t>(this);
  CHECK_ERR(lynSetCurrentContext(*context));
  std::remove(strVideoOutputPath.c_str());
  int getFrameTimeout = (g_inputType == File) ? 1000 : 3000;
  videoDecoder.Init(strVideoInputPath, g_inputType, 150);
  videoDecoder.GetVdecOutInfo(&vdecOutInfo);
  vdecFramePool = new FramePool(vdecOutInfo.predictBufSize, POOL_SIZE);
  vencRecvFramePool = new FramePool(vdecOutInfo.predictBufSize, POOL_SIZE);
  videoDecoder.Start(*context, std::ref(videoDecodeResult),
                     std::ref(*vdecFramePool));
  CHECK_ERR(lynPluginRegister(&osdPlugin, argOsdPluginPath.c_str()));
  CHECK_ERR(lynPluginRegister(&yoloPlugin, yoloPluginPath.c_str()));
  CHECK_ERR(lynPluginRegister(&deepsortPlugin, deepsortPluginPath.c_str()));

  std::thread plateDetectThread(&ObjectTrack::carDetect, this);
  std::thread plateRecognizeThread(&ObjectTrack::featureExtract, this);
  std::thread osdThread(&ObjectTrack::osdProcess, this);
  std::thread encodeProcessThread(&ObjectTrack::encodeProcess, this);

  plateDetectThread.join();
  plateRecognizeThread.join();
  osdThread.join();
  encodeProcessThread.join();
  videoDecoder.Stop();
  videoDecoder.UnInit();  
  CHECK_ERR(lynPluginUnregister(osdPlugin));
  CHECK_ERR(lynPluginUnregister(yoloPlugin));
  CHECK_ERR(lynPluginUnregister(deepsortPlugin));
}

/**
 * @brief Detection stage: consume decoded frames, run the car-detection
 *        model plus YOLOX post-processing, and publish the boxes to
 *        detectFrameQueue for the recognition stage.
 *
 * Work is split across four streams chained by events:
 *   ipeStream (pre-process) -> apuStream (inference) ->
 *   pluginStream (post-process) -> postStream (host callbacks).
 * Reference captures in the callbacks are safe because every stream is
 * synchronized before this function's locals go out of scope.
 */
void ObjectTrack::carDetect() {
  CHECK_ERR(lynSetCurrentContext(*context));
  ModelInfo carDetect{carDetectModelPath.c_str()};
  IpeParamPlateRecognition ipeParam(carDetect.width, carDetect.height);
  lynStream_t ipeStream = nullptr;
  lynStream_t apuStream = nullptr;
  lynStream_t pluginStream = nullptr;
  lynStream_t postStream = nullptr;
  lynEvent_t ipeEvent = nullptr;
  lynEvent_t apuEvent = nullptr;
  lynEvent_t postEvent = nullptr;
  CHECK_ERR(lynCreateStream(&ipeStream));
  CHECK_ERR(lynCreateStream(&apuStream));
  CHECK_ERR(lynCreateStream(&pluginStream));
  CHECK_ERR(lynCreateStream(&postStream));
  CHECK_ERR(lynCreateEvent(&ipeEvent));
  CHECK_ERR(lynCreateEvent(&apuEvent));
  CHECK_ERR(lynCreateEvent(&postEvent));
  void *apuBuffer = nullptr;
  BufferPool apuBufferPool(carDetect.outputSize * carDetect.batchSize,
                           POOL_SIZE);
  BatchMem *pIpeOutBuf = nullptr;
  BatchMemPool oIpeOutMemPool(false, carDetect.inputSize, carDetect.batchSize,
                              POOL_SIZE);
  BufferPool inferResultHostPool(carDetect.outputSize * carDetect.batchSize, 5,
                                 HOST);
  BufferPool postInfoPool(sizeof(YoloxPostProcessInfo_t), POOL_SIZE);

  FrameRater frameRate(channelName);
  frameRate.SetInterval(1);
  frameRate.SetTitle("detection model " + channelName);
  bool bEos = false;
  ipeParam.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height, LYN_PIX_FMT_NV12);

  while (!bEos) {
    lynFrame_t *pFrameTmp;
    // Poll with a timeout so a stalled decoder does not block shutdown.
    if (!videoDecodeResult.take(pFrameTmp, getFrameTimeout)) continue;

    bEos = pFrameTmp->eos;
    pIpeOutBuf = oIpeOutMemPool.GetBatchMem();
    apuBuffer = apuBufferPool.Pop();

    // Pre-process (resize / format conversion) into the model input buffer.
    ipeParam.CalcParam(ipeStream, pFrameTmp->data, pIpeOutBuf->GetElement());
    CHECK_ERR(lynRecordEvent(ipeStream, ipeEvent));
    CHECK_ERR(lynStreamWaitEvent(apuStream, ipeEvent));
    // Inference.
    CHECK_ERR(lynExecuteModelAsync(apuStream, carDetect.model,
                                   pIpeOutBuf->Buffer(), apuBuffer,
                                   carDetect.batchSize));
    CHECK_ERR(lynRecordEvent(apuStream, apuEvent));
    CHECK_ERR(lynStreamWaitEvent(pluginStream, apuEvent));
    lynBoxesInfo *deviceBoxInfo = nullptr;
    CHECK_ERR(lynMalloc((void **)&deviceBoxInfo, sizeof(lynBoxesInfo)));
    auto post_info =
        getPostInfo(carDetect, vdecOutInfo, apuBuffer, deviceBoxInfo);

    // Post-process: fill deviceBoxInfo from the raw output tensor.
    // BUGFIX: the return code was silently dropped; check it like every
    // other runtime call (osdProcess already wraps the same API).
    // NOTE(review): post_info is stack-local — this assumes the runtime
    // copies the parameter buffer (its size is passed); confirm.
    CHECK_ERR(lynPluginRunAsync(pluginStream, yoloPlugin, "lynYoloxPostProcess",
                                &post_info, sizeof(YoloxPostProcessInfo_t)));

    CHECK_ERR(lynRecordEvent(pluginStream, postEvent));
    CHECK_ERR(lynStreamWaitEvent(postStream, postEvent));

    // Copy the boxes to the host and hand frame + boxes to the next stage.
    addCallback(postStream, [this, pFrameTmp, deviceBoxInfo] {
      auto hostBoxInfo = new lynBoxesInfo();
      lynMemcpy(hostBoxInfo, deviceBoxInfo, sizeof(lynBoxesInfo),
                ServerToClient);
      DetectionFrame detectionPara;
      detectionPara.hostBoxInfo = hostBoxInfo;
      detectionPara.deviceBoxInfo = deviceBoxInfo;
      detectionPara.frame = pFrameTmp;
      detectFrameQueue.put(detectionPara);
    });
    // Recycle the per-iteration buffers once the stream has passed them.
    addAsyncCallback(postStream, [&apuBufferPool, apuBuffer] {
      apuBufferPool.Push(apuBuffer);
    });
    addAsyncCallback(postStream, [&oIpeOutMemPool, pIpeOutBuf] {
      oIpeOutMemPool.PutBatchMem(pIpeOutBuf);
    });
    addAsyncCallback(postStream, [&frameRate] { frameRate.AddFrame(1); });
  }

  // Drain every stream before local pools and handles are destroyed.
  CHECK_ERR(lynSynchronizeStream(ipeStream));
  CHECK_ERR(lynSynchronizeStream(apuStream));
  CHECK_ERR(lynSynchronizeStream(pluginStream));
  CHECK_ERR(lynSynchronizeStream(postStream));
  CHECK_ERR(lynDestroyEvent(ipeEvent));
  CHECK_ERR(lynDestroyEvent(apuEvent));
  CHECK_ERR(lynDestroyEvent(postEvent));
  CHECK_ERR(lynDestroyStream(ipeStream));
  CHECK_ERR(lynDestroyStream(apuStream));
  CHECK_ERR(lynDestroyStream(pluginStream));
  CHECK_ERR(lynDestroyStream(postStream));
  carDetect.UnLoadModel();
  std::cout << "carDetect end" << std::endl;
}

/**
 * @brief 根据车牌检测的结果，提取车牌图片，执行号码识别
 *
 */
void ObjectTrack::featureExtract() {
  CHECK_ERR(lynSetCurrentContext(*context));

  ModelInfo featureExtractionModel(featureExtractModelPath.c_str());
  BatchMem *pIpeOutBuf = nullptr;
  BatchMemPool oIpeOutMemPool(false, featureExtractionModel.inputSize,
                              featureExtractionModel.batchSize, POOL_SIZE * 20);
  void *apuBuffer;
  BufferPool apuBufferPool(
      featureExtractionModel.outputSize * featureExtractionModel.batchSize,
      POOL_SIZE * 20);
  void *ipeBuffer;
  BufferPool ipeBufferPool(featureExtractionModel.inputSize * MAX_BATCH,
                           POOL_SIZE * 20);
  IpeParamPlateRecognition ipeParam(featureExtractionModel.width,
                                    featureExtractionModel.height);
  ipeParam.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height, LYN_PIX_FMT_NV12);
  lynEvent_t ipeEvent = nullptr;
  lynEvent_t apuEvent = nullptr;
  lynEvent_t deepsortEvent = nullptr;
  lynStream_t ipeStream = nullptr;
  lynStream_t apuStream = nullptr;
  lynStream_t deepsortStream = nullptr;
  lynStream_t postStream = nullptr;
  CHECK_ERR(lynCreateStream(&ipeStream));
  CHECK_ERR(lynCreateStream(&apuStream));
  CHECK_ERR(lynCreateStream(&deepsortStream));
  CHECK_ERR(lynCreateStream(&postStream));
  CHECK_ERR(lynCreateEvent(&ipeEvent));
  CHECK_ERR(lynCreateEvent(&apuEvent));
  CHECK_ERR(lynCreateEvent(&deepsortEvent));
  FrameRater frameRate(channelName);
  frameRate.SetInterval(1);
  frameRate.SetTitle("recognize model");
  bool bEos = false;

  lynPluginRunAsync(deepsortStream, deepsortPlugin, "lynCreateTracker", &m_trackerID, sizeof(int64_t));

  while (!bEos) {
    DetectionFrame detectionFrame;
    if (!detectFrameQueue.take(detectionFrame, 60)) continue;
    bEos = detectionFrame.frame->eos;
    // ipe数次
    auto ipeBuffer = ipeBufferPool.Pop();
    for (size_t i = 0;
         i < std::min((uint32_t)detectionFrame.hostBoxInfo->boxesNum, MAX_BATCH); ++i) {
      auto plateRect = get_rect(detectionFrame.hostBoxInfo->boxes[i],
                                vdecOutInfo.width, vdecOutInfo.height);
      if (!plateRect.is_size_valid()) continue;
      // ipe
      ipeParam.CalcParam(
          ipeStream, plateRect, detectionFrame.frame->data,
          (char *)ipeBuffer + featureExtractionModel.inputSize * i);
    }

    // 一次性推理所有ipe结果
    CHECK_ERR(lynRecordEvent(ipeStream, ipeEvent));
    CHECK_ERR(lynStreamWaitEvent(apuStream, ipeEvent));
    apuBuffer = apuBufferPool.Pop();
    // 推理
    CHECK_ERR(lynExecuteModelAsync(apuStream, featureExtractionModel.model,
                                   ipeBuffer, apuBuffer,
                                   featureExtractionModel.batchSize));

    CHECK_ERR(lynRecordEvent(apuStream, apuEvent));
    CHECK_ERR(lynStreamWaitEvent(deepsortStream, apuEvent));

    auto args = new DeepsortArg{};
    args->objectCount =
        featureExtractionModel.outputSize * featureExtractionModel.batchSize;
    args->width = vdecOutInfo.width;
    args->height = vdecOutInfo.height;
    args->BoxInfo = detectionFrame.deviceBoxInfo;
    args->apuBuffer = apuBuffer;
    args->imageData = detectionFrame.frame->data;
    args->trackerID = m_trackerID;
    lynMalloc((void **)&args->targetBoxInfo, sizeof(lynBoxesInfo));
 
    lynPluginRunAsync(deepsortStream, deepsortPlugin, "lynDeepsort", args,
                      sizeof(DeepsortArg));

    addCallback(deepsortStream, [apuBuffer, &apuBufferPool]() {
      apuBufferPool.Push(apuBuffer);
    });
    addCallback(deepsortStream, [this, args, detectionFrame]() {
      OsdFrame osdFrame;
      osdFrame.osdParam = new lynDrawBoxAndTextPara();
      osdFrame.osdParam->boxesInfo = args->targetBoxInfo;
      osdFrame.osdParam->imgW = args->width;
      osdFrame.osdParam->imgH = args->height;
      osdFrame.osdParam->imgFmt = LYN_PIX_FMT_NV12;
      osdFrame.osdParam->boxColor = DRAW_COLOR_BLUE;
      osdFrame.osdParam->boxThick = DRAW_THICK_2;
      osdFrame.osdParam->fontSize = FONT_SIZE_24;
      osdFrame.osdParam->fontColor = DRAW_COLOR_BLUE;
      osdFrame.osdParam->imgData = args->imageData;
      osdFrame.frame = detectionFrame.frame;
      osdFrameQueue.put(osdFrame);
    });
    addAsyncCallback(deepsortStream, [ipeBuffer, &ipeBufferPool]() {
      ipeBufferPool.Push(ipeBuffer);
    });
    addAsyncCallback(deepsortStream, [detectionFrame]() {
      delete detectionFrame.hostBoxInfo;
      lynFree(detectionFrame.deviceBoxInfo);
    });
  }

  lynPluginRunAsync(deepsortStream, deepsortPlugin, "lynDestroyTracker", &m_trackerID, sizeof(int64_t));

  CHECK_ERR(lynSynchronizeStream(ipeStream));
  CHECK_ERR(lynSynchronizeStream(apuStream));
  CHECK_ERR(lynSynchronizeStream(deepsortStream));
  CHECK_ERR(lynSynchronizeStream(postStream));

  CHECK_ERR(lynDestroyEvent(ipeEvent));
  CHECK_ERR(lynDestroyEvent(apuEvent));
  CHECK_ERR(lynDestroyEvent(deepsortEvent));
  CHECK_ERR(lynDestroyStream(ipeStream));
  CHECK_ERR(lynDestroyStream(apuStream));
  CHECK_ERR(lynDestroyStream(deepsortStream));
  CHECK_ERR(lynDestroyStream(postStream));
  featureExtractionModel.UnLoadModel();
  std::cout << "feature end" << std::endl;
}
void ObjectTrack::osdProcess() {
  CHECK_ERR(lynSetCurrentContext(*context));
  lynStream_t pluginStream;
  lynCreateStream(&pluginStream);
  lynStream_t postStream;
  lynCreateStream(&postStream);
  lynEvent_t pluginEvent;
  lynCreateEvent(&pluginEvent);
  bool bEos = false;
  while (!bEos) {
    OsdFrame osdFrame;
    if (!osdFrameQueue.take(osdFrame, 100)) continue;
    bEos = osdFrame.frame->eos;
    CHECK_ERR(lynPluginRunAsync(pluginStream, osdPlugin, "lynDrawBoxAndText",
                                osdFrame.osdParam, sizeof(lynDrawBoxAndTextPara)));
    lynRecordEvent(pluginStream, pluginEvent);
    lynStreamWaitEvent(postStream, pluginEvent);
    addAsyncCallback(postStream, [this, osdFrame]() {
      trackFrameQueue.put(osdFrame.frame);
      delete osdFrame.osdParam;
    });
  }
  lynSynchronizeStream(pluginStream);
  lynSynchronizeStream(postStream);
  lynDestroyEvent(pluginEvent);
  lynDestroyStream(pluginStream);
  lynDestroyStream(postStream);
  std::cout << "osd end" << std::endl;
}

/**
 * @brief Output stage: encode tracked frames to the output file, or queue
 *        them for on-screen display, depending on the show type.
 *
 * NOTE(review): this reads the global ArgsParse::argShowType rather than
 * the member showType — confirm which setting is authoritative.
 */
void ObjectTrack::encodeProcess() {
  CHECK_ERR(lynSetCurrentContext(*context));
  VideoEncoder *videoEncoder = nullptr;
  if (ArgsParse::argShowType == ShowType::SaveFile) {
    videoEncoder = new VideoEncoder;
    // The encoder recycles buffers through both frame pools.
    videoEncoder->Init(vdecOutInfo, strVideoOutputPath, vdecFramePool,
                       vencRecvFramePool);
  }
  bool bEos = false;
  while (!bEos) {
    lynFrame_t *frame;
    // Poll with a short timeout and retry so shutdown cannot block here.
    if (!trackFrameQueue.take(frame, 60)) continue;
    bEos = frame->eos;
    if (ArgsParse::argShowType == ShowType::SaveFile) {
      videoEncoder->EncodeImage(frame);
    } else if (ArgsParse::argShowType == ShowType::DirectShow) {
      m_opencvQueue.put(frame);
    }
    // NOTE(review): for any other show type the frame is dropped without
    // being returned to a pool — confirm this cannot exhaust buffers.
  }
  if (ArgsParse::argShowType == ShowType::SaveFile) {
    videoEncoder->UnInit();
    delete videoEncoder;
  }
  std::cout << "encode end" << std::endl;
}
#endif
