/**
 *@file PlateRecognition.hpp
 *@author lynxi
 *@version v1.0
 *@date 2023-03-07
 *@par Copyright:
 *© 2022 北京灵汐科技有限公司 版权所有。
 * 注意：以下内容均为北京灵汐科技有限公司原创，未经本公司允许，不得转载，否则将视为侵权；对于不遵守此声明或者其他违法使用以下内容者，本公司依法保留追究权。\n
 *© 2022 Lynxi Technologies Co., Ltd. All rights reserved.
 * NOTICE: All information contained here is, and remains the property of Lynxi.
 *This file can not be copied or distributed without the permission of Lynxi
 *Technologies Co., Ltd.
 *@brief 车牌检测流程定义
 */

#ifndef PLATE_RECOGNITION_H
#define PLATE_RECOGNITION_H
#include <lyn_api.h>
#include <lyn_context.h>
#include <lyn_memory.h>
#include <lyn_plugin.h>
#include <lyn_stream.h>
#include <sys/time.h>
#include <unistd.h>
#include <chrono>
#include <cstdio>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <vector>
#include "batchMem.hpp"
#include "blockQueue.hpp"
#include "bufferPool.hpp"
#include "ipeParamEx.h"
#include "framePool.hpp"
#include "frameRater.h"
#include "osd_plugin.h"
#include "videoDecoder.h"
#include "videoEncoder.h"
// NOTE(review): these are non-inline global *definitions* in a header; if this
// header is ever included from more than one translation unit they violate the
// ODR (multiple-definition link errors). Consider `inline` (C++17) or moving
// the definitions into a .cpp file.
InputType g_inputType = File;  // input source type (file vs. stream)
int getFrameTimeout = 1000;    // decode-queue take timeout in ms; reassigned in PlateRecognition::start()
int POOL_SIZE(5);              // depth of every frame/buffer pool in the pipeline

/**
 * @brief One decoded frame bundled with its plate-detection results,
 *        passed from the detection thread to the recognition thread.
 */
struct DetectionFrame {
    lynBoxesInfo* hostBoxInfo = nullptr;    // host-side copy of deviceBoxInfo
    lynBoxesInfo* deviceBoxInfo = nullptr;  // device-side box info (lynMalloc'd)
    lynFrame_t* frame = nullptr;            // decoded frame the boxes refer to
};

/**
 * @brief Queue an asynchronous callback on a stream.
 *
 * The std::function is heap-allocated and handed to the C-style trampoline,
 * which owns it and frees it after invocation.
 *
 * @param stream stream on which the callback is queued
 * @param cb     callable executed when the stream reaches this point
 */
void addAsyncCallback(lynStream_t stream, std::function<void()> cb) {
    CHECK_ERR(lynStreamAddCallback(
        stream,
        [](void *_ud) -> lynError_t {
            // BUGFIX: take ownership before invoking so the functor is freed
            // even when the user callback throws (previously `delete` was
            // skipped on the exception path, leaking the std::function).
            std::unique_ptr<std::function<void()>> ud(
                static_cast<std::function<void()> *>(_ud));
            try {
                (*ud)();
            } catch (const std::exception &e) {
                return -1;
            }
            return 0;
        },
        new std::function<void()>(std::move(cb))));
}

/**
 * @brief Queue a callback on a stream.
 *
 * NOTE(review): this is byte-identical to addAsyncCallback even though the
 * original comments call one "synchronous" and the other "asynchronous" —
 * confirm whether a different lyn API entry point was intended here.
 *
 * @param stream stream on which the callback is queued
 * @param cb     callable executed when the stream reaches this point
 */
void addCallback(lynStream_t stream, std::function<void()> cb) {
    CHECK_ERR(lynStreamAddCallback(
        stream,
        [](void *_ud) -> lynError_t {
            // BUGFIX: take ownership before invoking so the functor is freed
            // even when the user callback throws (previously `delete` was
            // skipped on the exception path, leaking the std::function).
            std::unique_ptr<std::function<void()>> ud(
                static_cast<std::function<void()> *>(_ud));
            try {
                (*ud)();
            } catch (const std::exception &e) {
                return -1;
            }
            return 0;
        },
        new std::function<void()>(std::move(cb))));
}

/**
 * @brief Synchronize two streams through an event.
 *
 * Records `middle` on `before` and makes `after` wait for it, so all commands
 * queued on `before` up to this point complete before any command queued on
 * `after` afterwards runs.
 *
 * @param before stream whose already-queued commands happen first
 * @param middle event used as the synchronization point
 * @param after  stream whose subsequent commands happen after the event
 */
void syncBetweenStreams(lynStream_t before, lynEvent_t middle,
                        lynStream_t after) {
    CHECK_ERR(lynRecordEvent(before, middle));
    CHECK_ERR(lynStreamWaitEvent(after, middle));
}

/**
 * @brief Build the YoloPostProcessInfo_t passed to the detection
 *        post-process plugin.
 *
 * @param carPlateDetect detection model info (taken by const reference —
 *                       the previous by-value parameter copied the whole
 *                       ModelInfo on every frame)
 * @param vdecOutInfo    decoder output info (original image size)
 * @param apuBuffer      raw inference output tensor
 * @param deviceBoxInfo  device-side box buffer the plugin fills in
 * @return fully populated YoloPostProcessInfo_t
 */
YoloPostProcessInfo_t getPostInfo(const ModelInfo &carPlateDetect,
                                  lynVdecOutInfo_t vdecOutInfo, void *apuBuffer,
                                  lynBoxesInfo *deviceBoxInfo) {
    YoloPostProcessInfo_t post_info = {};  // value-init replaces memset
    post_info.is_pad_resize = 1;
    post_info.score_threshold = 0.25;      // detection confidence cut-off
    post_info.nms_threshold = 0.45;        // IoU threshold for NMS
    post_info.nms_top_k = 500;             // max boxes kept before NMS
    post_info.width = carPlateDetect.width;
    post_info.height = carPlateDetect.height;
    post_info.ori_width = vdecOutInfo.width;
    post_info.ori_height = vdecOutInfo.height;
    post_info.output_tensor = apuBuffer;
    post_info.boxesInfo = deviceBoxInfo;
    return post_info;
}

/**
 * @brief Plate-recognition pipeline for one video channel:
 *        decode -> plate detect -> plate recognize/OSD -> encode or show.
 */
class PlateRecognition {
   public:
    void stop();               // join the worker thread and free the pools
    void start();              // run the whole pipeline (blocks until EOS)
    void createStartThread();  // run start() on m_thread
    void plateDetect();        // detection stage (thread body)
    void plateRecognize();     // recognition + OSD stage (thread body)
    void encodeProcess();      // encode/show stage (thread body)
    void putFrame(lynFrame_t *frame);  // return a frame to the decoder pool
    // Fetch one processed frame for direct display; false on timeout (ms).
    bool getFrame(lynFrame_t **frame, int timeout = 1000);
    void getVideoInfo(lynVdecOutInfo_t &videoInfo) const;  // copy decoder info

   public:
    std::thread m_thread;    // thread running start()
    lynContext_t *context;   // device context set by every stage thread
    ShowType showType = SaveFile;
    InputType inputType = File;
    lynPlugin_t postPlugin;  // post-process plugin (detect + recognize)
    lynPlugin_t osdPlugin;   // box/text drawing plugin

    std::string plateDetectModelPath;
    std::string plateRecognizeModelPath;
    std::string argPostPluginPath;
    std::string argOsdPluginPath;
    std::string strVideoInputPath;
    std::string strVideoOutputPath;
    std::string channelName;

    VideoEncoder videoEncoder;
    VideoDecoder videoDecoder;
    lynVdecOutInfo_t vdecOutInfo;
    BlockQueue<lynFrame_t *> videoDecodeResult{5};    // decoded frames
    BlockQueue<DetectionFrame> detectFrameQueue{5};   // plate-detection results
    BlockQueue<lynFrame_t *> recognizeFrameQueue{5};  // plate-recognition results
    BlockQueue<lynFrame_t *> m_opencvQueue;           // frames for DirectShow
    FramePool *vdecFramePool = nullptr;      // owned; created in start(), freed in stop()
    FramePool *vencRecvFramePool = nullptr;  // owned; created in start(), freed in stop()
};

// Return a consumed frame buffer to the decoder's frame pool so it can be
// reused for a new decoded frame.
void PlateRecognition::putFrame(lynFrame_t *frame) {
    vdecFramePool->Push(frame);
}

/**
 * @brief Fetch one processed frame from the display queue.
 *
 * @param frame   [out] receives the frame pointer on success
 * @param timeout maximum wait in milliseconds
 * @return true if a frame was taken, false on timeout
 */
bool PlateRecognition::getFrame(lynFrame_t **frame, int timeout) {
    // Simplified: the old failure branch contained only a commented-out log.
    return m_opencvQueue.take(*frame, timeout);
}

// Copy the decoder's output parameters (width/height/buffer size, ...) out to
// the caller. vdecOutInfo is filled by videoDecoder.GetVdecOutInfo() in start().
void PlateRecognition::getVideoInfo(lynVdecOutInfo_t &videoInfo) const {
    videoInfo = vdecOutInfo;
}

// Launch start() on the member thread; pair with stop(), which joins it.
void PlateRecognition::createStartThread() {
    m_thread = std::thread(&PlateRecognition::start, this);
}

void PlateRecognition::stop() {
    lynSetCurrentContext(*context);
    m_thread.join();
    delete vdecFramePool;
    delete vencRecvFramePool;
}

/**
 * @brief 启动车牌号码识别流程
 *
 */
void PlateRecognition::start() {
    CHECK_ERR(lynSetCurrentContext(*context));
    std::remove(strVideoOutputPath.c_str());
    int getFrameTimeout = (g_inputType == File) ? 1000 : 3000;
    videoDecoder.Init(strVideoInputPath, g_inputType, 150);
    videoDecoder.GetVdecOutInfo(&vdecOutInfo);
    vdecFramePool = new FramePool(vdecOutInfo.predictBufSize, POOL_SIZE);
    vencRecvFramePool = new FramePool(vdecOutInfo.predictBufSize, POOL_SIZE);
    if (ArgsParse::argShowType == ShowType::SaveFile) {
        videoEncoder.Init(vdecOutInfo, strVideoOutputPath, vdecFramePool,
                           vencRecvFramePool);
    }
    videoDecoder.Start(*context, std::ref(videoDecodeResult),
                       std::ref(*vdecFramePool));
    CHECK_ERR(lynPluginRegister(&postPlugin, argPostPluginPath.c_str()));
    CHECK_ERR(lynPluginRegister(&osdPlugin, argOsdPluginPath.c_str()));
    std::thread plateDetectThread(&PlateRecognition::plateDetect, this);
    std::thread plateRecognizeThread(&PlateRecognition::plateRecognize, this);
    std::thread encodeProcessThread(&PlateRecognition::encodeProcess, this);
    plateDetectThread.join();
    plateRecognizeThread.join();
    encodeProcessThread.join();
    videoDecoder.Stop();
    videoDecoder.UnInit();    
    CHECK_ERR(lynPluginUnregister(postPlugin));
    CHECK_ERR(lynPluginUnregister(osdPlugin));
}

/**
 * @brief Detection stage: locate plates in each decoded frame.
 *
 * Consumes decoded frames from videoDecodeResult, runs IPE preprocessing,
 * detection-model inference and the detection post-process plugin, then
 * forwards each frame plus its box info to detectFrameQueue for the
 * recognition thread. Also handles mid-stream resolution changes.
 */
void PlateRecognition::plateDetect() {
    CHECK_ERR(lynSetCurrentContext(*context));
    ModelInfo carPlateDetect{plateDetectModelPath.c_str()};
    IpeParamPlateRecognition ipeParam(carPlateDetect.width,
                                      carPlateDetect.height);
    // One stream per pipeline stage; events enforce stage ordering:
    // ipe -> apu (inference) -> plugin (post-process) -> post (callbacks).
    lynStream_t ipeStream = nullptr;
    lynStream_t apuStream = nullptr;
    lynStream_t pluginStream = nullptr;
    lynStream_t postStream = nullptr;
    lynEvent_t ipeEvent = nullptr;
    lynEvent_t apuEvent = nullptr;
    lynEvent_t postEvent = nullptr;
    CHECK_ERR(lynCreateStream(&ipeStream));
    CHECK_ERR(lynCreateStream(&apuStream));
    CHECK_ERR(lynCreateStream(&pluginStream));
    CHECK_ERR(lynCreateStream(&postStream));
    CHECK_ERR(lynCreateEvent(&ipeEvent));
    CHECK_ERR(lynCreateEvent(&apuEvent));
    CHECK_ERR(lynCreateEvent(&postEvent));
    void *apuBuffer = nullptr;
    BufferPool apuBufferPool(
        carPlateDetect.outputSize * carPlateDetect.batchSize, POOL_SIZE);
    BatchMem *pIpeOutBuf = nullptr;
    BatchMemPool oIpeOutMemPool(false, carPlateDetect.inputSize,
                                carPlateDetect.batchSize, POOL_SIZE);
    FrameRater frameRate(channelName);
    frameRate.SetInterval(1);
    frameRate.SetTitle("detection model " + channelName);
    bool bEos = false;
    ipeParam.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                        LYN_PIX_FMT_NV12);
    bool resetResolution = false;

    while (!bEos) {
        lynFrame_t *pFrameTmp = nullptr;  // initialized defensively
        if (!videoDecodeResult.take(pFrameTmp, getFrameTimeout)) continue;

        bEos = pFrameTmp->eos;
        // A resolution change is signalled via an EOS frame plus the decoder
        // flag: tear down the encoder, recycle the frame, and keep running.
        if (bEos && videoDecoder.m_resolutionchange) {
            resetResolution = true;
            if (showType == ShowType::SaveFile) {
                // Let the in-flight frames of the current encoder drain.
                std::this_thread::sleep_for(std::chrono::milliseconds(400));
                videoEncoder.UnInit();
            }
            vdecFramePool->Push(pFrameTmp);
            bEos = false;
            continue;
        }
        if (resetResolution) {
            std::cout << strVideoInputPath << " resolution changed!" << std::endl;
            // Re-read decoder info and re-init IPE and encoder for new size.
            videoDecoder.GetVdecOutInfo(&vdecOutInfo);
            ipeParam.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                        LYN_PIX_FMT_NV12);
            if (showType == ShowType::SaveFile) {
                vencRecvFramePool->ResetSize(vdecOutInfo.predictBufSize);
                videoEncoder.Init(vdecOutInfo, strVideoOutputPath, vdecFramePool,
                           vencRecvFramePool);
            }
            resetResolution = false;
        }

        pIpeOutBuf = oIpeOutMemPool.GetBatchMem();
        apuBuffer = apuBufferPool.Pop();
        // IPE: preprocess the frame into the model's input layout.
        ipeParam.CalcParam(ipeStream, pFrameTmp->data,
                           pIpeOutBuf->GetElement());
        CHECK_ERR(lynRecordEvent(ipeStream, ipeEvent));
        CHECK_ERR(lynStreamWaitEvent(apuStream, ipeEvent));
        // Inference.
        CHECK_ERR(lynExecuteModelAsync(apuStream, carPlateDetect.model,
                                       pIpeOutBuf->Buffer(), apuBuffer,
                                       carPlateDetect.batchSize));
        CHECK_ERR(lynRecordEvent(apuStream, apuEvent));
        CHECK_ERR(lynStreamWaitEvent(pluginStream, apuEvent));
        lynBoxesInfo *deviceBoxInfo = nullptr;
        CHECK_ERR(lynMalloc((void **)&deviceBoxInfo, sizeof(lynBoxesInfo)));
        auto post_info =
            getPostInfo(carPlateDetect, vdecOutInfo, apuBuffer, deviceBoxInfo);
        // Post-process: fill deviceBoxInfo with the detected boxes.
        // BUGFIX: result checked like every other lyn* call in this function.
        CHECK_ERR(lynPluginRunAsync(pluginStream, postPlugin,
                                    "carPlateDetectPostProcess", &post_info,
                                    sizeof(YoloPostProcessInfo_t)));
        CHECK_ERR(lynRecordEvent(pluginStream, postEvent));
        CHECK_ERR(lynStreamWaitEvent(postStream, postEvent));
        // Hand the boxes and the frame to the recognition thread. The host
        // copy is freed by plateRecognize() (delete hostBoxInfo / lynFree).
        addCallback(postStream, [this, pFrameTmp, deviceBoxInfo] {
            auto hostBoxInfo = new lynBoxesInfo();
            lynMemcpy(hostBoxInfo, deviceBoxInfo, sizeof(lynBoxesInfo),
                      ServerToClient);
            DetectionFrame detectionPara;
            detectionPara.hostBoxInfo = hostBoxInfo;
            detectionPara.deviceBoxInfo = deviceBoxInfo;
            detectionPara.frame = pFrameTmp;
            detectFrameQueue.put(detectionPara);
        });
        // Recycle buffers once every stage that uses them has finished.
        addAsyncCallback(postStream, [&apuBufferPool, apuBuffer] {
            apuBufferPool.Push(apuBuffer);
        });
        addAsyncCallback(postStream, [&oIpeOutMemPool, pIpeOutBuf] {
            oIpeOutMemPool.PutBatchMem(pIpeOutBuf);
        });
        addAsyncCallback(postStream, [&frameRate] { frameRate.AddFrame(1); });
    }

    // Drain all streams before destroying events/streams and the model.
    CHECK_ERR(lynSynchronizeStream(ipeStream));
    CHECK_ERR(lynSynchronizeStream(apuStream));
    CHECK_ERR(lynSynchronizeStream(pluginStream));
    CHECK_ERR(lynSynchronizeStream(postStream));
    CHECK_ERR(lynDestroyEvent(ipeEvent));
    CHECK_ERR(lynDestroyEvent(apuEvent));
    CHECK_ERR(lynDestroyEvent(postEvent));
    CHECK_ERR(lynDestroyStream(ipeStream));
    CHECK_ERR(lynDestroyStream(apuStream));
    CHECK_ERR(lynDestroyStream(pluginStream));
    CHECK_ERR(lynDestroyStream(postStream));
    carPlateDetect.UnLoadModel();
}

/**
 * @brief 根据车牌检测的结果，提取车牌图片，执行号码识别
 *
 */
void PlateRecognition::plateRecognize() {
    CHECK_ERR(lynSetCurrentContext(*context));

    ModelInfo carPlateRecognition(plateRecognizeModelPath.c_str());
    BatchMem *pIpeOutBuf = nullptr;
    BatchMemPool oIpeOutMemPool(false, carPlateRecognition.inputSize,
                                carPlateRecognition.batchSize, POOL_SIZE * 20);
    void *apuBuffer;
    BufferPool apuBufferPool(
        carPlateRecognition.outputSize * carPlateRecognition.batchSize,
        POOL_SIZE * 20);
    IpeParamPlateRecognition ipeParam(carPlateRecognition.width,
                                      carPlateRecognition.height);
    ipeParam.SetImgInfo(vdecOutInfo.width, vdecOutInfo.height,
                        LYN_PIX_FMT_NV12);
    lynEvent_t ipeEvent = nullptr;
    lynEvent_t apuEvent = nullptr;
    lynEvent_t osdEvent = nullptr;
    lynStream_t ipeStream = nullptr;
    lynStream_t apuStream = nullptr;
    lynStream_t osdStream = nullptr;
    // lynStream_t postStream = nullptr;
    CHECK_ERR(lynCreateStream(&ipeStream));
    CHECK_ERR(lynCreateStream(&apuStream));
    CHECK_ERR(lynCreateStream(&osdStream));
    CHECK_ERR(lynCreateEvent(&ipeEvent));
    CHECK_ERR(lynCreateEvent(&apuEvent));
    FrameRater frameRate(channelName);
    frameRate.SetInterval(1);
    frameRate.SetTitle("recognize model");
    bool bEos = false;
    while (!bEos) {
        DetectionFrame detectionFrame;
        if (!detectFrameQueue.take(detectionFrame, 60)) continue;
        bEos = detectionFrame.frame->eos;
        // 将apuBuffer和ipeBuffer暂存
        std::vector<void *> apuBuffers;
        std::vector<BatchMem *> ipeBuffers;

        for (size_t i = 0; i < detectionFrame.hostBoxInfo->boxesNum; ++i) {
            auto plateRect = get_rect(detectionFrame.hostBoxInfo->boxes[i],
                                      vdecOutInfo.width, vdecOutInfo.height);
            if (!plateRect.is_size_valid()) continue;
            pIpeOutBuf = oIpeOutMemPool.GetBatchMem();
            // ipe
            ipeParam.CalcParam(ipeStream, plateRect, detectionFrame.frame->data,
                               pIpeOutBuf->GetElement());
            CHECK_ERR(lynRecordEvent(ipeStream, ipeEvent));
            CHECK_ERR(lynStreamWaitEvent(apuStream, ipeEvent));
            apuBuffer = apuBufferPool.Pop();
            apuBuffers.push_back(apuBuffer);
            ipeBuffers.push_back(pIpeOutBuf);
            // 推理
            CHECK_ERR(lynExecuteModelAsync(apuStream, carPlateRecognition.model,
                                           pIpeOutBuf->Buffer(), apuBuffer,
                                           carPlateRecognition.batchSize));
        }

        // 如果这一帧中有车牌信息
        if (apuBuffers.size() > 0) {
            // 将推理结果的地址复制到device侧
            void *apuBufferDevice;
            lynMalloc(&apuBufferDevice,
                      apuBuffers.size() * sizeof(apuBuffers[0]));
            lynMemcpy(apuBufferDevice, apuBuffers.data(),
                      apuBuffers.size() * sizeof(apuBuffers[0]),
                      ClientToServer);

            CHECK_ERR(lynRecordEvent(apuStream, apuEvent));
            CHECK_ERR(lynStreamWaitEvent(osdStream, apuEvent));

            PlateRecogPostProcessInfo_t postInfo;
            postInfo.imgH = vdecOutInfo.height;
            postInfo.imgW = vdecOutInfo.width;
            postInfo.targetNum = apuBuffers.size();
            postInfo.boxesInfo = detectionFrame.deviceBoxInfo;
            postInfo.output_tensor = apuBufferDevice;

            lynPluginRunAsync(osdStream, postPlugin, "carPlateRecogPostProcess", &postInfo,
                              sizeof(PlateRecogPostProcessInfo_t));

            lynDrawBoxAndTextPara para;
            para.imgData = detectionFrame.frame->data;
            para.imgFmt = LYN_PIX_FMT_NV12;
            para.imgW = vdecOutInfo.width;
            para.imgH = vdecOutInfo.height;
            para.boxesInfo = detectionFrame.deviceBoxInfo;
            para.boxColor = DRAW_COLOR_BLUE;
            para.boxThick = DRAW_THICK_2;
            para.fontSize = FONT_SIZE_24;
            para.fontColor = DRAW_COLOR_BLUE;
            CHECK_ERR(lynPluginRunAsync(osdStream, osdPlugin,
                                        "lynDrawBoxAndText", &para,
                                        sizeof(para)));

            // 释放apuBuffer和ipeBuffer的内存
            addAsyncCallback(osdStream, [apuBuffers, &apuBufferPool]() {
                for (auto apu : apuBuffers) {
                    apuBufferPool.Push(apu);
                }
            });
            addAsyncCallback(osdStream, [ipeBuffers, &oIpeOutMemPool]() {
                for (auto ipeBuffer : ipeBuffers) {
                    oIpeOutMemPool.PutBatchMem(ipeBuffer);
                }
            });
            addAsyncCallback(osdStream,
                             [apuBufferDevice]() { lynFree(apuBufferDevice); });
        }
        addAsyncCallback(osdStream, [this, detectionFrame]() {
            recognizeFrameQueue.put(detectionFrame.frame);
        });
        addAsyncCallback(osdStream, [detectionFrame]() {
            delete detectionFrame.hostBoxInfo;
            lynFree(detectionFrame.deviceBoxInfo);
        });
    }

    CHECK_ERR(lynSynchronizeStream(ipeStream));
    CHECK_ERR(lynSynchronizeStream(apuStream));
    CHECK_ERR(lynSynchronizeStream(osdStream));
    CHECK_ERR(lynDestroyEvent(ipeEvent));
    CHECK_ERR(lynDestroyEvent(apuEvent));
    CHECK_ERR(lynDestroyStream(ipeStream));
    CHECK_ERR(lynDestroyStream(apuStream));
    CHECK_ERR(lynDestroyStream(osdStream));
    carPlateRecognition.UnLoadModel();
}

/**
 * @brief 根据推理和后处理结果，显示或者保存视频
 *
 */
void PlateRecognition::encodeProcess() {
    CHECK_ERR(lynSetCurrentContext(*context));
    
    bool bEos = false;
    while (!bEos) {
        lynFrame_t *frame;
        if (!recognizeFrameQueue.take(frame, 60)) continue;
        bEos = frame->eos;
        if (ArgsParse::argShowType == ShowType::SaveFile) {
            videoEncoder.EncodeImage(frame);
        } else if (ArgsParse::argShowType == ShowType::DirectShow) {
            m_opencvQueue.put(frame);
        }
    }
    if (ArgsParse::argShowType == ShowType::SaveFile) {
        videoEncoder.UnInit();
    }
}
#endif
