/**
 * @file Detector.cpp
 * @brief 识别器的实现 通过输入模型路径实例化识别器
 *        识别器通过导入模型并在OPENVINO上进行部署，分别对传入
 *        影像流进行预处理、异步推理、后处理等过程，最终得到检
 *        测到的对象的预测标签、预测位置、置信度等数据
 *        只开放推理、以及绘制Bounding Box的接口
 * @author Rosen (1018477962@qq.com)
 * @version 1.0
 * @date 2021-09-26
 * 
 * @copyright Copyright (c) 2021 by Rosen.
 */
#include "Detector.h"
#include "Parameter.h"

using namespace cv;

/**
 * @brief Construct a new Detector::Detector object.
 *        Loads the IR model, configures input/output precision and layout,
 *        and compiles the network for CPU execution.
 *
 * @param model_path path to the OpenVINO IR (.xml) model file
 */
Detector::Detector(const std::string &model_path)
{
    // Detection thresholds and the class count come from the global parameters.
    _confThres = detectParam.ConfThres;
    _nmsThres = detectParam.NMSThres;
    _itemSize = detectParam.ItemSize;
    // Random colors used later when drawing bounding boxes.
    _colors = randColor(3);

    Core engine;
    // Read the IR (.xml/.bin) network description.
    CNNNetwork network = engine.ReadNetwork(model_path);
    network.setBatchSize(1);

    // Configure the (single) input: FP32 precision, NCHW layout.
    InputsDataMap inputs(network.getInputsInfo());
    auto &firstInput = *inputs.begin();
    _inputName = firstInput.first;
    firstInput.second->setPrecision(Precision::FP32);
    firstInput.second->getInputData()->setLayout(Layout::NCHW);
    // Re-apply the shapes declared in the model (no-op reshape).
    network.reshape(network.getInputShapes());

    // Every output layer produces FP32 data.
    _outputInfo = OutputsDataMap(network.getOutputsInfo());
    for (auto &out : _outputInfo)
        out.second->setPrecision(Precision::FP32);

    // Compile the network for the CPU plugin.
    _network = engine.LoadNetwork(network, "CPU");
}

/**
 * @brief 预处理
 * 
 * @param frame 输入图像
 * @param size  resize尺寸
 */
void Detector::_preProcess(const Mat &frame, const Size &size)
{
    assert(!frame.empty());
    resize(frame, this->_copeFrame, size);
    /**
     * @brief OpenCv默认读取图片的储存格式为BGR格式，但是训练时采用
     *        的是RGB格式，所以在这里需要进行转换，再推理
     */
    cvtColor(this->_copeFrame, this->_copeFrame, COLOR_BGR2RGB);
    const int &imgWidth = size.width;
    const int &imgHeight = size.height;
    const int &channels = 3;
    // 创建推理需求指针
    this->_inferRequest = this->_network.CreateInferRequestPtr();
    Blob::Ptr frameBlob = this->_inferRequest->GetBlob(this->_inputName);
    InferenceEngine::LockedMemory<void> blobMapped = InferenceEngine::as<InferenceEngine::MemoryBlob>(frameBlob)->wmap();
    float *blobData = blobMapped.as<float *>();
    // 对图像全通道进行归一化
    for (int i = 0; i < imgWidth; i++)
    {
        for (int j = 0; j < imgHeight; j++)
        {
            for (int k = 0; k < channels; k++)
                blobData[imgWidth * imgHeight * k + i * imgHeight + j] = float(this->_copeFrame.at<Vec3b>(i, j)[k]) / 255.0f;
        }
    }
}

/**
 * @brief Inference entry point (public interface).
 *
 * @param frame input image
 * @param size  network input size
 * @param depth depth image aligned with the frame (may be empty)
 * @return std::vector<Object> detected objects
 */
std::vector<Object> Detector::inference(const Mat &frame, const Size &size, Mat &depth)
{
    _frame = frame;
    _depth = depth;
    // Prepare the input blob (resize, BGR->RGB, normalize).
    _preProcess(frame, size);
    // Run the network asynchronously and block until the result is ready.
    // (Synchronous alternative: _inferRequest->Infer();)
    _inferRequest->StartAsync();
    _inferRequest->Wait(IInferRequest::WaitMode::RESULT_READY);
    // Decode the raw outputs into the final detection list.
    _objects = _postProcess();
    return _objects;
}

/**
 * @brief Post-processing: decode every output feature map, run
 *        non-maximum suppression and build the final object list.
 *
 * @return std::vector<Object> detected objects (confidence, label, box,
 *         center point and depth at the center; depth is 0 when no depth
 *         image is available)
 */
std::vector<Object> Detector::_postProcess()
{
    this->_originRect.clear();
    this->_originRectConf.clear();
    this->_originTarget.clear();
    std::vector<Object> objects;
    // Decode each output layer with its matching anchor grid size.
    int index = 0;
    for (auto &output : this->_outputInfo)
    {
        int &anchorSize = detectParam.AnchorSize[index];
        Blob::Ptr blob = this->_inferRequest->GetBlob(output.first);
        this->_boxProcess(blob, anchorSize);
        ++index;
    }
    // Non-maximum suppression keeps the best box per object.
    std::vector<int> final_Id;
    dnn::NMSBoxes(this->_originRect, this->_originRectConf, this->_confThres, this->_nmsThres, final_Id);
    // Collect the surviving detections.
    for (size_t i = 0; i < final_Id.size(); ++i)
    {
        float &conf = this->_originRectConf[final_Id[i]];
        std::string &target = this->_originTarget[final_Id[i]];
        Rect boundingBox = this->_originRect[final_Id[i]];

        // Map the box from network coordinates back onto the original frame.
        Rect mappingBox = this->_boxMapping(boundingBox);

        // Box center (float division avoids integer truncation of the half-size).
        float x = mappingBox.x + mappingBox.width / 2.0f;
        float y = mappingBox.y + mappingBox.height / 2.0f;
        cv::Point2f center = cv::Point2f(x, y);

        // Sample the depth image at the center. BUG FIX: the original
        // indexed _depth with unclamped float coordinates — the mapped
        // center can fall outside the depth image, reading out of range.
        uint16_t depth = 0;
        if (!this->_depth.empty())
        {
            int col = cvRound(x);
            int row = cvRound(y);
            if (col < 0) col = 0;
            if (row < 0) row = 0;
            if (col >= this->_depth.cols) col = this->_depth.cols - 1;
            if (row >= this->_depth.rows) row = this->_depth.rows - 1;
            depth = this->_depth.at<uint16_t>(row, col);
        }
        objects.push_back(Object{conf, target, mappingBox, center, depth});
    }
    return objects;
}

/**
 * @brief Decode one output feature map and keep the boxes whose
 *        confidence passes the threshold.
 *
 * @param blob    raw network output blob for this scale
 * @param netGrid grid side length (feature-map size) of this output
 */
void Detector::_boxProcess(const Blob::Ptr &blob, const int &netGrid)
{
    std::vector<int> anchors = this->_getAnchors(netGrid);
    LockedMemory<const void> blobMaped = as<MemoryBlob>(blob)->rmap();
    // Read the output buffer as floats.
    const float *outputBlob = blobMaped.as<float *>();
    /**
     * @note Each grid cell stores (x, y, w, h, objectness, class scores...),
     *       so itemSize = number of detectable classes (_itemSize) + 5.
     */
    size_t itemSize = this->_itemSize + 5;
    size_t anchorTypeNum = 3;
    for (size_t n = 0; n < anchorTypeNum; n++)
        for (int i = 0; i < netGrid; i++)
            for (int j = 0; j < netGrid; j++)
            {
                // Offset of this cell's record inside the flat output buffer.
                const size_t base = n * netGrid * netGrid * itemSize + i * netGrid * itemSize + j * itemSize;

                // Objectness score. Check it FIRST: the original read
                // x/y/w/h before this early-exit, contradicting its own
                // "skip to save computation" intent.
                double boxProb = sigmoid(outputBlob[base + 4]);
                if (boxProb < this->_confThres)
                    continue;

                // Pick the highest class score and remember its index.
                double maxProb = 0.f;
                int maxIndex = 0;
                for (size_t k = 5; k < itemSize; k++)
                {
                    double itemProb = sigmoid(outputBlob[base + k]);
                    if (itemProb > maxProb)
                    {
                        maxProb = itemProb;
                        maxIndex = k - 5;
                    }
                }
                // Final confidence = objectness * best class probability.
                double conf = boxProb * maxProb;
                // Second filtering pass on the combined confidence.
                if (conf < this->_confThres)
                    continue;

                // YOLOv5 decoding: center relative to the grid cell, size
                // scaled by the matching anchor.
                // NOTE(review): 640 is the assumed network input side —
                // confirm it matches the size passed to inference().
                float x = (sigmoid(outputBlob[base + 0]) * 2 - 0.5 + j) * 640.0f / netGrid;
                float y = (sigmoid(outputBlob[base + 1]) * 2 - 0.5 + i) * 640.0f / netGrid;
                float sw = sigmoid(outputBlob[base + 2]) * 2;
                float sh = sigmoid(outputBlob[base + 3]) * 2;
                float w = sw * sw * anchors[n * 2];
                float h = sh * sh * anchors[n * 2 + 1];

                // Convert center coordinates to a top-left anchored rectangle.
                float topLeft_x = x - w / 2;
                float topLeft_y = y - h / 2;
                Rect resultBox = Rect(round(topLeft_x), round(topLeft_y), round(w), round(h));
                this->_originRect.push_back(resultBox);
                this->_originRectConf.push_back(conf);
                // Resolve the class label from its index.
                this->_originTarget.push_back(datasetParam.className[maxIndex]);
            }
}

/**
 * @brief Map a bounding box from network-input coordinates back onto the
 *        original frame, clipping it to the camera image bounds.
 *
 * @param copeBox box in resized (network input) coordinates
 * @return cv::Rect box in original-frame coordinates
 */
cv::Rect Detector::_boxMapping(const cv::Rect &copeBox)
{
    Size originSize = this->_frame.size();
    Size copeSize = this->_copeFrame.size();
    // Scale factors from the network input size back to the original frame.
    float widthRatio = (float)originSize.width / copeSize.width;
    float heightRatio = (float)originSize.height / copeSize.height;

    // Scale all four box values.
    float x = copeBox.x * widthRatio;
    float y = copeBox.y * heightRatio;
    float w = copeBox.width * widthRatio;
    float h = copeBox.height * heightRatio;

    // Clip the left/top edge. BUG FIX: the original clamped a negative
    // x/y to 0 but kept the full width/height, which *shifted* the box
    // instead of clipping it and let it spill past its true far edge.
    if (x < 0)
    {
        w += x;
        x = 0;
    }
    if (y < 0)
    {
        h += y;
        y = 0;
    }
    // Clip the right/bottom edge against the camera image size.
    if (x + w > cameraParam.imgWidth)
        w = cameraParam.imgWidth - x - 1;
    if (y + h > cameraParam.imgHeight)
        h = cameraParam.imgHeight - y - 1;
    // Guard against fully-clipped (degenerate) boxes.
    if (w < 0) w = 0;
    if (h < 0) h = 0;
    Rect2f mappingBox = Rect2f(x, y, w, h);
    return mappingBox;
}

/**
 * @brief Return the six anchor dimensions (w0,h0,w1,h1,w2,h2) matching a
 *        given feature-map grid size.
 *
 * @param netGrid grid side length of the output feature map
 * @return std::vector<int> six anchor values; all zeros when the grid
 *         size matches none of the configured sizes
 */
std::vector<int> Detector::_getAnchors(const int &netGrid)
{
    // Anchor sets for the three YOLOv5 scales.
    static const int a_1[6] = {10, 13, 16, 30, 33, 23};
    static const int a_2[6] = {30, 61, 62, 45, 59, 119};
    static const int a_3[6] = {116, 90, 156, 198, 373, 326};
    // NOTE: the anchor-set <-> grid-size pairing depends on the exported
    // model; check the storage order inside the xml when porting.
    // BUG FIX: the original created a 6-element vector of zeros and then
    // insert()ed six more values at the front, returning a 12-element
    // vector with six trailing zeros. assign() replaces the contents.
    std::vector<int> anchors(6, 0);
    if (netGrid == detectParam.AnchorSize[0])
    {
        anchors.assign(a_2, a_2 + 6);
    }
    else if (netGrid == detectParam.AnchorSize[1])
    {
        anchors.assign(a_3, a_3 + 6);
    }
    else if (netGrid == detectParam.AnchorSize[2])
    {
        anchors.assign(a_1, a_1 + 6);
    }
    return anchors;
}

/**
 * @brief Draw the bounding box, label and confidence of every detected
 *        object onto the given frame (and onto the stored depth image
 *        when one is present).
 *
 * @param frame image to draw on (modified in place)
 */
void Detector::drawBox(cv::Mat &frame)
{
    float fontSize = frame.size().width / 1000.f;
    // BUG FIX: the original integer division produced thickness 0 for
    // frames narrower than 800 px, which OpenCV drawing rejects.
    int thickness = frame.size().width / 800;
    if (thickness < 1)
        thickness = 1;
    // Maximum value of a 16-bit depth pixel — "white" on the depth map.
    const int depthWhite = 65535;
    // const& avoids copying each Object per iteration.
    for (const auto &object : this->_objects)
    {
        // Recover the class index to pick the matching color.
        int index = 0;
        for (size_t i = 0; i < datasetParam.className.size(); i++)
        {
            if (object.target == datasetParam.className[i])
                index = i;
        }
        Rect Box(object.rect.x, object.rect.y, object.rect.width, object.rect.height);
        // target + prob(%.2f)
        std::string text = object.target + " " + std::to_string(object.prob).substr(0, 4);
        // BUG FIX: only draw on the depth image when one was provided;
        // inference() explicitly allows an empty depth Mat.
        if (!this->_depth.empty())
        {
            rectangle(this->_depth, Box, cv::Scalar(depthWhite), thickness);
            putText(this->_depth, text, Point(Box.x, Box.y), FONT_HERSHEY_TRIPLEX, fontSize, cv::Scalar(depthWhite), thickness);
        }
        rectangle(frame, Box, this->_colors[index], thickness);
        putText(frame, text, Point(Box.x, Box.y), FONT_HERSHEY_TRIPLEX, fontSize, this->_colors[index], thickness);
    }
}

/**
 * @brief Random color generator.
 *
 * @param steps number of colors to generate
 * @return std::vector<Scalar> randomly generated colors
 */
std::vector<Scalar> randColor(const size_t &steps)
{
    // Seed OpenCV's RNG from the C library generator, itself seeded by time.
    srand(time(NULL));
    RNG rng(rand());
    std::vector<Scalar> palette;
    for (size_t n = 0; n < steps; ++n)
    {
        // Draw one 32-bit value and split it into three 8-bit channels.
        unsigned raw = (unsigned)rng;
        Scalar color(raw & 255, (raw >> 8) & 255, (raw >> 16) & 255);
        palette.push_back(color);
    }
    return palette;
}