/*
 * Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "fairv2.h"
#include "boost/filesystem.hpp"
#include "boost/algorithm/string.hpp"
#include "MxBase/Maths/FastMath.h"
#include "MxBase/PostProcessBases/ClassPostProcessBase.h"
#include "fstream"
#include "MxBase/Log/Log.h"
#include "MxBase/CV/ObjectDetection/Nms/Nms.h"
#include "MxTools/Proto/MxpiDataType.pb.h"
#include "MxTools/PluginToolkit/buffer/MxpiBufferManager.h"
#include <typeinfo>
#define USE_200DK
using namespace std;
using namespace MxBase;
using namespace MxTools;

namespace
{
    // Geometry of the decoded input video stream (kept non-const as in the
    // original design; only this translation unit can see them).
    uint32_t VIDEO_WIDTH = 1280;
    uint32_t VIDEO_HEIGHT = 720;
    // Geometry of the model input after resizing.
    constexpr float RESIZE_WIDTH = 1088.0;
    constexpr float RESIZE_HEIGHT = 608.0;
    // Minimum heat-map score for a detection to be kept for tracking.
    constexpr float CONF_THRES = 0.35;
    // YUV420SP layout: 3 bytes of data per 2 pixels (numerator / denominator).
    constexpr uint32_t YUV_BYTE_NU = 3;
    constexpr uint32_t YUV_BYTE_DE = 2;
    // Named small integers used as tensor/array indices (lint-friendly).
    constexpr uint32_t CONST_NUMBER_2 = 2;
    constexpr uint32_t CONST_NUMBER_3 = 3;
    constexpr uint32_t CONST_NUMBER_4 = 4;
    // NOTE(review): count_center is not referenced anywhere in this file.
    constexpr float count_center = 2.0;
    // No-op deleter: lets a shared_ptr wrap tensor memory without owning it.
    auto uint8Deleter = [](uint8_t *p) {};
}

// Constructs the tracker: initializes the MindX SDK runtime and creates the
// image processor and the inference model on the configured device.
// Failures are logged only; the original design does not abort construction.
FairMOT::FairMOT(const V2Param &v2Param)
{
    // Cache device/model configuration from the parameter struct.
    deviceId = v2Param.deviceId;
    modelPath = v2Param.modelPath;

    VideoDecodeConfig config;
    config.width = VIDEO_WIDTH;
    config.height = VIDEO_HEIGHT;

    // Global SDK initialization (once per process).
    const APP_ERROR initRet = MxInit();
    if (initRet != APP_ERR_OK)
    {
        LogError << "MxInit failed, ret=" << initRet << ".";
    }

    // Image processor bound to the target device.
    imageProcessorDptr = std::make_shared<MxBase::ImageProcessor>(deviceId);
    if (!imageProcessorDptr)
    {
        LogError << "imageProcessorDptr nullptr";
    }

    // Load the offline model onto the same device.
    modelDptr = std::make_shared<MxBase::Model>(modelPath, deviceId);
    if (!modelDptr)
    {
        LogError << "modelDptr nullptr";
    }
}

// Axis-aligned rectangle: top-left corner plus size, in pixel units.
// NOTE(review): not referenced anywhere in this file — presumably consumed by
// another translation unit; verify before removing.
struct object_rect
{
    int x;      // left edge (pixels)
    int y;      // top edge (pixels)
    int width;  // rectangle width (pixels)
    int height; // rectangle height (pixels)
};

// Runs model inference on one decoded frame.
// @param resizeImage  frame already resized to the model input geometry
// @param outputs      filled with the model output tensors on success
// @return APP_ERR_OK on success, an error code otherwise.
APP_ERROR FairMOT::Infer(Image &resizeImage, std::vector<Tensor> &outputs)
{
    // Wrap the image as a tensor and move it to the inference device.
    Tensor tensorImg = resizeImage.ConvertToTensor();
    APP_ERROR ret = tensorImg.ToDevice(deviceId);
    if (ret != APP_ERR_OK)
    {
        LogError << "ToDevice failed, ret=" << ret;
        return ret;
    }

    // Run inference.
    std::vector<Tensor> inputs = {tensorImg};
    outputs = modelDptr->Infer(inputs);
    // Fail fast on an empty result: downstream post-processing indexes
    // outputs[0..3], so passing an empty vector on would be undefined behavior.
    if (outputs.empty())
    {
        LogError << "Model inference returned no output tensors.";
        return APP_ERR_COMM_FAILURE;
    }
    LogInfo << "Outputs len=" << outputs.size();

// On non-200DK targets the results live on the device; copy them back.
#ifndef USE_200DK
    for (size_t i = 0; i < outputs.size(); i++)
    {
        outputs[i].ToHost();
    }
#endif

    return APP_ERR_OK;
}

APP_ERROR FairMOT::PostProcessone(std::vector<Tensor> &outputs, std::vector<std::vector<ObjectInfo>> &objectInfos)
{
    LogInfo << "FairmotPostProcess::Process start";
    // Compute objects
    std::vector<std::vector<float>> ID_feature;
    vector<ResizedImageInfo> resizedImageInfos;
    int flag = -1;

    auto inputs = outputs;
    // Judge whether the input from tensorinfer is empty
    if (outputs.size() == 0 && flag == -1)
    {
        flag = 0;
    }
    auto shape = outputs[0].GetShape();
    if (shape.size() == 0 && flag == -1)
    {
        flag = 0;
    }
    // @param featLayerData  Vector of 4 output feature data
    std::vector<std::shared_ptr<void>> featLayerData = {};
    std::vector<std::vector<size_t>> featLayerShapes = {};
    for (uint32_t j = 0; j < outputs.size(); j++)
    {
        auto dataPtr = (uint8_t *)outputs[j].GetData();
        std::shared_ptr<void> tmpPointer;
        tmpPointer.reset(dataPtr, uint8Deleter);
        // featLayerData stores the head address of 4 output feature data
        featLayerData.push_back(tmpPointer);
        shape = outputs[j].GetShape();
        std::vector<size_t> featLayerShape = {};
        size_t temp = 1;
        featLayerShape.push_back(temp);
        for (auto s : shape)
        {
            featLayerShape.push_back((size_t)s);
        }
        // featLayerShapes stores the shapes of 4 output feature data
        featLayerShapes.push_back(featLayerShape);
    }
    // tensors[0] matchs hm
    // tensors[1] matchs wh
    // tensors[2] matchs reg
    // tensors[3] matchs id_feature
    // Get the head address of hm
    std::shared_ptr<void> hm_addr = featLayerData[0];
    // Create a vector container XY to store coordinate information
    std::vector<std::vector<int>> XY;
    for (uint32_t i = 0; i < featLayerShapes[0][1] * featLayerShapes[0][CONST_NUMBER_2]; i++)
    {
        // Compared with the threshold CONF_THRES to obtain coordinate information
        if (static_cast<float *>(hm_addr.get())[i] > CONF_THRES)
        {
            std::vector<int> xy;
            int x = i / featLayerShapes[0][CONST_NUMBER_2];
            int y = i - featLayerShapes[0][CONST_NUMBER_2] * x;
            xy.push_back(x);
            xy.push_back(y);
            XY.push_back(xy);
        }
    }
    // Output 1 when no pedestrian is detected
    if (XY.size() == 0 && flag == -1)
    {
        flag = 1;
    }
    // Create a vector container scores to store the information in the corresponding coordinate XY in hm
    std::vector<float> scores;
    for (uint32_t i = 0; i < XY.size(); i++)
    {
        scores.push_back(static_cast<float *>(hm_addr.get())[XY[i][0] * featLayerShapes[0][CONST_NUMBER_2] + XY[i][1]]);
    }
    // Get the head address of wh and reg
    std::shared_ptr<void> wh_addr = featLayerData[1];
    std::shared_ptr<void> reg_addr = featLayerData[CONST_NUMBER_2];
    // WH: n*4
    std::vector<std::vector<float>> WH;
    for (int i = 0; i < XY.size(); i++)
    {
        std::vector<float> wh;
        // featLayerShapes[1][3]: the third dimension of tensor[2]
        for (int j = 0; j < featLayerShapes[1][CONST_NUMBER_3]; j++)
        {
            wh.push_back(static_cast<float *>(wh_addr.get())[(XY[i][0] * featLayerShapes[0][CONST_NUMBER_2] + XY[i][1]) * featLayerShapes[1][CONST_NUMBER_3] + j]);
        }
        WH.push_back(wh);
    }
    // REG: n*2
    std::vector<std::vector<float>> REG;
    for (int i = 0; i < XY.size(); i++)
    {
        std::vector<float> reg;
        // featLayerShapes[2][3]: the third dimension of tensor[1]
        for (int j = 0; j < featLayerShapes[CONST_NUMBER_2][CONST_NUMBER_3]; j++)
        {
            reg.push_back(static_cast<float *>(reg_addr.get())[(XY[i][0] * featLayerShapes[0][CONST_NUMBER_2] + XY[i][1]) * featLayerShapes[CONST_NUMBER_2][CONST_NUMBER_3] + j]);
        }
        REG.push_back(reg);
    }
    // ID_feature: n*128  no use
    std::shared_ptr<void> id_addr = featLayerData[CONST_NUMBER_3];
    for (int i = 0; i < XY.size(); i++)
    {
        // featLayerShapes[3][3]: the third dimension of tensor[0]
        std::vector<float> id_feature;
        for (int j = 0; j < featLayerShapes[CONST_NUMBER_3][CONST_NUMBER_3]; j++)
        {
            id_feature.push_back(static_cast<float *>(id_addr.get())[(XY[i][0] * featLayerShapes[0][CONST_NUMBER_2] + XY[i][1]) * featLayerShapes[CONST_NUMBER_3][CONST_NUMBER_3] + j]);
        }
        ID_feature.push_back(id_feature);
    }
    // XY_f changes the data in XY from int to float
    std::vector<std::vector<float>> XY_f;
    for (int i = 0; i < XY.size(); i++)
    {
        std::vector<float> xy_f;
        xy_f.push_back(XY[i][0]);
        xy_f.push_back(XY[i][1]);
        XY_f.push_back(xy_f);
    }
    for (int i = 0; i < XY_f.size(); i++)
    {
        XY_f[i][1] = XY_f[i][1] + REG[i][0];
        XY_f[i][0] = XY_f[i][0] + REG[i][1];
    }
    // dets: n*6
    std::vector<std::vector<float>> dets;
    for (int i = 0; i < XY.size(); i++)
    {
        std::vector<float> det;
        det.push_back(XY_f[i][1] - WH[i][0]);
        det.push_back(XY_f[i][0] - WH[i][1]);
        det.push_back(XY_f[i][1] + WH[i][CONST_NUMBER_2]);
        det.push_back(XY_f[i][0] + WH[i][CONST_NUMBER_3]);
        det.push_back(scores[i]);
        det.push_back(0);
        dets.push_back(det);
    }
    // Width and height of initial video
    int width = 1280; // resizedImageInfos[0].widthOriginal
    int height = 720; // heightOriginal
    // Scaled width and height
    int inp_height = 608; // resizedImageInfos[0].heightResize
    int inp_width = 1088; // resizedImageInfos[0].widthResize
    // Create a vector container center to store the center point of the original picture
    std::vector<float> c;
    int half = 2;
    c.push_back(width / half);
    c.push_back(height / half);
    std::vector<float> center(c);
    // max_dets
    float scale = 0;
    scale = std::max(float(inp_width) / float(inp_height) * height, float(width)) * 1.0;
    std::vector<float> Scale;
    Scale.push_back(scale);
    Scale.push_back(scale);
    // output_size
    int down_ratio = 4;
    int h = inp_height / down_ratio;
    int w = inp_width / down_ratio;
    std::vector<int> output_size;
    output_size.push_back(w);
    output_size.push_back(h);
    int rot = 0;
    std::vector<float> shift(CONST_NUMBER_2, 0);
    int inv = 1;
    // Input of get_affine_transform used to calculate trans: center, scale, rot, output_size, shift, inv
    // get_affine_transform
    std::vector<float> scale_tmp(Scale);
    float src_w = scale_tmp[0];
    int dst_w = output_size[0];
    int dst_h = output_size[1];

    float pi = 3.1415926;
    int dir = 180;
    float rot_rad = pi * rot / dir;
    const float aim = -0.5;
    const float zaim = 0.5;
    std::vector<float> src_point;
    src_point.push_back(0);
    src_point.push_back(src_w * (aim));

    float sn = sin(rot_rad);
    float cs = cos(rot_rad);
    // get_dir
    // src_dir and dst_dir are used to calculate trans
    std::vector<float> src_dir(CONST_NUMBER_2, 0);
    src_dir[0] = src_point[0] * cs - src_point[1] * sn;
    src_dir[1] = src_point[0] * sn + src_point[1] * cs;
    std::vector<float> dst_dir;
    dst_dir.push_back(0);
    dst_dir.push_back(dst_w * (aim));
    // src and dst are calculated to calculate trans
    float src[3][2] = {0};
    float dst[3][2] = {0};
    src[0][0] = center[0] + scale_tmp[0] * shift[0];
    src[0][1] = center[1] + scale_tmp[1] * shift[1];
    src[1][0] = center[0] + src_dir[0] + scale_tmp[0] * shift[0];
    src[1][1] = center[1] + src_dir[1] + scale_tmp[1] * shift[1];
    dst[0][0] = dst_w * zaim;
    dst[0][1] = dst_h * zaim;
    dst[1][0] = dst_w * zaim + dst_dir[0];
    dst[1][1] = dst_h * zaim + dst_dir[1];
    // get_3rd_point
    std::vector<float> direct;
    direct.push_back(src[0][0] - src[1][0]);
    direct.push_back(src[0][1] - src[1][1]);
    src[CONST_NUMBER_2][0] = src[1][0] - direct[1];
    src[CONST_NUMBER_2][1] = src[1][1] + direct[0];
    // get_3rd_point
    direct[0] = dst[0][0] - dst[1][0];
    direct[1] = dst[0][1] - dst[1][1];
    dst[CONST_NUMBER_2][0] = dst[1][0] - direct[1];
    dst[CONST_NUMBER_2][1] = dst[1][1] + direct[0];

    // change data in src and dst to point2f format
    cv::Point2f SRC[3];
    cv::Point2f DST[3];
    SRC[0] = cv::Point2f(src[0][0], src[0][1]);
    SRC[1] = cv::Point2f(src[1][0], src[1][1]);
    SRC[CONST_NUMBER_2] = cv::Point2f(src[CONST_NUMBER_2][0], src[CONST_NUMBER_2][1]);
    DST[0] = cv::Point2f(dst[0][0], dst[0][1]);
    DST[1] = cv::Point2f(dst[1][0], dst[1][1]);
    DST[CONST_NUMBER_2] = cv::Point2f(dst[CONST_NUMBER_2][0], dst[CONST_NUMBER_2][1]);
    // Calculate trans
    cv::Mat trans(CONST_NUMBER_2, CONST_NUMBER_3, CV_64FC1);
    if (inv == 1)
    {
        trans = cv::getAffineTransform(DST, SRC);
    }
    else
    {
        trans = cv::getAffineTransform(SRC, DST);
    }
    // Get data from mat type trans to array Trans
    float Trans[CONST_NUMBER_2][CONST_NUMBER_3];
    for (int i = 0; i < CONST_NUMBER_2; i++)
    {
        for (int j = 0; j < CONST_NUMBER_3; j++)
        {
            Trans[i][j] = trans.at<double>(i, j);
        }
    }
    // affine_transform
    // Calculate the coordinates of bbox_top_left x, y
    for (int i = 0; i < dets.size(); i++)
    {
        float new_pt[CONST_NUMBER_3] = {dets[i][0], dets[i][1], 1};
        dets[i][0] = Trans[0][0] * new_pt[0] + Trans[0][1] * new_pt[1] + Trans[0][CONST_NUMBER_2] * new_pt[CONST_NUMBER_2];
        dets[i][1] = Trans[1][0] * new_pt[0] + Trans[1][1] * new_pt[1] + Trans[1][CONST_NUMBER_2] * new_pt[CONST_NUMBER_2];
    }
    // Calculate the coordinates of bbox_bottom_right x, y
    for (int i = 0; i < dets.size(); i++)
    {
        float new_pt[CONST_NUMBER_3] = {dets[i][CONST_NUMBER_2], dets[i][CONST_NUMBER_3], 1};
        dets[i][CONST_NUMBER_2] = Trans[0][0] * new_pt[0] + Trans[0][1] * new_pt[1] + Trans[0][CONST_NUMBER_2] * new_pt[CONST_NUMBER_2];
        dets[i][CONST_NUMBER_3] = Trans[1][0] * new_pt[0] + Trans[1][1] * new_pt[1] + Trans[1][CONST_NUMBER_2] * new_pt[CONST_NUMBER_2];
    }
    // output
    std::vector<ObjectInfo> objectInfo;
    for (int i = 0; i < dets.size(); i++)
    {
        ObjectInfo objInfo;
        objInfo.classId = 0;
        objInfo.confidence = dets[i][CONST_NUMBER_4];
        objInfo.className = " ";
        // Normalization
        objInfo.x0 = dets[i][0] / VIDEO_WIDTH;
        objInfo.y0 = dets[i][1] / VIDEO_HEIGHT;
        objInfo.x1 = dets[i][CONST_NUMBER_2] / VIDEO_WIDTH;
        objInfo.y1 = dets[i][CONST_NUMBER_3] / VIDEO_HEIGHT;
        objectInfo.push_back(objInfo);
    }
    objectInfos.push_back(objectInfo);
    // Output 2 when pedestrian is detected
    if (flag == -1)
    {
        flag = CONST_NUMBER_2;
    }

    if (flag == 1)
    {
        // flag: 1 represents no pedestrians are detected
        LogInfo << "No pedestrians are detected";
        return APP_ERR_OK;
    }
    else if (flag == CONST_NUMBER_2)
    {
        //  flag: 2 represents pedestrian detected
        int imgWidth = 1280;
        int imgHeight = 720;
        // only for keepAspectRatioScaling
        float ratio = 608.0 / 720.0;
        for (auto objInfo = objectInfos[0].begin(); objInfo != objectInfos[0].end();)
        {
            objInfo->x0 *= RESIZE_WIDTH / ratio;
            objInfo->y0 *= RESIZE_HEIGHT / ratio;
            objInfo->x1 *= RESIZE_WIDTH / ratio;
            objInfo->y1 *= RESIZE_HEIGHT / ratio;
            if (objInfo->x0 > imgWidth || objInfo->y0 > imgHeight)
            {
                objInfo = objectInfos[0].erase(objInfo);
                continue;
            }
            if (objInfo->x1 > imgWidth)
            {
                objInfo->x1 = imgWidth;
            }
            if (objInfo->y1 > imgHeight)
            {
                objInfo->y1 = imgHeight;
            }
            ++objInfo;
        }
        for (size_t j = 0; j < objectInfos[0].size(); j++)
        {
            LogInfo << "*****objectInfo-" << j;
            LogInfo << "x0 is " << objectInfos[0][j].x0;
            LogInfo << "y0 is " << objectInfos[0][j].y0;
            LogInfo << "x1 is " << objectInfos[0][j].x1;
            LogInfo << "y1 is " << objectInfos[0][j].y1;
            LogInfo << "confidence is " << objectInfos[0][j].confidence;
            LogInfo << "classId is " << objectInfos[0][j].classId;
            LogInfo << "className is " << objectInfos[0][j].className;
        }
        return APP_ERR_OK;
    }
}

// JPEG-encodes a cv::Mat and decodes it back into an MxBase::Image via the
// given image processor (used to turn an annotated frame into an SDK image).
// @param inputMat            BGR/RGB frame to convert
// @param outputImage         receives the decoded image
// @param imageProcessorDptr  processor that performs the JPEG decode
// @return APP_ERR_OK on success, an error code otherwise.
// Fixes vs original: imencode's return value is now checked, the unchecked
// malloc (memcpy into nullptr on failure) is replaced with new[]/delete[],
// and the redundant intermediate std::string copy is removed.
APP_ERROR ConvertMatToImage(const cv::Mat &inputMat, MxBase::Image &outputImage, std::shared_ptr<MxBase::ImageProcessor> imageProcessorDptr)
{
    if (imageProcessorDptr == nullptr)
    {
        LogError << "imageProcessorDptr nullptr";
        return APP_ERR_COMM_FAILURE;
    }

    std::vector<uint8_t> buffer;
    if (!cv::imencode(".jpg", inputMat, buffer) || buffer.empty())
    {
        LogError << "cv::imencode failed";
        return APP_ERR_COMM_FAILURE;
    }

    // Copy the encoded bytes into a heap buffer owned by the shared_ptr that
    // Decode expects; delete[] releases it when the last reference drops.
    const uint32_t dataSize = static_cast<uint32_t>(buffer.size());
    std::shared_ptr<uint8_t> dataPtr(new uint8_t[dataSize], [](uint8_t *ptr)
                                     { delete[] ptr; });
    memcpy(dataPtr.get(), buffer.data(), dataSize);

    APP_ERROR ret = imageProcessorDptr->Decode(dataPtr, dataSize, outputImage);
    if (ret != APP_ERR_OK)
    {
        LogError << "Get image failed, ret=" << ret;
        return ret;
    }

    return APP_ERR_OK;
}

// Draws the tracked boxes and class ids onto the decoded frame, then replaces
// decodedframeImage.image with the re-encoded annotated image.
// @param objInfos          detections in pixel coordinates of the frame
// @param decodedframeImage frame holder; its image is updated in place
// @return APP_ERR_OK on success, an error code otherwise.
// Fix vs original: the return value of ConvertMatToImage is now propagated
// instead of being silently ignored; unused locals removed.
APP_ERROR FairMOT::PostProcesstwo(std::vector<MxBase::ObjectInfo> &objInfos, FrameImage &decodedframeImage)
{
    // Dedicated processor for re-encoding the annotated frame.
    int deviceID = 0;
    std::shared_ptr<MxBase::ImageProcessor> imageProcessorDptrNEW = std::make_shared<MxBase::ImageProcessor>(deviceID);
#ifndef USE_200DK
    // On non-200DK targets the decoded frame lives on the device; pull it back.
    decodedframeImage.image.ToHost();
#endif
    // Reinterpret the raw NV21 buffer (height * 3/2 rows of width bytes) as a
    // single-channel Mat.
    auto framePointer = decodedframeImage.image.GetData();
    cv::Mat imgYuv = cv::Mat(cv::Size(decodedframeImage.image.GetSize().width,
                                      decodedframeImage.image.GetSize().height * YUV_BYTE_NU / YUV_BYTE_DE),
                             CV_8UC1, framePointer.get());
    cv::Mat imgBgr;
    // NOTE(review): COLOR_YUV2RGB_NV21 produces RGB although the Mat is named
    // imgBgr — confirm the intended channel order for downstream consumers.
    cv::cvtColor(imgYuv, imgBgr, cv::COLOR_YUV2RGB_NV21);

    // Drawing parameters (loop-invariant, hoisted out of the loop).
    const cv::Scalar color = cv::Scalar(100, 100, 255);
    const uint32_t thickness = 2;
    const uint32_t xOffset = 10;
    const uint32_t yOffset = 10;
    const uint32_t lineType = 8;
    const float fontScale = 1.0;
    for (uint32_t i = 0; i < objInfos.size(); i++)
    {
        // Label with the class id, then outline the bounding box.
        cv::putText(imgBgr, std::to_string((int)objInfos[i].classId),
                    cv::Point(objInfos[i].x0 + xOffset, objInfos[i].y0 + yOffset),
                    cv::FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, lineType);
        cv::rectangle(imgBgr,
                      cv::Rect(objInfos[i].x0, objInfos[i].y0,
                               objInfos[i].x1 - objInfos[i].x0, objInfos[i].y1 - objInfos[i].y0),
                      color, thickness);
    }

    // Re-encode the annotated Mat back into an MxBase image.
    MxBase::Image decodedImage;
    APP_ERROR ret = ConvertMatToImage(imgBgr, decodedImage, imageProcessorDptrNEW);
    if (ret != APP_ERR_OK)
    {
        LogError << "ConvertMatToImage failed, ret=" << ret;
        return ret;
    }
    decodedframeImage.image = decodedImage;
    return APP_ERR_OK;
}
// Decodes the image file at imgPath into a YUV420SP MxBase::Image.
// @param imgPath       path to the encoded image file
// @param decodedImage  receives the decoded image on success
// @return APP_ERR_OK on success, the Decode error code otherwise.
APP_ERROR FairMOT::ReadImage(const std::string &imgPath, MxBase::Image &decodedImage)
{
    const APP_ERROR decodeRet = imageProcessorDptr->Decode(imgPath, decodedImage, ImageFormat::YUV_SP_420);
    if (decodeRet != APP_ERR_OK)
    {
        LogError << "Decode failed, ret=" << decodeRet;
    }
    // decodeRet is APP_ERR_OK on success, so a single return path suffices.
    return decodeRet;
}