#include "SampleDetector.hpp"

#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

#include <glog/logging.h>
#include <openvino/openvino.hpp>

#include "json.h"
#define JSON_ALERT_FLAG_KEY ("is_alert")
#define JSON_ALERT_FLAG_TRUE 1
#define JSON_ALERT_FLAG_FALSE 0

#define USE_JSONCPP 



template <typename T>
static inline bool SortScorePairDescend(const std::pair<float, T> &pair1,
                                        const std::pair<float, T> &pair2)
{
    return pair1.first > pair2.first;
}

inline void GetMaxScoreIndex(const std::vector<float> &scores, const float threshold, const int top_k,
                             std::vector<std::pair<float, int>> &score_index_vec)
{
    CV_DbgAssert(score_index_vec.empty());
    // Generate index score pairs.
    for (size_t i = 0; i < scores.size(); ++i)
    {
        if (scores[i] > threshold)
        {
            score_index_vec.push_back(std::make_pair(scores[i], i));
        }
    }

    // Sort the score pair according to the scores in descending order
    std::stable_sort(score_index_vec.begin(), score_index_vec.end(),
                     SortScorePairDescend<int>);

    // Keep top_k scores if needed.
    if (top_k > 0 && top_k < (int)score_index_vec.size())
    {
        score_index_vec.resize(top_k);
    }
}
template <typename T>
static inline float rectOverlap(const T &a, const T &b)
{
    return 1.f - static_cast<float>(jaccardDistance(a, b));
}

template <typename BoxType>
inline void NMSBoxes(const std::vector<BoxType> &bboxes,
                     const std::vector<float> &scores, const float score_threshold,
                     const float nms_threshold, std::vector<int> &indices,
                     float (*computeOverlap)(const BoxType &, const BoxType &),
                     int limit = std::numeric_limits<int>::max())
{
    const float eta = 1.0f;
    const int top_k = 0;
    CV_Assert(bboxes.size() == scores.size());

    // Get top_k scores (with corresponding indices).
    std::vector<std::pair<float, int>> score_index_vec;
    GetMaxScoreIndex(scores, score_threshold, top_k, score_index_vec);

    // Do nms.
    float adaptive_threshold = nms_threshold;
    indices.clear();
    for (size_t i = 0; i < score_index_vec.size(); ++i)
    {
        const int idx = score_index_vec[i].second;
        bool keep = true;
        for (int k = 0; k < (int)indices.size() && keep; ++k)
        {
            const int kept_idx = indices[k];
            float overlap = computeOverlap(bboxes[idx], bboxes[kept_idx]);
            keep = overlap <= adaptive_threshold;
        }
        if (keep)
        {
            indices.push_back(idx);
            if (indices.size() >= limit)
            {
                break;
            }
        }
        if (keep && eta < 1 && adaptive_threshold > 0.5)
        {
            adaptive_threshold *= eta;
        }
    }
}

using namespace std;

const float SCORE_THRESHOLD = 0.2;
const float NMS_THRESHOLD = 0.4;
const float CONFIDENCE_THRESHOLD = 0.4;
vector<string>  namep_map={"electric_scooter","person","bicycle","others"};

// struct Detection
// {
//     int class_id;
//     float confidence;
//     cv::Rect box;


// };

struct Resize
{
    cv::Mat resized_image;
    int dw;
    int dh;
};

Resize resize_and_pad(cv::Mat &img, cv::Size new_shape)
{
    float width = img.cols;
    float height = img.rows;
    float r = float(new_shape.width / max(width, height));
    int new_unpadW = int(round(width * r));
    int new_unpadH = int(round(height * r));
    Resize resize;
    cv::resize(img, resize.resized_image, cv::Size(new_unpadW, new_unpadH), 0, 0, cv::INTER_AREA);

    resize.dw = new_shape.width - new_unpadW;
    resize.dh = new_shape.height - new_unpadH;
    cv::Scalar color = cv::Scalar(100, 100, 100);
    cv::copyMakeBorder(resize.resized_image, resize.resized_image, 0, resize.dh, 0, resize.dw, cv::BORDER_CONSTANT, color);

    return resize;
}

   SampleDetector::~SampleDetector(){

       if(jsonResult)
       {
    delete[] jsonResult;
       jsonResult=nullptr;
       }
   }



/**
 * @brief demo算法class类构造函数，可进行必要的初始化
 */
SampleDetector::SampleDetector()
{
    // 从默认的配置文件读取相关配置参数
    const char *configFile = "/usr/local/ev_sdk/config/algo_config.json";
    LOG(INFO) << "Parsing configuration file: " << configFile;
    std::ifstream confIfs(configFile);
    if (confIfs.is_open())
    {
        size_t len = getFileLen(confIfs);
        char *confStr = new char[len + 1];
        confIfs.read(confStr, len);
        confStr[len] = '\0';

    	LOG(INFO) << "Configs:"<<confStr;
        mConfig.ParseAndUpdateArgs(confStr);
        delete[] confStr;
        confIfs.close();
    }

}

/**
 * @brief demo自定义初始化函数，可进行必要的初始化
 */
STATUS SampleDetector::Init(string xml_path,float cof_threshold,float nms_area_threshold, string classesFile){
    _xml_path = xml_path;
    _cof_threshold = cof_threshold;
    _nms_area_threshold = nms_area_threshold;
    // Core ie;
    // auto cnnNetwork = ie.ReadNetwork(_xml_path); 
    // //输入设置
    // InputsDataMap inputInfo(cnnNetwork.getInputsInfo());
    // InputInfo::Ptr& input = inputInfo.begin()->second;
    // _input_name = inputInfo.begin()->first;
    // input->setPrecision(Precision::FP32);
    // input->getInputData()->setLayout(Layout::NCHW);
    // ICNNNetwork::InputShapes inputShapes = cnnNetwork.getInputShapes();
    // SizeVector& inSizeVector = inputShapes.begin()->second;
    // cnnNetwork.reshape(inputShapes);
    // //输出设置
    // _outputinfo = OutputsDataMap(cnnNetwork.getOutputsInfo());
    // for (auto &output : _outputinfo) {
    //     output.second->setPrecision(Precision::FP32);
    // }
    // //获取可执行网络
    // //_network =  ie.LoadNetwork(cnnNetwork, "GPU");
    // _network =  ie.LoadNetwork(cnnNetwork, "CPU");
    
    ifstream ifs(classesFile.c_str());
	string line;
	while (getline(ifs, line)) this->classes.push_back(line);

        // Step 1. Initialize OpenVINO Runtime core
    ov::Core core;
    // Step 2. Read a model
    std::shared_ptr<ov::Model> model = core.read_model(xml_path);
    // Step 4. Inizialize Preprocessing for the model
    ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
    // Specify input image format
    ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::BGR);
    // Specify preprocess pipeline to input image without resizing
    ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255., 255., 255.});
    //  Specify model's input layout
    ppp.input().model().set_layout("NCHW");
    // Specify output results format
    ppp.output().tensor().set_element_type(ov::element::f32);
    // Embed above steps in the graph
    model = ppp.build();
    compiled_model = core.compile_model(model, "CPU");
    return SampleDetector::SUCCESS_INIT;
}

string SampleDetector::operator()( cv::Mat &img)
{
    return "str";
}


/**
 * @brief demo自定义反初始化函数，可进行必要的资源释放
 */
STATUS SampleDetector::UnInit()
{
    // delete jsonResult;
    /**/
}

bool SampleDetector::parse_yolov5(const Blob::Ptr &blob,int net_grid,float cof_threshold, vector<Object>& detected_objects, const float ratio_h, const float ratio_w)
{
    vector<int> anchors = get_anchors(net_grid);
   LockedMemory<const void> blobMapped = as<MemoryBlob>(blob)->rmap();
   const float *output_blob = blobMapped.as<float *>();
   //80个类是85,一个类是6,n个类是n+5
   //int item_size = 6;
   int item_size = 5 + this->classes.size();
    size_t anchor_n = 3;
    for(int n=0;n<anchor_n;++n)
        for(int i=0;i<net_grid;++i)
            for(int j=0;j<net_grid;++j)
            {
                float box_prob = output_blob[n*net_grid*net_grid*item_size + i*net_grid*item_size + j *item_size+ 4];
                box_prob = sigmoid(box_prob);
                //框置信度不满足则整体置信度不满足
                if(box_prob < cof_threshold)
                    continue;
                
                //注意此处输出为中心点坐标,需要转化为角点坐标
                float x = output_blob[n*net_grid*net_grid*item_size + i*net_grid*item_size + j*item_size + 0];
                float y = output_blob[n*net_grid*net_grid*item_size + i*net_grid*item_size + j*item_size + 1];
                float w = output_blob[n*net_grid*net_grid*item_size + i*net_grid*item_size + j*item_size + 2];
                float h = output_blob[n*net_grid*net_grid*item_size + i*net_grid*item_size + j *item_size+ 3];
               
                float max_prob = 0;
                int idx=0;
                for(int t=5;t<85;++t){
                    float tp= output_blob[n*net_grid*net_grid*item_size + i*net_grid*item_size + j *item_size+ t];
                    tp = sigmoid(tp);
                    if(tp > max_prob){
                        max_prob = tp;
                        idx = t-5;
                    }
                }
                float cof = box_prob * max_prob;                
                //对于边框置信度小于阈值的边框,不关心其他数值,不进行计算减少计算量
                if(cof < cof_threshold)
                    continue;

                x = (sigmoid(x)*2 - 0.5 + j)*640.0f/net_grid;
                y = (sigmoid(y)*2 - 0.5 + i)*640.0f/net_grid;
                w = pow(sigmoid(w)*2,2) * anchors[n*2];
                h = pow(sigmoid(h)*2,2) * anchors[n*2 + 1];

                x *= ratio_w;
                y *= ratio_h;
                w *= ratio_w;
                h *= ratio_h;

                float r_x = x - w/2;
                float r_y = y - h/2;
                Rect rect = Rect(round(r_x),round(r_y),round(w),round(h));
                detected_objects.push_back({cof, this->classes[idx], rect});
            }
    if(detected_objects.size() == 0) return false;
    else return true;
}

//以下为工具函数
float SampleDetector::sigmoid(float x){
    return (1 / (1 + exp(-x)));
}

/**
 * @brief demo自定义辅助函数
 */
void SampleDetector::nms(vector<Object>& inputBoxes)
{
    std::sort(inputBoxes.begin(), inputBoxes.end(), [](Object a, Object b) { return a.prob > b.prob; });

    std::vector<bool> isSuppressed(inputBoxes.size(), false);
    for (int i = 0; i < int(inputBoxes.size()); ++i)
    {
        if (isSuppressed[i])
        {
            continue;
        }
        for (int j = i + 1; j < int(inputBoxes.size()); ++j)
        {
            if (isSuppressed[j])
            {
                continue;
            }

            float inter = (inputBoxes[i].rect&inputBoxes[j].rect).area();
            float ovr = inter / (inputBoxes[i].rect.area() + inputBoxes[j].rect.area() - inter);

            if (ovr >= this->_nms_area_threshold)
            {
                isSuppressed[j] = true;
            }
        }
    }

    // return post_nms;
    int idx_t = 0;
    inputBoxes.erase(remove_if(inputBoxes.begin(), inputBoxes.end(), [&idx_t, &isSuppressed](const Object &f) { return isSuppressed[idx_t++]; }), inputBoxes.end());
}

vector<int> SampleDetector::get_anchors(int net_grid){
    vector<int> anchors(6);
    int a80[6] = {10,13, 16,30, 33,23};
    int a40[6] = {30,61, 62,45, 59,119};
    int a20[6] = {116,90, 156,198, 373,326}; 
    if(net_grid == 80){
        anchors.insert(anchors.begin(),a80,a80 + 6);
    }
    else if(net_grid == 40){
        anchors.insert(anchors.begin(),a40,a40 + 6);
    }
    else if(net_grid == 20){
        anchors.insert(anchors.begin(),a20,a20 + 6);
    }
    return anchors;
}

/**
 * @brief 配置参数更新函数
 * 请尤其注意此处，对于动态配置的参数必须能支持可实现动态配置（即上层应用可能会动态地改变算法部分参数）
 */
bool SampleDetector::UpdateConfig(const char *args)
{
    if (args == nullptr)
    {
        LOG(ERROR) << "mConfig string is null ";
        return false;
    }
    mConfig.ParseAndUpdateArgs(args);
    return true;
}

/**
 * @brief 获取输出结果图
 * 
 */
bool SampleDetector::GetOutFrame(JiImageInfo **out, unsigned int &outCount)
{
    outCount = mOutCount;

    mOutImage[0].nWidth = mOutputFrame.cols;
    mOutImage[0].nHeight = mOutputFrame.rows;
    mOutImage[0].nFormat = JI_IMAGE_TYPE_BGR;
    mOutImage[0].nDataType = JI_UNSIGNED_CHAR;
    mOutImage[0].nWidthStride = mOutputFrame.step;
    mOutImage[0].pData = mOutputFrame.data;

    *out = mOutImage;
}

/**
 * @brief 处理单张图的推理
 * 
 */
STATUS SampleDetector::ProcessImage(Mat& srcimg,vector<Object>& detected_objects){
    if(srcimg.empty()){
        cout << "无效图片输入" << endl;
        return false;
    }
 // Step 3. Read input image
    cv::Mat &img =srcimg;// cv::imread("../../imgs/000000000312.jpg");
    Resize res = resize_and_pad(img, cv::Size(640, 640));
    // Step 5. Create tensor from image
    float *input_data = (float *)res.resized_image.data;
    ov::Tensor input_tensor = ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
    // Step 6. Create an infer request for model inference
    ov::InferRequest infer_request = compiled_model.create_infer_request();
    infer_request.set_input_tensor(input_tensor);
    infer_request.infer();

    // Step 7. Retrieve inference results
    const ov::Tensor &output_tensor = infer_request.get_output_tensor();
    ov::Shape output_shape = output_tensor.get_shape();
    float *detections = output_tensor.data<float>();

    // Step 8. Postprocessing including NMS
    std::vector<cv::Rect> boxes;
    vector<int> class_ids;
    vector<float> confidences;

    for (int i = 0; i < output_shape[1]; i++)
    {
        float *detection = &detections[i * output_shape[2]];

        float confidence = detection[4];
        if (confidence >= _cof_threshold)
        {
            float *classes_scores = &detection[5];
            cv::Mat scores(1, output_shape[2] - 5, CV_32FC1, classes_scores);
            cv::Point class_id;
            double max_class_score;
            cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);

            if (max_class_score > _cof_threshold)
            {
                float x = detection[0];
                float y = detection[1];
                float w = detection[2];
                float h = detection[3];

                float xmin = x - (w / 2);
                float ymin = y - (h / 2);

        float rx = (float)img.cols / (float)(res.resized_image.cols - res.dw);
        float ry = (float)img.rows / (float)(res.resized_image.rows - res.dh);
        xmin *= rx;
       ymin*= ry;
        w*= rx ;
        h *= ry;



        Object result;
        result.class_id = class_id.x;
        result.confidence = confidence;
        result.rect = cv::Rect(xmin, ymin, w, h);
        result.name=namep_map[result.class_id];
        detected_objects.push_back(result);
            }
        }
    }
    // Json::Value root, algorithm_data, objects, target_info, model_data;

    // // Step 9. Print results and save Figure with detections
    // for (int i = 0; i < output.size(); i++)
    // {
    //     auto detection = output[i];
        
    //     auto &box = detection.rect;
    //     auto classId = detection.class_id;
    //     auto confidence = detection.confidence;
    //     float rx = (float)img.cols / (float)(res.resized_image.cols - res.dw);
    //     float ry = (float)img.rows / (float)(res.resized_image.rows - res.dh);
    //     box.x = rx * box.x;
    //     box.y = ry * box.y;
    //     box.width = rx * box.width;
    //     box.height = ry * box.height;
        

    //     objects.append(detection.tojsonvalue());
    //     if (detection.class_id==0)
    //     target_info.append(detection.tojsonvalue());

    //     float xmax = box.x + box.width;
    //     float ymax = box.y + box.height;
    //     cv::rectangle(img, cv::Point(box.x, box.y), cv::Point(xmax, ymax), cv::Scalar(255, 0, 0), 3);
    //     cv::rectangle(img, cv::Point(box.x, box.y - 20), cv::Point(xmax, box.y), cv::Scalar(0, 255, 0), cv::FILLED);
    //     cv::putText(img, namep_map[classId], cv::Point(box.x, box.y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    // }
    // #ifdef USE_JSONCPP 
    // // Json::Value algorithm_data;
    // algorithm_data["is_alert"] = target_info.size()>0;
    // algorithm_data["target_count"] =target_info.size();
    // algorithm_data["target_info"] = target_info;

    // Json::Value array;
    // Json::StyledWriter sw;
    // root["model_data"] = objects;
    // root["algorithm_data"] = algorithm_data;
    // auto str = sw.write(root);
    // // if (target_info.size())
    // cout << str << endl;
    // #endif




    // const float ratio_h = (float)srcimg.rows / 640.0;
    // const float ratio_w = (float)srcimg.cols / 640.0;
    // Mat inframe;
    // resize(srcimg,inframe,Size(640,640));

    // this->operator()(srcimg);

    // cvtColor(inframe,inframe,COLOR_BGR2RGB);
    // size_t img_size = 640*640;
    // InferRequest::Ptr infer_request = _network.CreateInferRequestPtr();
    // Blob::Ptr frameBlob = infer_request->GetBlob(_input_name);
    // InferenceEngine::LockedMemory<void> blobMapped = InferenceEngine::as<InferenceEngine::MemoryBlob>(frameBlob)->wmap();
    // float* blob_data = blobMapped.as<float*>();
    // //nchw
    // for(size_t row =0;row<640;row++){
    //     for(size_t col=0;col<640;col++){
    //         for(size_t ch =0;ch<3;ch++){
    //             blob_data[img_size*ch + row*640 + col] = float(inframe.at<Vec3b>(row,col)[ch])/255.0f;
    //         }
    //     }
    // }
    // //执行预测
    // infer_request->Infer();
    // //获取各层结果
    // const int s[3] = {80,40,20};
    // int i=0;
    // for (auto &output : _outputinfo) {
    //     auto output_name = output.first;
    //     Blob::Ptr blob = infer_request->GetBlob(output_name);
    //    parse_yolov5(blob,s[i],_cof_threshold, detected_objects, ratio_h, ratio_w);
    //     ++i;
    // }
    //后处理获得最终检测结果
    this->nms(detected_objects);
    return SampleDetector::SUCCESS_PROCESS;
}

/**
 * @brief 算法分析主入口
 * 业务处理函数，输入分析图片，返回算法分析结果
 */

bool SampleDetector::Process(const cv::Mat &inFrame, const char *args, JiEvent &event)
{
    if (inFrame.empty())
    {
        LOG(ERROR) << "Invalid input!";
        return false;
    }
    /**
     * 监测输入图像尺寸变化，若发生变化则必须更新ROI相关参数
     * 此处非常重要，需要引起重视！！！
     */
    if (inFrame.cols != mConfig.currentInFrameSize.width || inFrame.rows != mConfig.currentInFrameSize.height)
    {
	LOG(INFO)<<"Update ROI Info...";
        mConfig.UpdateROIInfo(inFrame.cols, inFrame.rows);
    }

    /**
     * 解析参数并动态更新，根据接口规范标准，接口必须支持配置文件/usr/local/ev_sdk/model/algo_config.json内参数的实时更新功能
     * （即通过ji_calc_*等接口传入）
     * 此处非常重要，需要引起重视！！！
     */
    if(args != nullptr){
	std::string curArgs = args;
    	LOG(INFO)<<"Update args:"<<curArgs;
    }
    mConfig.ParseAndUpdateArgs(args);
    LOG(INFO)<<"Config person detection thresh:"<<mConfig.algoConfig.thresh;

    /**
     * 本demo实现的功能是行人闯入告警，其实现方法是首先根据配置指定的任意几个ROI区域，算法首先在原图上进行行人检测，
     * 然后判断检测到的行人是否在配置的ROI区域内，若在，则认为是闯入，发出告警；若不在，则不认为是闯入
     * 
     */

    // 针对每个ROI进行算法处理
    std::vector<SampleDetector::Object> detectedObjects;
    std::vector<SampleDetector::Object> validTargets;

    // 算法处理
    cv::Mat img = inFrame.clone();
    int processRet = ProcessImage(img, detectedObjects);
    if (processRet != SampleDetector::SUCCESS_PROCESS)
    {
        return false;
    }

    // 创建输出图
    inFrame.copyTo(mOutputFrame);

    for (auto &obj : detectedObjects)
    {
        if (obj.name=="electric_scooter")
        {
            validTargets.emplace_back(obj);
            continue;
        }
        if (mConfig.drawResult)
        {
            auto &box=obj.rect;
        float xmax = box.x + box.width;
        float ymax = box.y + box.height;

            cv::rectangle(mOutputFrame, cv::Point(box.x, box.y), cv::Point(xmax, ymax), cv::Scalar(255, 0, 0), 3);
            cv::rectangle(mOutputFrame, cv::Point(box.x, box.y - 20), cv::Point(xmax, box.y), cv::Scalar(0, 255, 0), cv::FILLED);
            cv::putText(mOutputFrame, obj.name, cv::Point(box.x, box.y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));

        }

        



        // for (auto &roiPolygon : mConfig.currentROIOrigPolygons)
        // {
        //     int mid_x = obj.rect.x + obj.rect.width / 2;
        //     int mid_y = obj.rect.y + obj.rect.height / 2;
        //     // 当检测的目标的中心点在ROI内的话，就视为闯入ROI的有效目标
        //     if (WKTParser::inPolygon(roiPolygon, cv::Point(mid_x, mid_y)))
        //     {
        //         validTargets.emplace_back(obj);
        //     }
        //     /*
        //     当检测的目标的rect在ROI内的话，就视为闯入ROI的有效目标
        //     if (WKTParser::inPolygon(roiPolygon, obj.rect))
        //     {
        //         validTargets.emplace_back(obj);
        //     }
        //     */
        // }
    }
    // cv::imwrite("out.png",img);
    // 此处示例业务逻辑：当算法检测到有行人闯入时，就报警
    bool isNeedAlert = false; // 是否需要报警

    // // 画ROI区域
    // if (mConfig.drawROIArea && !mConfig.currentROIOrigPolygons.empty())
    // {
    //     drawPolygon(mOutputFrame, mConfig.currentROIOrigPolygons, cv::Scalar(mConfig.roiColor[0], mConfig.roiColor[1], mConfig.roiColor[2]),
    //                 mConfig.roiColor[3], cv::LINE_AA, mConfig.roiLineThickness, mConfig.roiFill);
    // }
    // 判断是否要要报警并将检测到的目标画到输出图上
    if (validTargets.size() > 0)
    {
        isNeedAlert = true;
    }
    for (auto &object : validTargets)
    {
    	//LOG(INFO) << "Found targets:" << object.name;
        if (mConfig.drawResult)
        {
            std::stringstream ss;
            //ss << mConfig.targetRectTextMap[mConfig.language];
	    ss << object.name;
            if (mConfig.drawConfidence)
            {
                ss.precision(0);
                //ss << std::fixed << (mConfig.targetRectTextMap[mConfig.language].empty() ? "" : ": ") << object.prob * 100 << "%";
		ss << std::fixed << ": " << object.prob * 100 << "%";
            }
            drawRectAndText(mOutputFrame, object.rect, ss.str(), mConfig.targetRectLineThickness, cv::LINE_AA,
                            cv::Scalar(mConfig.targetRectColor[0], mConfig.targetRectColor[1], mConfig.targetRectColor[2]), mConfig.targetRectColor[3], mConfig.targetTextHeight,
                            cv::Scalar(mConfig.textFgColor[0], mConfig.textFgColor[1], mConfig.textFgColor[2]),
                            cv::Scalar(mConfig.textBgColor[0], mConfig.textBgColor[1], mConfig.textBgColor[2]));
        }
    }

    if (isNeedAlert && mConfig.drawWarningText)
    {
        drawText(mOutputFrame, mConfig.warningTextMap[mConfig.language], mConfig.warningTextSize,
                 cv::Scalar(mConfig.warningTextFg[0], mConfig.warningTextFg[1], mConfig.warningTextFg[2]),
                 cv::Scalar(mConfig.warningTextBg[0], mConfig.warningTextBg[1], mConfig.warningTextBg[2]), mConfig.warningTextLeftTop);
    }

    // 将结果封装成json字符串
    cJSON *rootObj = cJSON_CreateObject();
    cJSON *algoObj = cJSON_CreateObject();
    int jsonAlertCode = JSON_ALERT_FLAG_FALSE;
    if (isNeedAlert)
    {
        jsonAlertCode = JSON_ALERT_FLAG_TRUE;
    }
    cJSON_AddItemToObject(algoObj, JSON_ALERT_FLAG_KEY, cJSON_CreateBool(jsonAlertCode));
    cJSON *personsObj = cJSON_CreateArray();
    for (auto &obj : validTargets)
    {
	//if(obj.name != "person") continue;
        cJSON *odbObj = cJSON_CreateObject();
        int x = obj.rect.x;
        int y = obj.rect.y;
        int width = obj.rect.width;
        int height = obj.rect.height;
        cJSON_AddItemToObject(odbObj, "x", cJSON_CreateNumber(x));
        cJSON_AddItemToObject(odbObj, "y", cJSON_CreateNumber(y));
        cJSON_AddItemToObject(odbObj, "width", cJSON_CreateNumber(width));
        cJSON_AddItemToObject(odbObj, "height", cJSON_CreateNumber(height));
        cJSON_AddItemToObject(odbObj, "name", cJSON_CreateString(obj.name.c_str()));
        cJSON_AddItemToObject(odbObj, "confidence", cJSON_CreateNumber(obj.prob));

        cJSON_AddItemToArray(personsObj, odbObj);
    }
    cJSON_AddItemToObject(algoObj, "target_info", personsObj);
    cJSON_AddItemToObject(rootObj, "algorithm_data", algoObj);

    //create model data
    cJSON *modelObj = cJSON_CreateObject();

    cJSON *persons = cJSON_CreateArray();
    for (auto &obj : detectedObjects)
    {
	//if(obj.name != "person") continue;
        cJSON *odbObj = cJSON_CreateObject();
        int x = obj.rect.x;
        int y = obj.rect.y;
        int width = obj.rect.width;
        int height = obj.rect.height;
        cJSON_AddItemToObject(odbObj, "x", cJSON_CreateNumber(x));
        cJSON_AddItemToObject(odbObj, "y", cJSON_CreateNumber(y));
        cJSON_AddItemToObject(odbObj, "width", cJSON_CreateNumber(width));
        cJSON_AddItemToObject(odbObj, "height", cJSON_CreateNumber(height));
        cJSON_AddItemToObject(odbObj, "name", cJSON_CreateString(obj.name.c_str()));
        cJSON_AddItemToObject(odbObj, "confidence", cJSON_CreateNumber(obj.prob));

        cJSON_AddItemToArray(persons, odbObj);
    }
    cJSON_AddItemToObject(modelObj, "objects", persons);
    cJSON_AddItemToObject(rootObj, "model_data", modelObj);

    char *jsonResultStr = cJSON_Print(rootObj);
    int jsonSize = strlen(jsonResultStr);
    if (jsonResult == nullptr)
    {
        jsonResult = new char[jsonSize + 1];
    }
    else if (strlen(jsonResult) < jsonSize)
    {
        delete[] jsonResult; // 如果需要重新分配空间，需要释放资源
        jsonResult = new char[jsonSize + 1];
    }
    strcpy(jsonResult, jsonResultStr);

    // 注意：JI_EVENT.code需要根据需要填充，切勿弄反
    if (isNeedAlert)
    {
        event.code = JISDK_CODE_ALARM;
    }
    else
    {
        event.code = JISDK_CODE_NORMAL;
    }
    event.json = jsonResult;

    if (rootObj)
        cJSON_Delete(rootObj);
    if (jsonResultStr)
        free(jsonResultStr);
    return true;
}

