// #include <opencv2/dnn.hpp>  // not needed: NMS helpers are implemented locally below
#include <openvino/openvino.hpp>
#include <opencv2/opencv.hpp>

#include "json.h"

#include "main.h"


// Comparator ordering (score, payload) pairs from highest to lowest score.
// Used with std::stable_sort so equal scores keep their original order.
template <typename T>
static inline bool SortScorePairDescend(const std::pair<float, T> &lhs,
                                        const std::pair<float, T> &rhs)
{
    return rhs.first < lhs.first;
}

// Collects (score, original-index) pairs for every score strictly above
// `threshold` into `score_index_vec`, sorted by score descending. When
// `top_k` is positive, the result is truncated to the best top_k entries.
// Precondition: `score_index_vec` is empty on entry.
inline void GetMaxScoreIndex(const std::vector<float> &scores, const float threshold, const int top_k,
                             std::vector<std::pair<float, int>> &score_index_vec)
{
    CV_DbgAssert(score_index_vec.empty());

    // Pair each above-threshold score with its index in `scores`.
    const int count = (int)scores.size();
    for (int i = 0; i < count; ++i)
    {
        if (scores[i] > threshold)
            score_index_vec.emplace_back(scores[i], i);
    }

    // Best scores first; stable so ties preserve their original ordering.
    std::stable_sort(score_index_vec.begin(), score_index_vec.end(),
                     SortScorePairDescend<int>);

    // Keep only the top_k best candidates when a positive limit was given.
    if (top_k > 0 && (size_t)top_k < score_index_vec.size())
        score_index_vec.resize(top_k);
}
// Overlap of two rectangles expressed as 1 - Jaccard distance
// (i.e. their intersection-over-union).
template <typename T>
static inline float rectOverlap(const T &a, const T &b)
{
    const float dist = static_cast<float>(jaccardDistance(a, b));
    return 1.f - dist;
}

// Greedy non-maximum suppression.
//
// Candidates with score above `score_threshold` are visited best-first; a box
// is kept unless its overlap (per `computeOverlap`) with an already-kept box
// exceeds `nms_threshold`. Indices of kept boxes are written to `indices`.
// At most `limit` boxes are kept (default: unlimited).
//
// Precondition (asserted): bboxes.size() == scores.size().
template <typename BoxType>
inline void NMSBoxes(const std::vector<BoxType> &bboxes,
                     const std::vector<float> &scores, const float score_threshold,
                     const float nms_threshold, std::vector<int> &indices,
                     float (*computeOverlap)(const BoxType &, const BoxType &),
                     int limit = std::numeric_limits<int>::max())
{
    const float eta = 1.0f; // adaptive-threshold decay; 1.0 disables adaptation
    const int top_k = 0;    // 0 = consider every candidate above score_threshold
    CV_Assert(bboxes.size() == scores.size());

    // Get top_k scores (with corresponding indices), sorted best-first.
    std::vector<std::pair<float, int>> score_index_vec;
    GetMaxScoreIndex(scores, score_threshold, top_k, score_index_vec);

    // Do nms: keep a box only if it does not overlap a kept box too much.
    float adaptive_threshold = nms_threshold;
    indices.clear();
    for (size_t i = 0; i < score_index_vec.size(); ++i)
    {
        const int idx = score_index_vec[i].second;
        bool keep = true;
        for (int k = 0; k < (int)indices.size() && keep; ++k)
        {
            const int kept_idx = indices[k];
            float overlap = computeOverlap(bboxes[idx], bboxes[kept_idx]);
            keep = overlap <= adaptive_threshold;
        }
        if (keep)
        {
            indices.push_back(idx);
            // BUG FIX: compare as int. The previous size_t-vs-int comparison
            // promoted `limit` to unsigned, so a non-positive `limit` became a
            // huge value and the cap was silently ignored.
            if ((int)indices.size() >= limit)
            {
                break;
            }
        }
        // Dead with eta == 1.0f, kept for parity with OpenCV's NMS reference.
        if (keep && eta < 1 && adaptive_threshold > 0.5)
        {
            adaptive_threshold *= eta;
        }
    }
}

using namespace std;

// Post-processing thresholds.
const float SCORE_THRESHOLD = 0.2;      // minimum per-class score to keep a candidate
const float NMS_THRESHOLD = 0.4;        // overlap threshold used by NMS
const float CONFIDENCE_THRESHOLD = 0.4; // minimum objectness confidence
// class_id -> human-readable label; order must match the model's training labels.
vector<string>  namep_map={"electric_scooter","person","bicycle","others"};

// One detected object: class index, confidence score, and its bounding
// box in original-image pixel coordinates.
struct Detection
{
    int class_id;
    float confidence;
    cv::Rect box;

    // Serialize this detection into the JSON object shape the SDK expects.
    Json::Value tojsonvalue()
    {
        Json::Value node;
        node["name"] = namep_map[class_id%4]; // modulo guards out-of-range ids
        node["confidence"] = confidence;
        node["x"] = box.x;
        node["y"] = box.y;
        node["width"] = box.width;
        node["height"] = box.height;
        return node;
    }
};

// Result of letterbox resizing: the padded image plus how much padding
// was added, in pixels (padding only goes on the right and bottom).
struct Resize
{
    cv::Mat resized_image;
    int dw; // horizontal padding added on the right
    int dh; // vertical padding added on the bottom
};

// Letterbox-resize `img` to fit inside `new_shape` while preserving aspect
// ratio, then pad the right/bottom with gray (100,100,100) to reach the exact
// target size. Returns the padded image and the padding amounts.
Resize resize_and_pad(cv::Mat &img, cv::Size new_shape)
{
    float width = img.cols;
    float height = img.rows;
    // BUG FIX: use the limiting ratio of BOTH dimensions. The old formula
    // (new_shape.width / max(width, height)) is only correct when new_shape is
    // square; a non-square target could produce a negative dw/dh and make
    // copyMakeBorder fail. Behavior is identical for square targets (640x640).
    float r = min((float)new_shape.width / width, (float)new_shape.height / height);
    int new_unpadW = int(round(width * r));
    int new_unpadH = int(round(height * r));
    Resize resize;
    cv::resize(img, resize.resized_image, cv::Size(new_unpadW, new_unpadH), 0, 0, cv::INTER_AREA);

    // Pad only on the right/bottom so box coordinates stay origin-anchored.
    resize.dw = new_shape.width - new_unpadW;
    resize.dh = new_shape.height - new_unpadH;
    cv::Scalar color = cv::Scalar(100, 100, 100);
    cv::copyMakeBorder(resize.resized_image, resize.resized_image, 0, resize.dh, 0, resize.dw, cv::BORDER_CONSTANT, color);

    return resize;
}

// One-time model setup: reads the ONNX network, embeds a preprocessing graph
// (u8 BGR NHWC input -> f32 RGB NCHW scaled by 1/255) and compiles it for CPU.
// Stores the result in `compiled_model` (declared in main.h) for operator().
void algo::init()
{
    // Step 1. Initialize OpenVINO Runtime core
    ov::Core core;
    // Step 2. Read a model
    // NOTE(review): model path is hard-coded — consider making it configurable.
    std::shared_ptr<ov::Model> model = core.read_model("/project/ev_sdk/model/v5s_dianti.onnx");

    // Step 4. Initialize Preprocessing for the model
    ov::preprocess::PrePostProcessor ppp = ov::preprocess::PrePostProcessor(model);
    // Input tensor format: raw u8 BGR frames laid out as NHWC (as produced by cv::Mat)
    ppp.input().tensor().set_element_type(ov::element::u8).set_layout("NHWC").set_color_format(ov::preprocess::ColorFormat::BGR);
    // Preprocess pipeline: convert to f32, BGR -> RGB, divide each channel by 255
    ppp.input().preprocess().convert_element_type(ov::element::f32).convert_color(ov::preprocess::ColorFormat::RGB).scale({255., 255., 255.});
    //  Specify model's input layout
    ppp.input().model().set_layout("NCHW");
    // Specify output results format
    ppp.output().tensor().set_element_type(ov::element::f32);
    // Embed above steps in the graph
    model = ppp.build();
    compiled_model = core.compile_model(model, "CPU");
}
// Runs one inference on `img` (modified in place: boxes and labels are drawn
// onto it) and returns the detections serialized as a styled JSON string with
// "model_data" (all objects) and "algorithm_data" (alert summary).
string algo::operator()( cv::Mat &img)
{

    // Step 3. Read input image
    // cv::Mat img = cv::imread("../../imgs/000000000312.jpg");
    // std::cout<<img<<endl;
    // Letterbox-resize to the fixed 640x640 network input size.
    Resize res = resize_and_pad(img, cv::Size(640, 640));

    // Step 5. Create tensor from image
    // NOTE(review): init() configures the input tensor as u8, so this float*
    // cast is misleading — the pointer is only forwarded as raw data and the
    // buffer is presumably read as u8. Confirm, and prefer a u8* cast.
    float *input_data = (float *)res.resized_image.data;
    ov::Tensor input_tensor = ov::Tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape(), input_data);
    // Step 6. Create an infer request for model inference
    ov::InferRequest infer_request = compiled_model.create_infer_request();
    infer_request.set_input_tensor(input_tensor);
    infer_request.infer();

    // Step 7. Retrieve inference results
    const ov::Tensor &output_tensor = infer_request.get_output_tensor();
    ov::Shape output_shape = output_tensor.get_shape();
    float *detections = output_tensor.data<float>();

    // Step 8. Postprocessing including NMS
    // Assumes a YOLOv5-style output [1, num_candidates, 5 + num_classes] where
    // each row is [cx, cy, w, h, objectness, class scores...] — TODO confirm.
    std::vector<cv::Rect> boxes;
    vector<int> class_ids;
    vector<float> confidences;

    for (int i = 0; i < output_shape[1]; i++)
    {
        float *detection = &detections[i * output_shape[2]];

        float confidence = detection[4]; // objectness score
        if (confidence >= CONFIDENCE_THRESHOLD)
        {
            // Find the best-scoring class among the per-class scores.
            float *classes_scores = &detection[5];
            cv::Mat scores(1, output_shape[2] - 5, CV_32FC1, classes_scores);
            cv::Point class_id;
            double max_class_score;
            cv::minMaxLoc(scores, 0, &max_class_score, 0, &class_id);

            if (max_class_score > SCORE_THRESHOLD)
            {

                confidences.push_back(confidence);

                class_ids.push_back(class_id.x);

                // Convert center/size to a top-left-anchored rect
                // (still in padded 640x640 coordinates).
                float x = detection[0];
                float y = detection[1];
                float w = detection[2];
                float h = detection[3];

                float xmin = x - (w / 2);
                float ymin = y - (h / 2);

                boxes.push_back(cv::Rect(xmin, ymin, w, h));
            }
        }
    }
    // Suppress overlapping candidates, keeping the highest-confidence ones.
    std::vector<int> nms_result;
    NMSBoxes(boxes, confidences, SCORE_THRESHOLD, NMS_THRESHOLD, nms_result, rectOverlap);

    // Gather the surviving detections.
    std::vector<Detection> output;
    for (int i = 0; i < nms_result.size(); i++)
    {
        Detection result;
        int idx = nms_result[i];
        result.class_id = class_ids[idx];
        result.confidence = confidences[idx];
        result.box = boxes[idx];
        output.push_back(result);
    }
    Json::Value root, algorithm_data, objects, target_info, model_data;

    // Step 9. Print results and save Figure with detections
    for (int i = 0; i < output.size(); i++)
    {
        auto detection = output[i];
        
        auto &box = detection.box;
        auto classId = detection.class_id;
        auto confidence = detection.confidence;
        // Map box coordinates from the padded 640x640 space back to the
        // original image (padding was only added on the right/bottom).
        float rx = (float)img.cols / (float)(res.resized_image.cols - res.dw);
        float ry = (float)img.rows / (float)(res.resized_image.rows - res.dh);
        box.x = rx * box.x;
        box.y = ry * box.y;
        box.width = rx * box.width;
        box.height = ry * box.height;

        objects.append(detection.tojsonvalue());
        // class_id 0 ("electric_scooter") is the alert-triggering target class.
        if (detection.class_id==0)
        target_info.append(detection.tojsonvalue());

        // Draw the box, a filled label background, and the class name.
        float xmax = box.x + box.width;
        float ymax = box.y + box.height;
        cv::rectangle(img, cv::Point(box.x, box.y), cv::Point(xmax, ymax), cv::Scalar(255, 0, 0), 3);
        cv::rectangle(img, cv::Point(box.x, box.y - 20), cv::Point(xmax, box.y), cv::Scalar(0, 255, 0), cv::FILLED);
        cv::putText(img, namep_map[classId], cv::Point(box.x, box.y - 5), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }
    // Json::Value algorithm_data;
    // Alert summary: the alert fires when any target-class object was found.
    algorithm_data["is_alert"] = target_info.size()>0;
    algorithm_data["target_count"] =target_info.size();
    algorithm_data["target_info"] = target_info;

    Json::Value array;
    Json::StyledWriter sw;
    root["model_data"] = objects;
    root["algorithm_data"] = algorithm_data;
    auto str = sw.write(root);
    // if (target_info.size())
    // cout << str << endl;

    return str;
}

int   main()
{
    algo ag;
    ag.init();
    auto mat=cv::imread("/project/ev_sdk/test/demo.jpg");
    auto str=ag(mat);
    cout<<str<<endl;
}