﻿// AnalyseVideoStream.cpp : This file contains the "main" function. Program execution begins and ends here.
//

#include <iostream>
#include <iomanip>
#include <string>
#include <vector>
#include <queue>
#include <fstream>
#include <thread>
#include <future>
#include <atomic>
#include <mutex>         // std::mutex, std::unique_lock
#include <cmath>
#include "yolo_v2_class.hpp"
#include <opencv2/opencv.hpp>            
#include <opencv2/core/version.hpp>
#include <opencv2/videoio/videoio.hpp>
#include <httplib.h>
#include "Utils.h"
#include "json.hpp"
using namespace nlohmann;

void draw_boxes(cv::Mat mat_img, std::vector<bbox_t> result_vec, std::vector<std::string> obj_names,
    int current_det_fps = -1, int current_cap_fps = -1)
{
    int const colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } };

    for (auto& i : result_vec) {
        cv::Scalar color = obj_id_to_color(i.obj_id);
        cv::rectangle(mat_img, cv::Rect(i.x, i.y, i.w, i.h), color, 2);
        if (obj_names.size() > i.obj_id) {
            std::string obj_name = obj_names[i.obj_id];
            if (i.track_id > 0) obj_name += " - " + std::to_string(i.track_id);
            cv::Size const text_size = getTextSize(obj_name, cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, 2, 0);
            int max_width = (text_size.width > i.w + 2) ? text_size.width : (i.w + 2);
            max_width = std::max(max_width, (int)i.w + 2);
            //max_width = std::max(max_width, 283);
            std::string coords_3d;
            if (!std::isnan(i.z_3d)) {
                std::stringstream ss;
                ss << std::fixed << std::setprecision(2) << "x:" << i.x_3d << "m y:" << i.y_3d << "m z:" << i.z_3d << "m ";
                coords_3d = ss.str();
                cv::Size const text_size_3d = getTextSize(ss.str(), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, 1, 0);
                int const max_width_3d = (text_size_3d.width > i.w + 2) ? text_size_3d.width : (i.w + 2);
                if (max_width_3d > max_width) max_width = max_width_3d;
            }

            cv::rectangle(mat_img, cv::Point2f(std::max((int)i.x - 1, 0), std::max((int)i.y - 35, 0)),
                cv::Point2f(std::min((int)i.x + max_width, mat_img.cols - 1), std::min((int)i.y, mat_img.rows - 1)),
                color, CV_FILLED, 8, 0);
            putText(mat_img, obj_name, cv::Point2f(i.x, i.y - 16), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(0, 0, 0), 2);
            if (!coords_3d.empty()) putText(mat_img, coords_3d, cv::Point2f(i.x, i.y - 1), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(0, 0, 0), 1);
        }
    }
    if (current_det_fps >= 0 && current_cap_fps >= 0) {
        std::string fps_str = "FPS detection: " + std::to_string(current_det_fps) + "   FPS capture: " + std::to_string(current_cap_fps);
        putText(mat_img, fps_str, cv::Point2f(10, 20), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(50, 255, 0), 2);
    }
}

// Prints each detection (class name, object id, box geometry, probability)
// to stdout; a non-negative frame_id prints a " Frame:" header line first.
void show_console_result(std::vector<bbox_t> const& result_vec, std::vector<std::string> const& obj_names, int frame_id = -1) {
    if (frame_id >= 0) std::cout << " Frame: " << frame_id << std::endl;
    // Remember the stream precision so the setprecision(3) below does not
    // leak into unrelated std::cout output after this function returns.
    auto const old_precision = std::cout.precision();
    for (auto const& i : result_vec) {
        if (obj_names.size() > i.obj_id) std::cout << obj_names[i.obj_id] << " - ";
        std::cout << "obj_id = " << i.obj_id << ",  x = " << i.x << ", y = " << i.y
            << ", w = " << i.w << ", h = " << i.h
            << std::setprecision(3) << ", prob = " << i.prob << std::endl;
    }
    std::cout.precision(old_precision);
}

// Loads one class name per line from `filename` (darknet .names format).
// Returns an empty vector, without logging, if the file cannot be opened.
std::vector<std::string> objects_names_from_file(std::string const& filename) {
    std::ifstream file(filename);
    std::vector<std::string> file_lines;
    if (!file.is_open()) return file_lines;
    for (std::string line; std::getline(file, line);) file_lines.push_back(line);
    std::cout << "object names loaded \n";
    return file_lines;
}

// Single-slot hand-off channel used to pass work items between pipeline
// threads. The slot holds at most one T (nullptr == empty). In sync mode
// send() waits until the consumer has drained the slot; in async mode a
// newer object silently replaces (and frees) any unconsumed one.
template<typename T>
class send_one_replaceable_object_t {
    const bool sync;        // true: block the sender until the slot is empty
    std::atomic<T*> a_ptr;  // the single slot, owned by whoever exchanges it out
public:

    // Publishes a copy of _obj into the slot.
    void send(T const& _obj) {
        std::unique_ptr<T> fresh(new T);
        *fresh = _obj;
        if (sync) {
            // Spin (with a short sleep) until the consumer takes the pending object.
            while (a_ptr.load() != nullptr)
                std::this_thread::sleep_for(std::chrono::milliseconds(3));
        }
        // Install the new object; any replaced one is deleted when `stale` dies.
        std::unique_ptr<T> stale(a_ptr.exchange(fresh.release()));
    }

    // Blocks until an object is available, then removes and returns it.
    T receive() {
        std::unique_ptr<T> taken;
        while (!taken) {
            while (a_ptr.load() == nullptr)
                std::this_thread::sleep_for(std::chrono::milliseconds(3));
            // Another consumer may have raced us; loop again if the slot was empty.
            taken.reset(a_ptr.exchange(nullptr));
        }
        return *taken;
    }

    // True when an unconsumed object is waiting in the slot.
    bool is_object_present() {
        return a_ptr.load() != nullptr;
    }

    send_one_replaceable_object_t(bool _sync) : sync(_sync), a_ptr(nullptr)
    {}
};

// Single-image smoke test: loads the network, runs one detection on a fixed
// test file, prints the inference time and results, and shows the annotated
// image in a window (blocks until a key is pressed).
void imageTest()
{
    std::string  names_file = "data/obj.names";
    std::string  cfg_file = "data/yolo-obj.cfg";
    std::string  weights_file = "data/yolo-obj_final.weights";
    std::string filename = "data/test/a.png";
    auto obj_names = objects_names_from_file(names_file);
    Detector detector(cfg_file, weights_file);
    cv::Mat mat_img = cv::imread(filename);
    // cv::imread returns an empty Mat on failure; without this guard the
    // resize/detect calls below would dereference invalid data and crash.
    if (mat_img.empty()) {
        std::cerr << " Failed to load image: " << filename << "\n";
        return;
    }
    auto det_image = detector.mat_to_image_resize(mat_img);

    auto start = std::chrono::steady_clock::now();
    std::vector<bbox_t> result_vec = detector.detect_resized(*det_image, mat_img.size().width, mat_img.size().height);
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> spent = end - start;
    std::cout << " Time: " << spent.count() << " sec \n";

    //result_vec = detector.tracking_id(result_vec);    // comment it - if track_id is not required
    draw_boxes(mat_img, result_vec, obj_names);
    cv::imshow("window name", mat_img);
    show_console_result(result_vec, obj_names);
    cv::waitKey(0);
}

/*std::string getJsonText(std::vector<bbox_t> cur_bbox_vec, std::vector<std::string> obj_names, int frame_id,
    std::string filename = std::string())
{
    std::string send_str;

    char* tmp_buf = (char*)calloc(1024, sizeof(char));
    if (!filename.empty()) {
        sprintf(tmp_buf, "{\n \"frame_id\":%d, \n \"filename\":\"%s\", \n \"objects\": [ \n", frame_id, filename.c_str());
    }
    else {
        sprintf(tmp_buf, "{\n \"frame_id\":%d, \n \"objects\": [ \n", frame_id);
    }
    send_str = tmp_buf;
    free(tmp_buf);

    for (auto& i : cur_bbox_vec) {
        char* buf = (char*)calloc(2048, sizeof(char));

        sprintf(buf, "  {\"class_id\":%d, \"name\":\"%s\", \"absolute_coordinates\":{\"center_x\":%d, \"center_y\":%d, \"width\":%d, \"height\":%d}, \"confidence\":%f",
            i.obj_id, obj_names[i.obj_id].c_str(), i.x, i.y, i.w, i.h, i.prob);

        //sprintf(buf, "  {\"class_id\":%d, \"name\":\"%s\", \"relative_coordinates\":{\"center_x\":%f, \"center_y\":%f, \"width\":%f, \"height\":%f}, \"confidence\":%f",
        //    i.obj_id, obj_names[i.obj_id], i.x, i.y, i.w, i.h, i.prob);

        send_str += buf;

        if (!std::isnan(i.z_3d)) {
            sprintf(buf, "\n    , \"coordinates_in_meters\":{\"x_3d\":%.2f, \"y_3d\":%.2f, \"z_3d\":%.2f}",
                i.x_3d, i.y_3d, i.z_3d);
            send_str += buf;
        }

        send_str += "}\n";

        free(buf);
    }

    //send_str +=  "\n ] \n}, \n";
    send_str += "\n ] \n}";
    return send_str;
}*/

// One unit of work passed by value between the pipeline threads
// (capture -> prepare -> detect -> draw -> network).
struct detection_data_t {
    cv::Mat cap_frame;                      // raw frame from the capture source
    std::shared_ptr<image_t> det_image;     // resized frame in the detector's input format
    std::vector<bbox_t> result_vec;         // detections for this frame
    cv::Mat draw_frame;                     // frame with boxes/labels rendered
    bool new_detection;                     // true when result_vec is a fresh detection (not a carry-over)
    uint64_t frame_id;                      // sequential id assigned by the capture thread
    bool exit_flag;                         // true -> downstream threads should shut down
    cv::Mat zed_cloud;                      // ZED camera point cloud (unused unless a ZED camera is enabled)
    std::queue<cv::Mat> track_optflow_queue;
    // frame_id was previously left uninitialized; zero it so a default-constructed
    // item never carries garbage into logs/JSON output.
    detection_data_t() : new_detection(false), frame_id(0), exit_flag(false) {}
};

std::string getJsonText(detection_data_t detectionData, std::vector<std::string> obj_names,
    std::string filename = std::string())
{
    std::string send_str;
    std::string capFrame = Utils::Mat2Base64(detectionData.cap_frame, "png");
    std::string labeledImage = Utils::Mat2Base64(detectionData.draw_frame, "png");
    //std::cout << "capFrame:[" << capFrame.length() << "]";
    //send_str = "{\"originImage\":\"" + capFrame + "\"}";
    std::vector<json> details = {};
    for (auto& item : detectionData.result_vec) {
        details.push_back({ { "classId", item.obj_id }, { "className", obj_names[item.obj_id] },
            { "x", item.x }, { "y", item.y }, { "w", item.w }, { "h", item.h }, { "prob", item.prob } });
    }
    json j = { { "originImage", capFrame }, { "labeledImage", labeledImage}, { "frameId", detectionData.frame_id},
        { "videoName", filename }, { "serialNumber", "E55108066"}, { "details", details } };
    send_str = j.dump();
    //char* tmp_buf = (char*)calloc(1024, sizeof(char));
    //sprintf(tmp_buf, "{\"originImage\":\"%s\"}", capFrame);
    //send_str = tmp_buf;
    //free(tmp_buf);
   // std::cout << "send_str:[" << send_str.length() << "]";
    return send_str;
}

int main()
{
    // Threaded detection pipeline:
    //   t_cap (capture) -> t_prepare (resize) -> t_detect (YOLO) ->
    //   t_draw (tracking + overlay) -> t_network (HTTP POST of results).
    // Stages hand frames to each other through send_one_replaceable_object_t
    // single-slot channels; exit_flag propagates through the items themselves.

    // Darknet/YOLO model files (class names, network config, trained weights).
    std::string  names_file = "data/obj.names";
    std::string  cfg_file = "data/yolo-obj.cfg";
    std::string  weights_file = "data/yolo-obj_final.weights";
  //std::string filename = "rtmp://rtmp01open.ys7.com:1935/v3/openlive/E55108066_1_2?expire=1660288561&id=347760489401917440&t=8fdcf723d4c4f347cf7531e48098be93295ba9c3901eefa765df1e8bfd1eecde&ev=100";
    // Live RTMP stream to analyse. NOTE(review): the URL carries an expiring
    // access token (expire=/t= query params) and must be refreshed when stale.
    std::string filename = "rtmp://rtmp01open.ys7.com:1935/v3/openlive/E55108066_1_2?expire=1661590949&id=353223098730762240&t=b1b3b047696aab8eba19bd9464bc9f9af7bf54598eef09d9b8b559b4db6b015f&ev=100";
    float const thresh = 0.2;               // minimum detection confidence passed to the detector
    Detector detector(cfg_file, weights_file);
    auto obj_names = objects_names_from_file(names_file);
    std::string out_videofile = "result.avi";
    bool const save_output_videofile = false;   // true - for history
    bool const send_network = true;        // true - for remote detection
    bool const use_kalman_filter = false;   // true - for stationary camera

    bool detection_sync = true;             // true - for video-file
    // HTTP client used to POST detection results to the local backend service.
    httplib::Client cli("localhost", 8080);

    cv::Mat cur_frame;
    std::atomic<int> fps_cap_counter(0), fps_det_counter(0);
    std::atomic<int> current_fps_cap(0), current_fps_det(0);
    std::atomic<bool> exit_flag(false);
    std::chrono::steady_clock::time_point steady_start, steady_end;
    int video_fps = 25;                     // fallback FPS; overwritten from the stream below
    bool use_zed_camera = false;

    track_kalman_t track_kalman;

    cv::VideoCapture cap;
    cap.open(filename);
    cap >> cur_frame;                       // grab one frame up front to learn the stream's size

    video_fps = cap.get(cv::CAP_PROP_FPS);
    cv::Size const frame_size = cur_frame.size();
    std::cout << "\n Video size: " << frame_size << std::endl;

    

    const bool sync = detection_sync; // sync data exchange
    // One channel per pipeline edge; cap2draw is only used when detection_sync
    // is false (camera mode, detection skipped for some frames).
    send_one_replaceable_object_t<detection_data_t> cap2prepare(sync), cap2draw(sync),
        prepare2detect(sync), detect2draw(sync), draw2show(sync), draw2write(sync), draw2net(sync);

    std::thread t_cap, t_prepare, t_detect, t_post, t_draw, t_write, t_network;
    // NOTE(review): t_cap was just default-constructed and is never joinable
    // here; this join is a no-op kept from the original template code.
    if (t_cap.joinable()) t_cap.join();
    // Capture thread: reads frames from the stream and feeds the pipeline.
    t_cap = std::thread([&]()
        {
            uint64_t frame_id = 0;
            detection_data_t detection_data;
            do {
                fps_cap_counter++;
                //std::cout << "\n fps_cap_counter % 100: " << (fps_cap_counter % 100) << std::endl;
                // Only every 100th grabbed frame enters the pipeline; the other
                // 99 are read and discarded so capture keeps pace with the live
                // stream. NOTE(review): the dropped frames are never checked for
                // emptiness, so end-of-stream is only noticed on a kept frame.
                if (fps_cap_counter % 100 != 0)
                {
                    cap >> cur_frame;
                    continue;
                }
                detection_data = detection_data_t();
                {
                    cap >> detection_data.cap_frame;
                }
                detection_data.frame_id = frame_id++;

                // An empty frame (stream ended/failed) or an external exit
                // request propagates exit_flag downstream via a dummy frame of
                // the right size, so later stages can still process it safely.
                if (detection_data.cap_frame.empty() || exit_flag) {
                    std::cout << " exit_flag: detection_data.cap_frame.size = " << detection_data.cap_frame.size() << std::endl;
                    detection_data.exit_flag = true;
                    detection_data.cap_frame = cv::Mat(frame_size, CV_8UC3);
                }

                if (!detection_sync) {
                    cap2draw.send(detection_data);       // skip detection
                }
                cap2prepare.send(detection_data);
            } while (!detection_data.exit_flag);
            std::cout << " t_cap exit \n";
        });


    // pre-processing video frame (resize, convertion)
    t_prepare = std::thread([&]()
        {
            std::shared_ptr<image_t> det_image;
            detection_data_t detection_data;
            do {
                detection_data = cap2prepare.receive();

                // Convert/resize the OpenCV frame into the detector's input format.
                det_image = detector.mat_to_image_resize(detection_data.cap_frame);
                detection_data.det_image = det_image;
                prepare2detect.send(detection_data);    // detection

            } while (!detection_data.exit_flag);
            std::cout << " t_prepare exit \n";
        });


    // detection by Yolo
    // NOTE(review): as above, t_detect is freshly constructed; join is a no-op.
    if (t_detect.joinable()) t_detect.join();
    t_detect = std::thread([&]()
        {
            std::shared_ptr<image_t> det_image;
            detection_data_t detection_data;
            do {
                detection_data = prepare2detect.receive();
                det_image = detection_data.det_image;
                std::vector<bbox_t> result_vec;

                // Run YOLO; results are scaled back to the original frame size.
                if (det_image)
                    result_vec = detector.detect_resized(*det_image, frame_size.width, frame_size.height, thresh, true);  // true
                fps_det_counter++;
                //std::this_thread::sleep_for(std::chrono::milliseconds(150));

                detection_data.new_detection = true;
                detection_data.result_vec = result_vec;
                detect2draw.send(detection_data);
            } while (!detection_data.exit_flag);
            std::cout << " t_detect exit \n";
        });

    // draw rectangles (and track objects)
    t_draw = std::thread([&]()
        {
            std::queue<cv::Mat> track_optflow_queue;
            detection_data_t detection_data;
            do {

                // for Video-file
                if (detection_sync) {
                    detection_data = detect2draw.receive();
                }
                // for Video-camera
                else
                {
                    // get new Detection result if present
                    if (detect2draw.is_object_present()) {
                        cv::Mat old_cap_frame = detection_data.cap_frame;   // use old captured frame
                        detection_data = detect2draw.receive();
                        if (!old_cap_frame.empty()) detection_data.cap_frame = old_cap_frame;
                    }
                    // get new Captured frame
                    else {
                        std::vector<bbox_t> old_result_vec = detection_data.result_vec; // use old detections
                        detection_data = cap2draw.receive();
                        detection_data.result_vec = old_result_vec;
                    }
                }

                cv::Mat cap_frame = detection_data.cap_frame;
                cv::Mat draw_frame = detection_data.cap_frame.clone();
                std::vector<bbox_t> result_vec = detection_data.result_vec;
                // track ID by using kalman filter
                if (use_kalman_filter) {
                    if (detection_data.new_detection) {
                        result_vec = track_kalman.correct(result_vec);
                    }
                    else {
                        result_vec = track_kalman.predict();
                    }
                }
                // track ID by using custom function
                else {
                    int frame_story = std::max(5, current_fps_cap.load());
                    result_vec = detector.tracking_id(result_vec, true, frame_story, 40);
                }

                //small_preview.set(draw_frame, result_vec);
                //large_preview.set(draw_frame, result_vec);
                draw_boxes(draw_frame, result_vec, obj_names, current_fps_det, current_fps_cap);
                //show_console_result(result_vec, obj_names, detection_data.frame_id);
                //large_preview.draw(draw_frame);
                //small_preview.draw(draw_frame, true);

                detection_data.result_vec = result_vec;
                detection_data.draw_frame = draw_frame;
                if (send_network) draw2net.send(detection_data);
            } while (!detection_data.exit_flag);
            std::cout << " t_draw exit \n";
        });

    // send detection to the network
        t_network = std::thread([&]()
        {
            if (send_network) {
                detection_data_t detection_data;
                do {
                    detection_data = draw2net.receive();
                    //detection_data.
                    //detector.send_json_http(detection_data.result_vec, obj_names, detection_data.frame_id, filename);
                    // Only frames that contain at least one detection are uploaded.
                    if (detection_data.result_vec.size() > 0)
                    {
                        std::string text = getJsonText(detection_data, obj_names, filename);
                        //std::cout << text << "\n";
                        /*httplib::Params params{
                            { "originImage", "abc" },
                            { "labeledImage", "aaa" }
                        };*/
                        //std::cout << "begin post\n";
                        //auto res = cli.Post("/videoDetect/uploadDetectResult", params);
                        //auto res = cli.Post("/videoDetect/postTest", params);
                        //auto res = cli.Get("videoDetect/getTest");
                        //auto res = cli.Post("/videoDetect/uploadDetectResult", "{\"originImage\":\"abc\"}", "application/json");
                        // NOTE(review): the POST result is never inspected, so
                        // upload failures are silently ignored.
                        auto res = cli.Post("/videoDetect/uploadDetectResult", text, "application/json");
                       // std::cout << "end post\n" << res << "\n";
                        std::cout << detection_data.frame_id << "\n";
                    }
                } while (!detection_data.exit_flag);
            }
            std::cout << " t_network exit \n";
        });

        // wait for all threads
        if (t_cap.joinable()) t_cap.join();
        if (t_prepare.joinable()) t_prepare.join();
        if (t_detect.joinable()) t_detect.join();
        //if (t_post.joinable()) t_post.join();
        if (t_draw.joinable()) t_draw.join();
        //if (t_write.joinable()) t_write.join();
        if (t_network.joinable()) t_network.join();

    std::cout << "Hello World!\n";
}

// Run program: Ctrl + F5 or Debug > "Start Without Debugging" menu
// Debug program: F5 or Debug > "Start Debugging" menu

// Tips for getting started:
//   1. Use the Solution Explorer window to add/manage files
//   2. Use the Team Explorer window to connect to source control
//   3. Use the Output window to see build output and other messages
//   4. Use the Error List window to view errors
//   5. Go to Project > Add New Item to create new code files, or Project > Add Existing Item to add existing code files to the project
//   6. To open this project again later, go to File > Open > Project and select the .sln file
