﻿#include <iostream>
#include <chrono>

#include "rtmpose_utils.h"
#include "rtmdet_onnxruntime.h"
#include "rtmpose_onnxruntime.h"
#include "rtmpose_tracker_onnxruntime.h"

// Skeleton edge list: each pair is a (parent, child) keypoint index and is
// drawn as one limb segment. The link pattern ({0,1..4}, {0,5}, {5,9},
// {9,13}, {13,17} chains) matches a 21-keypoint hand model — despite the
// "coco" name; TODO confirm against the keypoint layout of the pose model.
// Declared const: it is a fixed lookup table and is only ever read.
const std::vector<std::pair<int, int>> coco_21_joint_links = {
    {0, 1}, {1, 2}, {2, 3}, {3, 4}, {0, 5}, {5, 6}, {6, 7}, {7, 8}, {5, 9}, {9, 10}, {10, 11}, {11, 12}, {9, 13}, {13, 14}, {14, 15}, {15, 16}, {13, 17}, {17, 18}, {18, 19}, {19, 20}};

/// Webcam demo: runs RTMDet person detection + RTMPose keypoint estimation on
/// each captured frame, draws the box / keypoints / skeleton, and overlays the
/// per-frame inference time in milliseconds.
/// Returns 0 on normal exit, 1 on model-load failure, camera failure, or an
/// unhandled inference exception.
int main()
{
     // Model paths (absolute Windows paths — adjust for your environment).
     std::string rtm_detnano_onnx_path = "F:/Code_cxx/RTMPose-CPU/resource/model/rtmpose-cpu/rtmpose-ort/rtmdet-nano/rtmdet_end2end.onnx";
     std::string rtm_pose_onnx_path = "F:/Code_cxx/RTMPose-CPU/resource/model/rtmpose-cpu/rtmpose-ort/rtmpose-m/end2end.onnx";

     RTMPoseTrackerOnnxruntime rtmpose_tracker_onnxruntime;
     RTMPose_Utils utils;
     // Load both models; the third argument (10) is presumably the re-detection
     // interval in frames — confirm in RTMPoseTrackerOnnxruntime::LoadModel.
     bool load_model_result = rtmpose_tracker_onnxruntime.LoadModel(rtm_detnano_onnx_path, rtm_pose_onnx_path, 10);
     if (!load_model_result)
     {
          // FIX: errors go to stderr and exit with a nonzero status
          // (originally printed to stdout and returned 0 = success).
          std::cerr << "onnx model loaded failed!!!!" << std::endl;
          return 1;
     }

     // Webcam 0 via DirectShow (Windows-specific backend).
     cv::VideoCapture video_reader(0, cv::CAP_DSHOW);
     if (!video_reader.isOpened())
     {
          std::cerr << "Could not open camera." << std::endl;
          return 1;
     }

     DetectBox detect_box;
     while (video_reader.isOpened())
     {
          cv::Mat image;
          video_reader >> image;
          if (image.empty())
          {
               std::cerr << "Could not read the image from camera." << std::endl;
               break;
          }

          // Start the per-frame timer.
          auto start = std::chrono::high_resolution_clock::now();

          // Letterbox-resize the frame to the network input size; `scale`
          // maps coordinates from the resized image back to the original.
          cv::Mat image_resize;
          float scale = utils.LetterBoxImage(image, image_resize, cv::Size(256, 256), 32, cv::Scalar(114, 114, 114), true);

          try
          {
               std::pair<DetectBox, std::vector<PosePoint>> inference_result = rtmpose_tracker_onnxruntime.Inference(image_resize);

               // Map the detection box back to original-image coordinates.
               detect_box = inference_result.first;
               detect_box.left = detect_box.left * scale;
               detect_box.right = detect_box.right * scale;
               detect_box.top = detect_box.top * scale;
               detect_box.bottom = detect_box.bottom * scale;

               // Map each keypoint back to original-image coordinates.
               std::vector<PosePoint> pose_result = inference_result.second;
               for (auto &point : pose_result)
               {
                    point.x = point.x * scale;
                    point.y = point.y * scale;
               }

               if (detect_box.IsValid())
               {
                    // Person bounding box.
                    cv::rectangle(
                        image,
                        cv::Point{detect_box.left, detect_box.top},
                        cv::Point{detect_box.right, detect_box.bottom},
                        cv::Scalar{0, 255, 0},
                        2);
                    // Keypoints.
                    for (const auto &point : pose_result)
                    {
                         cv::circle(
                             image,
                             cv::Point{static_cast<int>(point.x), static_cast<int>(point.y)},
                             2,
                             cv::Scalar{0, 0, 255},
                             2,
                             cv::LINE_AA);
                    }
                    // Skeleton links. FIX: guard against the model emitting
                    // fewer keypoints than the skeleton table expects — the
                    // original indexed pose_result unchecked (potential OOB).
                    for (const auto &link : coco_21_joint_links)
                    {
                         if (link.first < 0 || link.second < 0 ||
                             static_cast<size_t>(link.first) >= pose_result.size() ||
                             static_cast<size_t>(link.second) >= pose_result.size())
                              continue;
                         cv::line(
                             image,
                             cv::Point(pose_result[link.first].x, pose_result[link.first].y),
                             cv::Point(pose_result[link.second].x, pose_result[link.second].y),
                             cv::Scalar(255, 0, 0),
                             2,
                             cv::LINE_AA);
                    }
               }

               auto end = std::chrono::high_resolution_clock::now();
               // FIX: measure in milliseconds. The original used
               // duration<float> (seconds) but labeled the value "ms",
               // under-reporting by a factor of 1000.
               std::chrono::duration<double, std::milli> elapsed = end - start;
               std::string time_str = std::to_string(elapsed.count()) + "ms";

               // Overlay the inference time on the frame.
               int fontFace = cv::FONT_HERSHEY_COMPLEX;
               double fontScale = 1.0;
               int thickness = 2;
               cv::Point textOrg(50, 50);
               cv::putText(image, time_str, textOrg, fontFace, fontScale, cv::Scalar(0, 0, 255), thickness, 8);

               cv::imshow("Pose Estimation", image);
               // Exit the loop on any key press.
               if (cv::waitKey(1) >= 0)
                    break;
          }
          catch (const Ort::Exception &e)
          {
               std::cerr << "ONNX Runtime exception: " << e.what() << std::endl;
               return 1;
          }
          catch (const std::exception &e)
          {
               std::cerr << "Standard exception: " << e.what() << std::endl;
               return 1;
          }
          catch (...)
          {
               std::cerr << "Unknown exception" << std::endl;
               return 1;
          }
     }
     video_reader.release();
     cv::destroyAllWindows();

     return 0;
}