#include <ros/ros.h>
#include <ros/package.h>
#include <sensor_msgs/Image.h>
#include <cv_bridge/cv_bridge.h>
#include <geometry_msgs/PoseStamped.h>
#include <yolo_detect/YoloResult.h>
#include <traj_utils/DetectPose.h>

#include "cuda_utils.h"
#include "logging.h"
#include "utils.h"
#include "preprocess.h"
#include "postprocess.h"
#include "model.h"

#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <limits>
#include <numeric>
#include <optional>
#include <string>
#include <vector>

// test for visualization
#include "tracker/sord3d.hpp"
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Geometry>

using namespace nvinfer1;

// TensorRT logger instance shared by the whole translation unit.
static Logger gLogger;
// Number of floats in one inference output buffer: the packed Detection
// array plus one extra float (presumably a valid-detection count written by
// the engine ahead of the boxes — TODO confirm against the model's output
// layout in model.h/postprocess.h).
const static int kOutputSize =
    kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;

// test for visualization
// Wraps a 3-D SORT tracker: lifts 2-D detections (pixel + depth) into the
// world frame, tracks them, publishes the tracks, and projects them back
// into the image for drawing.
class ObjectTracker3D
{
public:
  ObjectTracker3D() {}

  // Load camera intrinsics and SORT-3D parameters from the parameter
  // server, then set up the tracker and the result publisher.
  void init(ros::NodeHandle &nh) {

    nh.param("camera/color/fx", fx_, 610.8905);
    nh.param("camera/color/fy", fy_, 610.8898);
    nh.param("camera/color/cx", cx_, 322.7573);
    nh.param("camera/color/cy", cy_, 250.6328);

    std::cout << "[YOLO_DETECT] fx: " << fx_ << std::endl;
    std::cout << "[YOLO_DETECT] fy: " << fy_ << std::endl;
    std::cout << "[YOLO_DETECT] cx: " << cx_ << std::endl;
    std::cout << "[YOLO_DETECT] cy: " << cy_ << std::endl;

    double max_age, dist_thresh;
    int min_hits;
    nh.param("tracker/Sord3d/max_age", max_age, 2.5);
    nh.param("tracker/Sord3d/min_hits", min_hits, 0);
    nh.param("tracker/Sord3d/dist_thresh", dist_thresh, 2.0);

    std::cout << "[SORT3D] max_age    : " << max_age << std::endl;
    std::cout << "[SORT3D] min_hits   : " << min_hits << std::endl;
    std::cout << "[SORT3D] dist_thresh: " << dist_thresh << std::endl;
    sort3d_.init(max_age, min_hits, dist_thresh);

    track_pub_ =
        nh.advertise<traj_utils::DetectPose>("/perception/track_result", 1);

    last_time_ = ros::Time::now();
  }

  // Feed one frame of detections into the tracker.
  //   poses    — per detection: (pixel_x, pixel_y, depth[m])
  //   p_cam    — camera position in the world frame
  //   q_cam    — camera orientation (world <- camera)
  //   img_time — image stamp, used for the tracker's dt and the message
  // Returns one (track_id, pixel_x, pixel_y) triple per active track,
  // suitable for overlaying IDs on the image.
  std::vector<Eigen::Vector3d> update(const std::vector<Eigen::Vector3d> &poses,
                                      const Eigen::Vector3d &p_cam,
                                      const Eigen::Quaterniond &q_cam,
                                      ros::Time img_time) {
    const double dt = img_time.toSec() - last_time_.toSec();
    last_time_ = img_time;

    // Lift every detection: pixel + depth -> camera frame -> world frame.
    std::vector<Eigen::Vector3d> world_pts;
    world_pts.reserve(poses.size());
    for (const auto &uvd : poses) {
      world_pts.push_back(p_cam +
                          q_cam * pixelToCamera(uvd.x(), uvd.y(), uvd.z()));
    }

    // Each entry is (id, world_x, world_y, world_z).
    const std::vector<Eigen::Vector4d> tracks = sort3d_.update(world_pts, dt);

    // Project every surviving track back onto the image plane.
    const Eigen::Quaterniond world_to_cam = q_cam.inverse();
    std::vector<Eigen::Vector3d> ret(tracks.size());
    for (size_t i = 0; i < tracks.size(); i++) {
      const Eigen::Vector3d p_world = tracks[i].tail(3);
      const Eigen::Vector3d p_c = world_to_cam * (p_world - p_cam);
      ret[i] << tracks[i][0], cameraToPixel(p_c);
    }

    // Publish (id, world position) pairs for downstream consumers.
    traj_utils::DetectPose track_msg;
    track_msg.header.stamp = img_time;
    for (const auto &track : tracks) {
      track_msg.ids.push_back(track(0));
      geometry_msgs::Point point;
      point.x = track(1);
      point.y = track(2);
      point.z = track(3);
      track_msg.poses.push_back(point);
    }
    track_pub_.publish(track_msg);

    return ret;
  }

private:
  // Pinhole intrinsics; defaults are overwritten in init().
  double fx_{610.8905};
  double fy_{610.8898};
  double cx_{322.7573};
  double cy_{250.6328};

  Sort3D sort3d_;

  ros::Time last_time_{ros::Time(0)};
  ros::Publisher track_pub_;

  // Back-project pixel (x, y) at `depth` meters into the camera frame.
  Eigen::Vector3d pixelToCamera(double x, double y, double depth) {
    return {(x - cx_) * depth / fx_, (y - cy_) * depth / fy_, depth};
  }

  // Project a camera-frame point onto the image plane.
  Eigen::Vector2d cameraToPixel(const Eigen::Vector3d &point) {
    return {point.x() * fx_ / point.z() + cx_,
            point.y() * fy_ / point.z() + cy_};
  }
};

// ROS node wrapping a TensorRT YOLOv5 engine: subscribes to color image,
// aligned depth image and camera pose; runs detection on each color frame,
// feeds detections to the 3-D tracker, and publishes an annotated image.
class YoloV5ROSNode
{
  // A detection's bbox-center pixel plus its estimated depth in meters.
  struct XYZ_t
  {
    int pixel_x;
    int pixel_y;
    float depth;
  };

public:
  YoloV5ROSNode(ros::NodeHandle &nh, std::string &_engine_path) : nh_(nh) {
    // Read ROS parameters.
    nh_.param<int>("gpu_id", gpu_id_, 0);

    // Select the GPU device.
    cudaSetDevice(gpu_id_);

    // Deserialize the engine from file.
    std::string engine_path = _engine_path;
    deserialize_engine(engine_path, &runtime_, &engine_, &context_);
    CUDA_CHECK(cudaStreamCreate(&stream_));

    // Init CUDA preprocessing.
    cuda_preprocess_init(kMaxInputImageSize);

    // Prepare cpu and gpu buffers.
    prepare_buffers(
        engine_, &gpu_buffers_[0], &gpu_buffers_[1], &cpu_output_buffer_);

    // Subscribe to camera topics and advertise results.
    image_sub_ = nh_.subscribe(
        "/camera/color/image_raw", 1, &YoloV5ROSNode::image_callback, this);
    depth_img_sub_ = nh_.subscribe("/camera/aligned_depth_to_color/image_raw",
                                   1,
                                   &YoloV5ROSNode::depth_img_callback,
                                   this);
    camera_pose_sub_ = nh_.subscribe(
        "/camera/pose", 1, &YoloV5ROSNode::camera_pose_callback, this);
    image_pub_ = nh_.advertise<sensor_msgs::Image>("/perception/yolo_img", 1);
    result_pub_ =
        nh_.advertise<yolo_detect::YoloResult>("/perception/yolo_result", 1);

    tracker_.init(nh_);
  }

  ~YoloV5ROSNode() {
    // Release stream and buffers.
    cudaStreamDestroy(stream_);
    CUDA_CHECK(cudaFree(gpu_buffers_[0]));
    CUDA_CHECK(cudaFree(gpu_buffers_[1]));
    delete[] cpu_output_buffer_;
    cuda_preprocess_destroy();
    // Destroy the engine (context before engine before runtime).
    delete context_;
    delete engine_;
    delete runtime_;
  }

private:
  // ros
  ros::NodeHandle nh_;
  ros::Subscriber image_sub_, depth_img_sub_, camera_pose_sub_;
  ros::Publisher image_pub_, result_pub_;

  // Latest pose/depth; empty until the first message arrives.
  std::optional<geometry_msgs::PoseStamped> camera_pose_;
  std::optional<cv::Mat> depth_img_;

  // test for visualization
  ObjectTracker3D tracker_;

  // yolo
  int gpu_id_;

  IRuntime *runtime_ = nullptr;
  ICudaEngine *engine_ = nullptr;
  IExecutionContext *context_ = nullptr;
  cudaStream_t stream_;

  float *gpu_buffers_[2];  // [0] = input, [1] = output (device memory)
  float *cpu_output_buffer_ = nullptr;

  // functions

  void camera_pose_callback(const geometry_msgs::PoseStamped::ConstPtr &msg) {
    camera_pose_ = *msg;
  }

  void depth_img_callback(const sensor_msgs::ImageConstPtr &depth_img_msg) {
    cv_bridge::CvImagePtr cv_ptr =
        cv_bridge::toCvCopy(depth_img_msg, depth_img_msg->encoding);
    depth_img_ = cv_ptr->image;
  }

  // Main callback: preprocess -> TensorRT inference -> NMS -> draw boxes,
  // then (if depth + pose are available) lift detections to 3-D, run the
  // tracker, overlay track IDs, and publish the annotated image.
  void image_callback(const sensor_msgs::ImageConstPtr &img_msg) {
    auto start = std::chrono::high_resolution_clock::now();
    cv_bridge::CvImagePtr cv_ptr =
        cv_bridge::toCvCopy(img_msg, img_msg->encoding);
    cv::Mat img = cv_ptr->image;

    // Preprocess (resize/normalize on the GPU).
    cuda_preprocess(img.ptr(),
                    img.cols,
                    img.rows,
                    gpu_buffers_[0],
                    kInputW,
                    kInputH,
                    stream_);
    CUDA_CHECK(cudaStreamSynchronize(stream_));

    // Inference.
    infer(*context_, stream_, (void **)gpu_buffers_, cpu_output_buffer_, 1);

    // Postprocess and NMS.
    std::vector<Detection> result;
    nms(result, cpu_output_buffer_, kConfThresh, kNmsThresh);

    // auto end = std::chrono::high_resolution_clock::now();
    // std::chrono::duration<double, std::milli> fp_ms = end - start;
    // std::cout << "Inference time: " << fp_ms.count() << " ms" << std::endl;

    // Draw detection results.
    draw_bbox(img, result, true);

    // test: dump frames whose depth fell back to the threshold value.
    bool isSave = false;

    // test for visualization
    // only 0.05ms below
    auto test_start = std::chrono::high_resolution_clock::now();
    if (depth_img_.has_value() && camera_pose_.has_value()) {
      std::vector<Eigen::Vector3d> poses;
      for (const auto &det : result) {
        XYZ_t xyz = getXYZ(get_rect(img, det.bbox));
        // depth == threshold (0.2) marks the clamped-fallback case in
        // getXYZ; those frames are saved to disk for offline inspection.
        if (xyz.depth == 0.2f) {
          std::cout << "depth is 0.2" << std::endl;
        }
        isSave |= (xyz.depth == 0.2f);
        poses.push_back(Eigen::Vector3d(xyz.pixel_x, xyz.pixel_y, xyz.depth));
      }
      auto ret = tracker_.update(poses,
                                 {camera_pose_->pose.position.x,
                                  camera_pose_->pose.position.y,
                                  camera_pose_->pose.position.z},
                                 {camera_pose_->pose.orientation.w,
                                  camera_pose_->pose.orientation.x,
                                  camera_pose_->pose.orientation.y,
                                  camera_pose_->pose.orientation.z},
                                 img_msg->header.stamp);

      // Overlay each track id at its projected pixel position.
      for (auto &id_xy : ret) {
        cv::putText(img,
                    std::to_string((int)id_xy[0]),
                    cv::Point(id_xy[1], id_xy[2]),
                    cv::FONT_HERSHEY_PLAIN,
                    1.2,
                    cv::Scalar(0xFF, 0xFF, 0xFF),
                    2);
      }
    }

    if (isSave) {
      std::cout << "save image" << std::endl;
      std::string pic_name = "/home/nvidia/Pictures/" +
                             std::to_string(img_msg->header.stamp.toSec());
      cv::imwrite(pic_name + ".jpg", img);
      cv::imwrite(pic_name + "_depth.jpg", depth_img_.value());
    }

    // // pub
    // if (depth_img_.has_value() && camera_pose_.has_value()) {
    //   yolo_detect::YoloResult yolo_result;
    //   yolo_result.header = img_msg->header;
    //   yolo_result.cam_pose = camera_pose_.value();
    //   geometry_msgs::Point point;
    //   for (const auto &det : result) {
    //     XYZ_t xyz = getXYZ(get_rect(img, det.bbox));
    //     point.x = xyz.pixel_x;
    //     point.y = xyz.pixel_y;
    //     point.z = xyz.depth;
    //     yolo_result.poses.push_back(point);
    //   }
    //   result_pub_.publish(yolo_result);
    // }

    // visualization
    cv_bridge::CvImage cv_image;
    // Propagate the source header so the published image carries the
    // original stamp/frame (previously left empty).
    cv_image.header = img_msg->header;
    cv_image.image = img;
    cv_image.encoding = img_msg->encoding;

    sensor_msgs::Image ros_image;
    cv_image.toImageMsg(ros_image);
    image_pub_.publish(ros_image);
  }

  // Estimate a detection's 3-D position: the bbox center pixel plus a
  // robust depth taken from a small patch around that center.
  //   threshold — minimum acceptable depth in METERS (default 0.2)
  //   n         — number of smallest patch depths averaged together
  // The aligned depth image is CV_16U in millimeters; the returned depth is
  // converted to meters.
  XYZ_t getXYZ(const cv::Rect &bbox, float threshold = 0.2, int n = 3) {
    if (!depth_img_.has_value()) return {};

    // Bbox center and the sampling-patch size (capped at 10x4 px).
    XYZ_t ret;
    ret.pixel_x = bbox.x + bbox.width / 2;
    ret.pixel_y = bbox.y + bbox.height / 2;
    int small_width = std::min(10, bbox.width);
    int small_height = std::min(4, bbox.height);

    const auto &depth_img = depth_img_.value();

    // Patch bounds, clamped to the image.
    int small_x1 = std::max(0, ret.pixel_x - small_width / 2);
    int small_y1 = std::max(0, ret.pixel_y - small_height / 2);
    int small_x2 = std::min(depth_img.cols, ret.pixel_x + small_width / 2);
    int small_y2 = std::min(depth_img.rows, ret.pixel_y + small_height / 2);

    // Collect valid depths. Raw values are millimeters, so the meter
    // threshold must be converted before comparing. (The previous code
    // compared raw mm against 0.2, which only rejected exact zeros and let
    // bogus 1-199 mm returns through; the always-true float-max check is
    // dropped as well.)
    const float min_depth_mm = threshold * 1000.0f;
    std::vector<float> depths;
    depths.reserve((small_x2 - small_x1) * (small_y2 - small_y1));
    for (int y = small_y1; y < small_y2; ++y) {
      // CV_16U rows
      const uint16_t *row_ptr = depth_img.ptr<uint16_t>(y);
      for (int x = small_x1; x < small_x2; ++x) {
        float depth = static_cast<float>(row_ptr[x]);
        if (depth > min_depth_mm) {
          depths.push_back(depth);
        }
      }
    }

    // No valid patch depth: fall back to the (clamped) center pixel.
    if (depths.empty()) {
      const int cy = std::min(std::max(ret.pixel_y, 0), depth_img.rows - 1);
      const int cx = std::min(std::max(ret.pixel_x, 0), depth_img.cols - 1);
      float center_depth =
          static_cast<float>(depth_img.at<uint16_t>(cy, cx)) * 0.001f;
      // Clamp to [threshold, 16] m; hitting the lower bound is the signal
      // the caller uses to dump the frame for inspection.
      ret.depth = std::max(std::min(center_depth, 16.0f), threshold);
      if (ret.depth == threshold) {
        std::cout << "[WARNING] depth value is threshold! center_depth: "
                  << center_depth << std::endl;
      }
      return ret;
    }

    // Average the n smallest depths (nth_element leaves them in
    // [begin, begin + n), unordered).
    n = std::min(n, static_cast<int>(depths.size()));
    std::nth_element(depths.begin(), depths.begin() + n, depths.end());

    float d = std::accumulate(depths.begin(), depths.begin() + n, 0.0f) / n;

    // Convert millimeters to meters.
    ret.depth = d * 0.001f;
    return ret;
  }

  // Load a serialized TensorRT engine from disk and create the runtime,
  // engine and execution context. Aborts via assert on any failure.
  void deserialize_engine(std::string &engine_name,
                          IRuntime **runtime,
                          ICudaEngine **engine,
                          IExecutionContext **context) {
    std::ifstream file(engine_name, std::ios::binary);
    if (!file.good()) {
      std::cerr << "read " << engine_name << " error!" << std::endl;
      assert(false);
    }
    file.seekg(0, file.end);
    const size_t size = file.tellg();
    file.seekg(0, file.beg);
    // RAII buffer instead of raw new[]/delete[] (also exception-safe).
    std::vector<char> serialized_engine(size);
    file.read(serialized_engine.data(), size);
    file.close();

    *runtime = createInferRuntime(gLogger);
    assert(*runtime);
    *engine = (*runtime)->deserializeCudaEngine(serialized_engine.data(), size);
    assert(*engine);
    *context = (*engine)->createExecutionContext();
    assert(*context);
  }

  // Enqueue one batch, copy the output tensor back to host memory and wait
  // for the stream to drain.
  void infer(IExecutionContext &context,
             cudaStream_t &stream,
             void **gpu_buffers,
             float *output,
             int batch_size) {
    context.enqueue(batch_size, gpu_buffers, stream, nullptr);
    CUDA_CHECK(cudaMemcpyAsync(output,
                               gpu_buffers[1],
                               batch_size * kOutputSize * sizeof(float),
                               cudaMemcpyDeviceToHost,
                               stream));
    // Checked for consistency with the other stream syncs in this file.
    CUDA_CHECK(cudaStreamSynchronize(stream));
  }

  // Allocate the device input/output buffers and the host output buffer.
  void prepare_buffers(ICudaEngine *engine,
                       float **gpu_input_buffer,
                       float **gpu_output_buffer,
                       float **cpu_output_buffer) {
    assert(engine->getNbBindings() == 2);
    // In order to bind the buffers, we need to know the names of the input and
    // output tensors. Note that indices are guaranteed to be less than
    // IEngine::getNbBindings()
    const int inputIndex = engine->getBindingIndex(kInputTensorName);
    const int outputIndex = engine->getBindingIndex(kOutputTensorName);
    assert(inputIndex == 0);
    assert(outputIndex == 1);
    // Create GPU buffers on device
    CUDA_CHECK(cudaMalloc((void **)gpu_input_buffer,
                          kBatchSize * 3 * kInputH * kInputW * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void **)gpu_output_buffer,
                          kBatchSize * kOutputSize * sizeof(float)));

    *cpu_output_buffer = new float[kBatchSize * kOutputSize];
  }
};

// Entry point: start the node and hand control to the ROS event loop.
int main(int argc, char **argv) {
  ros::init(argc, argv, "yolov5_ros_node");
  ros::NodeHandle nh("~");

  // The serialized engine ships inside the yolo_detect package.
  const std::string package_path = ros::package::getPath("yolo_detect");
  std::string engine_path = package_path + "/cfg/yolov5s.engine";

  YoloV5ROSNode node(nh, engine_path);
  ros::spin();

  return 0;
}
