#include <iostream>
#include <stdexcept>
#include <type_traits>

#include "utils/image_processing/image_process.h"

namespace tcv {

/**
 * @brief Reads a single image from disk into an image::Image wrapper.
 *
 * @param im Output image; width/height and the pixel-data pointer are set here.
 * @param image_path Path to the image file on disk.
 * @throws std::runtime_error if the file does not exist or cannot be decoded.
 *
 * NOTE(review): `im.data` is assigned the buffer of the local cv::Mat `img`,
 * which is released when `img` goes out of scope at the end of this function —
 * the stored pointer looks like it dangles unless image::Image copies or
 * ref-counts the buffer internally. Confirm image::Image ownership semantics.
 */
void read_single_image(image::Image& im, const std::string& image_path) {
  // Fail fast with a distinct message when the path itself is wrong.
  if (!fs_utils::FileExists(image_path)) {
    throw std::runtime_error("Failed to find image file [" + image_path + "]");
  }

  // cv::imread returns an empty Mat on decode failure (e.g. corrupt file).
  cv::Mat img = cv::imread(image_path, cv::IMREAD_COLOR);
  if (img.empty()) {
    throw std::runtime_error("Failed to read image file [" + image_path + "]");
  }

  im.data = img.data; // see NOTE(review) above about this buffer's lifetime
  im.width = img.cols;
  im.height = img.rows;

  // TODO using logger to replace std::cout
  std::cout << "DEBUG: "
            << "Done reading Image with size: " << im.width << "x" << im.height
            << std::endl;
}

/**
 * @brief Loads every image listed in `im_path_list` into `ims`.
 *
 * Any previous contents of `ims` are discarded (with a warning) before the
 * batch is read.
 *
 * @param ims Output vector, filled with one image::Image per path.
 * @param im_path_list Paths of the images to load; must be non-empty.
 * @throws std::runtime_error if the list is empty or any single read fails.
 */
void read_batch_images(std::vector<image::Image>& ims,
                       const std::vector<std::string>& im_path_list) {
  if (im_path_list.empty()) {
    throw std::runtime_error("Failed to get images list which is empty");
  }

  if (!ims.empty()) {
    ims.clear();
    std::cout << "WARNING: "
              << "Clearing previous images" << std::endl;
  }

  std::size_t index = 0;
  for (const auto& path : im_path_list) {
    image::Image im;
    read_single_image(im, path);
    ims.emplace_back(im);
    // TODO using logger to replace std::cout
    std::cout << "DEBUG: "
              << "Done reading Image batch >>>>>> No." << index << std::endl;
    ++index;
  }
}

/**
 * @brief Resizes `src` to fit inside a `dst_size` canvas without changing the
 *        aspect ratio, padding the short side with `color` (letterboxing).
 *
 * @param src Input image.
 * @param dst_size Target canvas size (width and height are equal).
 * @param color Padding color (default black, per the declaration).
 * @return cv::Mat The letterboxed image of size `dst_size`.
 */
cv::Mat LetterBox(const cv::Mat& src, const cv::Size& dst_size,
                  const cv::Scalar& color) {
  // Single scale factor that fits the whole image inside the canvas.
  const float scale = std::min(dst_size.width / (float)src.cols,
                               dst_size.height / (float)src.rows);
  const int resized_w = static_cast<int>(src.cols * scale);
  const int resized_h = static_cast<int>(src.rows * scale);

  // Aspect-ratio-preserving resize.
  cv::Mat resized;
  cv::resize(src, resized, cv::Size(resized_w, resized_h));

  // Canvas of the target size, filled entirely with the padding color.
  cv::Mat canvas = cv::Mat::zeros(dst_size.height, dst_size.width, src.type());
  canvas.setTo(color);

  // Center the resized image: split the padding evenly (integer division).
  const int offset_y = (dst_size.height - resized_h) / 2;
  const int offset_x = (dst_size.width - resized_w) / 2;

  // Paste the resized image into the padded canvas.
  resized.copyTo(
    canvas(cv::Rect(offset_x, offset_y, resized.cols, resized.rows)));

  return canvas;
}

/**
 * @brief Enhances local contrast with CLAHE on the lightness channel only.
 *
 * The image is converted to LAB, CLAHE (clip limit 4.0) is applied to the L
 * plane so hue/chroma stay untouched, then the result goes back to BGR.
 *
 * @param src Input BGR image.
 * @return cv::Mat Contrast-enhanced BGR image.
 */
cv::Mat ApplyCLAHE(const cv::Mat& src) {
  // Work in LAB so the enhancement touches only lightness, not color.
  cv::Mat lab;
  cv::cvtColor(src, lab, cv::COLOR_BGR2Lab);

  std::vector<cv::Mat> planes;
  cv::split(lab, planes);

  // Run CLAHE on the L (lightness) plane.
  cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE();
  clahe->setClipLimit(4.0); // clip limit caps the contrast amplification
  clahe->apply(planes[0], planes[0]);

  // Reassemble the planes and convert back to BGR.
  cv::merge(planes, lab);
  cv::Mat enhanced;
  cv::cvtColor(lab, enhanced, cv::COLOR_Lab2BGR);

  return enhanced;
}

/**
 * @brief Preprocesses an image into a CHW fp16 tensor (bits held as uint16_t).
 *
 * Pipeline: letterbox to the model input size, scale pixels to [0, 1],
 * convert BGR -> RGB, then split channels and emit them in CHW order as raw
 * IEEE-754 half-precision bit patterns.
 *
 * @param image Input BGR image.
 * @param input_shape Model input shape; indices [1], [2] and [3] are used as
 *        the channel count and the two spatial extents — assumed NCHW-like.
 *        TODO confirm whether [2]/[3] map to width/height as used here.
 * @return Flattened CHW tensor of fp16 bit patterns.
 * @throws std::runtime_error if the input image is empty.
 */
template<>
std::vector<uint16_t> PreprocessImage(const cv::Mat& image,
                                      std::vector<int64_t>& input_shape) {
  if (image.empty()) {
    throw std::runtime_error("Could not read the image");
  }

  // Step 1: contrast enhancement is disabled on this fp16 path
  // (the float specialization applies CLAHE; see PreprocessImage<float>).

  // Step 2: letterbox to the network's spatial size.
  cv::Mat boxed = LetterBox(image, cv::Size(input_shape[2], input_shape[3]));

  // Step 3: normalize pixel values to [0, 1] as 32-bit float.
  boxed.convertTo(boxed, CV_32F, 1.0 / 255);

  // Step 4: OpenCV decodes BGR; the model expects RGB.
  cv::cvtColor(boxed, boxed, cv::COLOR_BGR2RGB);

  // Step 5: flatten HWC -> CHW, converting each plane to fp16 on the way.
  std::vector<uint16_t> tensor;
  tensor.reserve(input_shape[1] * input_shape[2] * input_shape[3]);

  std::vector<cv::Mat> fp32_planes(3), fp16_planes(3);
  cv::split(boxed, fp32_planes);

  const std::size_t plane_size = input_shape[2] * input_shape[3];
  for (int c = 0; c < 3; ++c) {
    fp32_planes[c].convertTo(fp16_planes[c], CV_16FC1);
    const uint16_t* bits =
      reinterpret_cast<const uint16_t*>(fp16_planes[c].data);
    tensor.insert(tensor.end(), bits, bits + plane_size);
  }

  return tensor;
}

/**
 * @brief Preprocesses an image into a CHW float32 tensor.
 *
 * Pipeline: CLAHE contrast enhancement, letterbox to the model input size,
 * scale pixels to [0, 1], convert BGR -> RGB, then split channels and emit
 * them in CHW order.
 *
 * @param image Input BGR image.
 * @param input_shape Model input shape; indices [1], [2] and [3] are used as
 *        the channel count and the two spatial extents — assumed NCHW-like.
 *        TODO confirm whether [2]/[3] map to width/height as used here.
 * @return Flattened CHW float tensor.
 * @throws std::runtime_error if the input image is empty.
 */
template<>
std::vector<float> PreprocessImage(const cv::Mat& image,
                                   std::vector<int64_t>& input_shape) {
  if (image.empty()) {
    throw std::runtime_error("Could not read the image");
  }

  // Step 1: contrast enhancement (CLAHE on the lightness channel).
  cv::Mat enhanced = ApplyCLAHE(image);

  // Step 2: letterbox to the network's spatial size.
  cv::Mat boxed =
    LetterBox(enhanced, cv::Size(input_shape[2], input_shape[3]));

  // Step 3: normalize pixel values to [0, 1] as 32-bit float.
  boxed.convertTo(boxed, CV_32F, 1.0 / 255);

  // Step 4: OpenCV decodes BGR; the model expects RGB.
  cv::cvtColor(boxed, boxed, cv::COLOR_BGR2RGB);

  // Step 5: flatten HWC -> CHW.
  std::vector<float> tensor;
  tensor.reserve(input_shape[1] * input_shape[2] * input_shape[3]);

  std::vector<cv::Mat> planes(3);
  cv::split(boxed, planes);

  const std::size_t plane_size = input_shape[2] * input_shape[3];
  for (int c = 0; c < 3; ++c) {
    const float* begin = reinterpret_cast<const float*>(planes[c].data);
    tensor.insert(tensor.end(), begin, begin + plane_size);
  }

  return tensor;
}

/**
 * @brief Computes the Intersection over Union (IoU) of two bounding boxes.
 *
 * @param boxA First bounding box.
 * @param boxB Second bounding box.
 * @return IoU value in [0, 1]. Returns 0 when the union area is empty (e.g.
 *         both boxes are degenerate), instead of producing a 0/0 NaN.
 */
float computeIOU(const cv::Rect& boxA, const cv::Rect& boxB) {
  const int xA = std::max(boxA.x, boxB.x);
  const int yA = std::max(boxA.y, boxB.y);
  const int xB = std::min(boxA.x + boxA.width, boxB.x + boxB.width);
  const int yB = std::min(boxA.y + boxA.height, boxB.y + boxB.height);

  // Clamp to 0 so disjoint boxes yield an empty intersection, not a
  // negative-times-negative false positive.
  const int interArea = std::max(0, xB - xA) * std::max(0, yB - yA);

  const int boxAArea = boxA.width * boxA.height;
  const int boxBArea = boxB.width * boxB.height;

  const int unionArea = boxAArea + boxBArea - interArea;
  // Guard: two zero-area boxes would otherwise divide 0 by 0 (NaN), which
  // then poisons every comparison made with the result.
  if (unionArea <= 0) {
    return 0.0f;
  }
  return static_cast<float>(interArea) / unionArea;
}

/**
 * @brief 对一组检测框 使用 Soft-NMS 减少重叠的框
 *
 * @param detections Vector of detections to process.
 * @param sigma Soft-NMS parameter controlling the Gaussian function's width.
 * Default is 0.5.
 * @param iou_threshold IoU threshold for suppression. Default is 0.3.
 */
void applySoftNMS(std::vector<Detection>& detections, float sigma = 0.5,
                  float iou_threshold = 0.3) {
  for (size_t i = 0; i < detections.size(); ++i) {
    for (size_t j = i + 1; j < detections.size(); ++j) {
      float iou = computeIOU(detections[i].bbox, detections[j].bbox);
      if (iou > iou_threshold) {
        // Apply the Soft-NMS score decay formula
        detections[j].confidence *= std::exp(-iou * iou / sigma);
      }
    }
  }

  // Remove detections with low confidence scores
  detections.erase(
    std::remove_if(detections.begin(), detections.end(),
                   [](const Detection& det) { return det.confidence < 0.001; }),
    detections.end());
}

/**
 * @brief Converts a raw output tensor into Detection objects: keeps entries
 *        above the confidence threshold, maps boxes back from letterboxed to
 *        original image coordinates, and refines the set with Soft-NMS.
 *
 * Each detection occupies 6 floats in `results`:
 * left, top, right, bottom, confidence, class id.
 *
 * @param results Flat output tensor (6 floats per detection).
 * @param confidence_threshold Minimum confidence to keep a detection.
 * @param img_width Network input (letterboxed) width.
 * @param img_height Network input (letterboxed) height.
 * @param orig_width Original image width.
 * @param orig_height Original image height.
 * @return Detections in original-image coordinates after Soft-NMS.
 */
std::vector<Detection> FilterDetections(const std::vector<float>& results,
                                        float confidence_threshold,
                                        int img_width, int img_height,
                                        int orig_width, int orig_height) {
  const int num_detections = results.size() / 6;

  // Reproduce the LetterBox transform so it can be inverted below.
  const float scale =
    std::min(img_width / (float)orig_width, img_height / (float)orig_height);
  const int scaled_w = static_cast<int>(orig_width * scale);
  const int scaled_h = static_cast<int>(orig_height * scale);
  const int pad_x = (img_width - scaled_w) / 2;
  const int pad_y = (img_height - scaled_h) / 2;

  std::vector<Detection> detections;
  detections.reserve(num_detections);

  for (int i = 0; i < num_detections; ++i) {
    const float* entry = &results[i * 6];
    const float confidence = entry[4];
    if (confidence < confidence_threshold) {
      continue; // below threshold — skip without any coordinate work
    }

    const int class_id = static_cast<int>(entry[5]);

    // Undo padding and scaling to get original-image coordinates.
    const float left = (entry[0] - pad_x) / scale;
    const float top = (entry[1] - pad_y) / scale;
    const float right = (entry[2] - pad_x) / scale;
    const float bottom = (entry[3] - pad_y) / scale;

    const int x = static_cast<int>(left);
    const int y = static_cast<int>(top);
    const int w = static_cast<int>(right - left);
    const int h = static_cast<int>(bottom - top);

    detections.push_back(
      {confidence, cv::Rect(x, y, w, h), class_id, CLASS_NAMES[class_id]});
  }

  // Refine overlapping boxes; sigma and IoU threshold are tunable.
  applySoftNMS(detections, 0.5, 0.3);

  return detections;
}

/**
 * @brief Draws detection boxes and class labels onto a copy of the image.
 *
 * @param image Input image (left untouched; a clone is annotated).
 * @param detections Detections to render.
 * @return Annotated copy of the input image.
 */
cv::Mat DrawLabels(const cv::Mat& image,
                   const std::vector<Detection>& detections) {
  cv::Mat annotated = image.clone();

  for (const auto& det : detections) {
    // Green bounding box around the detection.
    cv::rectangle(annotated, det.bbox, cv::Scalar(0, 255, 0), 2);

    const std::string label =
      det.class_name + ": " + std::to_string(det.confidence);

    // Measure the label so the background strip can be sized to fit it.
    int baseline = 0;
    const cv::Size text_size =
      cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseline);

    // White filled strip behind the text for readability.
    cv::rectangle(annotated,
                  cv::Point(det.bbox.x, det.bbox.y - text_size.height),
                  cv::Point(det.bbox.x + text_size.width,
                            det.bbox.y + baseline),
                  cv::Scalar(255, 255, 255), cv::FILLED);

    // Black label text on top of the white strip.
    cv::putText(annotated, label, cv::Point(det.bbox.x, det.bbox.y),
                cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0), 1);
  }

  return annotated;
}

} // namespace tcv
