#include "detector/onnx_detector.h"
#include <fstream>
#include <sstream>
#include <stdexcept>
#include <opencv2/imgproc.hpp>

// COCO-80 class names, indexed by the class id emitted by the model.
// The order follows the standard COCO/Darknet label list; changing the order
// would silently mislabel every detection, hence the static_assert below.
static constexpr const char* const coco_names[] = {
    "person",
    "bicycle",
    "car",
    "motorbike",
    "aeroplane",
    "bus",
    "train",
    "truck",
    "boat",
    "traffic light",
    "fire hydrant",
    "stop sign",
    "parking meter",
    "bench",
    "bird",
    "cat",
    "dog",
    "horse",
    "sheep",
    "cow",
    "elephant",
    "bear",
    "zebra",
    "giraffe",
    "backpack",
    "umbrella",
    "handbag",
    "tie",
    "suitcase",
    "frisbee",
    "skis",
    "snowboard",
    "sports ball",
    "kite",
    "baseball bat",
    "baseball glove",
    "skateboard",
    "surfboard",
    "tennis racket",
    "bottle",
    "wine glass",
    "cup",
    "fork",
    "knife",
    "spoon",
    "bowl",
    "banana",
    "apple",
    "sandwich",
    "orange",
    "broccoli",
    "carrot",
    "hot dog",
    "pizza",
    "donut",
    "cake",
    "chair",
    "sofa",
    "pottedplant",
    "bed",
    "diningtable",
    "toilet",
    "tvmonitor",
    "laptop",
    "mouse",
    "remote",
    "keyboard",
    "cell phone",
    "microwave",
    "oven",
    "toaster",
    "sink",
    "refrigerator",
    "book",
    "clock",
    "vase",
    "scissors",
    "teddy bear",
    "hair drier",
    "toothbrush",
};
static_assert(sizeof(coco_names) / sizeof(coco_names[0]) == 80,
              "COCO label list must contain exactly 80 class names");

/// Constructs the detector: configures the ONNX Runtime session, loads the
/// model from `model_path`, and caches the first input/output names and the
/// network input size.
/// @param model_path filesystem path to the .onnx model file.
/// @throws Ort::Exception if the model cannot be loaded.
/// @throws std::runtime_error if the model declares no inputs or outputs.
OnnxDetector::OnnxDetector(const std::string& model_path)
    : env_(ORT_LOGGING_LEVEL_WARNING, "OnnxDetector") {
    // Session options
    session_options_.SetIntraOpNumThreads(1);
#ifdef USE_CUDA
    // BUG FIX: this registration used to be wrapped in `if (false)`, so the
    // CUDA execution provider was never enabled even in USE_CUDA builds.
    OrtCUDAProviderOptions cuda_options;
    session_options_.AppendExecutionProvider_CUDA(cuda_options);
#endif
    session_options_.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

    // Load the model.
    session_ = std::make_unique<Ort::Session>(env_, model_path.c_str(), session_options_);

    // Fetch input/output names (newer ONNX Runtime API returning std::string,
    // no manual allocator management needed).
    auto input_names = session_->GetInputNames();
    auto output_names = session_->GetOutputNames();
    if (input_names.empty() || output_names.empty())
        throw std::runtime_error("Model has no inputs or outputs: " + model_path);
    input_name_ = input_names[0];
    output_name_ = output_names[0];

    // Read the input size, assuming the first input is NCHW. Dynamic
    // dimensions are reported as -1; keep the member defaults in that case
    // instead of storing a negative size.
    auto input_shape = session_->GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
    if (input_shape.size() == 4) {
        if (input_shape[2] > 0) input_height_ = static_cast<int>(input_shape[2]);
        if (input_shape[3] > 0) input_width_ = static_cast<int>(input_shape[3]);
    }
    initialized_ = true;
}

OnnxDetector::~OnnxDetector() {}

/// Runs one forward pass of the network on `image`.
/// Pre-processing: resize to the network input size, scale pixels to [0, 1],
/// and repack interleaved HWC into planar NCHW float32.
/// Post-processing of the raw output is model-specific and left to subclasses,
/// so this base implementation always returns an empty vector.
/// @param image input frame; returns {} if it is empty or the detector is
///        not initialized.
std::vector<DetectionResult> OnnxDetector::detect(const cv::Mat& image) {
    if (!initialized_ || image.empty()) return {};
    // Guard against unresolved dynamic dims (-1) so cv::resize cannot be
    // asked for a non-positive size.
    if (input_width_ <= 0 || input_height_ <= 0) return {};

    // 1. Pre-process: resize and normalize to float32 in [0, 1].
    cv::Mat resized;
    cv::resize(image, resized, cv::Size(input_width_, input_height_));
    cv::Mat blob;
    resized.convertTo(blob, CV_32F, 1.0 / 255);
    // NOTE(review): no BGR->RGB swap is performed here; confirm the model was
    // trained on OpenCV's BGR channel order.

    // HWC -> CHW: each cv::Mat in `chw` is a zero-copy view into the flat
    // tensor buffer, so cv::split writes the planes directly in place.
    const int channels = blob.channels();
    const size_t plane = static_cast<size_t>(blob.rows) * blob.cols;
    std::vector<float> input_tensor_values(plane * channels);
    std::vector<cv::Mat> chw(channels);
    for (int c = 0; c < channels; ++c)
        chw[c] = cv::Mat(blob.rows, blob.cols, CV_32F,
                         input_tensor_values.data() + c * plane);
    cv::split(blob, chw);

    // 2. Build the input tensor (a non-owning view over input_tensor_values,
    // which must stay alive until Run() returns).
    std::array<int64_t, 4> input_shape = {1, channels, blob.rows, blob.cols};
    Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
        memory_info, input_tensor_values.data(), input_tensor_values.size(),
        input_shape.data(), input_shape.size());

    // 3. Inference.
    const char* input_names_c[] = {input_name_.c_str()};
    const char* output_names_c[] = {output_name_.c_str()};
    auto output_tensors = session_->Run(Ort::RunOptions{nullptr},
        input_names_c, &input_tensor, 1, output_names_c, 1);

    // 4. Post-processing (decoding output_tensors into DetectionResult) is
    // model-specific and must be implemented by a subclass.
    return {};
}
