#include "NCNNModel.hpp"

NCNNModel::NCNNModel(const std::string& param_path,
                     const std::string& bin_path,
                     int input_size,
                     const std::string& input_name,
                     const std::vector<std::string>& output_names,
                     bool use_gpu)
    : input_size_(input_size),
      input_name_(input_name),
      output_names_(output_names),
      use_gpu_(use_gpu) {

    // Enable Vulkan compute if requested and a GPU is present.
    // NOTE: net_.opt must be configured BEFORE load_param()/load_model(),
    // which is why this block comes first.
    if (use_gpu_ && ncnn::get_gpu_count() > 0) {
        std::cout << "Number of Vulkan GPUs: " << ncnn::get_gpu_count() << std::endl;
        for (int i = 0; i < ncnn::get_gpu_count(); ++i) {
            std::cout << "GPU[" << i << "] name: " << ncnn::get_gpu_info(i).device_name() << std::endl;
        }
        net_.opt.use_vulkan_compute = true;
        net_.set_vulkan_device(0);
    }

    // Load network structure, then weights. Both return 0 on success; the
    // previous code ignored failures, which turned a bad path / corrupt
    // model into an opaque crash later inside infer().
    if (net_.load_param(param_path.c_str()) != 0) {
        std::cerr << "NCNNModel: failed to load param file: " << param_path << std::endl;
    }
    if (net_.load_model(bin_path.c_str()) != 0) {
        std::cerr << "NCNNModel: failed to load model file: " << bin_path << std::endl;
    }

    // Extractor kept as a member. NOTE(review): an ncnn::Extractor caches
    // computed blobs, so sharing one across inferences can return stale
    // results — infer() should prefer a fresh extractor per call.
    ex = std::make_unique<ncnn::Extractor>(net_.create_extractor());
}

ncnn::Mat NCNNModel::preprocess(const cv::Mat& image) {
    // Prepare a network input from a BGR OpenCV image:
    //   1. resize to the square input_size_ x input_size_ expected by the net,
    //   2. swap channels BGR -> RGB while copying into an ncnn::Mat,
    //   3. scale pixel values from [0, 255] into [0, 1].
    cv::Mat scaled;
    cv::resize(image, scaled, cv::Size(input_size_, input_size_));

    ncnn::Mat input = ncnn::Mat::from_pixels(
        scaled.data,
        ncnn::Mat::PIXEL_BGR2RGB,
        scaled.cols,
        scaled.rows);

    // nullptr mean => no mean subtraction; each channel is multiplied by
    // 1/255. ("substract" is the actual ncnn API spelling.)
    const float scale_vals[3] = {1.0f / 255.f, 1.0f / 255.f, 1.0f / 255.f};
    input.substract_mean_normalize(nullptr, scale_vals);

    return input;
}

std::vector<ncnn::Mat> NCNNModel::infer(const ncnn::Mat& in_mat) {
    // Create a fresh Extractor for every inference. An ncnn::Extractor
    // caches each blob it computes and extract() skips re-forwarding blobs
    // that are already cached, so reusing the single extractor built in the
    // constructor returns the FIRST call's outputs for every later call
    // instead of recomputing from the new input.
    ncnn::Extractor extractor = net_.create_extractor();

    // Feed the preprocessed input blob.
    extractor.input(input_name_.c_str(), in_mat);

    // Pull each requested output, preserving output_names_ order so callers
    // can index outputs positionally. On extract failure the (empty) Mat is
    // still pushed, keeping one entry per requested name.
    std::vector<ncnn::Mat> outputs;
    outputs.reserve(output_names_.size());
    for (const auto& name : output_names_) {
        ncnn::Mat out;
        const int ret = extractor.extract(name.c_str(), out);
        if (ret != 0) {
            fprintf(stderr, "infer: extract '%s' failed (%d)\n", name.c_str(), ret);
        }
        outputs.push_back(out);
    }

    return outputs;
}

NCNNModel::~NCNNModel() {
    // Nothing to release by hand:
    //  - net_ and ex are RAII members and are destroyed automatically.
    //  - `device` is never assigned in this translation unit (the
    //    get_gpu_device(0) call in the constructor is commented out), so the
    //    old `if (device) delete device;` tested a pointer this file never
    //    set — undefined behavior if the header leaves it uninitialized.
    //    Moreover, ncnn::get_gpu_device() returns a VulkanDevice owned by
    //    ncnn's global GPU instance; callers must never delete it.
    //  - If a process-wide ncnn::create_gpu_instance() is ever added, pair
    //    it with ncnn::destroy_gpu_instance() at process shutdown, not here.
}
