//
// Created by yuan on 2025/7/31.
//

#include "detector_yolo.h"

#include <algorithm>
#include <cassert>
#include <cmath>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <memory>
#include <mutex>
#include <numeric>
#include <random>
#include <string>
#include <thread>
#include <vector>

#include "otl.h"
#include "otl_arch.h"
#include "otl_ffmpeg.h"

#define TOPS_CHECK(func) {auto ret = func; assert(topsSuccess == ret);}

// Process-wide count of live ThreadLocalObject instances, guarded by counter_mutex.
static int total_instances = 0;
static std::mutex counter_mutex;

// Tracks, per thread, whether a compute device has already been bound.
class ThreadLocalObject {
    bool mIsDeviceSetted = false;
    int mDeviceId = -1;

public:
    ThreadLocalObject() {
        // Bump the global instance counter on every construction.
        std::lock_guard<std::mutex> lock(counter_mutex);
        ++total_instances;
        std::cout << "创建了一个线程本地实例，当前总数: " << total_instances << std::endl;
    }

    ~ThreadLocalObject() {
        // Decrement the global counter when an instance dies (thread exit).
        std::lock_guard<std::mutex> lock(counter_mutex);
        --total_instances;
        std::cout << "销毁了一个线程本地实例，当前总数: " << total_instances << std::endl;
    }

    // Number of instances currently alive across all threads.
    static int get_total_instances() {
        std::lock_guard<std::mutex> lock(counter_mutex);
        return total_instances;
    }

    // Record that set_device() has completed on this thread. Only the first
    // device id is remembered; a differing id is merely logged.
    void setDeviceDone(int device) {
        if (mDeviceId == -1) {
            mDeviceId = device;
        } else if (mDeviceId != device) {
            std::cout << "setDevice " << device << std::endl;
        }
        mIsDeviceSetted = true;
    }

    // True once setDeviceDone() has run on this thread.
    bool IsDeviceSetted() {
        return mIsDeviceSetted;
    }
};

// One instance per thread; its destructor runs at thread exit.
thread_local ThreadLocalObject tls_obj;

// HostMemoryPool implementation
// Return every cached host buffer to the heap.
HostMemoryPool::~HostMemoryPool() {
    for (auto &bucket : m_freelists) {
        for (void* block : bucket.second) {
            delete[] static_cast<char*>(block);
        }
    }
}

void HostMemoryPool::prewarm(const std::vector<size_t>& sizes, int count) {
    for (size_t sz : sizes) {
        auto &list = m_freelists[sz];
        int need = std::max(0, count - static_cast<int>(list.size()));
        for (int i = 0; i < need; ++i) {
            list.push_back(static_cast<void*>(new char[sz]));
        }
    }
}

// Hand out a buffer of exactly `size` bytes, reusing a pooled one if available,
// otherwise allocating a fresh one.
void* HostMemoryPool::acquire(size_t size) {
    auto it = m_freelists.find(size);
    if (it == m_freelists.end() || it->second.empty()) {
        return static_cast<void*>(new char[size]);
    }
    void* buf = it->second.back();
    it->second.pop_back();
    return buf;
}

// Return a buffer to the free list of its size class; null is ignored.
void HostMemoryPool::release(void* ptr, size_t size) {
    if (ptr != nullptr) {
        m_freelists[size].push_back(ptr);
    }
}

// DeviceMemoryPool implementation
// Free every cached device allocation through the TOPS runtime.
DeviceMemoryPool::~DeviceMemoryPool() {
    for (auto &bucket : m_freelists) {
        for (void* block : bucket.second) {
            TOPS_CHECK(topsFree(static_cast<topsDevice_t*>(block)));
        }
    }
}

void DeviceMemoryPool::prewarm(const std::vector<size_t>& sizes, int count) {
    for (size_t sz : sizes) {
        auto &list = m_freelists[sz];
        int need = std::max(0, count - static_cast<int>(list.size()));
        for (int i = 0; i < need; ++i) {
            topsDevice_t* devMem = nullptr;
            auto ret = topsMalloc(&devMem, sz);
            assert(ret == topsSuccess);
            ret = topsMemset(devMem, 0, sz);
            assert(ret == topsSuccess);
            list.push_back(devMem);
        }
    }
}

// Hand out a zero-initialized device buffer of `size` bytes, reusing a pooled
// one when available, otherwise allocating and clearing a fresh one.
void* DeviceMemoryPool::acquire(size_t size) {
    auto it = m_freelists.find(size);
    if (it == m_freelists.end() || it->second.empty()) {
        topsDevice_t* devMem = nullptr;
        auto ret = topsMalloc(&devMem, size);
        assert(ret == topsSuccess);
        ret = topsMemset(devMem, 0, size);
        assert(ret == topsSuccess);
        return static_cast<void*>(devMem);
    }
    void* buf = it->second.back();
    it->second.pop_back();
    return buf;
}

// Return a device buffer to the free list of its size class; null is ignored.
// NOTE(review): pooled buffers are NOT re-zeroed on reuse — acquire() only
// memsets freshly allocated memory.
void DeviceMemoryPool::release(void* ptr, size_t size) {
    if (ptr != nullptr) {
        m_freelists[size].push_back(ptr);
    }
}

class Int8EntropyCalibrator : public TopsInference::IInt8EntropyCalibrator
{
public:
    /**
    * @brief 构造函数
    * @param imageDir 校准图像所在目录
    * @param inputShape 输入张量形状 (通道数, 高度, 宽度)
    * @param batchSize 批处理大小
    * @param maxCalibrationImages 最大校准图像数量，最多100张
    * @param cacheFile 校准缓存文件路径
    */
    explicit Int8EntropyCalibrator(const std::string& imageDir,
                   const std::vector<int>& inputShape,
                   int batchSize = 1,
                   int maxCalibrationImages = 100,
                   const std::string& cacheFile = "int8_calibration_cache.bin"):mImageDir(imageDir),
      mInputShape(inputShape),
      mBatchSize(batchSize),
      mMaxCalibrationImages(std::min(maxCalibrationImages, 100)), // 最多100张图片
      mCacheFile(cacheFile),
      mCurrentIndex(0)
    {
        // 验证输入形状
        if (mInputShape.size() != 3)
        {
            throw std::invalid_argument("输入形状必须是 (通道数, 高度, 宽度)");
        }

        // 计算输入数据大小
        size_t inputSize = 1;
        for (int dim : mInputShape)
        {
            inputSize *= dim;
        }
        mInputData.resize(inputSize * mBatchSize);

        // 加载图像路径
        mImagePaths = loadImagePaths();

        if (mImagePaths.empty())
        {
            throw std::runtime_error("未找到任何图像文件");
        }

        // 如果图像数量超过最大校准图像数量，则随机采样
        if (mImagePaths.size() > mMaxCalibrationImages)
        {
            std::random_device rd;
            std::mt19937 g(rd());
            std::shuffle(mImagePaths.begin(), mImagePaths.end(), g);
            mImagePaths.resize(mMaxCalibrationImages);
        }

        std::cout << "成功加载 " << mImagePaths.size() << " 张校准图像" << std::endl;



    }

    ~Int8EntropyCalibrator() override
    {
        std::cout << "Int8EntropyCalibrator() dtor" << std::endl;
    }

    int getBatchSize() const noexcept override
    {
        std::cout << __FUNCTION__ << ":" << __LINE__ << std::endl;
        return 1;
    }

    bool getBatch(TopsInference::TensorPtr_t bindings[], const char* names[], int num) noexcept override
    {
        std::cout << __FUNCTION__ << ":" << __LINE__ << std::endl;
        if (mCurrentIndex + mBatchSize > mImagePaths.size())
        {
            return false; // 没有更多批次
        }

        // 加载并预处理当前批次的图像
        for (int i = 0; i < mBatchSize; ++i)
        {
            const std::string& imagePath = mImagePaths[mCurrentIndex + i];
            cv::Mat image = cv::imread(imagePath);

            if (image.empty())
            {
                std::cerr << "警告: 无法读取图像 " << imagePath << std::endl;
                return false;
            }

            // 预处理图像
            std::vector<float> processedData = preprocessImage(image);

            // 复制到输入缓冲区
            size_t inputSize = processedData.size();
            std::copy(processedData.begin(), processedData.end(),
                     &mInputData[i * inputSize]);
        }

        // 将数据传递给TensorRT
        TopsInference::TensorPtr_t sub_input = bindings[0];
        sub_input->setOpaque(mInputData.data());
        sub_input->setDeviceType(TopsInference::DataDeviceType::HOST);
        TopsInference::Dims inputShape;
        inputShape.nbDims = 4;
        inputShape.dimension[0] = mBatchSize;
        inputShape.dimension[1] = mInputShape[0];
        inputShape.dimension[2] = mInputShape[1];
        inputShape.dimension[3] = mInputShape[2];
        sub_input->setDims(inputShape);

        mCurrentIndex += mBatchSize;

        return true;
    }

    const void* readCalibrationCache(int64_t& length) override
    {
        std::cout << __FUNCTION__ << ":" << __LINE__ << std::endl;
        mCalibrationCache.clear();

        std::ifstream cacheFile(mCacheFile, std::ios::binary);

        if (cacheFile.good())
        {
            cacheFile.seekg(0, std::ios::end);
            size_t size = cacheFile.tellg();
            cacheFile.seekg(0, std::ios::beg);

            mCalibrationCache.resize(size);
            cacheFile.read(mCalibrationCache.data(), size);
            length = size;
            return mCalibrationCache.data();
        }

        length = 0;
        return nullptr;
    }

    bool writeCalibrationCache(const void* cache, int64_t length) override
    {
        std::cout << __FUNCTION__ << ":" << __LINE__ << std::endl;
        std::ofstream cacheFile(mCacheFile, std::ios::binary);
        if (cacheFile.good())
        {
            cacheFile.write(reinterpret_cast<const char*>(cache), length);
        }
        return true;
    }

private:
    std::vector<std::string> loadImagePaths()
    {
        std::vector<std::string> imagePaths;

        if (!fs::exists(mImageDir) || !fs::is_directory(mImageDir)) {
            throw std::invalid_argument("图像目录不存在或不是一个目录: " + mImageDir);
        }

        // 支持的图像扩展名
        const std::vector<std::string> extensions = {".jpg", ".jpeg", ".png", ".bmp", ".gif"};

        // 遍历目录中的所有文件
        for (const auto& entry : fs::directory_iterator(mImageDir)) {
            if (entry.is_regular_file()) {
                std::string path = entry.path().string();
                std::string ext = entry.path().extension().string();

                // 转换为小写
                std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

                // 检查是否为支持的图像格式
                if (std::find(extensions.begin(), extensions.end(), ext) != extensions.end()) {
                    imagePaths.push_back(path);
                }
            }
        }

        return imagePaths;
    }

    std::vector<float> preprocessImage(const cv::Mat& image)
    {
        cv::Mat processed;
        int channels = mInputShape[0];
        int height = mInputShape[1];
        int width = mInputShape[2];

        // 调整大小
        cv::resize(image, processed, cv::Size(width, height));

        // 转换颜色空间
        if (channels == 3) {
            cv::cvtColor(processed, processed, cv::COLOR_BGR2RGB);
        }
        else if (channels == 1) {
            cv::cvtColor(processed, processed, cv::COLOR_BGR2GRAY);
        }

        // 转换为浮点数并归一化
        processed.convertTo(processed, CV_32F, 1.0 / 255.0);

        // 转换为CHW格式并调整为模型输入范围
        std::vector<float> data;
        data.reserve(channels * height * width);

        if (channels == 3) {
            for (int c = 0; c < 3; ++c) {
                for (int h = 0; h < height; ++h) {
                    for (int w = 0; w < width; ++w) {
                        data.push_back(processed.at<cv::Vec3f>(h, w)[c]);
                    }
                }
            }
        }
        else if (channels == 1) {
            for (int h = 0; h < height; ++h) {
                for (int w = 0; w < width; ++w) {
                    data.push_back(processed.at<float>(h, w));
                }
            }
        }

        return data;
    }

    std::string mImageDir;               // 图像目录
    std::vector<int> mInputShape;        // 输入形状 (C, H, W)
    int mBatchSize;                      // 批处理大小
    int mMaxCalibrationImages;           // 最大校准图像数量
    std::string mCacheFile;              // 缓存文件路径
    std::vector<std::string> mImagePaths;// 图像路径列表
    size_t mCurrentIndex;                // 当前处理索引
    std::vector<float> mInputData;       // 输入数据缓冲区
    std::vector<char> mCalibrationCache; // 校准缓存
};


// Construct a detector bound to a device id (packed in the high 16 bits of
// devId) and an ONNX model path; heavy setup is deferred to initialize().
YoloDetector::YoloDetector(int devId, std::string modelPath)
    : m_deviceId(OTL_GET_INT32_HIGH16(devId)),
      m_engine(nullptr),
      m_confThreshold(0.45f),
      m_nmsThreshold(0.25f)
{
    m_modelPath = std::move(modelPath);
}

// Release the engine and device handler on destruction.
YoloDetector::~YoloDetector()
{
    std::cout << "YoloDetector: dtor" << std::endl;
    cleanup();
}

// Helper methods from yolov5_ref.cpp
// Byte width of a single element of the given TopsInference data type.
// Cases are grouped by element size; unknown types fall back to 4 bytes.
int YoloDetector::get_dtype_size(TopsInference::DataType dtype)
{
    switch (dtype) {
    case TopsInference::DataType::TIF_INT8:
    case TopsInference::DataType::TIF_UINT8:
        return 1;
    case TopsInference::DataType::TIF_FP16:
    case TopsInference::DataType::TIF_BF16:
    case TopsInference::DataType::TIF_INT16:
    case TopsInference::DataType::TIF_UINT16:
        return 2;
    case TopsInference::DataType::TIF_FP32:
    case TopsInference::DataType::TIF_INT32:
    case TopsInference::DataType::TIF_UINT32:
        return 4;
    case TopsInference::DataType::TIF_FP64:
    case TopsInference::DataType::TIF_INT64:
    case TopsInference::DataType::TIF_UINT64:
        return 8;
    default:
        return 4;
    }
}

std::vector<YoloDetector::ShapeInfo> YoloDetector::getInputsShape()
{
    std::vector<ShapeInfo> shapes_info;
    int num = m_engine->getInputNum();
    for (int i = 0; i < num; i++) {
        auto name = m_engine->getInputName(i);
        auto Dims = m_engine->getInputShape(i);
        auto dtype = m_engine->getInputDataType(i);

        std::vector<int> shape;
        int dtype_size = get_dtype_size(dtype);
        int mem_size = dtype_size;
        for (int j = 0; j < Dims.nbDims; j++) {
            shape.push_back(Dims.dimension[j]);
            mem_size *= Dims.dimension[j];
        }
        shapes_info.push_back(ShapeInfo(name, shape, dtype, dtype_size, mem_size));
    }
    return shapes_info;
}

std::vector<YoloDetector::ShapeInfo> YoloDetector::getOutputsShape()
{
    std::vector<ShapeInfo> shapes_info;
    int num = m_engine->getOutputNum();
    for (int i = 0; i < num; i++) {
        auto name = m_engine->getOutputName(i);
        auto Dims = m_engine->getOutputShape(i);
        auto dtype = m_engine->getOutputDataType(i);

        std::vector<int> shape;
        int dtype_size = get_dtype_size(dtype);
        int mem_size = dtype_size;
        for (int j = 0; j < Dims.nbDims; j++) {
            shape.push_back(Dims.dimension[j]);
            mem_size *= Dims.dimension[j];
        }
        shapes_info.push_back(ShapeInfo(name, shape, dtype, dtype_size, mem_size));
    }
    return shapes_info;
}

/**
 * @brief Acquire host and device buffers for each tensor in shapes_info.
 * @param info frame bookkeeping structure receiving buffer pointers and sizes
 * @param isInput true to fill the input-side vectors, false for the output side
 * @param shapes_info per-tensor shape/size descriptions
 * @param times multiplier applied to each tensor's byte size (e.g. batch count)
 * @param verbose log each allocation size when true
 *
 * Fixes over the original:
 *  - the byte count is computed once as size_t and used for BOTH pools; the
 *    original sized the host buffer with an int (overflow-prone) while the
 *    device side already used size_t, so the two pools could disagree on the
 *    size class of the same logical buffer and the recorded size could be
 *    truncated;
 *  - removed the unused local `datum`.
 */
void YoloDetector::allocHostMemory(FrameInfo& info, bool isInput, std::vector<ShapeInfo>& shapes_info, int times,
                                   bool verbose)
{
    // Lazily create the per-instance pools on first use.
    if (!m_hostPool) {
        m_hostPool = std::make_unique<HostMemoryPool>(10);
    }
    if (!m_devicePool) {
        m_devicePool = std::make_unique<DeviceMemoryPool>(10);
    }

    for (auto& shape_info : shapes_info) {
        const size_t bytes = static_cast<size_t>(shape_info.mem_size) * static_cast<size_t>(times);

        void* hostMem = m_hostPool->acquire(bytes);
        void* devMem = m_devicePool->acquire(bytes);

        if (isInput) {
            info.netHostInputs.push_back(hostMem);
            info.netInputsSize.push_back(bytes);
            info.netDeviceInputs.push_back(devMem);
        }
        else {
            info.netHostOutputs.push_back(hostMem);
            info.netOutputsSize.push_back(bytes);
            info.netDeviceOutputs.push_back(devMem);
        }

        if (verbose) {
            std::cout << "Allocated host memory size: " << bytes << std::endl;
        }
    }
}

/**
 * @brief Return one side's host and device buffers to their pools.
 * @param info frame bookkeeping structure whose buffers are released
 * @param isInput true to release the input-side buffers, false for outputs
 *
 * Fixes over the original:
 *  - also clears the device-buffer and size vectors; previously only the host
 *    vector was cleared, so a second call would release stale device pointers
 *    and a later allocHostMemory() would append to a non-empty size table,
 *    mis-indexing sizes against buffers;
 *  - uses plain index loops instead of pointer arithmetic on vector elements.
 */
void YoloDetector::freeHostMemory(FrameInfo& info, bool isInput)
{
    auto& hostVec   = isInput ? info.netHostInputs   : info.netHostOutputs;
    auto& deviceVec = isInput ? info.netDeviceInputs : info.netDeviceOutputs;
    auto& sizeVec   = isInput ? info.netInputsSize   : info.netOutputsSize;

    if (m_hostPool) {
        for (size_t i = 0; i < hostVec.size(); ++i) {
            m_hostPool->release(hostVec[i], sizeVec[i]);
        }
    }
    if (m_devicePool) {
        for (size_t i = 0; i < deviceVec.size(); ++i) {
            m_devicePool->release(deviceVec[i], sizeVec[i]);
        }
    }

    hostVec.clear();
    deviceVec.clear();
    sizeVec.clear();
}

int YoloDetector::initialize()
{
    // Initialize TopsInference
    TopsInference::topsInference_init();

    // Set device
    if (!tls_obj.IsDeviceSetted()) {
        std::vector<uint32_t> cluster_ids = {0};
        m_handler = TopsInference::set_device(m_deviceId, cluster_ids.data(), cluster_ids.size());
        if (!m_handler) {
            std::cerr << "threadid=" << std::this_thread::get_id() << ", Failed to set device " << m_deviceId <<
                std::endl;
            return -1;
        }
        tls_obj.setDeviceDone(m_deviceId);
    }

    std::lock_guard<std::mutex> lock(m_mutex);
    if (m_engine != nullptr) return 0;
    try {
        // Initialize TopsInference
        //TopsInference::topsInference_init();

        // Set device
        //std::vector<uint32_t> cluster_ids = {0};
        //m_handler = TopsInference::set_device(m_deviceId, cluster_ids.data(), cluster_ids.size());
        //if (!m_handler) {
        //    std::cerr << "threadid=" << std::this_thread::get_id() << ", Failed to set device " << m_deviceId << std::endl;
        //    return -1;
        //}

        // Generate .exec file path from ONNX model path
        fs::path onnxPath(m_modelPath);
        auto archName = enrigin::GetArchitectureName(enrigin::GetBaseArchitecture());
        std::string execPath = onnxPath.string() + "." + archName + ".exec";

        std::cout << "Looking for pre-built engine: " << execPath << std::endl;

        // Check if .exec file exists in current directory
        if (fs::exists(execPath)) {
            std::cout << "Found pre-built engine file: " << execPath << std::endl;
            std::cout << "Loading pre-built engine..." << std::endl;

            // Create engine and load from .exec file
            m_engine = TopsInference::create_engine();
            if (!m_engine) {
                std::cerr << "Failed to create engine for loading" << std::endl;
                return -1;
            }

            // Load pre-built engine
            auto ret = m_engine->loadExecutable(execPath.c_str());
            if (!ret) {
                std::cerr << "Failed to load pre-built engine from: " << execPath << std::endl;
                TopsInference::release_engine(m_engine);
                m_engine = nullptr;
                return -1;
            }

            std::cout << "Successfully loaded pre-built engine from: " << execPath << std::endl;
        }
        else {
            std::cout << "Pre-built engine not found, building from ONNX model: " << m_modelPath << std::endl;

            // Create parser and read model
            TopsInference::IParser* parser = TopsInference::create_parser(TopsInference::TIF_ONNX);
            if (!parser) {
                std::cerr << "Failed to create parser" << std::endl;
                return -1;
            }

            TopsInference::INetwork* network = parser->readModel(m_modelPath.c_str());
            if (!network) {
                std::cerr << "Failed to read model: " << m_modelPath << std::endl;
                TopsInference::release_parser(parser);
                exit(-1);
            }

            // Create optimizer and build engine
            TopsInference::IOptimizer* optimizer = TopsInference::create_optimizer();
            if (!optimizer) {
                std::cerr << "Failed to create optimizer" << std::endl;
                TopsInference::release_network(network);
                TopsInference::release_parser(parser);
                return -1;
            }

            // 指定模型推理为FP16和FP32混合精度
            auto build_flag = TopsInference::BuildFlag::TIF_KTYPE_MIX_FP16;
            optimizer->getConfig()->setBuildFlag(build_flag);
            if (TopsInference::BuildFlag::TIF_KTYPE_INT8_MIX_FP32 == build_flag ||
                TopsInference::BuildFlag::TIF_KTYPE_INT8_MIX_FP16 == build_flag) {
                std::vector<int> imageShape = {3,640,640};
                std::shared_ptr<Int8EntropyCalibrator> int8EntropyCalibrator = std::make_shared<Int8EntropyCalibrator>(
                    "/home/steven/datasets/coco128/images/train2017", imageShape);
                optimizer->getConfig()->setInt8Calibrator(int8EntropyCalibrator.get());
            }

            std::cout << "Building engine from ONNX model..." << std::endl;
            m_engine = optimizer->build(network);
            if (!m_engine) {
                std::cerr << "Failed to build engine" << std::endl;
                TopsInference::release_optimizer(optimizer);
                TopsInference::release_network(network);
                TopsInference::release_parser(parser);
                return -1;
            }

            std::cout << "Engine built successfully, saving to: " << execPath << std::endl;

            // Save the built engine for future use
            auto status = m_engine->saveExecutable(execPath.c_str());
            if (!status) {
                std::cout << "Warning: Failed to save engine to " << execPath << ", but continuing..." << std::endl;
            }
            else {
                std::cout << "Engine saved successfully to: " << execPath << std::endl;
            }

            // Cleanup temporary objects
            TopsInference::release_optimizer(optimizer);
            TopsInference::release_network(network);
            TopsInference::release_parser(parser);
        }

        // Get input/output shapes (works for both loaded and built engines)
        TopsInference::Dims inputShape = m_engine->getInputShape(0);
        if (inputShape.nbDims >= 4) {
            m_inputHeight = inputShape.dimension[2];
            m_inputWidth = inputShape.dimension[3];
        }

        // Initialize shape information (following yolov5_ref.cpp pattern)
        m_inputShapes = getInputsShape();
        m_outputShapes = getOutputsShape();

        std::cout << "Input shapes:" << std::endl;
        for (const auto& shape : m_inputShapes) {
            std::cout << "  " << shape.name << ": [";
            for (size_t i = 0; i < shape.dims.size(); ++i) {
                std::cout << shape.dims[i];
                if (i < shape.dims.size() - 1) std::cout << ",";
            }
            std::cout << "] mem_size: " << shape.mem_size << std::endl;
        }

        std::cout << "Output shapes:" << std::endl;
        for (const auto& shape : m_outputShapes) {
            std::cout << "  " << shape.name << ": [";
            for (size_t i = 0; i < shape.dims.size(); ++i) {
                std::cout << shape.dims[i];
                if (i < shape.dims.size() - 1) std::cout << ",";
            }
            std::cout << "] mem_size: " << shape.mem_size << std::endl;
        }

        std::cout << "Input size: " << m_inputWidth << "x" << m_inputHeight << std::endl;

        std::cout << "YOLO engine initialized successfully" << std::endl;

        // Initialize and prewarm per-instance host memory pool
        if (!m_hostPool) m_hostPool = std::make_unique<HostMemoryPool>(10);
        if (!m_devicePool) m_devicePool = std::make_unique<DeviceMemoryPool>(10);
        std::vector<size_t> sizes;
        for (const auto& s : m_inputShapes) sizes.push_back(static_cast<size_t>(s.mem_size));
        for (const auto& s : m_outputShapes) sizes.push_back(static_cast<size_t>(s.mem_size));
        m_hostPool->prewarm(sizes, 10);
        m_devicePool->prewarm(sizes, 10);

        return 0;
    }
    catch (const std::exception& e) {
        std::cerr << "Exception in initializeEngine: " << e.what() << std::endl;
        return -1;
    }
}


/**
 * @brief Run inference on each frame using its device-resident buffers.
 * @param frameInfos frames whose netDeviceInputs/netDeviceOutputs are populated
 * @return 0 on success, -1 if the engine is unavailable or a batch fails.
 *
 * Fix: the readiness guard now rejects when EITHER the engine is missing OR
 * shape info was never initialized. The original used &&, so a null engine
 * with populated shapes slipped through and runWithBatch dereferenced null.
 */
int YoloDetector::forward(std::vector<FrameInfo>& frameInfos)
{
    if (!m_engine || m_inputShapes.empty()) {
        std::cerr << "Engine(dev=" << m_deviceId << ") not initialized" << std::endl;
        return -1;
    }

    for (auto& frameInfo : frameInfos) {
        // Batch size 1; inputs and outputs already live on the device.
        auto success = m_engine->runWithBatch(
            1,
            frameInfo.netDeviceInputs.data(),
            frameInfo.netDeviceOutputs.data(),
            TopsInference::BufferType::TIF_ENGINE_RSC_IN_DEVICE_OUT_DEVICE);

        if (!success) {
            std::cerr << "engine runWithBatch failed." << std::endl;
            return -1;
        }
    }

    return 0;
}


// Tear down the engine and the device handler, then shut down the
// TopsInference runtime for this instance.
void YoloDetector::cleanup()
{
    if (m_engine != nullptr) {
        TopsInference::release_engine(m_engine);
        m_engine = nullptr;
    }
    if (m_handler != nullptr) {
        TopsInference::release_device(m_handler);
        m_handler = nullptr;
    }
    TopsInference::topsInference_finish();
}


// Factory: build a YOLO-backed detector for the given device and model path.
std::shared_ptr<Detector> Detector::createDetector(int devId, std::string modelPath)
{
    return std::shared_ptr<Detector>(new YoloDetector(devId, modelPath));
}
