#include "detector/yolo11_gcu_detector.h"

#include <algorithm>
#include <fstream>

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>

#include "otl_timer.h"

#if (__GNUC__ < 8 && !defined(_MSC_VER))
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
#else
#include <filesystem>
namespace fs = std::filesystem;
#endif


// Returns the enumerator name of a TopsInference::DataType value, or
// "INVALID" for values outside the known set.
const char* DataTypeString(int dtype)
{
#define DT_NAME_CASE(tag)                  \
    case TopsInference::DataType::tag:     \
        return #tag
    switch (dtype)
    {
    DT_NAME_CASE(TIF_BOOL);
    DT_NAME_CASE(TIF_INDEX);
    DT_NAME_CASE(TIF_INT8);
    DT_NAME_CASE(TIF_INT16);
    DT_NAME_CASE(TIF_INT32);
    DT_NAME_CASE(TIF_INT64);
    DT_NAME_CASE(TIF_UINT8);
    DT_NAME_CASE(TIF_UINT16);
    DT_NAME_CASE(TIF_UINT32);
    DT_NAME_CASE(TIF_UINT64);
    DT_NAME_CASE(TIF_FP16);
    DT_NAME_CASE(TIF_FP32);
    DT_NAME_CASE(TIF_FP64);
    DT_NAME_CASE(TIF_BF16);
    DT_NAME_CASE(TIF_TUPLE);
    DT_NAME_CASE(TIF_INVALID);
    default:
        return "INVALID";
    }
#undef DT_NAME_CASE
}


// Byte width of a single element of the given TopsInference data type.
// Unknown types fall back to 1 byte (same as the 8-bit types).
int get_dtype_size(TopsInference::DataType dtype) {
  switch (dtype) {
  case TopsInference::DataType::TIF_FP64:
  case TopsInference::DataType::TIF_INT64:
  case TopsInference::DataType::TIF_UINT64:
    return 8;
  case TopsInference::DataType::TIF_FP32:
  case TopsInference::DataType::TIF_INT32:
  case TopsInference::DataType::TIF_UINT32:
    return 4;
  case TopsInference::DataType::TIF_BF16:
  case TopsInference::DataType::TIF_FP16:
  case TopsInference::DataType::TIF_INT16:
  case TopsInference::DataType::TIF_UINT16:
    return 2;
  case TopsInference::DataType::TIF_INT8:
  case TopsInference::DataType::TIF_UINT8:
  default: // to do: widths for TIF_BOOL / TIF_INDEX / TIF_TUPLE are unverified
    return 1;
  }
}

static std::vector<void *> allocHostMemory(std::vector<ShapeInfo> &shapes_info,
                                    int times, bool verbose)
{
    std::vector<void *> datum;
    for (auto &shape_info : shapes_info)
    {
        char *data = new char[shape_info.mem_size * times];
        datum.push_back((void *)data);
        if (verbose)
        {
            std::cout << "new data size: " << shape_info.mem_size << std::endl;
        }
    }
    return datum;
}

static void freeHostMemory(std::vector<void *> &datum)
{
    for (auto &data : datum)
    {
        delete[] (char *)data;
    }
    datum.clear();
}

// Clamps `val` into [low, high] in place. A NaN value is left unchanged
// (both comparisons are false), matching the original branch behavior.
static void clamp(float &val, const float low, const float high)
{
    val = (val > high) ? high : ((val < low) ? low : val);
}

// Reorders the first n entries of `indices` so that the scores they
// reference in `x` are in descending order (stable for equal scores).
//
// Bug fix: `indices` was previously taken BY VALUE, so the sorted order
// never reached the caller and nonMaximumSuppression() effectively ran on
// unsorted boxes. It is now passed by reference; `x` is passed by const
// reference to avoid copying the score vector.
static void sort(int n, const std::vector<float> &x, std::vector<int> &indices)
{
    std::stable_sort(indices.begin(), indices.begin() + n,
                     [&x](int a, int b) { return x[a] > x[b]; });
}

// Greedy non-maximum suppression over parallel arrays `rects` / `score`.
// Boxes are visited in descending score order; any later box whose IoU with
// a kept box exceeds `overlap_threshold` is suppressed. Indices of surviving
// boxes are appended to `index_out` in their original order. Box areas and
// intersections use the inclusive (+1) pixel convention. Always returns true.
//
// Fixes: `rects` and `score` were passed by value (full vector copies per
// call); the sort is done inline with std::stable_sort because the
// file-local sort() helper historically took its index vector by value and
// therefore had no effect on the caller.
static bool nonMaximumSuppression(const std::vector<cv::Rect> &rects,
                           const std::vector<float> &score,
                           float overlap_threshold,
                           std::vector<int> &index_out)
{
    int num_boxes = rects.size();
    int i, j;
    std::vector<float> box_area(num_boxes);
    std::vector<int> indices(num_boxes);
    std::vector<int> is_suppressed(num_boxes);

    for (i = 0; i < num_boxes; i++)
    {
        indices[i] = i;
        is_suppressed[i] = 0;
        box_area[i] = (float)((rects[i].width + 1) * (rects[i].height + 1));
    }

    // Visit boxes in descending score order.
    std::stable_sort(indices.begin(), indices.end(),
                     [&score](int a, int b) { return score[a] > score[b]; });

    for (i = 0; i < num_boxes; i++)
    {
        if (is_suppressed[indices[i]])
            continue;
        for (j = i + 1; j < num_boxes; j++)
        {
            if (is_suppressed[indices[j]])
                continue;
            const cv::Rect &a = rects[indices[i]];
            const cv::Rect &b = rects[indices[j]];
            // Inclusive intersection extents; non-positive means no overlap.
            int overlap_w =
                std::min(a.x + a.width, b.x + b.width) - std::max(a.x, b.x) + 1;
            int overlap_h =
                std::min(a.y + a.height, b.y + b.height) - std::max(a.y, b.y) + 1;
            if (overlap_w > 0 && overlap_h > 0)
            {
                float inter = (float)(overlap_w * overlap_h);
                float iou = inter /
                            (box_area[indices[j]] + box_area[indices[i]] - inter);
                if (iou > overlap_threshold)
                {
                    is_suppressed[indices[j]] = 1;
                }
            }
        }
    }

    for (i = 0; i < num_boxes; i++)
    {
        if (!is_suppressed[i])
            index_out.push_back(i);
    }

    return true;
}

// Human-readable label for a TopsInference kernel-type build flag;
// returns "" for any value outside the three known flags.
static const char *PrecisionToString(int precision_type)
{
    switch ((TopsInference::BuildFlag)precision_type)
    {
    case TopsInference::BuildFlag::TIF_KTYPE_DEFAULT:
        return "default";
    case TopsInference::BuildFlag::TIF_KTYPE_FLOAT16:
        return "fp16";
    case TopsInference::BuildFlag::TIF_KTYPE_MIX_FP16:
        return "mix";
    default:
        return "";
    }
}

/**
 * Builds the cached engine file path for an onnx model.
 *
 * Currently this simply appends ".exec" to the onnx path. The
 * engine_folder, batchsize and precision parameters are kept for interface
 * stability but are unused: an earlier variant that encoded them into the
 * name was disabled under `#if 0` and contained an always-true pointer
 * check (`(engine_folder) != 0`), so that dead branch has been removed.
 */
static std::string engine_name_construct(const char *onnx_path,
                                  const char *engine_folder, int batchsize,
                                  const char *precision)
{
    (void)engine_folder;
    (void)batchsize;
    (void)precision;
    std::string exec_path(onnx_path);
    exec_path += ".exec";
    return exec_path;
}

// Stores the model path and device/cluster ids and brings up the
// TopsInference runtime. The engine itself is loaded lazily on the first
// detect() call.
Yolo11GcuDetector::Yolo11GcuDetector(const std::string &model_path, int devId, int clusterId)
    : m_devId(devId),
      m_clusterId(clusterId),
      m_model_path(model_path)
{
    TopsInference::topsInference_init();
}

// Tears down in reverse order of setup: host I/O buffers first, then the
// device-side engine and device handle, and finally the TopsInference
// runtime (paired with the topsInference_init() call in the constructor).
Yolo11GcuDetector:: ~Yolo11GcuDetector() {
    freeHostMemory(mNetInputs);
    freeHostMemory(mNetOutputs);
    if (mTopsEngine)
        TopsInference::release_engine(mTopsEngine);
    if (mTopsHandle)
        TopsInference::release_device(mTopsHandle);
    TopsInference::topsInference_finish();
}


std::vector<ShapeInfo> Yolo11GcuDetector::getInputsShape()
{
    std::vector<ShapeInfo> shapes_info;
    int num = mTopsEngine->getInputNum();
    for (int i = 0; i < num; i++)
    {
        auto name = mTopsEngine->getInputName(i);
        auto Dims = mTopsEngine->getInputShape(i);
        auto dtype = mTopsEngine->getInputDataType(i);

        std::vector<int> shape;
        int dtype_size = get_dtype_size(dtype);
        int mem_size = dtype_size;
        for (int j = 0; j < Dims.nbDims; j++)
        {
            shape.push_back(Dims.dimension[j]);
            mem_size *= Dims.dimension[j];
        }
        shapes_info.push_back(ShapeInfo(name, shape, dtype, dtype_size, mem_size));
    }
    return shapes_info;
}

std::vector<ShapeInfo> Yolo11GcuDetector::getOutputsShape()
{
    std::vector<ShapeInfo> shapes_info;
    int num = mTopsEngine->getOutputNum();
    for (int i = 0; i < num; i++)
    {
        auto name = mTopsEngine->getOutputName(i);
        auto Dims = mTopsEngine->getOutputShape(i);
        auto dtype = mTopsEngine->getOutputDataType(i);

        std::vector<int> shape;
        int dtype_size = get_dtype_size(dtype);
        int mem_size = dtype_size;
        for (int j = 0; j < Dims.nbDims; j++)
        {
            shape.push_back(Dims.dimension[j]);
            mem_size *= Dims.dimension[j];
        }
        shapes_info.push_back(ShapeInfo(name, shape, dtype, dtype_size, mem_size));
    }
    return shapes_info;
}

/**
 * Loads (or builds and caches) the inference engine and records the input
 * and output tensor shapes. Returns 0 on success, -1 on failure.
 *
 * If a cached "<model>.exec" engine exists it is loaded directly; otherwise
 * the onnx model is parsed, built with mixed-FP16 precision, and the
 * resulting engine is saved next to the model for future runs.
 *
 * Fixes: the precision build flag is now taken from `precision_type`
 * instead of a duplicated magic `2`; the engine pointer is null-checked
 * before use; loop counters match the containers' size_t.
 */
int Yolo11GcuDetector::loadModel(const std::string& model_path)
{
    // Fixed input spec for YOLO11: one NCHW image tensor named "images".
    const char *input_shapes = "1,3,640,640";
    const char *input_names = "images";
    int card_id = m_devId;
    // NOTE(review): m_clusterId is ignored here and clusters {0, 1} are
    // always claimed — confirm whether this is intended.
    std::vector<uint32_t> cluster_ids;
    cluster_ids.push_back(0);
    cluster_ids.push_back(1);

    int precision_type = 2; // TIF_KTYPE_MIX_FP16 (see PrecisionToString)
    if (mTopsHandle == nullptr)
    {
        std::cout << "set_device " << card_id << std::endl;
        mTopsHandle = TopsInference::set_device(card_id, cluster_ids.data(), cluster_ids.size());
    }
    // engine_name_construct currently only appends ".exec"; the batchsize
    // (atoi stops at the first ',', yielding 1) and precision arguments are
    // accepted but unused.
    std::string exec_path =
        engine_name_construct(m_model_path.c_str(), "", atoi(input_shapes),
                              PrecisionToString(precision_type));
    if (fs::exists(exec_path))
    {
        // Reuse the cached engine.
        mTopsEngine = TopsInference::create_engine();
        mTopsEngine->loadExecutable(exec_path.c_str());
        std::cout << "[INFO] load engine file: " << exec_path << '\n';
    }
    else if (fs::exists(m_model_path))
    {
        // Build the engine from onnx and cache it for subsequent runs.
        TopsInference::IParser *parser_ = TopsInference::create_parser(TopsInference::TIF_ONNX);
        TopsInference::IOptimizer *optimizer_ = TopsInference::create_optimizer();
        if (input_names != NULL)
            parser_->setInputNames(input_names);
        if (input_shapes != NULL)
            parser_->setInputShapes(input_shapes);
        TopsInference::INetwork *network = parser_->readModel(m_model_path.c_str());
        TopsInference::IOptimizerConfig *optimizer_config = optimizer_->getConfig();
        optimizer_config->setBuildFlag(TopsInference::BuildFlag::TIF_KTYPE_MIX_FP16);
        mTopsEngine = optimizer_->build(network);
        if (mTopsEngine != nullptr)
        {
            mTopsEngine->saveExecutable(exec_path.c_str());
            std::cout << "[INFO] save engine file: " << exec_path << '\n';
        }
        TopsInference::release_network(network);
        TopsInference::release_optimizer(optimizer_);
        TopsInference::release_parser(parser_);
    }
    else
    {
        std::cout << std::endl
                  << "[ERROR] fail to load onnx: " << model_path << std::endl
                  << std::endl;
        return -1;
    }

    if (mTopsEngine == nullptr)
    {
        std::cout << "[ERROR] failed to create inference engine" << std::endl;
        return -1;
    }

    mInputShapes = getInputsShape();
    for (size_t i = 0; i < mInputShapes.size(); ++i)
    {
        std::cout << mInputShapes[i].toString() << std::endl;
    }

    mOutputShapes = getOutputsShape();
    for (size_t i = 0; i < mOutputShapes.size(); ++i)
    {
        std::cout << mOutputShapes[i].toString() << std::endl;
    }
    return 0;
}

// Converts a batch of BGR images into the network's planar float input
// buffer(s) in mNetInputs. Each image is padded (top-left anchored) to a
// square, resized to the network input size, scaled to [0, 1], and written
// channel-planar in R, G, B order. Returns 0.
int Yolo11GcuDetector::preProcess(std::vector<cv::Mat> &images)
{
    int batchSize = images.size();
    // Re-allocate input buffers sized for this batch.
    freeHostMemory(mNetInputs);
    mNetInputs = allocHostMemory(mInputShapes, batchSize, false);
    // Shape is [n, c, h, w]
    int inputW = mInputShapes[0].dims[3];
    int inputH = mInputShapes[0].dims[2];

    // NOTE(review): every input tensor receives the same preprocessed image
    // at inputW x inputH — confirm this is intended for multi-input models.
    for (int shapeIdx = 0; shapeIdx < mInputShapes.size(); ++shapeIdx)
    {
        for (int i = 0; i < batchSize; ++i)
        {

            // Pad to a square canvas (top-left anchored, black fill) so the
            // aspect ratio survives the resize below; postProcess() undoes
            // this with a single scale factor.
            cv::Mat image = images[i];
            int maxLen = MAX(image.cols, image.rows);
            cv::Mat image2 = cv::Mat::zeros(cv::Size(maxLen, maxLen), CV_8UC3);
            cv::Rect roi(0, 0, image.cols, image.rows);
            image.copyTo(image2(roi));

            // Resize while keeping the aspect ratio.
            cv::Mat input;
            cv::resize(image2, input, cv::Size(inputW, inputH), 0, 0, cv::INTER_LINEAR);

            // Normalize 8-bit pixels to [0, 1] floats.
            input.convertTo(input, CV_32F, 1.0f / 255.0f);

            // Write directly into this image's slice of the input buffer as
            // three contiguous planes. The split targets are ordered
            // {r, g, b} against OpenCV's BGR channel order, so the buffer
            // ends up planar RGB.
            int planeSize = input.cols * input.rows;
            float *begin = static_cast<float *>((void *)((char *)mNetInputs[shapeIdx] + mInputShapes[shapeIdx].mem_size * i));
            cv::Mat b(cv::Size(input.cols, input.rows), CV_32FC1, begin);
            cv::Mat g(cv::Size(input.cols, input.rows), CV_32FC1, begin + planeSize);
            cv::Mat r(cv::Size(input.cols, input.rows), CV_32FC1, begin + (planeSize << 1));
            cv::Mat rgb[3] = {r, g, b};
            cv::split(input, rgb);
        }
    }

    return 0;
}

/**
 * Decodes raw network output into per-image detection lists.
 *
 * The output tensor layout is [1, 4 + num_classes, num_anchors]
 * (e.g. [1, 84, 8400]): rows 0-3 hold cx, cy, w, h in network-input
 * coordinates, the remaining rows hold per-class scores. Boxes above the
 * confidence threshold are scaled back to the original image (preProcess
 * pads to a square, so a single factor applies to both axes), filtered
 * with OpenCV NMS, and one result vector is appended to batchDets per image.
 * Returns 0.
 *
 * Fixes: cv::dnn::NMSBoxes takes (score_threshold, nms_threshold) in that
 * order — the original call had the two swapped; class-score columns now
 * use det_output.cols instead of a hard-coded 84.
 */
int Yolo11GcuDetector::postProcess(std::vector<cv::Mat> &images, std::vector<std::vector<DetectionResult>> &batchDets) 
{
    // Network input is [n, c, h, w]; only the width is needed because the
    // padded input is square.
    int inputW = mInputShapes[0].dims[3];

    for (size_t batchIdx = 0; batchIdx < images.size(); ++batchIdx)
    {
        // OutputData is column-major across the batch:
        // [[batch1_output1, batch2_output1,...],[batch1_output2,...],...]
        int outputShapeIdx = 0; // the model has a single output tensor
        float *output1 = (float *)mNetOutputs[outputShapeIdx] + mOutputShapes[outputShapeIdx].volume * batchIdx;

        int output_h = mOutputShapes[outputShapeIdx].dims[1]; // 4 + num_classes
        int output_w = mOutputShapes[outputShapeIdx].dims[2]; // num_anchors
        int maxLen = MAX(images[batchIdx].cols, images[batchIdx].rows);
        // preProcess pads to maxLen x maxLen then resizes to inputW, so a
        // single factor maps network coordinates back to the original image.
        float x_factor, y_factor;
        x_factor = y_factor = maxLen / static_cast<float>(inputW);

        cv::Mat dout(output_h, output_w, CV_32F, (float *)output1);
        cv::Mat det_output = dout.t(); // num_anchors x (4 + num_classes)

        std::vector<cv::Rect> boxes;
        std::vector<int> classIds;
        std::vector<float> confidences;

        for (int i = 0; i < det_output.rows; i++)
        {
            // Class scores start after the 4 box values; use the actual
            // column count so models with a different class count work too.
            cv::Mat classes_scores = det_output.row(i).colRange(4, det_output.cols);
            cv::Point classIdPoint;
            double score;
            cv::minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);

            // Confidence is in [0, 1].
            if (score > m_confThres)
            {
                float cx = det_output.at<float>(i, 0);
                float cy = det_output.at<float>(i, 1);
                float ow = det_output.at<float>(i, 2);
                float oh = det_output.at<float>(i, 3);
                // Center/size -> top-left/size in original-image pixels.
                cv::Rect box;
                box.x = static_cast<int>((cx - 0.5 * ow) * x_factor);
                box.y = static_cast<int>((cy - 0.5 * oh) * y_factor);
                box.width = static_cast<int>(ow * x_factor);
                box.height = static_cast<int>(oh * y_factor);

                boxes.push_back(box);
                classIds.push_back(classIdPoint.x);
                confidences.push_back(static_cast<float>(score));
            }
        }

        // NMS: score threshold first, then the NMS/IoU threshold.
        std::vector<int> indexes;
        cv::dnn::NMSBoxes(boxes, confidences, m_confThres, m_nmsThres, indexes);

        std::vector<DetectionResult> dets;
        dets.reserve(indexes.size());
        for (size_t i = 0; i < indexes.size(); i++)
        {
            int index = indexes[i];
            DetectionResult detection;
            detection.bbox = boxes[index];
            detection.class_id = classIds[index];
            detection.confidence = confidences[index];
            dets.push_back(detection);
        }

        batchDets.push_back(dets);
    }

    return 0;
}

// NOTE: the raw YOLO11 output handled here is [1, 84, 8400] — 4 box values
// (cx, cy, w, h) followed by 80 class scores per anchor, with no separate
// objectness term; postProcess() transposes it to 8400x84 before decoding.
#include <opencv2/dnn.hpp>
/**
 * Runs full detection (lazy model load -> preprocess -> inference ->
 * postprocess) on a single image. Returns the detections for the image, or
 * an empty vector on any failure.
 */
std::vector<DetectionResult> Yolo11GcuDetector::detect(const cv::Mat& image) {
    // Lazily build/load the engine on first use.
    if (!m_initialized) {
        int load_ret = loadModel(m_model_path);
        if (load_ret < 0)
        {
            std::cout << "load model:" << m_model_path << " failed" << std::endl;
            return {};
        }
        m_initialized = true;
    }

    //Perf perf;
    //perf.begin("detect");

    std::vector<cv::Mat> images;
    images.push_back(image);

    int batch_size = images.size();

    int ret = preProcess(images);
    if (ret < 0)
    {
        std::cout << "preprocess() failed, ret = " << ret << std::endl;
        return {};
    }

    // (Re)allocate host output buffers sized for this batch.
    freeHostMemory(mNetOutputs);
    mNetOutputs = allocHostMemory(mOutputShapes, batch_size, false);

    auto success = mTopsEngine->runWithBatch(
        batch_size, mNetInputs.data(),
        mNetOutputs.data(),
        TopsInference::BufferType::TIF_ENGINE_RSC_IN_HOST_OUT_HOST);
    if (!success)
    {
        std::cout << "engine run_with_batch failed." << std::endl;
        return {};
    }

    std::vector<std::vector<DetectionResult>> bboxes;
    ret = postProcess(images, bboxes);
    // Guard against an empty batch result before indexing.
    if (ret < 0 || bboxes.empty())
    {
        std::cout << "postProcess() failed, ret = " << ret << std::endl;
        return {};
    }

    return bboxes[0];
}

// Factory entry point: hands ownership of a new GCU-backed detector to the
// caller, who is responsible for deleting it.
BaseDetector* BaseDetector::CreateDetector(const std::string& model_path, int dev_id, int cluster_id)
{
    Yolo11GcuDetector* detector = new Yolo11GcuDetector(model_path, dev_id, cluster_id);
    return detector;
}
