#include "det_predictor.h"

namespace ai {


// Construct a detection predictor; all shared state (engine, context, config,
// logging) is owned by the TRTPredictor base — nothing extra is set up here.
TRTDetPredictor::TRTDetPredictor(TRTPackPredictor* model, TRTModelManager* manager, LogInfo *lg):
  TRTPredictor(model, manager, lg) { }


// Nothing owned directly by this subclass; the base-class destructor
// releases the engine/context resources.
TRTDetPredictor::~TRTDetPredictor(){

}


// Entry point for one frame: binds this thread to the configured GPU, then
// runs the full detect pipeline on the blob's image.
// Returns the result code of detectObjects (0 on success, -1 on failure).
int TRTDetPredictor::RunDet(stream::ImageBlob* blob, std::vector<BaseInfo*>& det_infos) {
  // Select the GPU configured for this model before any CUDA work happens
  // on this thread.
  cv::cuda::setDevice(mdl_trt->cfg->gpu_id);

  return detectObjects(blob->img, det_infos);
}

// Convenience overload for host images: moves the frame into device memory
// and delegates to the GpuMat overload.
int TRTDetPredictor::detectObjects(const cv::Mat &inputImageBGR, std::vector<BaseInfo*>& det_infos) {
    // Upload the BGR frame to the GPU.
    cv::cuda::GpuMat deviceImg;
    deviceImg.upload(inputImageBGR);

    // Run the device-side pipeline and forward its result code.
    return detectObjects(deviceImg, det_infos);
}

// Full single-image pipeline: preprocess -> TensorRT inference -> task-specific
// post-processing. The post-processing branch (detection / pose / segmentation)
// is chosen from the shape of the model's output tensor(s).
// Returns 0 on success, -1 on any setup or enqueue failure.
int TRTDetPredictor::detectObjects(const cv::cuda::GpuMat &inputImageBGR, std::vector<BaseInfo*>& det_infos) {
  // Preprocess the input image (BGR->RGB, letterbox resize, see preprocess()).
  const auto inputs = preprocess(inputImageBGR);

  const auto numInputs = mdl_trt->m_inputDims.size();
  const auto batchSize = static_cast<int32_t>(inputs[0].size());

  cudaStream_t inferenceCudaStream;
  checkCudaErrorCode(cudaStreamCreate(&inferenceCudaStream));

  // Keep the preprocessed blobs alive until inference finishes; their device
  // pointers are handed to TensorRT via m_buffers.
  std::vector<cv::cuda::GpuMat> preprocessedInputs;
  for (size_t i = 0; i < numInputs; ++i) {
    const auto &batchInput = inputs[i];
    const auto &dims = mdl_trt->m_inputDims[i];

    const auto &input = batchInput[0];
    if (input.channels() != dims.d[0] || input.rows != dims.d[1] || input.cols != dims.d[2]) {
        std::cout << "===== Error =====" << std::endl;
        std::cout << "Input does not have correct size!" << std::endl;
        std::cout << "Expected: (" << dims.d[0] << ", " << dims.d[1] << ", " << dims.d[2] << ")" << std::endl;
        std::cout << "Got: (" << input.channels() << ", " << input.rows << ", " << input.cols << ")" << std::endl;
        std::cout << "Ensure you resize your input image to the correct size" << std::endl;
        // BUG FIX: was `return false` (== 0, i.e. success) and leaked the stream.
        checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
        return -1;
    }

    nvinfer1::Dims4 inputDims = {batchSize, dims.d[0], dims.d[1], dims.d[2]};
    mdl_trt->m_context->setInputShape(mdl_trt->m_IOTensorNames[i].c_str(), inputDims); // Define the batch size

    // OpenCV reads images into memory in NHWC format, while TensorRT expects
    // images in NCHW format. blobFromGpuMats repacks the batch into a single
    // planar float blob on the device. See:
    // https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#data-layout
    auto mfloat = blobFromGpuMats(batchInput, m_subVals, m_divVals, m_normalize);
    preprocessedInputs.push_back(mfloat);
    mdl_trt->m_buffers[i] = mfloat.ptr<void>();
  }

  // Ensure all dynamic bindings have been defined.
  if (!mdl_trt->m_context->allInputDimensionsSpecified()) {
    // BUG FIX: release the stream before unwinding.
    checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
    throw std::runtime_error("Error, not all required dimensions specified.");
  }

  // Set the address of the input and output buffers.
  for (size_t i = 0; i < mdl_trt->m_buffers.size(); ++i) {
    bool status = mdl_trt->m_context->setTensorAddress(mdl_trt->m_IOTensorNames[i].c_str(), mdl_trt->m_buffers[i]);
    if (!status) {
      std::cout << "setTensorAddress:  " << status << std::endl;
      // BUG FIX: the stream was leaked on this early-exit path.
      checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
      return -1;
    }
  }

  // Run inference.
  bool status = mdl_trt->m_context->enqueueV3(inferenceCudaStream);
  if (!status) {
    std::cout << "enqueueV3:  " << status << std::endl;
    // BUG FIX: the stream was leaked on this early-exit path.
    checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));
    return -1;
  }

  // Copy every output binding for every batch item back to the host.
  // featureVectors[batch][output] holds one flat float vector.
  std::vector<std::vector<std::vector<float>>> featureVectors;
  for (int batch = 0; batch < batchSize; ++batch) {
    std::vector<std::vector<float>> batchOutputs{};
    // Output bindings start after the numInputs input bindings in m_buffers.
    for (int32_t outputBinding = static_cast<int32_t>(numInputs); outputBinding < mdl_trt->m_engine->getNbIOTensors(); ++outputBinding) {
      auto outputLength = mdl_trt->m_outputLengths[outputBinding - numInputs];
      std::vector<float> output(outputLength);
      checkCudaErrorCode(cudaMemcpyAsync(output.data(),
                                         static_cast<char *>(mdl_trt->m_buffers[outputBinding]) + (batch * sizeof(float) * outputLength),
                                         outputLength * sizeof(float), cudaMemcpyDeviceToHost, inferenceCudaStream));
      batchOutputs.emplace_back(std::move(output));
    }
    featureVectors.emplace_back(std::move(batchOutputs));
  }

  // Wait for the async copies to land before touching host data, then release the stream.
  checkCudaErrorCode(cudaStreamSynchronize(inferenceCudaStream));
  checkCudaErrorCode(cudaStreamDestroy(inferenceCudaStream));

  // Dispatch post-processing: a single output tensor means detection or pose
  // estimation; two outputs means segmentation (detections + mask prototypes).
  const auto numOutputs = mdl_trt->m_outputDims.size();
  if (numOutputs == 1) {
    // Batch size is 1 here, so flatten the single 3D output to a 1D vector
    // (take by reference — no copy needed).
    std::vector<float> &featureVector = featureVectors[0][0];

    const auto &outputDims = mdl_trt->m_outputDims;
    size_t numChannels = outputDims[outputDims.size() - 1].d[1];
    if (numChannels == 4 + mdl_trt->cfg->label_list.size() + NUM_KPS * 3) {
      // box(4) + class scores + 3 values per keypoint -> pose estimation
      postprocessPose(featureVector, det_infos);
    } else if (numChannels == 4 + mdl_trt->cfg->label_list.size()) {
      // box(4) + class scores -> plain object detection
      postprocessDetect(featureVector, det_infos);
    }
    else {
      // Unknown channel layout: log the dims for diagnosis instead of aborting.
      std::string str = "";
      for (int id = 0; id < outputDims[outputDims.size() - 1].nbDims; id++) {str += std::to_string(outputDims[outputDims.size() - 1].d[id]) + " ";}
      spdlog::get("logger")->info("str:{}, numChannels:{}, mdl_trt->cfg->label_list.size():{}", str, numChannels, mdl_trt->cfg->label_list.size());
      // throw std::runtime_error("Error: Unable to identify whether the model is for Pose estimation or Object detection.");
    }
  } else {
    // Segmentation: output 0 = detection head, output 1 = mask prototypes.
    std::vector<std::vector<float>> &featureVector = featureVectors[0];
    postProcessSegmentation(featureVector, det_infos);
  }

  return 0;
}


// Packs a batch of 8-bit HWC GpuMats into one contiguous device buffer laid
// out as per-image planar channels (CHW), converts it to float, and applies
// optional [0,1] normalization plus mean-subtraction / scaling. The returned
// GpuMat owns the device memory handed to the TensorRT input binding.
// All images in batchInput are assumed to share the size of batchInput[0].
cv::cuda::GpuMat TRTDetPredictor::blobFromGpuMats(const std::vector<cv::cuda::GpuMat> &batchInput, const std::array<float, 3> &subVals, const std::array<float, 3> &divVals, bool normalize) {
    // One row of rows*cols*batch CV_8UC3 elements = 3*rows*cols bytes per image,
    // into which the split() below writes the three color planes back-to-back.
    cv::cuda::GpuMat gpu_dst(1, batchInput[0].rows * batchInput[0].cols * batchInput.size(), CV_8UC3);

    size_t width = batchInput[0].cols * batchInput[0].rows;
    for (size_t img = 0; img < batchInput.size(); img++) {
        // Three single-channel headers aliasing consecutive plane-sized slices of
        // gpu_dst; cv::cuda::split scatters the interleaved pixels into them.
        std::vector<cv::cuda::GpuMat> input_channels{
            cv::cuda::GpuMat(batchInput[0].rows, batchInput[0].cols, CV_8U, &(gpu_dst.ptr()[0 + width * 3 * img])),
            cv::cuda::GpuMat(batchInput[0].rows, batchInput[0].cols, CV_8U, &(gpu_dst.ptr()[width + width * 3 * img])),
            cv::cuda::GpuMat(batchInput[0].rows, batchInput[0].cols, CV_8U, &(gpu_dst.ptr()[width * 2 + width * 3 * img]))};
        cv::cuda::split(batchInput[img], input_channels); // HWC -> CHW
    }

    cv::cuda::GpuMat mfloat;
    if (normalize) {
        // [0.f, 1.f]
        gpu_dst.convertTo(mfloat, CV_32FC3, 1.f / 255.f);
    } else {
        // [0.f, 255.f]
        gpu_dst.convertTo(mfloat, CV_32FC3);
    }

    // Apply scaling and mean subtraction.
    // NOTE(review): mfloat is planar (CHW) data merely *viewed* as CV_32FC3, so
    // the per-channel Scalars here hit every third element rather than whole
    // planes. This is only correct when all three subVals are equal and all
    // three divVals are equal (e.g. 0 and 1) — confirm m_subVals/m_divVals.
    cv::cuda::subtract(mfloat, cv::Scalar(subVals[0], subVals[1], subVals[2]), mfloat, cv::noArray(), -1);
    cv::cuda::divide(mfloat, cv::Scalar(divVals[0], divVals[1], divVals[2]), mfloat, 1, -1);

    return mfloat;
}


// Prepare a single GPU image for inference: ensure a 3-channel RGB image,
// letterbox-resize it to the model's input size, and record the scale
// information (m_imgWidth/m_imgHeight/m_ratio) that post-processing uses to
// map detections back to original image coordinates.
std::vector<std::vector<cv::cuda::GpuMat>> TRTDetPredictor::preprocess(const cv::cuda::GpuMat &gpuImg) {
  // Populate the input vectors
  const auto &inputDims = mdl_trt->m_inputDims[0];

  // BUG FIX: the previous code converted GRAY->BGR with the CPU cv::cvtColor
  // (invalid for GpuMat) and then unconditionally ran BGR2RGB on the ORIGINAL
  // image, discarding the grayscale conversion and failing on 1-channel input.
  cv::cuda::GpuMat bgrMat;
  if (gpuImg.channels() == 1) {
    cv::cuda::cvtColor(gpuImg, bgrMat, cv::COLOR_GRAY2BGR);
  } else {
    bgrMat = gpuImg; // shallow header copy; no pixel data duplicated
  }

  // The network expects RGB while OpenCV delivers BGR.
  cv::cuda::GpuMat rgbMat;
  cv::cuda::cvtColor(bgrMat, rgbMat, cv::COLOR_BGR2RGB);

  auto resized = rgbMat;

  // Resize to the model expected input size while maintaining the aspect ratio
  // with the use of right/bottom padding.
  if (resized.rows != inputDims.d[1] || resized.cols != inputDims.d[2]) {
      // Only resize if not already the right size to avoid unnecessary copy
      resized = resizeKeepAspectRatioPadRightBottom(rgbMat, inputDims.d[1], inputDims.d[2], cv::Scalar(0, 0, 0));
  }

  // These params will be used in the post-processing stage to undo the letterbox.
  m_imgHeight = rgbMat.rows;
  m_imgWidth = rgbMat.cols;
  m_ratio = 1.f / std::min(inputDims.d[2] / static_cast<float>(rgbMat.cols), inputDims.d[1] / static_cast<float>(rgbMat.rows));

  // Convert to the nested-vector format expected by the inference code. The
  // layout supports multi-input, batched models; here it is one input with a
  // batch size of 1.
  std::vector<cv::cuda::GpuMat> input{std::move(resized)};
  std::vector<std::vector<cv::cuda::GpuMat>> inputs{std::move(input)};

  return inputs;
}

// Letterbox resize on the GPU: scale `input` to fit inside (width x height)
// without distortion, placing it at the top-left and filling the remaining
// right/bottom area with `bgcolor`.
cv::cuda::GpuMat TRTDetPredictor::resizeKeepAspectRatioPadRightBottom(const cv::cuda::GpuMat &input, size_t height, size_t width, const cv::Scalar &bgcolor) {
    // Largest scale that keeps both dimensions within the target box.
    const float scale = std::min(width / (input.cols * 1.0), height / (input.rows * 1.0));
    const int scaledW = scale * input.cols;
    const int scaledH = scale * input.rows;

    // Resize into an exactly-sized intermediate buffer.
    cv::cuda::GpuMat shrunk(scaledH, scaledW, CV_8UC3);
    cv::cuda::resize(input, shrunk, shrunk.size());

    // Background-filled canvas; blit the resized image into its top-left corner.
    cv::cuda::GpuMat padded(height, width, CV_8UC3, bgcolor);
    shrunk.copyTo(padded(cv::Rect(0, 0, shrunk.cols, shrunk.rows)));
    return padded;
}

int TRTDetPredictor::postProcessSegmentation(std::vector<std::vector<float>> &featureVectors, std::vector<BaseInfo*>& det_infos) {
    const auto &outputDims = mdl_trt->m_outputDims;

    int numChannels = outputDims[0].d[1];
    int numAnchors = outputDims[0].d[2];

    const auto numClasses = numChannels - SEG_CHANNELS - 4;

    // Ensure the output lengths are correct
    if (featureVectors[0].size() != static_cast<size_t>(numChannels) * numAnchors) {
        throw std::logic_error("Output at index 0 has incorrect length");
    }

    if (featureVectors[1].size() != static_cast<size_t>(SEG_CHANNELS) * SEG_H * SEG_W) {
        throw std::logic_error("Output at index 1 has incorrect length");
    }

    cv::Mat output = cv::Mat(numChannels, numAnchors, CV_32F, featureVectors[0].data());
    output = output.t();

    cv::Mat protos = cv::Mat(SEG_CHANNELS, SEG_H * SEG_W, CV_32F, featureVectors[1].data());

    std::vector<int> labels;
    std::vector<float> scores;
    std::vector<cv::Rect> bboxes;
    std::vector<cv::Mat> maskConfs;
    std::vector<int> indices;

    // Object the bounding boxes and class labels
    for (int i = 0; i < numAnchors; i++) {
        auto rowPtr = output.row(i).ptr<float>();
        auto bboxesPtr = rowPtr;
        auto scoresPtr = rowPtr + 4;
        auto maskConfsPtr = rowPtr + 4 + numClasses;
        auto maxSPtr = std::max_element(scoresPtr, scoresPtr + numClasses);
        float score = *maxSPtr;
        if (score > mdl_trt->cfg->draw_threshold) {
            float x = *bboxesPtr++;
            float y = *bboxesPtr++;
            float w = *bboxesPtr++;
            float h = *bboxesPtr;

            float x0 = std::clamp((x - 0.5f * w) * m_ratio, 0.f, m_imgWidth);
            float y0 = std::clamp((y - 0.5f * h) * m_ratio, 0.f, m_imgHeight);
            float x1 = std::clamp((x + 0.5f * w) * m_ratio, 0.f, m_imgWidth);
            float y1 = std::clamp((y + 0.5f * h) * m_ratio, 0.f, m_imgHeight);

            int label = maxSPtr - scoresPtr;
            cv::Rect_<float> bbox;
            bbox.x = x0;
            bbox.y = y0;
            bbox.width = x1 - x0;
            bbox.height = y1 - y0;

            cv::Mat maskConf = cv::Mat(1, SEG_CHANNELS, CV_32F, maskConfsPtr);

            bboxes.push_back(bbox);
            labels.push_back(label);
            scores.push_back(score);
            maskConfs.push_back(maskConf);
        }
    }

    // Require OpenCV 4.7 for this function
    cv::dnn::NMSBoxesBatched(bboxes, scores, labels, mdl_trt->cfg->draw_threshold, mdl_trt->cfg->nms_threshold, indices);

    // Obtain the segmentation masks
    cv::Mat masks;
    int cnt = 0;
    for (auto &i : indices) {
        if (cnt >= TOP_K) {
            break;
        }
        cv::Rect tmp = bboxes[i];
        ai::DetInfo* obj = new ai::DetInfo(scores[i], labels[i], tmp);
        masks.push_back(maskConfs[i]);
        det_infos.push_back(obj);
        cnt += 1;
    }

    // Convert segmentation mask to original frame
    if (!masks.empty()) {
        cv::Mat matmulRes = (masks * protos).t();
        cv::Mat maskMat = matmulRes.reshape(indices.size(), {SEG_W, SEG_H});

        std::vector<cv::Mat> maskChannels;
        cv::split(maskMat, maskChannels);
        const auto inputDims = mdl_trt->m_inputDims;

        cv::Rect roi;
        if (m_imgHeight > m_imgWidth) {
            roi = cv::Rect(0, 0, SEG_W * m_imgWidth / m_imgHeight, SEG_H);
        } else {
            roi = cv::Rect(0, 0, SEG_W, SEG_H * m_imgHeight / m_imgWidth);
        }

        for (size_t i = 0; i < indices.size(); i++) {
            cv::Mat dest, mask;
            cv::exp(-maskChannels[i], dest);
            dest = 1.0 / (1.0 + dest);
            dest = dest(roi);
            cv::resize(dest, mask, cv::Size(static_cast<int>(m_imgWidth), static_cast<int>(m_imgHeight)), cv::INTER_LINEAR);
            ((ai::DetInfo*)det_infos[i])->mask = mask(((ai::DetInfo*)det_infos[i])->det_box) > SEGMENTATION_THRESHOLD;
        }
    }

    return 0;
}

int TRTDetPredictor::postprocessPose(std::vector<float> &featureVector, std::vector<BaseInfo*>& det_infos) {
    const auto &outputDims = mdl_trt->m_outputDims;
    auto numChannels = outputDims[0].d[1];
    auto numAnchors = outputDims[0].d[2];

    std::vector<cv::Rect> bboxes;
    std::vector<float> scores;
    std::vector<int> labels;
    std::vector<int> indices;
    std::vector<std::vector<float>> kpss;

    cv::Mat output = cv::Mat(numChannels, numAnchors, CV_32F, featureVector.data());
    output = output.t();

    // Get all the YOLO proposals
    for (int i = 0; i < numAnchors; i++) {
        auto rowPtr = output.row(i).ptr<float>();
        auto bboxesPtr = rowPtr;
        auto scoresPtr = rowPtr + 4;
        auto kps_ptr = rowPtr + 5;
        float score = *scoresPtr;
        if (score > mdl_trt->cfg->draw_threshold) {
            float x = *bboxesPtr++;
            float y = *bboxesPtr++;
            float w = *bboxesPtr++;
            float h = *bboxesPtr;

            float x0 = std::clamp((x - 0.5f * w) * m_ratio, 0.f, m_imgWidth);
            float y0 = std::clamp((y - 0.5f * h) * m_ratio, 0.f, m_imgHeight);
            float x1 = std::clamp((x + 0.5f * w) * m_ratio, 0.f, m_imgWidth);
            float y1 = std::clamp((y + 0.5f * h) * m_ratio, 0.f, m_imgHeight);

            cv::Rect_<float> bbox;
            bbox.x = x0;
            bbox.y = y0;
            bbox.width = x1 - x0;
            bbox.height = y1 - y0;

            std::vector<float> kps;
            for (int k = 0; k < NUM_KPS; k++) {
                float kpsX = *(kps_ptr + 3 * k) * m_ratio;
                float kpsY = *(kps_ptr + 3 * k + 1) * m_ratio;
                float kpsS = *(kps_ptr + 3 * k + 2);
                kpsX = std::clamp(kpsX, 0.f, m_imgWidth);
                kpsY = std::clamp(kpsY, 0.f, m_imgHeight);
                kps.push_back(kpsX);
                kps.push_back(kpsY);
                kps.push_back(kpsS);
            }

            bboxes.push_back(bbox);
            labels.push_back(0); // All detected objects are people
            scores.push_back(score);
            kpss.push_back(kps);
        }
    }

    // Run NMS
    cv::dnn::NMSBoxesBatched(bboxes, scores, labels, mdl_trt->cfg->draw_threshold, mdl_trt->cfg->nms_threshold, indices);


    // Choose the top k detections
    int cnt = 0;
    for (auto &chosenIdx : indices) {
        if (cnt >= TOP_K) {
            break;
        }

        ai::DetInfo* obj = new ai::DetInfo(scores[chosenIdx], labels[chosenIdx], bboxes[chosenIdx]);
        obj->kps = kpss[chosenIdx];
        det_infos.push_back(obj);

        cnt += 1;
    }

    return 0;
}

int TRTDetPredictor::postprocessDetect(std::vector<float> &featureVector, std::vector<BaseInfo*>& det_infos) {
    const auto &outputDims = mdl_trt->m_outputDims;
    auto numChannels = outputDims[0].d[1];
    auto numAnchors = outputDims[0].d[2];

    auto numClasses = mdl_trt->cfg->label_list.size();

    std::vector<cv::Rect> bboxes;
    std::vector<float> scores;
    std::vector<int> labels;
    std::vector<int> indices;

    cv::Mat output = cv::Mat(numChannels, numAnchors, CV_32F, featureVector.data());
    output = output.t();

    // Get all the YOLO proposals
    for (int i = 0; i < numAnchors; i++) {
        auto rowPtr = output.row(i).ptr<float>();
        auto bboxesPtr = rowPtr;
        auto scoresPtr = rowPtr + 4;
        auto maxSPtr = std::max_element(scoresPtr, scoresPtr + numClasses);
        float score = *maxSPtr;
        if (score > mdl_trt->cfg->draw_threshold) {
            float x = *bboxesPtr++;
            float y = *bboxesPtr++;
            float w = *bboxesPtr++;
            float h = *bboxesPtr;

            float x0 = std::clamp((x - 0.5f * w) * m_ratio, 0.f, m_imgWidth);
            float y0 = std::clamp((y - 0.5f * h) * m_ratio, 0.f, m_imgHeight);
            float x1 = std::clamp((x + 0.5f * w) * m_ratio, 0.f, m_imgWidth);
            float y1 = std::clamp((y + 0.5f * h) * m_ratio, 0.f, m_imgHeight);

            int label = maxSPtr - scoresPtr;
            cv::Rect_<float> bbox;
            bbox.x = x0;
            bbox.y = y0;
            bbox.width = x1 - x0;
            bbox.height = y1 - y0;

            bboxes.push_back(bbox);
            labels.push_back(label);
            scores.push_back(score);
        }
    }

    // Run NMS
    cv::dnn::NMSBoxesBatched(bboxes, scores, labels, mdl_trt->cfg->draw_threshold, mdl_trt->cfg->nms_threshold, indices);


    // Choose the top k detections
    int cnt = 0;
    for (auto &chosenIdx : indices) {
        if (cnt >= TOP_K) {
            break;
        }

        ai::DetInfo* obj = new ai::DetInfo(scores[chosenIdx], labels[chosenIdx], bboxes[chosenIdx], mdl_trt->cfg->label_list[labels[chosenIdx]]);
        det_infos.push_back(obj);

        cnt += 1;
    }

    return 0;
}



// NOTE(review): the entire implementation below is commented out, so this
// function is currently a NO-OP — callers get no drawing. The disabled code
// also references an `objects` collection and CLASS_NAMES/COLOR_LIST tables
// that do not match the current det_infos signature; it would need porting to
// BaseInfo*/DetInfo before re-enabling.
void TRTDetPredictor::drawObjectLabels(cv::Mat &image, std::vector<BaseInfo*>& det_infos, unsigned int scale) {
    // // If segmentation information is present, start with that
    // if (!objects.empty() && !objects[0].boxMask.empty()) {
    //     cv::Mat mask = image.clone();
    //     for (const auto &object : objects) {
    //         // Choose the color
    //         int colorIndex = object.label % COLOR_LIST.size(); // We have only defined 80 unique colors
    //         cv::Scalar color = cv::Scalar(COLOR_LIST[colorIndex][0], COLOR_LIST[colorIndex][1], COLOR_LIST[colorIndex][2]);

    //         // Add the mask for said object
    //         mask(object.rect).setTo(color * 255, object.boxMask);
    //     }
    //     // Add all the masks to our image
    //     cv::addWeighted(image, 0.5, mask, 0.8, 1, image);
    // }

    // // Bounding boxes and annotations
    // for (auto &object : objects) {
    //     // Choose the color
    //     int colorIndex = object.label % COLOR_LIST.size(); // We have only defined 80 unique colors
    //     cv::Scalar color = cv::Scalar(COLOR_LIST[colorIndex][0], COLOR_LIST[colorIndex][1], COLOR_LIST[colorIndex][2]);
    //     float meanColor = cv::mean(color)[0];
    //     cv::Scalar txtColor;
    //     if (meanColor > 0.5) {
    //         txtColor = cv::Scalar(0, 0, 0);
    //     } else {
    //         txtColor = cv::Scalar(255, 255, 255);
    //     }

    //     const auto &rect = object.rect;

    //     // Draw rectangles and text
    //     char text[256];
    //     sprintf(text, "%s %.1f%%", CLASS_NAMES[object.label].c_str(), object.probability * 100);

    //     int baseLine = 0;
    //     cv::Size labelSize = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.35 * scale, scale, &baseLine);

    //     cv::Scalar txt_bk_color = color * 0.7 * 255;

    //     int x = object.rect.x;
    //     int y = object.rect.y + 1;

    //     cv::rectangle(image, rect, color * 255, scale + 1);

    //     cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(labelSize.width, labelSize.height + baseLine)), txt_bk_color, -1);

    //     cv::putText(image, text, cv::Point(x, y + labelSize.height), cv::FONT_HERSHEY_SIMPLEX, 0.35 * scale, txtColor, scale);

    //     // Pose estimation
    //     if (!object.kps.empty()) {
    //         auto &kps = object.kps;
    //         for (int k = 0; k < NUM_KPS + 2; k++) {
    //             if (k < NUM_KPS) {
    //                 int kpsX = std::round(kps[k * 3]);
    //                 int kpsY = std::round(kps[k * 3 + 1]);
    //                 float kpsS = kps[k * 3 + 2];
    //                 if (kpsS > KPS_THRESHOLD) {
    //                     cv::Scalar kpsColor = cv::Scalar(KPS_COLORS[k][0], KPS_COLORS[k][1], KPS_COLORS[k][2]);
    //                     cv::circle(image, {kpsX, kpsY}, 5, kpsColor, -1);
    //                 }
    //             }
    //             auto &ske = SKELETON[k];
    //             int pos1X = std::round(kps[(ske[0] - 1) * 3]);
    //             int pos1Y = std::round(kps[(ske[0] - 1) * 3 + 1]);

    //             int pos2X = std::round(kps[(ske[1] - 1) * 3]);
    //             int pos2Y = std::round(kps[(ske[1] - 1) * 3 + 1]);

    //             float pos1S = kps[(ske[0] - 1) * 3 + 2];
    //             float pos2S = kps[(ske[1] - 1) * 3 + 2];

    //             if (pos1S > KPS_THRESHOLD && pos2S > KPS_THRESHOLD) {
    //                 cv::Scalar limbColor = cv::Scalar(LIMB_COLORS[k][0], LIMB_COLORS[k][1], LIMB_COLORS[k][2]);
    //                 cv::line(image, {pos1X, pos1Y}, {pos2X, pos2Y}, limbColor, 2);
    //             }
    //         }
    //     }
    // }
}

}