#include "pose_detector_ihs.h"

#include <opencv2/opencv.hpp>

#include <chrono>
#include <fstream>
#include <chrono>

using namespace perception::camera;

// Process-wide singleton instance; created lazily by get_detector() and
// reset to nullptr by deinit().
std::shared_ptr<PoseDetectorIhs> PoseDetectorIhs::detector_ = nullptr;

// Construct with a named ROS logger only; all heavy setup (engine load,
// CUDA allocations) is deferred to init().
PoseDetectorIhs::PoseDetectorIhs()
    : logger__(rclcpp::get_logger("PoseDetectorIhs"))
{
}

bool PoseDetectorIhs::init(const std::string& model_dir)
{
    const std::string engine_file_path = model_dir + "/object_detect_ihs.engine";
    RCLCPP_INFO(logger__, "engine file: %s", engine_file_path.c_str());
    std::ifstream model_file(engine_file_path, std::ios::binary);
    if (!model_file.good()) {
        RCLCPP_FATAL(logger__, "model file err:%s", engine_file_path.c_str());
    }

    model_file.seekg(0, std::ios::end);
    auto size = model_file.tellg();
    model_file.seekg(0, std::ios::beg);
    char* trtModelStream = new char[size];
    assert(trtModelStream);
    model_file.read(trtModelStream, size);
    model_file.close();
    initLibNvInferPlugins(&this->logger_, "");
    this->runtime_ = nvinfer1::createInferRuntime(this->logger_);
    assert(this->runtime_ != nullptr);

    this->engine_ = this->runtime_->deserializeCudaEngine(trtModelStream, size);
    assert(this->engine_ != nullptr);
    delete[] trtModelStream;
    this->context_ = this->engine_->createExecutionContext();

    assert(this->context_ != nullptr);
    cudaStreamCreate(&this->stream_);
    this->num_bindings = this->engine_->getNbBindings();

    for (int i = 0; i < this->num_bindings; ++i) {
        Binding binding;
        nvinfer1::Dims dims;
        nvinfer1::DataType dtype = this->engine_->getBindingDataType(i);
        std::string name = this->engine_->getBindingName(i);
        binding.name = name;
        binding.dsize = type_to_size(dtype);

        bool IsInput = engine_->bindingIsInput(i);
        if (IsInput) {
            this->num_inputs += 1;
            dims = this->engine_->getProfileDimensions(
                i,
                0,
                nvinfer1::OptProfileSelector::kMAX);
                        binding.size = get_size_by_dims(dims);
            binding.dims = dims;
            this->input_bindings.push_back(binding);
            // set max opt shape
            this->context_->setBindingDimensions(i, dims);

        } else {
            dims = this->context_->getBindingDimensions(i);
            binding.size = get_size_by_dims(dims);
            binding.dims = dims;
            this->output_bindings.push_back(binding);
            this->num_outputs += 1;
        }
    }
    data_ = new (float[1920 * 1080 * 3]);
    make_pipe(true);
    // UNKNOWN = 0,
    // UNKNOWN_MOVABLE = 1,
    // UNKNOWN_UNMOVABLE = 2,
    // CAR = 3,
    // VAN = 4,
    // TRUCK = 5,
    // BUS = 6,
    // CYCLIST = 7,
    // MOTORCYCLIST = 8,
    // TRICYCLIST = 9,
    // PEDESTRIAN = 10,
    // TRAFFICCONE = 11,
    // SAFETY_TRIANGLE = 12,
    // BARRIER_DELINEATOR = 13,
    // BARRIER_WATER = 14,
    // ANIMAL = 15,
    // MAX_OBJECT_TYPE = 16,
    // id2name = {0:'Car', 1:'Bus', 2:'Truck', 3:'Pedestrian', 4:'Motorcyclist', 5:'Cyclist', 6:'Van',
    //         7:'Tricyclist', 8:'Trafficcone', 9:'Warningtriangle', 10:'Animal'}
    hw2air_ = { { 0, 3 }, { 1, 6 }, { 2, 5 }, { 3, 10 }, { 4, 8 }, { 5, 7 }, { 6, 4 }, { 7, 9 }, { 8, 11 }, { 9, 12 }, {10, 15} };
    return true;
}

void PoseDetectorIhs::make_pipe(bool warmup)
{

    for (auto& bindings : this->input_bindings) {
        void* d_ptr { nullptr };
        cudaError_t en = cudaMalloc(
            &d_ptr,
            bindings.size * bindings.dsize);
        RCLCPP_INFO(logger__, "errno:%d", en);
        this->device_ptrs.push_back(d_ptr);
    }

    for (auto& bindings : this->output_bindings) {
        void *d_ptr { nullptr }, *h_ptr { nullptr };
        // size_t size = 29*8400*sizeof(float);//640 * 640 * 3 * sizeof(float);//
        size_t size = bindings.size * bindings.dsize;
        RCLCPP_INFO(logger__, "reid output bindings size:%d,dsize:%d", bindings.size, bindings.dsize);
        cudaError_t en = cudaMalloc(
            &d_ptr,
            size);
        RCLCPP_INFO(logger__, "cudama:%d", en);
        en = cudaHostAlloc(
            &h_ptr,
            size,
            0);
        RCLCPP_INFO(logger__, "host:%d", en);
        this->device_ptrs.push_back(d_ptr);
        this->host_ptrs.push_back(h_ptr);
    }

    if (warmup) {
        for (int i = 0; i < 10; i++) {
            for (auto& bindings : this->input_bindings) {
                // size_t size = 640 * 640 * 3 * sizeof(float);//
                size_t size = bindings.size * bindings.dsize;
                void* h_ptr = malloc(size);
                memset(h_ptr, 0, size);
                cudaError_t en = cudaMemcpy(
                    this->device_ptrs[0],
                    h_ptr,
                    size,
                    cudaMemcpyHostToDevice);
                free(h_ptr);
            }
            this->infer();
        }
        RCLCPP_INFO(logger__, "model warmup 10 times2");
    }
}

// Release all TensorRT objects, the CUDA stream, the scratch buffer and every
// device / pinned-host allocation, then drop the singleton reference.
void PoseDetectorIhs::deinit()
{
    if (this->context_) this->context_->destroy();
    if (this->engine_) this->engine_->destroy();
    if (this->runtime_) this->runtime_->destroy();
    cudaStreamDestroy(this->stream_);
    delete[] data_;
    data_ = nullptr;
    for (auto& ptr : this->device_ptrs) {
        // Bug fix: the original `assert(cudaFree(ptr))` asserted on SUCCESS
        // (cudaSuccess == 0 evaluates false) and, worse, the free itself was
        // compiled away entirely in NDEBUG builds.
        cudaFree(ptr);
    }
    this->device_ptrs.clear();

    for (auto& ptr : this->host_ptrs) {
        cudaFreeHost(ptr);
    }
    this->host_ptrs.clear();

    detector_ = nullptr;
}

// Designed for 1920x1080 frames; other sizes are resized to the network input.
// Keeps an unscaled copy in image_orig_ (postprocess maps coordinates back to
// it), converts the frame to float RGB at (infer_w_, infer_h_), fixes the
// dynamic input binding to 1xHxWx3 (NHWC) and uploads it to the device.
void PoseDetectorIhs::copy_from_mat(const cv::Mat& image)
{
    image_orig_ = image.clone();

    cv::Mat net_input;
    cv::resize(image, net_input, { infer_w_, infer_h_ }, 0, 0, cv::INTER_NEAREST);
    net_input.convertTo(net_input, CV_32FC3);
    cv::cvtColor(net_input, net_input, cv::COLOR_BGR2RGB);

    const nvinfer1::Dims input_dims { 4, { 1, infer_h_, infer_w_, 3 } };
    this->context_->setBindingDimensions(0, input_dims);

    const size_t byte_count =
        static_cast<size_t>(infer_w_) * infer_h_ * 3 * sizeof(float);
    cudaMemcpy(
        this->device_ptrs[0],
        net_input.data,
        byte_count,
        cudaMemcpyHostToDevice);
}

// Launch the network on stream_ and copy every output binding back into the
// pinned host buffers (host_ptrs). Device pointers are laid out
// [inputs..., outputs...] — see make_pipe().
void PoseDetectorIhs::infer()
{
    this->context_->enqueueV2(
        this->device_ptrs.data(),
        this->stream_,
        nullptr);
    // Bug fix: wait for the enqueued work BEFORE reading the outputs. The
    // original synchronized after the copies, which was only correct by
    // accident (synchronous cudaMemcpy on the legacy default stream happens
    // to block against blocking streams).
    cudaStreamSynchronize(this->stream_);
    for (int i = 0; i < this->num_outputs; i++) {
        size_t osize = this->output_bindings[i].size * this->output_bindings[i].dsize;
        cudaMemcpy(this->host_ptrs[i],
            this->device_ptrs[i + this->num_inputs],
            osize,
            cudaMemcpyDeviceToHost);
    }
}

// Intersection-over-union of two axis-aligned boxes given as corner
// coordinates (xmin, ymin, xmax, ymax). Returns a value in [0, 1]; a tiny
// epsilon in the denominator guards against division by zero for
// degenerate (zero-area) boxes.
float PoseDetectorIhs::iou(const float& xmin1, const float& ymin1, const float& xmax1, const float& ymax1,
    const float& xmin2, const float& ymin2, const float& xmax2, const float& ymax2)
{
    const float ix0 = std::max(xmin1, xmin2);
    const float iy0 = std::max(ymin1, ymin2);
    const float ix1 = std::min(xmax1, xmax2);
    const float iy1 = std::min(ymax1, ymax2);
    const float inter_w = std::max(0.0f, ix1 - ix0);
    const float inter_h = std::max(0.0f, iy1 - iy0);
    const float inter_area = inter_w * inter_h;
    const float area1 = (xmax1 - xmin1) * (ymax1 - ymin1);
    const float area2 = (xmax2 - xmin2) * (ymax2 - ymin2);
    const float union_area = area1 + area2 - inter_area;
    return inter_area / (union_area + 0.00000001);
}

// Decode the engine's NMS'ed detection outputs into ObjectDetectInfo objects.
// Reads the six host output buffers filled by infer(), runs a second
// high-threshold NMS pass to drop near-duplicate boxes, maps coordinates from
// network-input space back to the original image, and attaches the 8-point
// cuboid keypoints gathered from the raw keypoint tensor.
//
// NOTE(review): the host_ptrs index/shape assignments below (1x1 count,
// 1x30x4 boxes, 1x30 scores/classes/indices, 1x8400x16 keypoints) reflect a
// specific engine output layout — verify against the exported model.
void PoseDetectorIhs::postprocess(
    std::vector<ObjectDetectInfoPtr>& objs,
    float score_thres)
{
    auto time_trt_start = std::chrono::high_resolution_clock::now();
    objs.clear();
    // int num_channels = this->output_bindings[0].dims.d[1];
    // int num_anchors = this->output_bindings[0].dims.d[2];

    std::vector<cv::Rect> bboxes;
    std::vector<float> scores;
    std::vector<int> labels;
    std::vector<int> indices;
    std::vector<std::vector<float>> kpss;
    // Raw views into the engine's output buffers (see NOTE above on layout).
    int32_t* pnum_det = static_cast<int32_t*>(this->host_ptrs[1]); // 1x1
    float* pnmsed_boxes = static_cast<float*>(this->host_ptrs[2]); // 1x30x4
    float* pnmsed_scores = static_cast<float*>(this->host_ptrs[3]); // 1x30
    float* pnmsed_classes = static_cast<float*>(this->host_ptrs[4]); // 1x30
    int32_t* pnmsed_indices = static_cast<int32_t*>(this->host_ptrs[5]); // 1x30
    float* pkpts_tensor = static_cast<float*>(this->host_ptrs[0]); // 1x8400x16
    int num_det = *pnum_det;
    int orig_image_h = image_orig_.rows;
    int orig_image_w = image_orig_.cols;
    // Scale factor from original image to network input; `ratio` maps back.
    float r = std::min(1.0 * infer_h_ / orig_image_h, 1.0 * infer_w_ / orig_image_w);
    float ratio = 1 / r;
    int resize_h = orig_image_h * r;
    int resize_w = orig_image_w * r;
    // NOTE(review): hardcoded true makes the imwrite at the bottom fire on
    // EVERY frame — looks like leftover debug code; confirm before shipping.
    bool have_vehicle = true;
    // Scores are already sorted; run a light NMS pass over the few remaining
    // boxes with a large IoU threshold to drop near-duplicates.
    std::set<int> remove_boxes;
    for (int idet = 0; idet < num_det; ++idet) {
        if (remove_boxes.find(idet) != remove_boxes.end()) {
            continue;
        }
        float xmin1 = *(pnmsed_boxes + idet * 4 + 0);
        float ymin1 = *(pnmsed_boxes + idet * 4 + 1);
        float xmax1 = *(pnmsed_boxes + idet * 4 + 2);
        float ymax1 = *(pnmsed_boxes + idet * 4 + 3);
        for (int jdet = idet + 1; jdet < num_det; ++jdet) {
            if (remove_boxes.find(jdet) != remove_boxes.end()) {
                continue;
            }
            float xmin2 = *(pnmsed_boxes + jdet * 4 + 0);
            float ymin2 = *(pnmsed_boxes + jdet * 4 + 1);
            float xmax2 = *(pnmsed_boxes + jdet * 4 + 2);
            float ymax2 = *(pnmsed_boxes + jdet * 4 + 3);
            float iou_score = iou(xmin1, ymin1, xmax1, ymax1, xmin2, ymin2, xmax2, ymax2);
            // std::cout << "iou:" << xmin1 << "," << ymin1 << "," << xmax1 << "<" << ymax1 << ";"
            //         << xmin2 << "," << ymin2 << "," <<  xmax2 << "," <<  ymax2 << ";" << iou_score << std::endl;
            if (iou_score > 0.8) {
                // Later (lower-scoring) box overlaps heavily — suppress it.
                remove_boxes.insert(jdet);
            }
        }
    }
    auto time_toremove = std::chrono::high_resolution_clock::now();

    // Gather surviving detections: clamp boxes, map to original-image
    // coordinates, filter by score, and attach cuboid keypoints.
    for (int idet = 0; idet < num_det; ++idet) {
        if (remove_boxes.find(idet) != remove_boxes.end()) {
            continue;
        }
        float xmin { 0.f }, ymin { 0.f }, xmax { 0.f }, ymax { 0.f }, score { 0.f };
        xmin = *(pnmsed_boxes + idet * 4 + 0);
        ymin = *(pnmsed_boxes + idet * 4 + 1);
        xmax = *(pnmsed_boxes + idet * 4 + 2);
        ymax = *(pnmsed_boxes + idet * 4 + 3);
        xmin = std::max(0.0f, xmin);
        ymin = std::max(0.0f, ymin);
        xmax = std::min(1.0f * infer_w_ + pad_w_, xmax);
        ymax = std::min(1.0f * infer_h_ + pad_h_, ymax);
        // cv::rectangle(image_resize_, cv::Point{xmin, ymin-12}, cv::Point{xmax, ymax-12}, cv::Scalar{0, 0, 255}, 2);
        score = *(pnmsed_scores + idet);
        int class_id = (int)(*(pnmsed_classes + idet));
        // Undo padding and resize: back to original-image pixel coordinates.
        cv::Rect_<float> bbox { float((xmin - pad_w_) * ratio), float((ymin - pad_h_) * ratio),
            float((xmax - xmin) * ratio), float((ymax - ymin) * ratio) };
        // cv::rectangle(image_orig_, cv::Rect2d { bbox }, cv::Scalar { 255, 0, 0 }, 3);

        if (class_id == -1 || score < score_thres) {
            continue;
        }
        // if (class_id == 0 || class_id == 1 || class_id == 2 || class_id == 6){
        //     have_vehicle = true;
        // }

        // Map the NMS output index back into the raw keypoint tensor.
        // NOTE(review): the % 5040 wrap presumably reflects the anchor count
        // per class/level in the exported model — confirm against the export.
        int raw_idx = pnmsed_indices[idet];
        int idx = raw_idx % 5040;
        float* ppkts = pkpts_tensor + idx * 16;
        CubeBox2f cubebox, cubebox_tmp;
        cv::Point2f bottom_uv;
        // bool truncated = false;
        // 8 cuboid corner keypoints, each (x, y) in network-input space.
        for (int ik = 0; ik < 8; ++ik) {
            float x = ppkts[2 * ik];
            float y = ppkts[2 * ik + 1];
            // cv::circle(image_resize_, cv::Point{x, y-12}, 3, cv::Scalar{0, 255, 0}, -1);
            // std::cout << "xy:" << x << "," << y << std::endl;
            x = (x - pad_w_) * ratio;
            y = (y - pad_h_) * ratio;
            // cv::circle(image_orig_, cv::Point{x, y}, 3, cv::Scalar{255, 255, 0}, -1);
            cubebox_tmp.pts8[ik] = perception::camera::Point2f { x, y };
        }
        // std::cout << std::endl;
        cubebox = cubebox_tmp;
        // Reordering of the 8 points into the "air" convention is disabled —
        // points are passed through in model order.
        // cubebox.pts8[0] = cubebox_tmp.pts8[4];
        // cubebox.pts8[1] = cubebox_tmp.pts8[7];
        // cubebox.pts8[2] = cubebox_tmp.pts8[6];
        // cubebox.pts8[3] = cubebox_tmp.pts8[5];
        // cubebox.pts8[4] = cubebox_tmp.pts8[0];
        // cubebox.pts8[5] = cubebox_tmp.pts8[3];
        // cubebox.pts8[6] = cubebox_tmp.pts8[2];
        // cubebox.pts8[7] = cubebox_tmp.pts8[1];

        ObjectDetectInfoPtr pobj = std::make_shared<ObjectDetectInfo>();
        // NOTE(review): hw2air_[class_id] default-inserts 0 (UNKNOWN) for any
        // class id outside the map — relies on the model emitting ids 0..10.
        int air_cls = hw2air_[class_id];
        pobj->type_id = air_cls;
        pobj->type_id_confidence = score;
        pobj->box = Box2f { perception::camera::Point2f { bbox.tl().x, bbox.tl().y },
            perception::camera::Point2f { bbox.br().x, bbox.br().y } };

        // Bottom-face center: average of the first four cuboid corners.
        float bu = (cubebox.pts8[0].x + cubebox.pts8[1].x + cubebox.pts8[2].x + cubebox.pts8[3].x) / 4;
        float bv = (cubebox.pts8[0].y + cubebox.pts8[1].y + cubebox.pts8[2].y + cubebox.pts8[3].y) / 4;
        pobj->bottom_uv = perception::camera::Point2f { bu, bv };

        // Original note: "不让dibian" (intent unclear — possibly "don't adjust
        // the bottom edge"); keypoints are stored as-is.
        pobj->pts8 = std::move(cubebox);
        pobj->is_truncated = TriStatus::UNKNOWN;
        objs.push_back(pobj);
        // NOTE(review): active debug drawing — labels every corner index on
        // image_orig_; remove for production.
        for (int icb = 0; icb < 8; ++icb){
            int x = cubebox.pts8[icb].x;
            int y = cubebox.pts8[icb].y;
            // cv::circle(image_orig_, cv::Point{x, y}, 3, cv::Scalar{255, 255, 0}, -1);
            cv::putText(image_orig_, std::to_string(icb), cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.5, {0, 0, 255}, 1);
        }
    }
    // NOTE(review): writes a debug JPEG for every frame (have_vehicle is
    // hardcoded true above) into a relative "draw_pose/" directory that must
    // already exist — leftover debug output, confirm before shipping.
    static int iframe = 0;
    ++ iframe;
    if (have_vehicle){
        cv::imwrite("draw_pose/" + std::to_string(iframe) + ".jpg", image_orig_);
    }
    // auto time_gather = std::chrono::high_resolution_clock::now();
    // auto time_tr  = std::chrono::duration_cast<std::chrono::microseconds>(time_toremove - time_trt_start).count();
    // auto time_g  = std::chrono::duration_cast<std::chrono::microseconds>(time_gather - time_toremove).count();
    // auto time_total  = std::chrono::duration_cast<std::chrono::microseconds>(time_gather - time_trt_start).count();
    // std::cout << "trt toremove:" << time_tr << ", gather:" << time_g << ", total:" << time_total << std::endl;
}

// CPU-side postprocess alternative: decodes the RAW (un-NMS'ed) output tensor
// (channels x anchors), does per-anchor argmax over 10 class scores, decodes
// center/size boxes and 8 keypoints, then runs OpenCV NMS and emits the top-k
// detections. Currently unused — process() calls postprocess() instead.
void PoseDetectorIhs::postprocess_cpu(std::vector<perception::camera::ObjectDetectInfoPtr>& objs, float score_thres)
{
    auto time_cpu_start = std::chrono::high_resolution_clock::now();
    objs.clear();
    auto num_channels = this->output_bindings[0].dims.d[1];
    auto num_anchors = this->output_bindings[0].dims.d[2];
    int orig_image_h = image_orig_.rows;
    int orig_image_w = image_orig_.cols;
    // Scale factor from original image to network input; `ratio` maps back.
    float r = std::min(1.0 * infer_h_ / orig_image_h, 1.0 * infer_w_ / orig_image_w);
    float ratio = 1 / r;
    int resize_h = orig_image_h * r;
    int resize_w = orig_image_w * r;
    bool have_vehicle = false;

    std::vector<cv::Rect> bboxes;
    std::vector<float> scores;
    std::vector<int> labels;
    std::vector<int> indices;
    std::vector<std::vector<float>> kpss;
    std::vector<CubeBox2f> cubeboxes;

    // Wrap the raw output buffer and transpose to anchors x channels so each
    // row holds one candidate: [4 box, 10 class scores, 16 keypoint coords].
    cv::Mat output = cv::Mat(num_channels, num_anchors, CV_32F,
        static_cast<float*>(this->host_ptrs[0]));
    output = output.t();
    // std::cout << "channel:" << num_channels << ", num_anchor:" << num_anchors << std::endl;
    // std::cout << "dw:" << dw << ", dh:" << dh << std::endl;
    for (int i = 0; i < num_anchors; i++) {
        float* row_ptr = output.row(i).ptr<float>();
        float* bboxes_ptr = row_ptr;
        float* scores_ptr = row_ptr + 4;
        float* kps_ptr = row_ptr + 4 + 10;
        CubeBox2f cubebox, cubebox_tmp;
        // Argmax over the 10 class scores.
        float max_score = -1;
        int max_cls = -1;
        for (int icls = 0; icls < 10; ++icls) {
            float score = scores_ptr[icls];
            // std::cout << "score:" << icls << "," << score << ", max:" << max_score << std::endl;
            if (score > max_score) {
                max_score = score;
                max_cls = icls;
            }
        }
        if (max_score > score_thres) {
            // NOTE(review): set for ANY detection, not just vehicle classes —
            // gates the debug imwrite at the bottom.
            have_vehicle = true;
            // std::cout << "bboxes:" << bboxes_ptr[0] << "," << bboxes_ptr[1] << "," << bboxes_ptr[2] << "," << bboxes_ptr[3] << std::endl;
            // Box is encoded as center (x, y) + size (w, h) in padded
            // network-input space; undo padding, then scale and clamp.
            float x = *bboxes_ptr++ - pad_w_;
            float y = *bboxes_ptr++ - pad_h_;
            float w = *bboxes_ptr++;
            float h = *bboxes_ptr;
            // std::cout << "ratio:" << ratio << ",x:" << x << ",y:" << y << ",w:" << w << ",h:" << h << std::endl;
            float x0 = std::min(std::max((x - 0.5f * w) * ratio, 0.f), 1.0f * orig_image_w);
            float y0 = std::min(std::max((y - 0.5f * h) * ratio, 0.f), 1.0f * orig_image_h);
            float x1 = std::min(std::max((x + 0.5f * w) * ratio, 0.f), 1.0f * orig_image_w);
            float y1 = std::min(std::max((y + 0.5f * h) * ratio, 0.f), 1.0f * orig_image_h);
            // std::cout << "xy01:" << x0 << ",y0:" << y0 << ",x1:" << x1 << ",y1:" << y1 << std::endl;
            cv::Rect_<float> bbox;
            bbox.x = x0;
            bbox.y = y0;
            bbox.width = x1 - x0;
            bbox.height = y1 - y0;
            // 8 cuboid corner keypoints, mapped back to original-image space
            // and clamped to the image bounds.
            std::vector<float> kps;
            for (int ik = 0; ik < 8; ++ik) {
                float kps_x = ratio * (kps_ptr[2 * ik] - pad_w_);
                float kps_y = ratio * (kps_ptr[2 * ik + 1] - pad_h_);
                kps_x = std::min(std::max(kps_x, 0.f), 1.0f * orig_image_w);
                kps_y = std::min(std::max(kps_y, 0.f), 1.0f * orig_image_h);
                kps.push_back(kps_x);
                kps.push_back(kps_y);
                cubebox_tmp.pts8[ik] = perception::camera::Point2f { kps_x, kps_y };
            }
            bboxes.push_back(bbox);
            // Translate the model's class id to the "air" taxonomy (see init()).
            int air_cls = hw2air_[max_cls];
            labels.push_back(air_cls);
            scores.push_back(max_score);
            kpss.push_back(kps);
            // Reordering of the 8 points into the "air" convention is disabled —
            // points are kept in model order.
            // cubebox.pts8[0] = cubebox_tmp.pts8[4];
            // cubebox.pts8[1] = cubebox_tmp.pts8[7];
            // cubebox.pts8[2] = cubebox_tmp.pts8[6];
            // cubebox.pts8[3] = cubebox_tmp.pts8[5];
            // cubebox.pts8[4] = cubebox_tmp.pts8[0];
            // cubebox.pts8[5] = cubebox_tmp.pts8[3];
            // cubebox.pts8[6] = cubebox_tmp.pts8[2];
            // cubebox.pts8[7] = cubebox_tmp.pts8[1];
            cubebox = cubebox_tmp;
            cubeboxes.push_back(cubebox);
        }
    }
    float iou_thres = 0.65f;
    int topk = 30;

    auto time_anchor_end = std::chrono::high_resolution_clock::now();
#ifdef BATCHED_NMS
    cv::dnn::NMSBoxesBatched(bboxes, scores, labels, score_thres, iou_thres, indices);
#else
    cv::dnn::NMSBoxes(bboxes, scores, score_thres, iou_thres, indices);
#endif
    auto time_nms_end = std::chrono::high_resolution_clock::now();

    // Emit at most `topk` survivors as ObjectDetectInfo objects.
    int cnt = 0;
    // std::cout << "bboxes.size:" << bboxes.size() << "indices:" << indices.size() << std::endl;
    for (auto& i : indices) {
        if (cnt >= topk) {
            break;
        }
        auto& bbox = bboxes[i];
        // std::cout << "xy:" << bbox.tl().x << "," << bbox.tl().y << "," << bbox.br().x << bbox.br().y << std::endl;
        int left = bbox.tl().x;
        int top = bbox.tl().y;

        // NOTE(review): active debug drawing on image_orig_ — remove for production.
        cv::rectangle(image_orig_, cv::Point { bbox.tl().x, bbox.tl().y }, cv::Point { bbox.br().x, bbox.br().y }, cv::Scalar { 0, 0, 255 }, 2);
        // cv::putText(image_orig_, std::to_string(labels[i]), cv::Point(left, top-20), cv::FONT_HERSHEY_SIMPLEX, 1, {0, 0, 255}, 2);
        // cv::putText(image_orig_, std::to_string(scores[i]), cv::Point(left, top+20), cv::FONT_HERSHEY_SIMPLEX, 1, {0, 0, 255}, 2);
        ObjectDetectInfoPtr pobj = std::make_shared<ObjectDetectInfo>();
        pobj->type_id = labels[i];
        pobj->type_id_confidence = scores[i];
        pobj->box = Box2f { perception::camera::Point2f { bbox.tl().x, bbox.tl().y },
            perception::camera::Point2f { bbox.br().x, bbox.br().y } };
        auto& cubebox = cubeboxes[i];
        pobj->pts8 = cubebox;
        pobj->is_truncated = TriStatus::UNKNOWN;
        // Bottom-face center: average of the first four cuboid corners.
        float bu = (cubebox.pts8[0].x + cubebox.pts8[1].x + cubebox.pts8[2].x + cubebox.pts8[3].x) / 4;
        float bv = (cubebox.pts8[0].y + cubebox.pts8[1].y + cubebox.pts8[2].y + cubebox.pts8[3].y) / 4;
        pobj->bottom_uv = perception::camera::Point2f { bu, bv };
        objs.push_back(pobj);
        cnt += 1;
        for (int icb = 0; icb < 8; ++icb){
            int x = cubebox.pts8[icb].x;
            int y = cubebox.pts8[icb].y;
            // cv::circle(image_orig_, cv::Point{x, y}, 3, cv::Scalar{255, 255, 0}, -1);
            // cv::putText(image_orig_, std::to_string(icb), cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.5, {0, 0, 255}, 1);
        }
    }
    // NOTE(review): writes a debug JPEG whenever anything was detected, into a
    // relative "draw_pose/" directory that must already exist.
    static int ifr = 0;
    ++ ifr;
    if (have_vehicle)
        cv::imwrite("draw_pose/" + std::to_string(ifr) + ".jpg", image_orig_);
    // auto time_cpu_end = std::chrono::high_resolution_clock::now();
    // auto time_anchor  = std::chrono::duration_cast<std::chrono::microseconds>(time_anchor_end - time_cpu_start);
    // auto time_nms  = std::chrono::duration_cast<std::chrono::microseconds>(time_nms_end - time_anchor_end);
    // auto time_gather  = std::chrono::duration_cast<std::chrono::microseconds>(time_cpu_end - time_nms_end);
    // auto time_total  = std::chrono::duration_cast<std::chrono::microseconds>(time_cpu_end - time_cpu_start);
    // std::cout << "time anchor:" << time_anchor.count() << ", nms:" << time_nms.count() \
    //         << ", gather:" << time_gather.count() << ", total:" << time_total.count() << std::endl;
}

// Run detection on one frame and fill `objs`.
// Returns 0 on success, -1 when the input image is empty.
int PoseDetectorIhs::process(const cv::Mat image, std::vector<ObjectDetectInfoPtr>& objs)
{
    if (image.empty()) {
        // Bug fix: the original `return false;` evaluated to 0, making an
        // empty-image failure indistinguishable from the success return below.
        return -1;
    }

    float score_thres = 0.5; // 0.25f;
    ++ infer_count_;
    // No defensive clone needed here: copy_from_mat() clones the frame
    // internally before any mutation (the original cloned twice).
    copy_from_mat(image);
    infer();
    postprocess(objs, score_thres);
    // postprocess_cpu(objs, score_thres);
    return 0;
}

// Return the process-wide detector, lazily constructing and initializing it
// on first use. NOTE(review): not synchronized — concurrent first calls could
// race on detector_; callers are presumed to serialize initialization.
std::shared_ptr<PoseDetectorIhs> PoseDetectorIhs::get_detector(const std::string& model_dir)
{
    if (detector_ != nullptr) {
        return detector_;
    }
    detector_ = std::shared_ptr<PoseDetectorIhs>(new PoseDetectorIhs());
    detector_->init(model_dir);
    return detector_;
}
