#include "yolov8.h"
#include <bm_wrapper.hpp>

#define USE_ASPECT_RATIO 1
#define DUMP_FILE 0
#define USE_MULTICLASS_NMS 0

// Per-class color palette used by draw_result() to tint masks and labels;
// indexed by class_id % 25. NOTE(review): whether these triples are consumed
// as RGB or BGR depends on the cv::Scalar usage downstream — confirm.
const std::vector<std::vector<int>> colors = {
    {255, 0, 0}, {255, 85, 0}, {255, 170, 0}, {255, 255, 0}, {170, 255, 0}, {85, 255, 0}, {0, 255, 0}, {0, 255, 85}, {0, 255, 170}, {0, 255, 255}, {0, 170, 255}, {0, 85, 255}, {0, 0, 255}, {85, 0, 255}, {170, 0, 255}, {255, 0, 255}, {255, 0, 170}, {255, 0, 85}, {255, 0, 0}, {255, 0, 255}, {255, 85, 255}, {255, 170, 255}, {255, 255, 255}, {170, 255, 255}, {85, 255, 255}};

/**
 * Initialize the detector: load the bmodel, query the network geometry and
 * pre-allocate all device-side input/output buffer rings.
 *
 * @param dev_id                 TPU device id to bind to
 * @param bmodel_file            path of the detection bmodel
 * @param tpu_kernel_module_path unused here (kept for interface compatibility)
 * @param mask_bmodel_path       unused here (see tpumask_Init)
 * @return 0 on success, -1 if the model batch size is not 1
 */
int YoloV8::init(int dev_id, std::string bmodel_file, std::string tpu_kernel_module_path, std::string mask_bmodel_path)
{
    bm_status_t ret;
    // create device handle
    BMNNHandlePtr handle = std::make_shared<BMNNHandle>(dev_id);
    std::cout << "set device id: " << dev_id << std::endl;
    // load bmodel
    m_bmContext = std::make_shared<BMNNContext>(handle, bmodel_file.c_str());
    // get network
    m_bmNetwork = m_bmContext->network(0);
    handle_ = m_bmContext->handle();
    // this pipeline only supports single-batch models
    max_batch = m_bmNetwork->maxBatch();
    if (max_batch != 1)
    {
        std::cout << "Error! Only support 1 batch bmodel!" << std::endl;
        return -1;
    }
    // network input geometry (NCHW)
    auto input_tensor = m_bmNetwork->inputTensor(0);
    m_net_h = input_tensor->get_shape()->dims[2];
    m_net_w = input_tensor->get_shape()->dims[3];
    std::cout << "m_net_h = " << m_net_h << ", m_net_w = " << m_net_w << std::endl;
    // get input number
    input_num = m_bmNetwork->m_netinfo->input_num;
    assert(input_num > 0);
    // Bug fix: the original also streamed `min_dim` into this message before
    // min_dim was assigned (it is only set from the output shape below),
    // printing an indeterminate value.
    std::cout << "input_num = " << input_num << std::endl;

    // get output number
    output_num = m_bmNetwork->outputTensorNum();
    assert(output_num > 0);
    auto out_tensor = m_bmNetwork->outputTensor(0);
    min_dim = out_tensor->get_shape()->num_dims;
    std::cout << "output_num = " << output_num << ", min_dim = " << min_dim << std::endl;

    // preprocess init: match the bm_image dtype to the model's input dtype
    bm_image_data_format_ext img_dtype = DATA_TYPE_EXT_FLOAT32; // float 32bit
    if (input_tensor->get_dtype() == BM_INT8)
    {
        img_dtype = DATA_TYPE_EXT_1N_BYTE_SIGNED; // int 8bit
    }
    // some API only accept bm_image whose stride is aligned to 64
    int aligned_net_w = FFALIGN(m_net_w, 64);
    int strides[3] = {aligned_net_w, aligned_net_w, aligned_net_w};

    ret = bm_image_create(m_bmContext->handle(), m_net_h, m_net_w, FORMAT_RGB_PLANAR, DATA_TYPE_EXT_1N_BYTE,
                          &resized_img_, strides);
    assert(BM_SUCCESS == ret);

    ret = bm_image_alloc_contiguous_mem(1, &resized_img_);
    assert(BM_SUCCESS == ret);

    // Ring of normalized input buffers: queue depth plus two in flight.
    input_mem_idx_ = -1;
    output_mem_idx_ = -1;
    int buffer_num = que_size_ + 2;
    converto_imgs_.resize(buffer_num);
    input_mem_buffer_.resize(buffer_num);
    for (int i = 0; i < buffer_num; i++)
    {
        ret_ = bm_image_create(m_bmContext->handle(), m_net_h, m_net_w, FORMAT_RGB_PLANAR, img_dtype, &converto_imgs_[i]);
        assert(ret_ == BM_SUCCESS);
        ret_ = bm_image_alloc_contiguous_mem(1, &converto_imgs_[i]);
        assert(ret_ == BM_SUCCESS);
        bm_image_get_contiguous_device_mem(1, &converto_imgs_[i], &input_mem_buffer_[i]);
    }

    // Matching ring of output buffers, one device block per output tensor.
    output_mem_buffer_.resize(buffer_num, std::vector<bm_device_mem_t>(output_num));
    for (int i = 0; i < buffer_num; i++)
    {
        for (int j = 0; j < output_num; j++)
        {
            bm_tensor_t out_tensor = m_bmNetwork->outputTensor(j)->get_tensor();
            uint64_t bytes_num = bmrt_shape_count(&(out_tensor.shape)) * bmruntime::ByteSize(out_tensor.dtype);
            ret_ = bm_malloc_device_byte(handle_, &output_mem_buffer_[i][j], bytes_num);
            assert(ret_ == BM_SUCCESS);
        }
    }
    // 5. converto normalization: fold the quantization scale and the /255
    // pixel normalization into one linear transform per channel.
    float input_scale = input_tensor->get_scale();
    input_scale = input_scale * 1.0 / 255.f;
    converto_attr_.alpha_0 = input_scale;
    converto_attr_.beta_0 = 0;
    converto_attr_.alpha_1 = input_scale;
    converto_attr_.beta_1 = 0;
    converto_attr_.alpha_2 = input_scale;
    converto_attr_.beta_2 = 0;
    std::cout << "yolov8 initial success!" << std::endl;
    return 0;
}

// Release hook; currently a no-op.
// NOTE(review): init() allocates device memory (resized_img_, converto_imgs_,
// output_mem_buffer_) that is not freed here — presumably released elsewhere
// (e.g. a destructor). Confirm, otherwise this leaks device memory.
int YoloV8::release()
{
    return 0;
}

/**
 * Initialize the TPU-side mask sub-network and enable the TPU mask path.
 *
 * Loads a second bmodel into its own bmruntime and caches its geometry
 * (mask proto size, batch capacity, coefficient length).
 *
 * @param bmodel_file path of the mask bmodel
 * @param dev_id      TPU device id
 */
void YoloV8::tpumask_Init(std::string bmodel_file, int dev_id)
{
    tpu_post = true;
    // 1. get handle
    assert(BM_SUCCESS == bm_dev_request(&tpu_mask_handle, dev_id));

    // 2. create bmrt and load bmodel
    bmrt = bmrt_create(tpu_mask_handle);
    if (!bmrt_load_bmodel(bmrt, bmodel_file.c_str()))
    {
        std::cout << "load bmodel(" << bmodel_file << ") failed" << std::endl;
        // Bug fix: the original fell through after a failed load; the queries
        // below would then index an empty network_names vector (undefined
        // behavior). Disable the TPU mask path and bail out instead.
        tpu_post = false;
        return;
    }

    // 3. get network names from bmodel
    const char **names = nullptr;
    int num = bmrt_get_network_number(bmrt); // number of networks in this bmruntime
    if (num > 1)
    {
        std::cout << "This bmodel have " << num << " networks, and this program will only take network 0." << std::endl;
    }

    bmrt_get_network_names(bmrt, &names);
    for (int i = 0; i < num; ++i)
    {
        network_names.emplace_back(names[i]); // [0]: yolov8s
    }
    free(names); // bmrt_get_network_names allocates with malloc

    // 4. get netinfo by netname
    netinfo = bmrt_get_network_info(bmrt, network_names[0].c_str());
    if (netinfo->stage_num > 1)
    { // stage_num = 1
        std::cout << "This bmodel have " << netinfo->stage_num << " stages, and this program will only take stage 0." << std::endl;
    }

    // 5. initialize parameters.
    m_tpumask_net_h = netinfo->stages[0].input_shapes[1].dims[2]; // 160
    m_tpumask_net_w = netinfo->stages[0].input_shapes[1].dims[3]; // 160

    // Coefficient length of input 0 must match the proto channel count of input 1.
    assert(netinfo->stages[0].input_shapes[1].dims[1] == netinfo->stages[0].input_shapes[0].dims[2]);
    tpu_mask_num = netinfo->stages[0].input_shapes[0].dims[1]; // 32
    mask_len = netinfo->stages[0].input_shapes[1].dims[1];     // 32
    std::cout << "tpu_mask_num = " << tpu_mask_num << ", mask_len = " << mask_len << std::endl;
    std::cout << "tpu_mask initial success!" << std::endl;
}

// Attach an external TimeStamp profiler; LOG_TS() calls record into it.
void YoloV8::enableProfile(TimeStamp *ts)
{
    m_ts = ts;
}

// Returns the model's maximum batch size (init() enforces this to be 1).
int YoloV8::batch_size()
{
    return max_batch;
};

/**
 * Preprocess one frame for inference.
 *
 * Letterbox-resizes the source image into the network input size (padding
 * with gray 114 when USE_ASPECT_RATIO is on), then applies the linear
 * normalization (converto_attr_) into the next free device input buffer and
 * queues that buffer for the forward thread.
 *
 * @param img shared pointer to the source bm_image
 * @return 0 on success (failures abort via assert)
 */
int YoloV8::preprocess(std::shared_ptr<bm_image> img)
{
    // Index of the next free device-side normalized-input buffer.
    int idx = get_input_mem_id();
    bm_image *converto_img = &converto_imgs_[idx];
    bm_image *src_img = img.get();
    bm_image *resized_img = &resized_img_;

    // resize image
#if USE_ASPECT_RATIO
    // Letterbox: preserve aspect ratio, center the scaled image, pad the rest.
    bool isAlignWidth = false;
    float ratio = get_aspect_scaled_ratio(src_img->width, src_img->height, m_net_w, m_net_h, &isAlignWidth);
    // std::cout << "ratio = " << ratio << std::endl;
    bmcv_padding_atrr_t padding_attr;
    memset(&padding_attr, 0, sizeof(padding_attr));
    padding_attr.dst_crop_sty = 0;
    padding_attr.dst_crop_stx = 0;
    padding_attr.padding_b = 114;
    padding_attr.padding_g = 114;
    padding_attr.padding_r = 114;
    padding_attr.if_memset = 1;
    if (isAlignWidth)
    {
        // Width fills the net; pad top/bottom equally.
        padding_attr.dst_crop_h = src_img->height * ratio;
        padding_attr.dst_crop_w = m_net_w;

        int ty1 = (int)((m_net_h - padding_attr.dst_crop_h) / 2);
        padding_attr.dst_crop_sty = ty1;
        padding_attr.dst_crop_stx = 0;
    }
    else
    {
        // Height fills the net; pad left/right equally.
        padding_attr.dst_crop_h = m_net_h;
        padding_attr.dst_crop_w = src_img->width * ratio;

        int tx1 = (int)((m_net_w - padding_attr.dst_crop_w) / 2);
        padding_attr.dst_crop_sty = 0;
        padding_attr.dst_crop_stx = tx1;
    }

    bmcv_rect_t crop_rect{0, 0, src_img->width, src_img->height};
    ret_ = bmcv_image_vpp_convert_padding(handle_, 1, *src_img, resized_img,
                                          &padding_attr, &crop_rect);
#else
    // Plain stretch-resize without keeping the aspect ratio.
    ret_ = bmcv_image_vpp_convert(handle_, 1, *src_img, resized_img);
#endif
    assert(BM_SUCCESS == ret_);
    // 2. converto: apply the per-channel scale/offset normalization.
    ret_ = bmcv_image_convert_to(handle_, 1, converto_attr_, resized_img, converto_img);
    CV_Assert(ret_ == 0);
    // Hand the filled input buffer to the forward thread.
    pre_forward_que_.push_back(&input_mem_buffer_[idx]);

    return 0;
}

/**
 * Inference thread main loop: pop a preprocessed input buffer, run the
 * network into the next output buffer slot, and hand the result to the
 * post-processing thread. Runs until stop_flag_ is set.
 */
void YoloV8::forward_thread_dowork()
{
    while (!stop_flag_)
    {
        bm_device_mem_t *in_mem = pre_forward_que_.pop_front();
        // Robustness fix: when stop() wakes the queue, pop_front() may return
        // without yielding an item — presumably nullptr; guard so shutdown
        // cannot dereference it. TODO confirm the queue's stop semantics.
        if (in_mem == nullptr)
            continue; // loop re-checks stop_flag_
        bm_device_mem_t *out_mem = output_mem_buffer_[get_output_mem_id()].data();
        auto ret = m_bmNetwork->forward(in_mem, out_mem);
        assert(ret == 0);
        forward_post_que_.push_back(out_mem);
    }
}

/**
 * Post-processing thread main loop.
 *
 * For each forwarded frame: decodes the detection tensor into candidate
 * boxes, runs NMS, maps boxes back to source-image coordinates, computes
 * instance masks (on TPU via getmask_tpu when tpu_post is set, otherwise on
 * CPU via get_mask), and pushes the finished frame to out_que_. Runs until
 * stop_flag_ is set.
 */
void YoloV8::post_thread_dowork()
{
    YoloV8BoxVec yolobox_vec;
    while (!stop_flag_)
    {
        // Bind the network's output tensor headers to the device buffers
        // produced by the forward thread for this frame.
        std::vector<std::shared_ptr<BMNNTensor>> outputTensors(output_num);
        for (int i = 0; i < output_num; i++)
        { // output_num = 2  [0]:mask_info   [1]:output1
            outputTensors[i] = m_bmNetwork->outputTensor(i);
        }
        bm_device_mem_t *dev_mem = forward_post_que_.pop_front();
        for (int j = 0; j < output_num; j++)
        {
            outputTensors[j]->set_device_mem(&dev_mem[j]);
        }

        // post0 init: pair this output with the oldest queued frame — FIFO
        // order must match the preprocess/forward order for frames to line up.
        std::unique_lock<std::mutex> lock(global_frame_que_mtx_);
        std::shared_ptr<FrameInfoDetectYolov8Seg> data_ptr = global_frame_que_.front();
        global_frame_que_.pop_front();
        lock.unlock();

        yolobox_vec.clear();
        auto &frame = data_ptr->image_ptr;
        int frame_width = frame->width;
        int frame_height = frame->height;

        int min_idx = 0;
        int box_num = 0;

        auto out_tensor = outputTensors[min_idx];
        auto out_tensor1 = outputTensors[1];

        // Wrap the mask-proto output in a 4-D cv::Mat header (no copy).
        float *out1 = out_tensor1->get_cpu_data();
        const bm_shape_t *shape1 = out_tensor1->get_shape();
        int dims = 4;
        int sizes[] = {shape1->dims[0], shape1->dims[1], shape1->dims[2], shape1->dims[3]};
        // NOTE(review): the data pointer is offset by dims[1]*dims[2]*dims[3]
        // elements — a full batch's worth; with dims[0]==1 that points past
        // the tensor. Confirm the layout get_cpu_data() returns here.
        cv::Mat output1(dims, sizes, CV_32F, out1 + shape1->dims[2] * shape1->dims[1] * shape1->dims[3]);

#if USE_ASPECT_RATIO

        // Recompute the letterbox transform used in preprocess() so boxes can
        // be mapped back to source-image coordinates.
        bool isAlignWidth = false;
        float ratio = get_aspect_scaled_ratio(frame_width, frame_height, m_net_w, m_net_h, &isAlignWidth);
        int tx1 = 0, ty1 = 0;
        if (isAlignWidth)
        {
            ty1 = (int)((m_net_h - frame_height * ratio) / 2); // vertical padding
        }
        else
        {
            tx1 = (int)((m_net_w - frame_width * ratio) / 2); // horizontal padding
        }
        ImageInfo para = {cv::Size(frame_width, frame_height), {ratio, ratio, (double)tx1, (double)ty1}};
#else
        float ratio = 1;
        int tx1 = 0, ty1 = 0;
        ImageInfo para = {cv::Size(frame_width, frame_height),
                          {m_net_w / frame_width, m_net_h / frame_height, tx1, ty1}};
#endif
        // Detection tensor layout: dims[1] = 4 (box) + classes + mask_len,
        // dims[2] = number of anchor positions (feat_num).
        m_class_num = out_tensor->get_shape()->dims[1] - mask_len - 4;
        int feat_num = out_tensor->get_shape()->dims[2];
        int nout = m_class_num + mask_len + 4;
        float *output_data = nullptr;

        // post2 get output (box_num is 0 here, so the assert is trivially true)
        assert(box_num == 0 || box_num == out_tensor->get_shape()->dims[1]);
        box_num = out_tensor->get_shape()->dims[1];
        output_data = (float *)out_tensor->get_cpu_data();

        // Candidates
        //"post 2: get detections matrix nx6 (xyxy, conf, cls, mask)");
        // Data is channel-major: element (channel c, anchor i) lives at
        // output_data[i + c * feat_num].
        float *cls_conf = output_data + 4 * feat_num;
        for (int i = 0; i < feat_num; i++)
        {
            // Best class score for this anchor.
            float max_value = 0.0;
            int max_index = 0;
            for (int j = 0; j < m_class_num; j++)
            {
                float cur_value = cls_conf[i + j * feat_num];
                if (cur_value > max_value)
                {
                    max_value = cur_value;
                    max_index = j;
                }
            }

            if (max_value >= m_confThreshold)
            {
                YoloV8Box box;
                box.score = max_value;
                box.class_id = max_index;
                // Per-class coordinate offset so NMS never merges boxes of
                // different classes (undone after NMS below).
                int c = box.class_id * max_wh;
                float centerX = output_data[i + 0 * feat_num];
                float centerY = output_data[i + 1 * feat_num];
                float width = output_data[i + 2 * feat_num];
                float height = output_data[i + 3 * feat_num];

                box.x1 = centerX - width / 2 + c;
                box.y1 = centerY - height / 2 + c;
                box.x2 = box.x1 + width;
                box.y2 = box.y1 + height;
                // Trailing mask_len channels are the mask coefficients.
                for (int k = 0; k < mask_len; k++)
                {
                    box.mask.push_back(output_data[i + (nout - mask_len + k) * feat_num]);
                }
                yolobox_vec.push_back(box);
            }
        }

        //"post 3: nms"
        NMS(yolobox_vec, m_nmsThreshold);

        // Keep at most max_det boxes; NMS leaves the vector sorted by
        // ascending score, so dropping from the front discards the weakest.
        if (yolobox_vec.size() > max_det)
        {
            yolobox_vec.erase(yolobox_vec.begin(), yolobox_vec.begin() + (yolobox_vec.size() - max_det));
        }

        // Undo the per-class NMS offset.
        for (int i = 0; i < yolobox_vec.size(); i++)
        {
            int c = yolobox_vec[i].class_id * max_wh;
            yolobox_vec[i].x1 = yolobox_vec[i].x1 - c;
            yolobox_vec[i].y1 = yolobox_vec[i].y1 - c;
            yolobox_vec[i].x2 = yolobox_vec[i].x2 - c;
            yolobox_vec[i].y2 = yolobox_vec[i].y2 - c;
        }

        YoloV8BoxVec yolobox_vec_tmp;
        // Map boxes from letterbox (net) coordinates back to the source image.
        // NOTE(review): only class ids 3 and 4 (drill pipe / drill rig) are
        // mapped; other classes keep net coordinates — confirm intended.
        for (int i = 0; i < yolobox_vec.size(); i++)
        {
            if ((yolobox_vec[i].class_id == 3) || (yolobox_vec[i].class_id == 4))
            {
                float centerx = ((yolobox_vec[i].x2 + yolobox_vec[i].x1) / 2 - tx1) / ratio;
                float centery = ((yolobox_vec[i].y2 + yolobox_vec[i].y1) / 2 - ty1) / ratio;
                float width = (yolobox_vec[i].x2 - yolobox_vec[i].x1) / ratio;
                float height = (yolobox_vec[i].y2 - yolobox_vec[i].y1) / ratio;
                yolobox_vec[i].x1 = centerx - width / 2;
                yolobox_vec[i].y1 = centery - height / 2;
                yolobox_vec[i].x2 = centerx + width / 2;
                yolobox_vec[i].y2 = centery + height / 2;
            }
        }
        clip_boxes(yolobox_vec, frame_width, frame_height);

//"post 4: get mask");
#if 1
        if (tpu_post)
        {
            // TPU mask path: crop geometry of the un-padded proto region.
            cv::Vec4f trans = para.trans;
            int r_x = floor(trans[2] / m_net_w * shape1->dims[3]);
            int r_y = floor(trans[3] / m_net_h * shape1->dims[2]);
            int r_w = shape1->dims[3] - 2 * r_x;
            int r_h = shape1->dims[2] - 2 * r_y;
            r_w = MAX(r_w, 1);
            r_h = MAX(r_h, 1);
            struct Paras paras = {r_x, r_y, r_w, r_h, para.raw_size.width, para.raw_size.height};
            YoloV8BoxVec yolobox_valid_vec;
            for (int i = 0; i < yolobox_vec.size(); i++)
            {
                // Only process drill pipe and drill rig (class ids 3 and 4)
                if ((yolobox_vec[i].class_id == 3) || (yolobox_vec[i].class_id == 4))
                {
                    // Skip degenerate (sub-pixel) boxes.
                    if (yolobox_vec[i].x2 > yolobox_vec[i].x1 + 1 && yolobox_vec[i].y2 > yolobox_vec[i].y1 + 1)
                    {
                        yolobox_valid_vec.push_back(yolobox_vec[i]);
                    }
                }
            }
            if (yolobox_valid_vec.size() > 0)
            {
                // The mask network handles tpu_mask_num boxes per launch.
                int mask_times = (yolobox_valid_vec.size() + tpu_mask_num - 1) / tpu_mask_num;
                for (int i = 0; i < mask_times; i++)
                {
                    int start = i * tpu_mask_num;
                    auto input_tensor1 = out_tensor1->get_tensor();
                    getmask_tpu(yolobox_valid_vec, start, input_tensor1, paras, yolobox_vec_tmp);
                }
            }
        }
        else
        {
            // CPU mask path: per-box coefficient x proto matmul.
            for (int i = 0; i < yolobox_vec.size(); i++)
            {
                // Only process drill pipe and drill rig (class ids 3 and 4)
                if ((yolobox_vec[i].class_id == 3) || (yolobox_vec[i].class_id == 4))
                {
                    if (yolobox_vec[i].x2 > yolobox_vec[i].x1 + 1 && yolobox_vec[i].y2 > yolobox_vec[i].y1 + 1)
                    {
                        get_mask(cv::Mat(yolobox_vec[i].mask).t(), output1, para,
                                 cv::Rect{yolobox_vec[i].x1, yolobox_vec[i].y1, yolobox_vec[i].x2 - yolobox_vec[i].x1,
                                          yolobox_vec[i].y2 - yolobox_vec[i].y1},
                                 yolobox_vec[i].mask_img,
                                 yolobox_vec[i]);
                        yolobox_vec[i].class_name = std::string(m_class_names[yolobox_vec[i].class_id]);
                        yolobox_vec_tmp.push_back(yolobox_vec[i]);
                    }
                }
            }
        }
#endif
        // Drawing the results here took too long, so it is no longer done in
        // this thread (left commented out for reference).
        // cv::Mat img;
        // int ret = cv::bmcv::toMAT(data_ptr->image_ptr.get(), img, true);
        // draw_result(img, yolobox_vec_tmp);

        // bm_image bmImage;
        // bm_status_t ret1 = cv::bmcv::toBMI(img, &bmImage, true);
        // if (ret1 != BM_SUCCESS)
        // {
        //     std::cout << "Error! bm_image_from_mat: " << ret1 << std::endl;
        // }
        // data_ptr->image_ptr = std::make_shared<bm_image>(bmImage);
        data_ptr->boxs_vec = yolobox_vec_tmp;
        data_ptr->is_mask = true;
        out_que_.push_back(data_ptr);
    }
}

/**
 * @brief Compute instance masks on the TPU for a batch of boxes.
 *
 * Runs the mask sub-network (coefficients x protos) for up to tpu_mask_num
 * boxes starting at `start`, crops each resulting mask plane to its box,
 * binarizes it with m_confThreshold, derives angle/length/width, and appends
 * the finished boxes to yolobox_vec_tmp.
 *
 * @param yolobox_vec     detections (with mask coefficients) to process
 * @param start           index of the first box in this batch
 * @param input_tensor1   proto-mask tensor produced by the main network
 * @param paras           proto crop geometry and original image size
 * @param yolobox_vec_tmp output: boxes with mask_img/angle/length filled in
 */
void YoloV8::getmask_tpu(YoloV8BoxVec &yolobox_vec,
                         int start,
                         const bm_tensor_t &input_tensor1,
                         Paras &paras,
                         YoloV8BoxVec &yolobox_vec_tmp)
{
    int mask_height = m_tpumask_net_h;
    int mask_width = m_tpumask_net_w;
    // One launch handles at most tpu_mask_num boxes.
    int actual_mask_num = MIN(tpu_mask_num, (int)yolobox_vec.size() - start);

    // Patch the declared input shape to this batch's actual size.
    netinfo->stages[0].input_shapes[0].dims[0] = 1;
    netinfo->stages[0].input_shapes[0].dims[1] = actual_mask_num;
    netinfo->stages[0].input_shapes[0].dims[2] = mask_len;

    // 1. prepare bmodel inputs
    LOG_TS(m_ts, "get_mask_tpu: prepare");
    bm_tensor_t input_tensor0;
    assert(true == bmrt_tensor(&input_tensor0, bmrt, netinfo->input_dtypes[0], netinfo->stages[0].input_shapes[0]));
    for (int i = start; i < start + actual_mask_num; i++)
    {
        // Copy each box's mask_len float coefficients into its batch slot.
        // Consistency fix: mask_len * sizeof(float) replaces hard-coded 32 * 4.
        CV_Assert(BM_SUCCESS == bm_memcpy_s2d_partial_offset(tpu_mask_handle, input_tensor0.device_mem, reinterpret_cast<void *>(yolobox_vec[i].mask.data()), mask_len * sizeof(float), mask_len * sizeof(float) * (i - start)));
    }

    std::vector<bm_tensor_t> input_tensors = {input_tensor0, input_tensor1};
    std::vector<bm_tensor_t> output_tensors;
    LOG_TS(m_ts, "get_mask_tpu: prepare");

    // 2. run bmodel
    LOG_TS(m_ts, "get_mask_tpu: forward");

    output_tensors.resize(netinfo->output_num); // 1
    bool ok = bmrt_launch_tensor(bmrt, netinfo->name, input_tensors.data(), netinfo->input_num, output_tensors.data(), netinfo->output_num);

    assert(true == ok);
    assert(BM_SUCCESS == bm_thread_sync(tpu_mask_handle));
    bm_free_device(tpu_mask_handle, input_tensors[0].device_mem);
    LOG_TS(m_ts, "get_mask_tpu: forward");

    // 3. get outputs
    LOG_TS(m_ts, "get_mask_tpu: get_output");
    bm_tensor_t output_tensor = output_tensors[0];
    // Bug fix: the original used a variable-length stack array (non-standard
    // C++ and a stack-overflow risk for large masks) and discarded the copy
    // status; use a heap buffer sized from the tensor and check the result.
    std::vector<float> output0(bmrt_tensor_bytesize(&output_tensor) / sizeof(float));
    CV_Assert(BM_SUCCESS == bm_memcpy_d2s_partial(tpu_mask_handle, output0.data(), output_tensor.device_mem, bmrt_tensor_bytesize(&output_tensor)));

    for (size_t i = 0; i < output_tensors.size(); i++)
    {
        bm_free_device(tpu_mask_handle, output_tensors[i].device_mem);
    }
    LOG_TS(m_ts, "get_mask_tpu: get_output");

    // 4. crop + mask
    LOG_TS(m_ts, "get_mask_tpu: crop+mask");
    for (int i = 0; i < actual_mask_num; i++)
    {
        LOG_TS(m_ts, "post 4-0: get single mask");
        int yi = start + i;
        // View of this box's mask plane; crop to the un-padded proto region
        // and scale back to the original image size.
        cv::Mat temp_mask(mask_height, mask_width, CV_32FC1, output0.data() + i * mask_height * mask_width);
        cv::Mat masks_feature = temp_mask(cv::Rect(paras.r_x, paras.r_y, paras.r_w, paras.r_h));
        cv::Mat mask;
        cv::resize(masks_feature, mask, cv::Size(paras.width, paras.height));
        // crop + mask: extract the box region and binarize with m_confThreshold
        cv::Rect bound = cv::Rect{yolobox_vec[yi].x1, yolobox_vec[yi].y1, yolobox_vec[yi].x2 - yolobox_vec[yi].x1, yolobox_vec[yi].y2 - yolobox_vec[yi].y1};
        yolobox_vec[yi].mask_img = mask(bound) > m_confThreshold;
        LOG_TS(m_ts, "post 4-0: get single mask");
        LOG_TS(m_ts, "post 4-1: get angle and length");

        get_mask_angle_and_length(yolobox_vec[yi].mask_img, yolobox_vec[yi]);
        yolobox_vec[yi].class_name = std::string(m_class_names[yolobox_vec[yi].class_id]);

        LOG_TS(m_ts, "post 4-1: get angle and length");

        yolobox_vec_tmp.push_back(yolobox_vec[yi]);
    }
    LOG_TS(m_ts, "get_mask_tpu: crop+mask");
}
/**
 * @brief CPU fallback: compute the output mask for one detection box.
 *
 * Crops the un-padded region of the proto tensor, multiplies it by the box's
 * mask coefficients, resizes the result to the original image size, and
 * binarizes the box region. Also fills the box's angle/length fields.
 *
 * @param mask_info 1 x mask_len row of mask coefficients for this box
 * @param mask_data 4-D proto tensor (1 x mask_len x H/4 x W/4)
 * @param para      letterbox transform and original image size
 * @param bound     box rectangle (original-image coordinates)
 * @param mast_out  output binary mask for the box region
 * @param yolobox   box whose angle/length/width get filled in
 */
void YoloV8::get_mask(const cv::Mat &mask_info,
                      const cv::Mat &mask_data,
                      const ImageInfo &para,
                      cv::Rect bound,
                      cv::Mat &mast_out,
                      YoloV8Box &yolobox)
{
    LOG_TS(m_ts, "post 4-0: get single mask");
    // Map the letterbox padding into proto (net/4) coordinates.
    cv::Vec4f trans = para.trans;
    int r_x = floor(trans[2] / m_net_w * (m_net_w / 4));
    int r_y = floor(trans[3] / m_net_h * (m_net_h / 4));
    int r_w = (m_net_w / 4) - 2 * r_x;
    int r_h = (m_net_h / 4) - 2 * r_y;
    r_w = MAX(r_w, 1);
    r_h = MAX(r_h, 1);

    std::vector<cv::Range> roi_rangs = {cv::Range(0, 1), cv::Range::all(), cv::Range(r_y, r_h + r_y),
                                        cv::Range(r_x, r_w + r_x)};
    cv::Mat temp_mask = mask_data(roi_rangs).clone(); // crop the un-padded proto region
    // Consistency fix: use mask_len (asserted == 32 in tpumask_Init) instead
    // of the hard-coded literal 32.
    cv::Mat protos = temp_mask.reshape(0, {mask_len, r_w * r_h});
    cv::Mat matmul_res = (mask_info * protos); // coefficients x protos
    cv::Mat masks_feature = matmul_res.reshape(1, {r_h, r_w});
    cv::Mat mask;
    resize(masks_feature, mask, cv::Size(para.raw_size.width, para.raw_size.height));

    // Extract the box region and binarize. Consistency fix: the TPU path
    // (getmask_tpu) thresholds with m_confThreshold; the original used
    // m_nmsThreshold here, making the two paths disagree.
    mast_out = mask(bound) > m_confThreshold;

    LOG_TS(m_ts, "post 4-0: get single mask");
    LOG_TS(m_ts, "post 4-1: get angle and length");

    get_mask_angle_and_length(mast_out, yolobox);
    LOG_TS(m_ts, "post 4-1: get angle and length");
}
// Perpendicular distance from a point to the infinite line through
// lineStart and lineEnd. Bug fix: the original divided by zero when
// lineStart == lineEnd; that degenerate case now falls back to the
// point-to-point distance.
double pointToLineDistance(const cv::Point& point, const cv::Point& lineStart, const cv::Point& lineEnd) {
    double dx = lineEnd.x - lineStart.x;
    double dy = lineEnd.y - lineStart.y;
    double len = std::sqrt(dx * dx + dy * dy);
    double dxx = point.x - lineStart.x;
    double dyy = point.y - lineStart.y;
    if (len == 0.0)
        return std::sqrt(dxx * dxx + dyy * dyy); // degenerate "line"
    double cross = dxx * dy - dyy * dx;          // 2x area of the triangle
    return std::abs(cross) / len;
}
// Width of a polygon measured as the largest perpendicular distance from any
// of its vertices to the axis defined by (lineStart, lineEnd).
double calculatePolygonWidth(const std::vector<cv::Point>& polygon, const cv::Point& lineStart, const cv::Point& lineEnd) {
    double widest = 0.0;
    for (const cv::Point& vertex : polygon) {
        const double d = pointToLineDistance(vertex, lineStart, lineEnd);
        if (d > widest)
            widest = d;
    }
    return widest;
}

/**
 * Derive geometry from a binary mask: the angle and length of the line
 * between the two most distant mask pixels (via the convex hull) and the
 * mask's width perpendicular to that line. Results are written into yolobox.
 *
 * @param mask    single-channel 8-bit binary mask (non-zero = foreground)
 * @param yolobox box whose angle/length/width/end-points/hull get filled in;
 *                on failure only angle and length are reset to 0
 */
void YoloV8::get_mask_angle_and_length(const cv::Mat &mask, YoloV8Box &yolobox)
{
    double angle = 0;
    double length = 0;
    double width = 0.0;

    // Collect non-zero pixel coordinates. Idiom fix: cv::findNonZero replaces
    // the hand-rolled per-pixel double loop (mask is CV_8U, produced by a
    // `>` comparison upstream).
    std::vector<cv::Point> none_zero_points;
    if (!mask.empty())
    {
        cv::findNonZero(mask, none_zero_points);
    }
    // Need at least two points to define a direction.
    if (none_zero_points.size() < 2)
    {
        std::cerr << "mask has no non-zero pixels" << std::endl;
        yolobox.angle = angle;
        yolobox.length = length;
        return;
    }
    // Convex hull (indices into none_zero_points) — the furthest pair of any
    // point set always lies on its hull, so this shrinks the O(n^2) search.
    std::vector<int> hullIndices;
    cv::convexHull(none_zero_points, hullIndices, false);
    std::vector<cv::Point> hullPoints;
    hullPoints.reserve(hullIndices.size());
    for (size_t i = 0; i < hullIndices.size(); ++i)
    {
        hullPoints.push_back(none_zero_points[hullIndices[i]]);
    }
    // Furthest pair defines the principal axis: its angle and length.
    std::pair<cv::Point, cv::Point> furthestPair = findFurthestPair(hullPoints);
    angle = atan2(furthestPair.second.y - furthestPair.first.y, furthestPair.second.x - furthestPair.first.x);
    angle = angle * 180 / CV_PI;
    length = sqrt(pow((furthestPair.second.x - furthestPair.first.x), 2) + pow((furthestPair.second.y - furthestPair.first.y), 2));
    width = calculatePolygonWidth(hullPoints, furthestPair.first, furthestPair.second);

    yolobox.left_center_point = furthestPair.first;
    yolobox.right_center_point = furthestPair.second;
    yolobox.angle = angle;
    yolobox.length = length;
    yolobox.width = width;
    yolobox.hullPoints = hullPoints;
    return;
}


/**
 * Brute-force search for the two most distant points in a set.
 *
 * Bug fix: the original seeded its best-distance with
 * std::numeric_limits<double>::min() — the smallest POSITIVE double, not the
 * lowest value — so a pair at distance 0 (duplicate points) could never be
 * selected and default-constructed points were returned. Seeding with -1 and
 * comparing squared distances fixes that and drops the per-pair sqrt.
 *
 * @param points candidate points (expects at least two; otherwise returns
 *               default-constructed points)
 * @return the pair with maximal Euclidean distance
 */
std::pair<cv::Point, cv::Point> YoloV8::findFurthestPair(const std::vector<cv::Point> &points)
{
    double maxSqDist = -1.0;
    cv::Point p1, p2;

    for (size_t i = 0; i < points.size(); ++i)
    {
        for (size_t j = i + 1; j < points.size(); ++j)
        {
            double dx = points[i].x - points[j].x;
            double dy = points[i].y - points[j].y;
            double sqDist = dx * dx + dy * dy; // monotone in distance — sqrt unnecessary
            if (sqDist > maxSqDist)
            {
                maxSqDist = sqDist;
                p1 = points[i];
                p2 = points[j];
            }
        }
    }

    return {p1, p2};
}

// Clamp every box corner into the source image rectangle [0, src_w] x [0, src_h].
void YoloV8::clip_boxes(YoloV8BoxVec &yolobox_vec, int src_w, int src_h)
{
    const float w = (float)src_w;
    const float h = (float)src_h;
    for (auto &box : yolobox_vec)
    {
        box.x1 = std::max(0.0f, std::min(box.x1, w));
        box.y1 = std::max(0.0f, std::min(box.y1, h));
        box.x2 = std::max(0.0f, std::min(box.x2, w));
        box.y2 = std::max(0.0f, std::min(box.y2, h));
    }
}

/**
 * Greedy IoU non-maximum suppression, in place.
 *
 * Boxes are sorted by ascending score; `index` walks from the best box down,
 * erasing every lower-scored box whose IoU with it exceeds nmsConfidence.
 * On return `dets` holds the surviving boxes, still in ascending-score order.
 *
 * @param dets          detections to filter (modified in place)
 * @param nmsConfidence IoU threshold above which the weaker box is removed
 */
void YoloV8::NMS(YoloV8BoxVec &dets, float nmsConfidence)
{
    int length = dets.size();
    int index = length - 1; // highest-score box after the ascending sort

    std::sort(dets.begin(), dets.end(), [](const YoloV8Box &a, const YoloV8Box &b)
              { return a.score < b.score; });

    // Precompute box areas (kept in lock-step with dets through the erases).
    std::vector<float> areas(length);
    for (int i = 0; i < length; i++)
    {
        float width = dets[i].x2 - dets[i].x1;
        float height = dets[i].y2 - dets[i].y1;
        areas[i] = width * height;
    }

    while (index > 0)
    {
        int i = 0;
        // Compare every lower-scored box against dets[index]; erasing shifts
        // later elements left, so index is decremented to keep tracking the
        // same anchor box.
        while (i < index)
        {
            float left = std::max(dets[index].x1, dets[i].x1);
            float top = std::max(dets[index].y1, dets[i].y1);
            float right = std::min(dets[index].x2, dets[i].x2);
            float bottom = std::min(dets[index].y2, dets[i].y2);
            float overlap = std::max(0.0f, right - left) * std::max(0.0f, bottom - top);
            if (overlap / (areas[index] + areas[i] - overlap) > nmsConfidence)
            {
                areas.erase(areas.begin() + i);
                dets.erase(dets.begin() + i);
                index--;
            }
            else
            {
                i++;
            }
        }
        index--; // move to the next-best surviving box
    }
}

/**
 * @brief Draw detection results onto an image.
 *
 * Overlays each detection's mask (alpha-blended with a per-class color) and
 * a text label (class, score, angle, length). Only boxes with score >= 0.25
 * and class ids other than {0, 1, 2, 5} are drawn, i.e. only drill pipe and
 * drill rig.
 *
 * @param img    image drawn on in place
 * @param result detections to render
 */
void YoloV8::draw_result(cv::Mat &img, YoloV8BoxVec &result)
{
    cv::Mat mask = img.clone();
    for (size_t i = 0; i < result.size(); i++)
    {
        // Skip low-confidence detections.
        if (result[i].score < 0.25)
            continue;
        // Only draw drill pipe and drill rig classes.
        if ((result[i].class_id == 0) || (result[i].class_id == 1) || (result[i].class_id == 2) || (result[i].class_id == 5))
            continue;
        // Label anchor: top-left corner of the box.
        int left, top;
        left = result[i].x1;
        top = result[i].y1;
        cv::Scalar color(colors[result[i].class_id % 25][0], colors[result[i].class_id % 25][1],
                         colors[result[i].class_id % 25][2]);
        cv::Rect bound = {result[i].x1, result[i].y1, result[i].x2 - result[i].x1, result[i].y2 - result[i].y1};

        // rectangle(img, bound, color, 2);
        // Tint the mask region. Fix: the original tested `rows && cols > 0`,
        // comparing rows as a bare truth value; both dimensions are now
        // checked the same way. (Also removed the unused `color_num` local.)
        if (result[i].mask_img.rows > 0 && result[i].mask_img.cols > 0)
        {
            mask(bound).setTo(color, result[i].mask_img);
        }
        std::string label = std::string(m_class_names[result[i].class_id]) + std::string(" ") + doubleToStringWithPrecision(result[i].score, 2) + std::string(" ") + std::string("angle:") + doubleToStringWithPrecision(result[i].angle, 2) + std::string(" ") + std::string("len:") + doubleToStringWithPrecision(result[i].length, 2);
        putText(img, label, cv::Point(left, top), cv::FONT_HERSHEY_SIMPLEX, 1, color, 2);
    }
    addWeighted(img, 0.6, mask, 0.4, 0, img); // add mask to src
}
/**
 * @brief Push an incoming frame into the pipeline, with frame skipping.
 *
 * Each channel processes only every (skip_num_ + 1)-th frame: when the
 * channel's skip counter reaches skip_num_, the frame is queued for
 * detection (global_frame_que_ + preprocess) and the counter resets;
 * otherwise the frame is dropped and the counter increments.
 *
 * @param channel_id source channel of the frame
 * @param img        shared pointer to the frame image
 * @return preprocess() result (0 on success) for processed frames, 0 for
 *         skipped frames
 */
int YoloV8::push_img(int channel_id, std::shared_ptr<bm_image> img)
{
    int ret = 0;
#if 1
    if (channel_skip_nums_[channel_id] == skip_num_)
    {
        // Register the frame for the post-processing thread (FIFO order must
        // match preprocess order), then preprocess it.
        std::unique_lock<std::mutex> lock(global_frame_que_mtx_);
        global_frame_que_.push_back(std::make_shared<FrameInfoDetectYolov8Seg>(channel_id, img));
        lock.unlock();
        ret = preprocess(img);
        channel_skip_nums_[channel_id] = 0;
    }
    else
    {
        // Skipped frames are simply discarded (not forwarded downstream).
        // out_que_.push_back(std::make_shared<FrameInfoDetectYolov8Seg>(channel_id, img));
        channel_skip_nums_[channel_id]++;
    }
#endif
#if 0
    // Debug path: forward every frame straight to the output queue.
    out_que_.push_back(std::make_shared<FrameInfoDetectYolov8Seg>(channel_id, img));
#endif
    return ret;
}

// Spin up the pipeline: one thread drives inference, one drives post-processing.
void YoloV8::start()
{
    stop_flag_ = false;
    auto forward_worker = std::make_shared<std::thread>(&YoloV8::forward_thread_dowork, this);
    auto post_worker = std::make_shared<std::thread>(&YoloV8::post_thread_dowork, this);
    thread_vec_.push_back(forward_worker);
    thread_vec_.push_back(post_worker);
}

/**
 * Stop the pipeline: raise the stop flag, wake any threads blocked on the
 * queues, and join the worker threads.
 */
void YoloV8::stop()
{
    stop_flag_ = true;
    // Unblock threads waiting in pop_front() so they can observe stop_flag_.
    pre_forward_que_.stop();
    forward_post_que_.stop();
    out_que_.stop();
    for (size_t i = 0; i < thread_vec_.size(); i++)
    {
        // Robustness fix: joining an already-joined thread throws
        // std::system_error, so a second stop() call would crash; guard with
        // joinable() and clear the vector afterwards.
        if (thread_vec_[i] && thread_vec_[i]->joinable())
        {
            thread_vec_[i]->join();
        }
    }
    thread_vec_.clear();
}
// Append the supplied labels to the class-name table used for box labeling.
void YoloV8::setClassNames(std::vector<std::string> class_name_vec)
{
    // A range insert is a no-op for an empty input, matching the original's
    // guarded element-by-element push_back.
    m_class_names.insert(m_class_names.end(), class_name_vec.begin(), class_name_vec.end());
}
// Pop the next fully post-processed frame from the output queue
// (blocking, per the queue's pop_front semantics).
std::shared_ptr<FrameInfoDetectYolov8Seg> YoloV8::get_img()
{
    return out_que_.pop_front();
}
// Format `value` in fixed-point notation with `precision` decimal places.
std::string YoloV8::doubleToStringWithPrecision(double value, int precision)
{
    std::ostringstream stream;
    stream << std::fixed << std::setprecision(precision) << value;
    return stream.str();
}