#include"Backend.h"
#include "cuda_icp/icp.h"
using namespace cv;
namespace  cv_dnn {
namespace
{

// Comparator: orders (score, payload) pairs by strictly descending score.
// Used with std::stable_sort so equal scores keep their original order.
template <typename T>
static inline bool SortScorePairDescend(const std::pair<float, T>& lhs,
                                        const std::pair<float, T>& rhs)
{
    return lhs.first > rhs.first;
}

} // namespace

// Collect (score, index) pairs for every score strictly above `threshold`,
// sort them by descending score (stable), and truncate to at most `top_k`
// entries when top_k > 0. Results are appended to `score_index_vec`.
inline void GetMaxScoreIndex(const std::vector<float>& scores, const float threshold, const int top_k,
                             std::vector<std::pair<float, int> >& score_index_vec)
{
    const size_t count = scores.size();
    for (size_t idx = 0; idx < count; ++idx)
    {
        const float score = scores[idx];
        if (score > threshold)
            score_index_vec.emplace_back(score, static_cast<int>(idx));
    }
    // Stable sort preserves input order among ties.
    std::stable_sort(score_index_vec.begin(), score_index_vec.end(),
                     SortScorePairDescend<int>);
    if (top_k > 0 && top_k < static_cast<int>(score_index_vec.size()))
        score_index_vec.resize(top_k);
}

template <typename BoxType>
inline void NMSFast_(const std::vector<BoxType>& bboxes,
                     const std::vector<float>& scores, const float score_threshold,
                     const float nms_threshold, const float eta, const int top_k,
                     std::vector<int>& indices, float (*computeOverlap)(const BoxType&, const BoxType&))
{
    CV_Assert(bboxes.size() == scores.size());
    std::vector<std::pair<float, int> > score_index_vec;
    GetMaxScoreIndex(scores, score_threshold, top_k, score_index_vec);

    // Do nms.
    float adaptive_threshold = nms_threshold;
    indices.clear();
    for (int i = 0; i < score_index_vec.size(); ++i) {
        const int idx = score_index_vec[i].second;
        bool keep = true;
        for (int k = 0; k < (int)indices.size() && keep; ++k) {
            const int kept_idx = indices[k];
            float overlap = computeOverlap(bboxes[idx], bboxes[kept_idx]);
            keep = overlap <= adaptive_threshold;
        }
        if (keep) {
            indices.push_back(idx);
        }
        if (keep && eta < 1 && adaptive_threshold > 0.5) {
            adaptive_threshold *= eta;
        }
    }
}


// copied from opencv 3.4, not exist in 3.0
template<typename _Tp> static inline
    double jaccardDistance__(const Rect_<_Tp>& a, const Rect_<_Tp>& b) {
    _Tp Aa = a.area();
    _Tp Ab = b.area();

    if ((Aa + Ab) <= std::numeric_limits<_Tp>::epsilon()) {
        // jaccard_index = 1 -> distance = 0
        return 0.0;
    }

    double Aab = (a & b).area();
    // distance = 1 - jaccard_index
    return 1.0 - Aab / (Aa + Ab - Aab);
}

// Rectangle overlap = Jaccard index (IoU), i.e. 1 - Jaccard distance.
template <typename T>
static inline float rectOverlap(const T& a, const T& b)
{
    const double distance = jaccardDistance__(a, b);
    return 1.f - static_cast<float>(distance);
}

// Non-maximum suppression over axis-aligned rectangles using IoU overlap.
// Writes the indices of the kept boxes (score > score_threshold, pairwise
// IoU <= nms_threshold) into `indices`.
// NOTE(review): default arguments on a definition are only visible within
// this translation unit — confirm there is no separate declaration.
void NMSBoxes(const std::vector<Rect>& bboxes, const std::vector<float>& scores,
              const float score_threshold, const float nms_threshold,
              std::vector<int>& indices, const float eta = 1, const int top_k = 0)
{
    NMSFast_(bboxes, scores, score_threshold, nms_threshold, eta, top_k, indices, rectOverlap);
}

}


// Convert a cv::Mat to a QImage for display in the UI.
// Handles CV_8UC1 / CV_8UC3 / CV_8UC4 / CV_16UC4; any other type falls
// through the switch and returns a null QImage.
// NOTE(review): this QImage constructor does not copy the pixel buffer —
// for the cases that don't call rgbSwapped() (which deep-copies), `mat`
// must outlive the returned image; confirm callers guarantee this.
QImage Backend::cvMat2QImage(const cv::Mat &mat)
{
    QImage image;
    switch(mat.type())
    {
    case CV_8UC1:
        // QImage ctor: data, width, height, bytes per row, pixel format.
        image = QImage((const unsigned char*)mat.data, mat.cols, mat.rows, mat.step, QImage::Format_Grayscale8);
        break;
    case CV_8UC3:
        image = QImage((const unsigned char*)mat.data, mat.cols, mat.rows, mat.step, QImage::Format_RGB888);
        image = image.rgbSwapped(); // swap BGR -> RGB (deep copy)
        // Qt 5.14 added Format_BGR888, which would avoid the swap:
        // image = QImage((const unsigned char*)mat.data, mat.cols, mat.rows, mat.cols * 3, QImage::Format_BGR888);
        break;
    case CV_8UC4:
        image = QImage((const unsigned char*)mat.data, mat.cols, mat.rows, mat.step, QImage::Format_ARGB32);
        break;
    case CV_16UC4:
        image = QImage((const unsigned char*)mat.data, mat.cols, mat.rows, mat.step, QImage::Format_RGBA64);
        image = image.rgbSwapped(); // swap BGR -> RGB (deep copy)
        break;
    }
    return image;
}

// Create a scaled shape model (rotated/scaled template set).
// Build a rotated/scaled template set ("shape model") from `templateImg`,
// configure `detector` with adaptive feature count and pyramid levels, and
// register all produced template variants under `class_id`.
// angleStep == 0 or scaleStep == 0 selects an adaptive step size.
void Backend::createScaledShapeModel(cv::Mat& templateImg, line2Dup::Detector& detector, shape_based_matching::shapeInfo_producer& shapes, const std::string& class_id, float angleStart, float angleExtent, float angleStep, float scaleMin, float scaleMax, float scaleStep, float weak_thresh, float strong_thresh, bool set_produce_dxy)
//weak_thresh relates to edge extraction
//strong_thresh relates to feature-point selection
{
    assert(!templateImg.empty() && "templateImg is empty");
    int maxArea =templateImg.rows*templateImg.cols;
    //adaptive feature_nums
    //int feature_nums = std::min(std::max(maxArea /128,64),256);
    // NOTE(review): min(max(x, 64), 40) always evaluates to 40 — the clamp
    // bounds appear swapped (earlier version clamped to [64, 256]). Confirm
    // a fixed 40 features is really intended.
    int feature_nums = std::min(std::max(maxArea / 128, 64), 40);
    std::cout << "adaptive_features_nums:" << feature_nums << std::endl;
    std::vector<int>T = {4};
    const int minSize = 16;
    //adaptive pyramid_levels
    int min_dim = std::min(templateImg.cols, templateImg.rows);
    int  adaptive_pyramid_levels = std::min(int(std::log2(std::max(min_dim / minSize,1))),10);
    std::cout << "adaptive_pyramid_levels:" << adaptive_pyramid_levels << std::endl;
    // First level uses stride 4, every additional pyramid level stride 8.
    for (int i = 0; i < adaptive_pyramid_levels - 1; i++) {
        T.emplace_back(8);
    }
    detector= line2Dup::Detector (feature_nums, T, weak_thresh, strong_thresh);

    detector.set_produce_dxy = set_produce_dxy;
    //adaptive padding
    // Pad to the diagonal length (times max scale) so any rotation/scale of
    // the template fits inside the training image without clipping.
    int padded_size =static_cast<int>(round(std::sqrt(templateImg.cols * templateImg.cols + templateImg.rows * templateImg.rows) * std::max(scaleMax, 1.0f)));
    int padded_width = (padded_size - templateImg.cols+1)/2;
    int padded_height = (padded_size - templateImg.rows+1)/2;
    Mat mask = Mat(templateImg.size(), CV_8UC1, { 255 });
    cv::Mat padded_template_img = cv::Mat(templateImg.rows + 2 * padded_height, templateImg.cols+2*padded_width, templateImg.type(), cv::Scalar::all(0));
    templateImg.copyTo(padded_template_img(Rect(padded_width, padded_height, templateImg.cols, templateImg.rows)));
    cv::Mat padded_mask = cv::Mat(templateImg.rows + 2 * padded_height,templateImg.cols + 2 * padded_width , mask.type(), cv::Scalar::all(0));
    mask.copyTo(padded_mask(Rect(padded_width, padded_height, templateImg.cols, templateImg.rows)));
    std::cout << "padded_template_img.size" << padded_template_img.size()<<std::endl;
    shapes=std::move(shape_based_matching::shapeInfo_producer(padded_template_img, padded_mask));
    //adaptive angle_step
    if (angleStep == 0.0f) {
        // theta = smallest rotation that moves an outermost pixel by ~1px.
        double l = padded_size / 2.0;
        double theta=acos(1.0 - 2.0 / l / l) / CV_PI * 180;
        double k = 3.0;
        // k should stay between 1/3 and 3
        angleStep =static_cast<float>( k*theta); 
        std::cout << "adaptive angleStep to" << angleStep << std::endl;
    }
    if (scaleStep == 0.0f) {
        // ds = scale change that moves an outermost pixel by ~1px.
        double l = std::sqrt(templateImg.cols * templateImg.cols + templateImg.rows * templateImg.rows) / 2;
        double ds = 2 / l;
        double k = 3.0;
        // k should stay between 1/3 and 3
        scaleStep = static_cast<float>(k * ds);
        std::cout << "adaptive scaleStep to" << scaleStep << std::endl;
    }
    shapes.angle_range = { angleStart,angleStart+angleExtent };
    shapes.angle_step = angleStep;
    shapes.scale_range = { scaleMin,scaleMax };
    shapes.scale_step = scaleStep;
    shapes.produce_infos();
    
    detector.addTemplates(class_id, shapes, feature_nums);
       //for (auto info : shapes.infos) {
        //detector.addTemplate(shapes.src_of(info), class_id, shapes.mask_of(info), (int)feature_nums);
        //cv::Mat to_show = shapes.src_of(info);
       // cv::imshow("to_show", to_show);
        //cv::waitKey(10);
       //}
        
}

// Find the shape model in a target image and draw the detections.
// Match the trained shape model inside `targetImg`, suppress overlapping
// candidates with NMS, optionally refine each match with 2D ICP (when the
// detector produced gradient images), and return a BGR visualization of
// `targetImg` with rotated boxes, scores and template contours drawn on it.
// minScore   : similarity threshold in [0,1] (detector API takes percent).
// numMatches : 0 means "all"; NOTE(review): currently only normalized here,
//              the draw loops do not cap on it — confirm intended.
// maxOverlap : IoU threshold used by NMS.
cv::Mat Backend::findScaledShapeModel(cv::Mat& templateImg, cv::Mat& targetImg, const std::string& class_id, line2Dup::Detector& detector, shape_based_matching::shapeInfo_producer& shapes, float minScore, int numMatches, float maxOverlap) {
    Timer time1;
    auto &infos = shapes.infos;
    // Fixed copy-paste bug: this assert previously re-checked targetImg.
    assert(!templateImg.empty() && "check your templateImg");
    assert(!targetImg.empty() && "check your targetImg");
    // Pad the search image so matches near the border are still found.
    int padding = 100;
    cv::Mat padded_target_img = cv::Mat(targetImg.rows + 2 * padding,
                                       targetImg.cols + 2 * padding, targetImg.type(), cv::Scalar::all(0));
    targetImg.copyTo(padded_target_img(Rect(padding, padding, targetImg.cols, targetImg.rows)));
    // Visualization canvas: grayscale copy of the (unpadded) target, later
    // promoted to BGR; drawing coordinates subtract `padding` accordingly.
    Mat img;
    if (targetImg.channels() != 1) { 
        cvtColor(targetImg, img, cv::COLOR_BGR2GRAY);
    }
    else {
        img = targetImg.clone();
    }
    assert(img.isContinuous());
    Timer t;
    auto &&matches = detector.match(padded_target_img, minScore * 100, { class_id });
    t.out("match 1 ");
    if (img.channels() == 1) { cvtColor(img, img, cv::COLOR_GRAY2BGR); }
    if (numMatches == 0)numMatches = matches.size();

    // Non-maximum suppression over the raw matches.
    std::vector<Rect> boxes;
    std::vector<float> scores;
    std::vector<int> idxs;
    for (size_t i = 0; i < matches.size(); i++) {
        cv::Rect box;
        box.x = matches[i].x;
        box.y = matches[i].y;
        const auto& templ = detector.getTemplates(class_id,
                                                  matches[i].template_id);
        box.width = templ[0].width;
        box.height = templ[0].height;
        boxes.push_back(box);
        scores.push_back(matches[i].similarity);
    }

    cv_dnn::NMSBoxes(boxes, scores, minScore, maxOverlap, idxs);

    // Recover the padded training-image geometry (must mirror the padding
    // computation in createScaledShapeModel).
    int padded_size = static_cast<int>(std::sqrt(templateImg.cols * templateImg.cols + templateImg.rows * templateImg.rows) * std::max(scaleMax, 1.0f) + 0.5f);
    int padded_width = (padded_size - templateImg.cols + 1) / 2;
    int padded_height = (padded_size - templateImg.rows + 1) / 2;
    int train_img_width = templateImg.cols + 2*padded_width;
    int train_img_height =templateImg.rows + 2*padded_height;

    // Template edge map for the contour overlay. Shared by both branches
    // below (was duplicated per branch).
    cv::Mat temp;
    if (templateEdges.empty()) {
        cv::Canny(templateImg, temp, threshold1, threshold2);
    }
    else {
        temp = templateEdges.clone();
    }
    // The contours depend only on the template, not on the match, so they
    // are extracted once here instead of inside the per-match loops.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(temp, contours, cv::RETR_TREE, cv::CHAIN_APPROX_NONE);

    if (detector.set_produce_dxy) {
        // ICP refinement path: build a kd-tree scene from the detector's
        // gradient images (init_Scene_kdtree_cpu defaults: 30/60).
        Scene_kdtree scene;
        KDTree_cpu kdtree;
        scene.init_Scene_kdtree_cpu(detector.dx_, detector.dy_, kdtree);
        for (const auto& idx : idxs)
        {
            const auto& match = matches[idx];
            const auto& templ = detector.getTemplates(class_id,
                match.template_id);
            // Model point cloud: template features placed at match position.
            std::vector<::Vec2f> model_pcd(templ[0].features.size());
            for (int i = 0; i < templ[0].features.size(); i++) {
                auto& feat = templ[0].features[i];
                model_pcd[i] = {
                    float(feat.x + match.x),
                    float(feat.y + match.y)
                };
            }
            cuda_icp::RegistrationResult result = cuda_icp::sim3::ICP2D_Point2Plane_cpu(model_pcd, scene);
            // Scale recovered from the sim(2) rotation column, times the
            // matched template's pyramid scale.
            float match_scale = std::sqrt(result.transformation_[0][0] * result.transformation_[0][0] +
                result.transformation_[1][0] * result.transformation_[1][0]) * infos[match.template_id].scale;
            float match_width = templateImg.cols  * match_scale;
            float match_height = templateImg.rows* match_scale;
            cv::Vec3b randColor;
            randColor[0] = 0;
            randColor[1] = 255;
            randColor[2] = 0;

            cv::putText(img, std::to_string(match.similarity),
                cv::Point(match.x - padding + max(match_width/2, match_height/2) - 10, match.y - padding - 3), cv::FONT_HERSHEY_PLAIN, 2, randColor);

            // Map the training-image center through the ICP transform.
            float x = match.x - templ[0].tl_x+ (train_img_width - 1) / 2.0f;
            float y = match.y - templ[0].tl_y+(train_img_height - 1) / 2.0f;
            float new_x = result.transformation_[0][0] * x + result.transformation_[0][1] * y + result.transformation_[0][2];
            float new_y = result.transformation_[1][0] * x + result.transformation_[1][1] * y + result.transformation_[1][2];
            // Remove the search-image padding offset.
            new_x = new_x - (padding);
            new_y = new_y - (padding)+1;
            double init_angle = infos[match.template_id].angle;
            // Total angle = template rotation + ICP rotation correction.
            double icp_diff_angle = std::abs(-std::asin(result.transformation_[1][0]) / CV_PI * 180 + init_angle);

            cv::RotatedRect rotatedRectangle({ new_x, new_y }, { match_width, match_height }, -icp_diff_angle);

            cv::Point2f vertices[4];
            rotatedRectangle.points(vertices);
            for (int i = 0; i < 4; i++) {
                int next = (i + 1 == 4) ? 0 : (i + 1);
                cv::line(img, vertices[i], vertices[next], randColor, 2);
            }
            /*             draw contours start                             */
            float targetAngle = (720 + icp_diff_angle);
            float scale = std::sqrt(result.transformation_[0][0] * result.transformation_[0][0] +
                result.transformation_[1][0] * result.transformation_[1][0]) * infos[match.template_id].scale;
            cv::Mat transform = cv::getRotationMatrix2D(cv::Point2f((templateImg.cols-1) / 2.0, (templateImg.rows-1) / 2.0), targetAngle, scale);
            // Shift the rotation-about-center result to the match center.
            transform.at<double>(0, 2) += new_x - (templateImg.cols-1) / 2.0;
            transform.at<double>(1, 2) += new_y - (templateImg.rows-1) / 2.0;
            std::vector<std::vector<cv::Point>> contoursTransformed;
            for (const auto& contour : contours) {
                std::vector<cv::Point>contourTransformed;
                cv::transform(contour, contourTransformed, transform);
                contoursTransformed.emplace_back(contourTransformed);
            }
            cv::drawContours(img, contoursTransformed, -1, cv::Scalar(0, 255, 0), 1, 8, noArray(), 2147283647);
            /*             draw contours end                             */

            std::cout << "fitness" << result.fitness_ << std::endl;
            std::cout << "inlier_rmse" << result.inlier_rmse_ << std::endl;
            std::cout << "match.scale: " << std::sqrt(result.transformation_[0][0] * result.transformation_[0][0] +
                result.transformation_[1][0] * result.transformation_[1][0]) * infos[match.template_id].scale << std::endl;
            std::cout << "match.similarity: " << match.similarity << std::endl;
            std::cout << "match.angle: " << (icp_diff_angle > 360 ? (icp_diff_angle)-360 :icp_diff_angle) << std::endl;
            std::cout << "match.center: " << cv::Point2f(new_x, new_y) << std::endl;
        }
    }
    else {
        // No gradient images: report the matched template pose directly.
        for (const auto& idx : idxs)
        {
            const auto& match = matches[idx];
            const auto& templ = detector.getTemplates(class_id,
                match.template_id);
          
            float match_scale = infos[match.template_id].scale;
            float match_width = templateImg.cols* match_scale;
            float match_height = templateImg.rows * match_scale;
            cv::Vec3b randColor;
            randColor[0] = 0;
            randColor[1] = 255;
            randColor[2] = 0;

            cv::putText(img, std::to_string(match.similarity),
                cv::Point(match.x - padding + max(match_width/2, match_height/2) - 10, match.y -padding - 3), cv::FONT_HERSHEY_PLAIN, 2, randColor);

            float x = match.x - templ[0].tl_x + ((train_img_width - 1) / 2.0f);
            float y = match.y - templ[0].tl_y +((train_img_height - 1) / 2.0f);
            float new_x = x;
            float new_y = y;
            // Remove the search-image padding offset.
            new_x = new_x-padding;
            new_y = new_y-padding;
            double init_angle = infos[match.template_id].angle;

            cv::RotatedRect rotatedRectangle({ new_x, new_y }, { match_width, match_height }, -init_angle);

            cv::Point2f vertices[4];
            rotatedRectangle.points(vertices);
            for (int i = 0; i < 4; i++) {
                int next = (i + 1 == 4) ? 0 : (i + 1);
                cv::line(img, vertices[i], vertices[next], randColor, 2);
            }
            /*             draw contours start                             */
            float targetAngle = (720 + init_angle);
            cv::Mat transform = cv::getRotationMatrix2D(cv::Point2f((templateImg.cols-1) / 2.0f, (templateImg.rows-1) / 2.0f), targetAngle, match_scale);
            // Shift the rotation-about-center result to the match center.
            transform.at<double>(0, 2) += new_x - (templateImg.cols-1) / 2.0;
            transform.at<double>(1, 2) += new_y - (templateImg.rows-1) / 2.0;
            std::vector<std::vector<cv::Point>> contoursTransformed;
            for (const auto& contour : contours) {
                std::vector<cv::Point>contourTransformed;
                cv::transform(contour, contourTransformed, transform);
                contoursTransformed.emplace_back(contourTransformed);
            }
            cv::drawContours(img, contoursTransformed, -1, cv::Scalar(0, 255, 0), 1, 8, noArray(), 2147283647);
            /*             draw contours end                             */

            std::cout << "match.scale: " <<match_scale << std::endl;
            std::cout << "match.similarity: " << match.similarity << std::endl;
            std::cout << "match.angle: " << init_angle << std::endl;
            std::cout << "match.center: " << cv::Point2f(new_x, new_y) << std::endl;
        }
    }
    
    time1.out("findScaledShapeModel");
    return img;
}
// Structured-result variant of findScaledShapeModel: runs detection + NMS,
// optionally refines each surviving match with 2D ICP, and returns
// MatchResult entries (x, y, angle in [0,360), scale, similarity) instead
// of drawing an overlay. `numMatches` caps the result count (0 = no cap).
std::vector<MatchResult> Backend::findScaledShapeModel1(cv::Mat& templateImg, cv::Mat& targetImg, const std::string& class_id, line2Dup::Detector& detector, shape_based_matching::shapeInfo_producer& shapes, float minScore, int numMatches, float maxOverlap) {
    std::vector<MatchResult>matchResult;
    auto& infos = shapes.infos;
    // Fixed copy-paste bug: this assert previously re-checked targetImg.
    assert(!templateImg.empty() && "check your templateImg");
    assert(!targetImg.empty() && "check your targetImg");
    // Pad the search image so matches near the border survive.
    int padding = 30;
    cv::Mat padded_target_img = cv::Mat(targetImg.rows + 2 * padding,
        targetImg.cols + 2 * padding, targetImg.type(), cv::Scalar::all(0));
    targetImg.copyTo(padded_target_img(Rect(padding, padding, targetImg.cols, targetImg.rows)));
    auto&& matches = detector.match(padded_target_img, minScore * 100, { class_id });
    if (numMatches == 0)numMatches = matches.size();
    // Non-maximum suppression over the raw matches.
    std::vector<Rect> boxes;
    std::vector<float> scores;
    std::vector<int> idxs;
    for (size_t i = 0; i < matches.size(); i++) {
        cv::Rect box;
        box.x = matches[i].x;
        box.y = matches[i].y;
        const auto& templ = detector.getTemplates(class_id,
            matches[i].template_id);
        box.width = templ[0].width;
        box.height = templ[0].height;
        boxes.push_back(box);
        scores.push_back(matches[i].similarity);
    }
    cv_dnn::NMSBoxes(boxes, scores, minScore, maxOverlap, idxs);
    // Recover the padded training-image geometry (must mirror the padding
    // computation in createScaledShapeModel).
    int padded_size = static_cast<int>(round(std::sqrt(templateImg.cols * templateImg.cols + templateImg.rows * templateImg.rows) * std::max(shapes.scale_range[1], 1.0f)));
    int padded_width = (padded_size - templateImg.cols + 1) / 2;
    int padded_height = (padded_size - templateImg.rows + 1) / 2;
    int train_img_width = templateImg.cols + 2 * padded_width;
    int train_img_height = templateImg.rows + 2 * padded_height;
    matchResult.reserve(std::min(static_cast<int>(idxs.size()), numMatches));
    if (detector.set_produce_dxy) {
        // ICP refinement path: build a kd-tree scene from the detector's
        // gradient images using its own weak/strong thresholds.
        Scene_kdtree scene;
        KDTree_cpu kdtree;
        scene.init_Scene_kdtree_cpu(detector.dx_, detector.dy_, kdtree,detector.weak_thresh,detector.strong_thresh);

        for (const auto& idx : idxs)
        {
            if (numMatches == 0)break;
            numMatches--;
            const auto& match = matches[idx];
            const auto& templ = detector.getTemplates(class_id,
                match.template_id);
            // Model point cloud: template features placed at match position.
            std::vector<::Vec2f> model_pcd(templ[0].features.size());
            for (int i = 0; i < templ[0].features.size(); i++) {
                auto& feat = templ[0].features[i];
                model_pcd[i] = {
                    float(feat.x + match.x),
                    float(feat.y + match.y)
                };
            }
            cuda_icp::RegistrationResult result = cuda_icp::sim3::ICP2D_Point2Plane_cpu(model_pcd, scene);
            // Map the training-image center through the ICP transform.
            float x = match.x - templ[0].tl_x + (train_img_width - 1) / 2.0f;
            float y = match.y - templ[0].tl_y + (train_img_height - 1) / 2.0f+1;
            float new_x = result.transformation_[0][0] * x + result.transformation_[0][1] * y + result.transformation_[0][2];
            float new_y = result.transformation_[1][0] * x + result.transformation_[1][1] * y + result.transformation_[1][2];
            // Remove the search-image padding offset.
            new_x = new_x - (padding);
            new_y = new_y - (padding);
            double init_angle = infos[match.template_id].angle;
            // Total angle = template rotation + ICP rotation correction.
            float icp_diff_angle = static_cast<float>(std::abs(-std::asin(result.transformation_[1][0]) / CV_PI * 180 + init_angle));
            // Scale from the sim(2) rotation column times the template scale.
            float scale = std::sqrt(result.transformation_[0][0] * result.transformation_[0][0] +
               result.transformation_[1][0] * result.transformation_[1][0]) * infos[match.template_id].scale;
            matchResult.emplace_back( new_x,new_y,(icp_diff_angle > 360 ? (icp_diff_angle)-360 : icp_diff_angle),scale,match.similarity );
        }
    }
    else {
        // No gradient images: report the matched template pose directly.
        for (const auto& idx : idxs)
        {
            if (numMatches == 0)break;
            numMatches--;
            const auto& match = matches[idx];
            const auto& templ = detector.getTemplates(class_id,
                match.template_id);

            float x = match.x - templ[0].tl_x + ((train_img_width - 1) / 2.0f);
            float y = match.y - templ[0].tl_y + ((train_img_height - 1) / 2.0f)+1;
            float new_x = x;
            float new_y = y;
            // Remove the search-image padding offset.
            new_x = new_x - padding;
            new_y = new_y - padding;
            float angle = infos[match.template_id].angle;
            matchResult.emplace_back( new_x,new_y,(angle > 360 ? (angle)-360 : angle),infos[match.template_id].scale,match.similarity );
        }
    }
    return matchResult;
}







// Load a template image from a QML file URL, reset all cached template
// state, store a grayscale copy in `templateImg`, cache a QImage preview
// under a timestamped id, and emit templateProcessed(id).
void Backend::loadTemplateImage(const QString &path) {
    
    QString localPath=QUrl(path).toLocalFile();
    cv::Mat img = cv::imread(localPath.toStdString(), cv::IMREAD_COLOR);
    if (img.empty()) {
        qWarning() << "Could not open or find the image";
        return;
    }
    // A new template invalidates every derived artifact.
    templateImageCache.clear();
    templateModel.release();
    templateEdges.release();
    templateImg.release();
    
 /*   cv::Mat t;
    cv::resize(img, t, img.size() * 2);
    cv::imwrite("2.bmp",t);*/




    // Simple preprocessing: convert to grayscale when needed.
    cv::Mat gray;
    if(img.channels()!=1){
        cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    }
    else {
        gray=img.clone();
    }
    // Cache the grayscale template for later model creation.
    //cv::resize(gray, gray, gray.size() / 2);
    templateImg=gray;
    // Convert the OpenCV Mat to a QImage for the UI.
    QImage qimg=cvMat2QImage(img);
    // QByteArray-based caching kept for reference:
    // QByteArray byteArray;
    //QBuffer buffer(&byteArray);
    //buffer.open(QIODevice::WriteOnly);
    //qimg.save(&buffer, "PNG");
    // Timestamped id prevents QML from reusing a stale cache entry.
    qint64 msecs = QDateTime::currentMSecsSinceEpoch();
    QString id = QString("template")+QString::number(msecs);
    templateImageCache.insert(id,qimg);
    // Notify QML with the processed image id.
    emit templateProcessed(id);
}
// Load a target (search) image from a QML file URL, store a grayscale copy
// in `targetImg`, cache a QImage preview under a timestamped id, and emit
// imageProcessed(id).
void Backend:: loadTargetImage(const QString &path) {
    targetImageCache.clear();
    QString localPath=QUrl(path).toLocalFile();
    cv::Mat img = cv::imread(localPath.toStdString(), cv::IMREAD_COLOR);
    if (img.empty()) {
        qWarning() << "Could not open or find the image";
        return;
    }
    // Simple preprocessing: convert to grayscale when needed.
    cv::Mat gray;
    if(img.channels()!=1){
        cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    }
    else {
        gray=img.clone();
    }
    //cv::resize(gray,gray,gray.size()/2);
    // Cache the grayscale target for matching.
    targetImg=gray;
    // Convert the OpenCV Mat to a QImage for the UI.
    QImage qimg=cvMat2QImage(img);
    // QByteArray-based caching kept for reference:
    // QByteArray byteArray;
    //QBuffer buffer(&byteArray);
    //buffer.open(QIODevice::WriteOnly);
    //qimg.save(&buffer, "PNG");
    // Timestamped id prevents QML from reusing a stale cache entry.
    qint64 msecs = QDateTime::currentMSecsSinceEpoch();
    QString id = QString("target")+QString::number(msecs);
    targetImageCache.insert(id,qimg);
    // Notify QML with the processed image id.
    emit imageProcessed(id);
}
#include<QDir>
void Backend::loadTargetFolder(const QString& path) {
    std::cout << QUrl(path).toLocalFile().toStdString() << std::endl;
    imagePaths.clear();
    QDir dir(QUrl(path).toLocalFile());
    if (!dir.exists()) {
        std::cout << "can not find folder" << std::endl;
        return;
    }
    QStringList filters;
    filters << "*.png" << "*.jpg" << "*.jpeg" << "*.bmp";
    dir.setNameFilters(filters);

    QFileInfoList fileInfoList = dir.entryInfoList(QDir::Files);
    std::cout << fileInfoList.size() << std::endl;
    for (int i = 0; i < fileInfoList.size(); i++) {
        imagePaths.push_back(QString("file:///") + fileInfoList[i].absoluteFilePath());
    }
    if (!imagePaths.empty()) {
        imagePathsIndex = 0;
        loadTargetImage(imagePaths[imagePathsIndex]);
    }
}



// Create the template (build the shape model from the cached template image).
void Backend::createTemplate(){
    if(!templateEdges.empty()){
        templateImageCache.clear();
        templateModel=templateEdges.clone();
        //cv::imshow("templateImg",templateImg);
        //cv::Canny(templateImg,templateImg,60,180);
        createScaledShapeModel(templateModel, detector, shapes, class_id, angleStart, angleExtent, angleStep, scaleMin, scaleMax, scaleStep,weak_thresh, strong_thresh,sub_pixel);
        cv::Mat temp;
        cv::cvtColor(templateImg,temp,cv::COLOR_GRAY2BGR);
        cv::Mat edges_color;
        cv::cvtColor(templateEdges, edges_color, cv::COLOR_GRAY2BGR);
        edges_color.setTo(cv::Scalar(0, 0, 255), templateEdges != 0);
        // 叠加绿色边缘到原图上
        cv::bitwise_or(temp, edges_color, temp);

        const auto& templ = detector.getBaseTemplate();
        for (int i = 0; i < templ.features.size(); i++) {
            const auto& feat = templ.features[i];
            int x = feat.x - (templ.width - templateModel.cols) / 2;
            int y = feat.y - (templ.height - templateModel.rows) / 2;
            cv::circle(temp, { x,y }, 1, cv::Scalar(0, 255, 0), -1);
        }        
        
        QImage qimg=cvMat2QImage(temp);
        qint64 msecs = QDateTime::currentMSecsSinceEpoch();
        QString id = QString("template")+QString::number(msecs);
        templateImageCache.insert(id,qimg);
        emit templateProcessed(id);
    }
    else if (!templateImg.empty()) {
        templateImageCache.clear();
        templateModel = templateImg.clone();
        //cv::imshow("templateImg",templateImg);
        //cv::Canny(templateImg,templateImg,60,180);
        createScaledShapeModel(templateModel, detector, shapes, class_id, angleStart, angleExtent, angleStep, scaleMin, scaleMax, scaleStep, weak_thresh, strong_thresh,sub_pixel);
        cv::Mat tempShow;
        cv::cvtColor(templateModel, tempShow, cv::COLOR_GRAY2BGR);
        
        const auto &templ = detector.getBaseTemplate();
        for (int i = 0; i < templ.features.size(); i++) {
            const auto& feat = templ.features[i];
            int x = feat.x - (templ.width - templateModel.cols)/2 ;
            int y = feat.y - (templ.height - templateModel.rows)/2;
            cv::circle(tempShow, { x,y }, 1, cv::Scalar(0, 255, 0), -1);
        }

        QImage qimg = cvMat2QImage(tempShow);
        qint64 msecs = QDateTime::currentMSecsSinceEpoch();
        QString id = QString("template") + QString::number(msecs);
        templateImageCache.insert(id, qimg);
        emit templateProcessed(id);       
    }
}
//Find template matches in the current target image
// Run the scaled shape-model matcher on targetImg and publish a preview in which
// every match is annotated with its rotated bounding box, center, projected
// template contours, and score.
void Backend::findTemplate() {
    if (targetImg.empty())
        return;
    targetImageCache.clear();
    if (templateModel.empty()) {
        std::cout << "no template" << std::endl;
        return;
    }
    Timer time1;
    std::vector<MatchResult> rst = findScaledShapeModel1(templateModel, targetImg, class_id, detector, shapes, minScore, numMatches, maxOverlap);
    time1.out("findScaledShapeModel");

    cv::Mat showImage;
    cv::cvtColor(targetImg, showImage, cv::COLOR_GRAY2BGR);

    // The template contours do not depend on the match, so extract them once
    // (the original recomputed findContours inside the per-match loop).
    std::vector<std::vector<cv::Point>> contours;
    if (!templateEdges.empty()) {
        cv::findContours(templateEdges, contours, cv::RETR_TREE, cv::CHAIN_APPROX_NONE);
    }

    for (size_t i = 0; i < rst.size(); i++) {
        const MatchResult& match = rst[i];
        std::cout << "match.center:" << match.x << "," << match.y << std::endl;
        std::cout << "match.scale:" << match.scale << std::endl;
        std::cout << "match.angle:" << match.angle << std::endl;
        std::cout << "match.score:" << match.score << std::endl;

        // Rotated bounding box of the match (negative angle: image-y points down).
        cv::RotatedRect rotatedRectangle({ match.x, match.y }, { match.scale * templateModel.cols - 1, match.scale * templateModel.rows - 1 }, -match.angle);
        cv::Point2f vertices[4];
        rotatedRectangle.points(vertices);
        // Renamed loop variable (was `i`, shadowing the outer match index).
        for (int v = 0; v < 4; v++) {
            const int next = (v + 1 == 4) ? 0 : (v + 1);
            cv::line(showImage, vertices[v], vertices[next], cv::Scalar(0, 255, 0), 2);
        }
        cv::circle(showImage, { static_cast<int>(round(match.x)), static_cast<int>(round(match.y)) }, 1, cv::Scalar(0, 255, 0), -1);

        // Project the template edge contours onto the match pose and draw them.
        if (!contours.empty()) {
            cv::Mat transform = cv::getRotationMatrix2D(cv::Point2f((templateEdges.cols - 1) / 2.0f, (templateEdges.rows - 1) / 2.0f), match.angle, match.scale);
            transform.at<double>(0, 2) += match.x - (templateImg.cols - 1) / 2.0;
            transform.at<double>(1, 2) += match.y - (templateImg.rows - 1) / 2.0;
            std::vector<std::vector<cv::Point>> contoursTransformed;
            contoursTransformed.reserve(contours.size());
            for (const auto& contour : contours) {
                std::vector<cv::Point> contourTransformed;
                cv::transform(contour, contourTransformed, transform);
                contoursTransformed.emplace_back(std::move(contourTransformed));
            }
            // 2147483647 == INT_MAX: draw every nesting level
            // (the original literal 2147283647 was a typo for INT_MAX).
            cv::drawContours(showImage, contoursTransformed, -1, cv::Scalar(0, 255, 0), 1, 8, noArray(), 2147483647);
        }

        cv::putText(showImage, std::format("{:.2f}", match.score),
            cv::Point(static_cast<int>(match.x), static_cast<int>(match.y)), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0, 0, 255));
    }

    // Cache the annotated preview and notify QML with its cache id.
    QImage qimg = cvMat2QImage(showImage);
    const qint64 msecs = QDateTime::currentMSecsSinceEpoch();
    const QString id = QString("target") + QString::number(msecs);
    targetImageCache.insert(id, qimg);
    emit imageProcessed(id);
}


std::vector<Point> smoothContourGaussian(const std::vector<Point>& contour, int kernelSize, double sigma) {
    int n = contour.size();
    if (n == 0 || kernelSize % 2 == 0) return contour;

    std::vector<Point> smoothed;
    int radius = kernelSize / 2;
    std::vector<double> kernel(kernelSize);
    double sum = 0.0;

    // 生成高斯核
    for (int i = -radius; i <= radius; ++i) {
        kernel[i + radius] = exp(-(i * i) / (2 * sigma * sigma));
        sum += kernel[i + radius];
    }
    for (auto& k : kernel) k /= sum; // 归一化

    // 应用滤波
    for (int i = 0; i < n; ++i) {
        double x = 0.0, y = 0.0;
        for (int j = -radius; j <= radius; ++j) {
            int idx = (i + j + n) % n; // 处理循环轮廓
            x += contour[idx].x * kernel[j + radius];
            y += contour[idx].y * kernel[j + radius];
        }
        smoothed.push_back(Point(round(x), round(y)));
    }
    return smoothed;
}


//Extract template edges
void Backend::getTemplateEdges(){
    if(!templateImg.empty()){
        templateImageCache.clear();
        cv::Canny(templateImg,templateEdges,threshold1,threshold2,3);
        //cv::Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
        //cv::dilate(templateEdges, templateEdges,kernel , cv::Point(-1, -1), 5);
        //morphologyEx(templateEdges, templateEdges, MORPH_CLOSE, kernel);
        std::vector<std::vector<cv::Point>> contours;
        std::vector<cv::Vec4i> hierarchy;
        findContours(templateEdges, contours, hierarchy, RETR_TREE, CHAIN_APPROX_NONE);
        Mat result = Mat::zeros(templateEdges.size(), CV_8UC1);
        std::vector<std::vector<cv::Point>> smooth_contours;
        // 遍历所有找到的轮廓
        for (size_t i = 0; i < contours.size(); i++) {
           // if (cv::contourArea(contours[i])>500)
            {
              
                smooth_contours.push_back(contours[i]);
            }
        }
        cv::drawContours(result,smooth_contours, -1, cv::Scalar(255), 1, 8, noArray(), 2147283647);
        templateEdges = result.clone();
        cv::Mat temp;
        cv::cvtColor(templateImg,temp,cv::COLOR_GRAY2BGR);
        cv::Mat edges_color;
        cv::cvtColor(templateEdges, edges_color, cv::COLOR_GRAY2BGR);
        edges_color.setTo(cv::Scalar(0, 255, 0), templateEdges != 0);

        // 叠加绿色边缘到原图上
        cv::bitwise_or(temp, edges_color, temp);

        QImage qimg=cvMat2QImage(temp);
        qint64 msecs = QDateTime::currentMSecsSinceEpoch();
        QString id = QString("template")+QString::number(msecs);
        templateImageCache.insert(id,qimg);
        // 发送信号，传递处理后的图片ID
        emit templateProcessed(id);
    }
}

//Eraser: remove edge pixels inside a rectangle
void Backend::erase(double x,double y,double width,double height){
    //std::cout<<x<<" "<<y<<"radius:"<<radius<<std::endl;
    if(!templateEdges.empty()){
        cv::Mat mask = cv::Mat::zeros(templateEdges.size(), CV_8UC1);
        // 在掩码图像上绘制一个半径为radius的圆，将其像素值设为0
        cv::rectangle(mask, cv::Rect(static_cast<int>(x), static_cast<int>(y), static_cast<int>(width),static_cast<int>(height)), cv::Scalar(255), -1);
        templateEdges=templateEdges-mask;
        cv::Mat edges_color;
        cv::cvtColor(templateEdges, edges_color, cv::COLOR_GRAY2BGR);
        edges_color.setTo(cv::Scalar(0, 255, 0), templateEdges != 0);
        // 叠加绿色边缘到原图上
        cv::Mat temp;
        cv::cvtColor(templateImg,temp,cv::COLOR_GRAY2BGR);
        cv::bitwise_or(temp, edges_color, temp);
        QImage qimg=cvMat2QImage(temp);
        qint64 msecs = QDateTime::currentMSecsSinceEpoch();
        QString id = QString("template")+QString::number(msecs);
        templateImageCache.insert(id,qimg);
        // 发送信号，传递处理后的图片ID
        emit templateProcessed(id);
    }
}


//Previous image
// Step back one image in the loaded folder, clamping at the first entry.
void Backend::previous_target_image() {
    if (imagePaths.empty())
        return;
    if (imagePathsIndex > 0)
        --imagePathsIndex;
    loadTargetImage(imagePaths[imagePathsIndex]);
}

//Next image
// Step forward one image in the loaded folder, clamping at the last entry.
void Backend::next_target_image() {
    if (imagePaths.empty())
        return;
    if (imagePathsIndex < imagePaths.size() - 1)
        ++imagePathsIndex;
    loadTargetImage(imagePaths[imagePathsIndex]);
}
// Write the raw template model to disk at the path chosen in QML.
void Backend::saveTemplateImage(const QString& path) {
    std::cout << path.toStdString() << std::endl;
    if (templateModel.empty())
        return;
    // Resolve the QML file URL to a filesystem path before writing.
    const QString localPath = QUrl(path).toLocalFile();
    std::cout << localPath.toStdString() << std::endl;
    cv::imwrite(localPath.toStdString(), templateModel);
}

// Extract the axis-aligned crop of a rotated rectangular ROI: rotate the whole
// input so the ROI becomes upright, then cut it out at the origin.
// On any failure (ROI extends outside the rotated image) outputArray is left
// untouched — callers detect this by checking outputArray.empty().
// NOTE(review): the translation adjustment below assumes the ROI's top-left
// (before rotation) maps to the output origin; verify against warpAffine's
// convention if the crop ever appears shifted.
void getRotateRectRoiImage(const cv::Mat &inputArray,cv::Mat &outputArray,const RotatedRect & roiRectangle) {
    // Rotation about the ROI center that makes the ROI axis-aligned.
    cv::Mat rotationMatrix = cv::getRotationMatrix2D(roiRectangle.center, roiRectangle.angle, 1.0);

    // Unrotated top-left corner of the ROI (center minus half the size).
    cv::Point2f top_left_rotated(
        roiRectangle.center.x - roiRectangle.size.width / 2.0,
        roiRectangle.center.y - roiRectangle.size.height / 2.0
    );

    // Shift the transform so the ROI's top-left lands at (0,0) in the output.
    rotationMatrix.at<double>(0, 2) -= top_left_rotated.x;
    rotationMatrix.at<double>(1, 2) -= top_left_rotated.y;
    cv::Mat rotatedImage;

    // Compute the bounds of the transformed image by mapping its four corners.
    std::vector<cv::Point2f> src_corners(4);
    src_corners[0] = cv::Point2f(0, 0);
    src_corners[1] = cv::Point2f(inputArray.cols - 1, 0);
    src_corners[2] = cv::Point2f(inputArray.cols - 1, inputArray.rows - 1);
    src_corners[3] = cv::Point2f(0, inputArray.rows - 1);

    std::vector<cv::Point2f> dst_corners;
    cv::transform(src_corners, dst_corners, rotationMatrix);
    // Bounding rectangle of the mapped corners sizes the warp canvas.
    cv::Rect bbox = cv::boundingRect(dst_corners);
    cv::warpAffine(inputArray, rotatedImage, rotationMatrix, bbox.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT);

    // The ROI is now an upright rectangle at the origin of the rotated image.
    cv::Rect rect(0, 0, static_cast<int>(round((roiRectangle.size.width))), static_cast<int>(round(roiRectangle.size.height)));

    //  cv::warpAffine(targetImg, rotatedImage, rotationMatrix,targetImg.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(255));
    std::cout << rotatedImage.size() << std::endl;
    // cv::imshow("123",rotatedImage);
    // Bail out (leaving outputArray unchanged) if the crop falls outside the canvas.
    if (rect.x<0 || rect.y<0 || rect.width + rect.x>rotatedImage.cols || rect.y + rect.height>rotatedImage.rows)
        return;
    outputArray = rotatedImage(rect).clone();
}
//Crop the template from the target image
void Backend::cropTargetImage(double x,double y, double width, double height,double angle){
    templateImageCache.clear();
    templateModel.release();
    templateEdges.release();
    templateImg.release();

    if(targetImg.empty()){
        return;
    }
    //qml坐标偏移
    cv::RotatedRect roiRectangle(cv::Point2f(x,y),cv::Size2f(width-1,height-1),angle);
    //cv::Mat temp=targetImg(roiRectangle).clone;
    //cv::imshow("231",temp);
    // 计算旋转矩阵，将图像旋转到与旋转矩形平行的位置
    

    getRotateRectRoiImage(targetImg, templateImg, roiRectangle);
    if (templateImg.empty())return;
    //Mat equalized;
    //equalizeHist(templateImg, templateImg);

    // 自适应阈值
    //Mat binary;
    //adaptiveThreshold(templateImg, templateImg, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 11, 2);

    cv::Mat img;
    cv::cvtColor(templateImg,img,cv::COLOR_GRAY2BGR);
   
    QImage qimg=cvMat2QImage(img);
    qint64 msecs = QDateTime::currentMSecsSinceEpoch();
    QString id = QString("template")+QString::number(msecs);
    templateImageCache.insert(id,qimg);
    // 发送信号，传递处理后的图片ID
    emit templateProcessed(id);


    //cv::imshow("123",croppedRotatedImage);
    // 计算逆旋转矩阵，将截取的区域旋转回0度
    //cv::Mat inverseRotationMatrix = cv::getRotationMatrix2D(rect.center, -angle, 1.0);
    //cv::Mat finalCroppedImage;
    //cv::warpAffine(croppedRotatedImage, finalCroppedImage, inverseRotationMatrix, croppedRotatedImage.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(255, 255, 255));

}
