#include "ImageProcessor.h"
#include "cvalgedgedetection.h"
#include "errordialogmanager.h"
#include "imagesavemanager.h"
#include "source/configmanager.h"
#include "source/tools.h"
#include "afterdetect.h"
#include "faultcachemodel.h"
#include "detectcachemodel.h"
#include "ngtable.h"

#include <opencv2/opencv.hpp>

/// Clamp `roi` to lie inside an image of the given `size`.
/// An empty or degenerate ROI selects the whole image. The input rect is not
/// modified; the adjusted rectangle is returned.
cv::Rect adjustROI(cv::Rect& roi, const cv::Size& size) {
    // Empty or degenerate ROI -> whole image.
    cv::Rect adjustedROI = roi;
    if (roi.empty() || roi.width <= 0 || roi.height <= 0) {
        adjustedROI = cv::Rect(0, 0, size.width, size.height);
    }

    // Shift/shrink the ROI so it lies within [0, size).
    if (adjustedROI.x < 0) adjustedROI.x = 0;
    if (adjustedROI.y < 0) adjustedROI.y = 0;
    if (adjustedROI.x + adjustedROI.width > size.width)
        adjustedROI.width = size.width - adjustedROI.x;
    if (adjustedROI.y + adjustedROI.height > size.height)
        adjustedROI.height = size.height - adjustedROI.y;

    // BUG FIX: if the ROI started entirely outside the image
    // (x >= size.width or y >= size.height) the clamping above produced a
    // negative width/height — an invalid rect that crashes cv::Mat cropping.
    // Fall back to the full image in that case.
    if (adjustedROI.width <= 0 || adjustedROI.height <= 0)
        adjustedROI = cv::Rect(0, 0, size.width, size.height);

    return adjustedROI;
}


/// Format `value` as fixed-point text with `precision` digits after the
/// decimal point (default: one digit).
std::string to_string_with_precision(double value, int precision = 1) {
    std::ostringstream formatted;
    formatted << std::fixed << std::setprecision(precision) << value;
    return formatted.str();
}


/// Construct the edge-detection stage: pull every tunable threshold from the
/// "cvInspection" parameter group, initialise the px->mm conversion from
/// cv.ini, and subscribe to configuration reload notifications.
/// NOTE: a locally-built `colorRanges` HSV lookup table was removed — it was
/// constructed and never read (dead code).
CvAlgEdgeDetection::CvAlgEdgeDetection(QObject *parent): CVAlgorithm(parent),
    setting(QString("master/config/")  + "cv.ini", QSettings::IniFormat),
    boundaryExtraction1_(ParamValue.getParameter("cvInspection", "boundaryExtraction1").toInt()),
    boundaryExtraction2_(ParamValue.getParameter("cvInspection", "boundaryExtraction2").toInt()),
    boundaryThreshold_(ParamValue.getParameter("cvInspection", "boundaryThreshold").toInt()),
    cvSelection_(EdgeDetection)
{
    // Pixel-to-millimetre conversion is configured from cv.ini.
    m_px2mm.init(&setting);

    // Display names of the available binarization methods (used in logs/UI).
    binarayMethonName = ConfigManager::getInstance().getConfig("cvInspection", "boundaryMethods")["option"].toList();
    boundaryExtractionAuto_ = ParamValue.getParameter("cvInspection", "setboundaryExtractionAuto").toBool();
    qRegisterMetaType<CVSelectionEnum>("CVSelectionEnum");
    // Tolerance windows for large/small-edge detection (upper & lower edges).
    largeSmallUpperMax_ = ParamValue.getParameter("cvInspection", "largeSmallUpperMax").toFloat();
    largeSmallUpperMin_ = ParamValue.getParameter("cvInspection", "largeSmallUpperMin").toFloat();
    largeSmallLowerMax_ = ParamValue.getParameter("cvInspection", "largeSmallLowerMax").toFloat();
    largeSmallLowerMin_ = ParamValue.getParameter("cvInspection", "largeSmallLowerMin").toFloat();

    resthreshold_ = ParamValue.getParameter("cvInspection", "resthreshold").toFloat();

    overturn_ = ParamValue.getParameter("cvInspection", "overturn").toInt();
    edgeSmoothing_ = ParamValue.getParameter("cvInspection", "edgeSmoothing").toInt();
    // No measurement has been taken yet.
    LargeSmallLastTop_ = 0.0;
    LargeSmallLastBottom_ = 0.0;

    connect(&ConfigManager::getInstance() , &ConfigManager::valueTableLoad,
            this, &CvAlgEdgeDetection::valueTableLoad);
}


void CvAlgEdgeDetection::characterDefectTraining()
{

    QVector<FaultCacheModel::FaultImageCacheItem>& cacheList = FaultCacheModel::instance().getCacheList();
    qInfo() << "CvAlgEdgeDetection::characterDefectTraining" ;
    for (int i = 0; i < cacheList.size(); ++i) {
        // 获取当前元素的引用
        FaultCacheModel::FaultImageCacheItem& currentItem = cacheList[i];
        // 如果当前项尚未训练
        if (!currentItem.isTrain)
        {
            std::vector<int> vec = currentItem.ngcard.getDefects();
            // 如果找到 defect 37
            if (std::find(vec.begin(), vec.end(), 37) != vec.end())
            {
                // 设置为已训练
                currentItem.isTrain = true;
                currentProcess = i;
                // 显示处理过程
                qInfo() << "characterDefectTraining ImageRecord with id=" << currentProcess;
                FaultCacheModel::instance().displayEmChart(currentItem.id);
                // 一旦找到并处理，跳出循环
                return;
            }
        }
    }
    currentProcess = -1;
    emit characterDefectTrainingFinishSignal();
}


// Accumulate the current base frame's difference into the feature-diff V2
// knowledge base (element-wise maximum of all observed diffs).
void CvAlgEdgeDetection::addSpecialV2base()
{
    QWriteLocker locker(&specialV2mutex);
    qInfo() << "CvAlgEdgeDetection addSpecialV2base";


    // Feature-diff V2 must be enabled first (enbaleSpecialV2 fills
    // specialV2BaseMatGray); otherwise there is no base to accumulate into.
    if(specialV2BaseMatGray.empty())
    {
        ErrorDialogManager::instance().showNonBlockingError("添加知识库失败" , "请先激活该功能");
        return ;
    }
    // Scale the latest base frame to the knowledge base's working resolution
    // (same factor on both axes).
    cv::Mat LastbaseImageScale;
    cv::resize(LastbaseImage , LastbaseImageScale, cv::Size(),
               isSpecialV2Scale(), isSpecialV2Scale(), cv::INTER_LINEAR);

    // Per-pixel difference against the stored base; empty means the two
    // frames could not be related (see getSpecialDiff).
    cv::Mat diff = getSpecialDiff(LastbaseImageScale);
    if (diff.empty()) {
        ErrorDialogManager::instance().showNonBlockingError("添加知识库失败" , "无法找到对应关系");
        return ;
    }
    // Keep the running element-wise maximum as the tolerated-variation map.
    if(specialV2BaseBac.empty())
        specialV2BaseBac = diff;
    else
        cv::max(specialV2BaseBac, diff, specialV2BaseBac);
}

// Reset the feature-diff V2 knowledge base to the current preview frame,
// discarding the accumulated difference background and the edge map.
// NOTE(review): specialV2BaseMatGray is set here to imageRoteBase, whereas
// enbaleSpecialV2() derives it from a scaled+blurred copy — confirm the two
// representations are interchangeable for downstream consumers.
void CvAlgEdgeDetection::resetSpecialV2base()
{
    QWriteLocker locker(&specialV2mutex);
    specialV2BaseBac = cv::Mat();
    specialV2BaseMatGray = imageRoteBase;
    specialV2BaseMat = LastbaseImage;
    specialV2BaseEdge = cv::Mat();

}

// Enable "feature difference V2" diagnosis: build the reference image
// (scaled + median-blurred) and its grayscale version from the last previewed
// frame. (Method name keeps the original, misspelled public identifier so
// existing callers/connections keep working.)
void CvAlgEdgeDetection::enbaleSpecialV2()
{
    QWriteLocker locker(&specialV2mutex);
    qInfo() << "CvAlgEdgeDetection enbaleSpecialV2";
    if(imageRoteBase.empty())
    {
        ErrorDialogManager::instance().showNonBlockingError("启动知识库失败" , "请先预览图片");
        return;
    }

    cv::Mat filtereBase;
    cv::resize(LastbaseImage, filtereBase , cv::Size(),
               isSpecialV2Scale(), isSpecialV2Scale(), cv::INTER_LINEAR);
    // cv::medianBlur requires an odd aperture; bump even values up by one and
    // persist the corrected setting.
    if(isSpecialV2median() % 2 == 0)
    {
        setisSpecialV2median(isSpecialV2median()+1);
    }
    cv::medianBlur(filtereBase, specialV2BaseMat, isSpecialV2median());

    cv::cvtColor(specialV2BaseMat, specialV2BaseMatGray, cv::COLOR_BGR2GRAY);

    setisSpecialDiagnosisV2(true);
    ErrorDialogManager::instance().showNonBlockingInfo("特征差异V2", "开启成功，请根据情况是否需要增加额外标准");
}

/// Build and preview the dilated Canny edge map of the feature-diff V2 base
/// image, restricted to the user-selected ROI.
void CvAlgEdgeDetection::setEdegCany()
{
    qInfo() << "CvAlgEdgeDetection::setEdegCany";
    if(specialV2BaseMatGray.empty())
    {
        // BUG FIX: the original showed a dialog with empty title and body.
        ErrorDialogManager::instance().showNonBlockingInfo("设置边缘失败", "请先激活该功能");
        return;
    }

    // BUG FIX: adjustROI() returns the clamped rectangle without mutating its
    // argument; the original discarded the return value, so an out-of-bounds
    // ROI would reach the crop below unadjusted and could crash it.
    cv::Rect roi = specialRoi;
    roi = adjustROI(roi, specialV2BaseMatGray.size());
    cv::Mat specialEdge = specialV2BaseMatGray(roi);

    cv::Mat specialV2BaseEdge1 =  specialGetedgeCanny(specialEdge, isSpecialV2orbCanny1(), isSpecialV2orbCanny2() );
    // Thicken the edges slightly so small alignment jitter is tolerated.
    // (Removed unused local `erodedImage` from the original.)
    int kernelSize = 5;
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(kernelSize, kernelSize));
    cv::dilate(specialV2BaseEdge1, specialV2BaseEdge, kernel, cv::Point(-1, -1), 1); // one iteration

    // Pause capture and push the result to the main QML view for inspection.
    ImageProcessor::getInstance().setcaptureImgMode(ImageProcessor::CapPause);
    ImageProvider::getInstance().image2qml(specialV2BaseEdge, "Main");
}

// Store the user-drawn ROI for the feature-diff V2 stage.
// Rejects rectangles smaller than 10x10, and refuses to run unless the UI is
// currently on the CV-config / diff-V2 page.
void CvAlgEdgeDetection::setSpecialV2Roi(int x, int y, int wid, int hei)
{
    QWriteLocker locker(&specialV2mutex);
    qInfo() << "CvAlgEdgeDetection::setSpecialV2Roi cvSelection_:" ;
    if(wid < 10 || hei < 10)
    {
        LogError << "Fail to PreProcessor::setSpecialV2Roi width:" << wid << ",height:" << hei;
        ErrorDialogManager::instance().showNonBlockingError("设置差异错误", "尺寸太小");
        return;
    }
    // Only meaningful while configuring the SpecialDiagnosisV2 mode.
    if(!(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv && cvSelection_ == SpecialDiagnosisV2))
    {
        ErrorDialogManager::instance().showNonBlockingError("设置字符强调区失败","请回到配置页面->差异V2页面");
        return;
    }

    specialRoi = cv::Rect(x, y ,wid, hei);

}

// Clear the feature-diff V2 ROI. An empty rect means "whole image"
// (see adjustROI).
void CvAlgEdgeDetection::resetSpecialV2Roi()
{
    QWriteLocker locker(&specialV2mutex);
    qInfo() << "resetSpecialV2Roi ;" ;
    specialRoi = cv::Rect();
}

/// Thin wrapper around cv::Canny: returns the edge map of `image` using the
/// two hysteresis thresholds `thread1` / `thread2`.
cv::Mat CvAlgEdgeDetection::specialGetedgeCanny(cv::Mat image, int thread1, int thread2)
{
    cv::Mat edgeMap;
    cv::Canny(image, edgeMap, thread1, thread2);
    return edgeMap;
}





/// Per-pixel difference between `awaitImage` and the stored V2 base image.
/// Returns the channel-wise maximum of |await - base| as a single-channel
/// map, or an empty Mat when the knowledge base has not been initialised.
/// (Cleaned up: removed large blocks of dead, commented-out homography code,
/// and replaced the per-channel split+absdiff with one multichannel absdiff.)
cv::Mat CvAlgEdgeDetection::getSpecialDiff(cv::Mat awaitImage)
{
    if(specialV2BaseMatGray.empty())
    {
        qWarning() << "Fail to getSpecialDiff specialV2BaseMatGray is empty";
        return cv::Mat();
    }

    // Bring the candidate to the base resolution (homography alignment was
    // previously abandoned in favour of a plain resize).
    cv::Mat aligned;
    cv::resize(awaitImage, aligned, specialV2BaseMat.size());

    // Absolute difference across all channels at once.
    cv::Mat diffBGR;
    cv::absdiff(aligned, specialV2BaseMat, diffBGR);

    std::vector<cv::Mat> diffChannels;
    cv::split(diffBGR, diffChannels);

    // Merge the three channel differences into one map via per-pixel maximum.
    cv::Mat maxDiff;
    cv::max(diffChannels[0], diffChannels[1], maxDiff);
    cv::max(maxDiff, diffChannels[2], maxDiff);
    return maxDiff;
}


// Re-centre the detection tolerance windows around the values measured on the
// last inspected frame, so the windows follow slow mechanical/optical drift.
void CvAlgEdgeDetection::autoDetectReset()
{
    qInfo() << "Attempting to autoDetectReset";
    ImageRecord imageCard = AfterDetect::getInstance().lastCard;
    if(get_largeSmallEnable())
    {
        // Shift the upper-edge window by the drift since the last reset.
        float diff = imageCard.largeSmallLastTop - saveLargeSmallLastTop_;
        if (!std::isnan(diff)) { // only apply a sane (non-NaN) delta
            setlargeSmallUpperMax(largeSmallUpperMax() + diff);
            setlargeSmallUpperMin(largeSmallUpperMin() + diff);
            saveLargeSmallLastTop_ = imageCard.largeSmallLastTop;
        } else {
            LogDebug << "Invalid diff (NaN) for largeSmallLastTop. Skipping update.";
        }

        // Same adjustment for the lower-edge window.
        diff = imageCard.largeSmallLastBottom - saveLargeSmallLastBottom_;
        if (!std::isnan(diff)) { // only apply a sane (non-NaN) delta
            setlargeSmallLowerMax(largeSmallLowerMax() + diff);
            setlargeSmallLowerMin(largeSmallLowerMin() + diff);
            saveLargeSmallLastBottom_ = imageCard.largeSmallLastBottom;
        } else {
            LogDebug << "Invalid diff (NaN) for largeSmallLastBottom. Skipping update.";
        }
    }

    if(m_lengthDetectionEnable)
    {
        // Length-window auto-reset is currently disabled (kept for reference):
        // float diff = imageCard.lenthDistance - saveLengthDistance;
        // set_lengthDetectionMax(get_lengthDetectionMax() + diff);
        // set_lengthDetectionMin(get_lengthDetectionMin() + diff);
        // saveLengthDistance = imageCard.lenthDistance ;
    }
    if(diffEable())// && imgcard.isInROI)
    {
        // Refresh the embedding used by the difference model.
        setembedding();
    }
}

// Rebuild both large/small-edge tolerance windows as [last measurement ± diifNum].
// NOTE(review): the guard checks LargeSmallLastTop_/LargeSmallLastBottom_,
// but the window centres use saveLargeSmallLastTop_/saveLargeSmallLastBottom_.
// If those pairs of members can diverge, the guard does not protect the
// values actually used — confirm they are updated together.
void CvAlgEdgeDetection::setLargeSmallTotal(double diifNum)
{
    LogDebug << "Attempting to set setLargeSmallTotal" << diifNum;
    if(LargeSmallLastTop_ == 0 || LargeSmallLastBottom_ == 0)
    {
        ErrorDialogManager::instance().showNonBlockingError("设置大小边阈值错误", "请先进行一次检测");
        LogWarning << "fail setLargeSmallTotal LargeSmallLastTop_ == 0 || LargeSmallLastBottom_ == 0";
        return;
    }
    setlargeSmallUpperMax(saveLargeSmallLastTop_+ diifNum);
    setlargeSmallUpperMin(saveLargeSmallLastTop_- diifNum);
    setlargeSmallLowerMax(saveLargeSmallLastBottom_+ diifNum);
    setlargeSmallLowerMin(saveLargeSmallLastBottom_- diifNum);

}

void CvAlgEdgeDetection::calibrationPx2MM()
{
    setpx2mm(px2mm() * 100.0f / lengthDistance());
    qInfo() << "set CvAlgEdgeDetection calibrationPx2MM :" << px2mm();
}

/// Launch a background worker that recomputes the embedding of the current
/// base image. Returns 0 when the worker was started, -1 when one is
/// already running.
int CvAlgEdgeDetection::setembedding()
{
    // BUG FIX: the original did a relaxed load followed by a separate store —
    // two callers could both observe 0 and both launch a worker. Claim the
    // flag atomically instead (0 -> 1); failure means someone else owns it.
    if (!isThreadRunning.testAndSetOrdered(0, 1)) {
        LogDebug << "Thread is already running, skipping execution.";
        return -1; // another worker is active
    }

    // Run the (potentially slow) embedding computation off the caller thread.
    QtConcurrent::run([this]() {
        LogDebug << "Attempting to run CvAlgEdgeDetection::setembedding";
        resdiff.setembedding(LastbaseImage);

        // Tell listeners the model state changed.
        emit isInitchanged();

        // Release the "running" flag so the next request can start.
        isThreadRunning.storeRelaxed(0);
    });

    return 0; // worker launched
}

// Load the residual-difference model, enable diff-based detection and notify
// listeners. Always returns 0 (no failure path is reported here).
int CvAlgEdgeDetection::loadResDiffModel()
{
    LogDebug << "Attemping ting to CvAlgEdgeDetection::loadResDiffModel";
    resdiff.loadModel();
    setdiffEable(true);
    emit isInitchanged();
    return 0;
}

/// Configure the length-detection window: centre it on the reference length
/// and allow a symmetric tolerance of `total` around it.
/// NOTE: renamed the first parameter from `std` to `stdLength` — naming a
/// parameter `std` shadows the `std` namespace inside the function body,
/// which breaks any qualified call such as `std::abs`.
void CvAlgEdgeDetection::setTotalLengthDistance(double stdLength , double total)
{
    LogDebug << "Attempting to setTotalLengthDistance total" << total << "std" << stdLength;
    set_lengthStd(stdLength);
    set_lengthRance(total);
    set_lengthDetectionMax(stdLength + total);
    set_lengthDetectionMin(stdLength - total);
}

// Rebuild the tilt-angle tolerance window as [saved angle ± total].
void CvAlgEdgeDetection::setTotaltiltAngle(float total)
{
    LogDebug << "Attempting to setTotaltiltAngle total" << total ;
    set_tiltAngleMax(saveTiltAngle_ + total);
    set_tiltAngleMin(saveTiltAngle_ - total);
}

/// Register a new character-emphasis region (x, y, wid, hei): stores the
/// rectangle, a reference crop from the current base image, and default
/// binarization parameters — keeping all parallel lists in sync.
void CvAlgEdgeDetection::addEmphasize(int x, int y, int wid, int hei)
{
    LogDebug << "CvAlgEdgeDetection::addEmphasize cvSelection_:" << cvSelection_;
    if(wid < 10 || hei < 10)
    {
        LogError << "Fail to PreProcessor::addEmphasize width:" << wid << ",height:" << hei;
        ErrorDialogManager::instance().showNonBlockingError("设置字符错误", "尺寸太小");
        return;
    }
    QMutexLocker locker(&emphasmutex);
    // Only allowed while the UI is on the CV-config / character-emphasis page.
    if(!(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv && cvSelection_ == characterEmphasis))
    {
        ErrorDialogManager::instance().showNonBlockingError("设置字符强调区失败","请回到配置页面->字符强调页面");
        return;
    }

    if(imageRoteBase.empty())
    {
        ErrorDialogManager::instance().showNonBlockingError("设置字符强调区失败","请先点击预览或者单次");
        return;
    }

    // BUG FIX: validate the ROI against the base image before cropping —
    // cv::Mat::operator()(cv::Rect) throws on an out-of-bounds rectangle.
    cv::Rect roi(x, y, wid, hei);
    if ((roi & cv::Rect(0, 0, imageRoteBase.cols, imageRoteBase.rows)) != roi)
    {
        LogError << "Fail to addEmphasize: ROI out of image bounds";
        ErrorDialogManager::instance().showNonBlockingError("设置字符错误", "区域超出图像范围");
        return;
    }

    QVariantList emphasizeArea = {};
    emphasizeArea  << x << y << wid << hei;
    QVariantList tempList = m_emphasizeAreaList;
    tempList.append(QVariant::fromValue(emphasizeArea));
    qDebug() <<" CvAlgEdgeDetection::addEmphasize :" <<
        QVariant::fromValue(tempList); // (removed stray double semicolon)

    // Keep the reference crop and the per-region binarization settings in
    // lock-step with the area list.
    emphasizeImgaeList.append(imageRoteBase(roi).clone());
    QVariantList tempBianryList = emphasizeBinaryList();
    tempBianryList.append(70); // default manual threshold for a new region
    setemphasizeBinaryList(tempBianryList);

    QVariantList tempservoList = emphasizeBinaryliserveList();
    tempservoList.append(get_emphasizeBinaryliserve());
    setemphasizeBinaryliserveList(tempservoList);

    setemphasizeAreaList(tempList);
}

// Remove the emphasis region at `index`, keeping all parallel lists (areas,
// reference crops, binarization thresholds, invert flags) in sync. If the
// area list and the crop list have diverged, everything is cleared instead as
// a recovery measure.
void CvAlgEdgeDetection::delEmphasize(int index)
{
    qDebug() <<" CvAlgEdgeDetection::delEmphasize index: " << index;
    QMutexLocker locker(&emphasmutex);
    if(emphasizeImgaeList.isEmpty() || index < 0 || index > emphasizeImgaeList.size()-1)
    {
        ErrorDialogManager::instance().showNonBlockingError("删除强调区错误","索引错误");
        return ;
    }
    QVariantList tempList = m_emphasizeAreaList;
    if(tempList.size() != emphasizeImgaeList.size())
    {
        // Lists out of sync — wipe both rather than delete the wrong entry.
        ErrorDialogManager::instance().showNonBlockingError("强调区错误","清空所有的区域");
        emphasizeImgaeList.clear();
        tempList = m_emphasizeAreaList;
        tempList.clear();
        setemphasizeAreaList(tempList);
        qDebug() <<tempList.size() << "!=" << emphasizeImgaeList.size() <<
            " CvAlgEdgeDetection::delEmphasize clear All m_emphasizeAreaList: " ;
        return ;

    }
    tempList = m_emphasizeAreaList;
    tempList.removeAt(index);
    setemphasizeAreaList(tempList);
    emphasizeImgaeList.removeAt(index);
    // NOTE(review): emphasizeImgaeListLast and emPhasizeBinaryList are not
    // size-checked above; if either is shorter than emphasizeImgaeList,
    // removeAt(index) would hit an invalid index — confirm they always track.
    emphasizeImgaeListLast.removeAt(index);
    emPhasizeBinaryList.removeAt(index);
    QVariantList tempBianryList = emphasizeBinaryList();
    tempBianryList.removeAt(index);
    setemphasizeBinaryList(tempBianryList);

    QVariantList tempservoList = emphasizeBinaryliserveList();
    tempservoList.removeAt(index);
    setemphasizeBinaryliserveList(tempservoList);


    qDebug() <<" CvAlgEdgeDetection::delEmphasize m_emphasizeAreaList: " << m_emphasizeAreaList;
}

/// Store the user-drawn ROI for length measurement.
/// Rejects rectangles smaller than 10x10 and requires the UI to be on the
/// CV-config / length-detection page.
void CvAlgEdgeDetection::setLengthROI(int x, int y, int wid, int hei)
{
    if(wid < 10 || hei < 10)
    {
        LogError << "Fail to PreProcessor::setLengthROI width:" << wid << ",height:" << hei;
        ErrorDialogManager::instance().showNonBlockingError("设置长度ROI错误", "尺寸太小");
        return;
    }
    QMutexLocker locker(&lenthmutex);
    if(!(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv && cvSelection_ == LengthDetection))
    {
        // BUG FIX: the dialog title was copy-pasted from the character-emphasis
        // handler ("设置字符强调区失败") although this is the length ROI.
        ErrorDialogManager::instance().showNonBlockingError("设置长度ROI失败","请回到配置页面->长度检测页面");
        return;
    }
    QVariantList lenthROI({x, y, wid, hei});
    qInfo() << "setLengthROI : " << lenthROI;
    set_LengthROI(lenthROI);
}

// Clear the length-measurement ROI (an empty list disables it).
void CvAlgEdgeDetection::resetLengthROI()
{
    QMutexLocker locker(&lenthmutex);
    QVariantList lenthROI;
    qInfo() << "resetLengthROI " ;
    set_LengthROI(lenthROI);
    //updatelogoSizeEnable(false);
}

void CvAlgEdgeDetection::setlogoSizeROI(int x, int y, int wid, int hei)
{
    LogDebug << "CvAlgEdgeDetection::setlogoSizeROI cvSelection_:" << cvSelection_;
    if(wid < 10 || hei < 10)
    {
        LogError << "Fail to PreProcessor::addEmphasize width:" << wid << ",height:" << hei;
        ErrorDialogManager::instance().showNonBlockingError("设置码数标错误", "尺寸太小");
        return;
    }
    QMutexLocker locker(&logoSizeMutex);
    if(!(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv && cvSelection_ == LogoSize))
    {
        ErrorDialogManager::instance().showNonBlockingError("设置分码区域失败","请回到配置页面->分码页面");
        return;
    }

    if(imageRoteBase.empty())
    {
        ErrorDialogManager::instance().showNonBlockingError("设置码数标错误","请先点击预览或者单次");
        return;
    }


    logoSizeBase.clear();
    logoSizeBase.setRoi(x , y,  wid, hei);
    qDebug() << "imageRoteBase : wid:" << imageRoteBase.size().width << "hei :" << imageRoteBase.size().height;
    qDebug() <<" CvAlgEdgeDetection::setlogoSizeROI :" << x << y << wid << hei;
    logoSizeBase.setImage(imageRoteBase(logoSizeBase.getRoi()));
    cv::Rect box;
    logoSizeBinaryBase = extractLogoSizeFeatures(imageRoteBase , logoSizeBase , box).clone();
    updatelogoSizeEnable(true);
}

// Disable logo-size checking and drop its reference data.
void CvAlgEdgeDetection::restlogoSize()
{
    QMutexLocker locker(&logoSizeMutex);
    logoSizeBase.clear();
    updatelogoSizeEnable(false);

}

// Re-run logo-size feature extraction on the current base frame and refresh
// logoSizeBinaryBase. The trailing `true` flag presumably switches
// extractLogoSizeFeatures into a debug/preview mode — confirm its meaning.
void CvAlgEdgeDetection::testlogoSize()
{
    if(imageRoteBase.empty() || logoSizeBase.isEmpty())
    {
        qWarning() << "Fail CvAlgEdgeDetection::testlogoSize() imageRoteBase.empty()";
        return;
    }
    cv::Rect logoSizeBox;
    logoSizeBinaryBase = extractLogoSizeFeatures(imageRoteBase , logoSizeBase , logoSizeBox , true).clone();
}

// Process-wide singleton accessor (Meyers singleton: constructed on first
// use; initialization is thread-safe since C++11).
CvAlgEdgeDetection &CvAlgEdgeDetection::getInstance()
{
    static CvAlgEdgeDetection _instance;
    return _instance;
}

void CvAlgEdgeDetection::initialize()
{
    // Intentionally empty. NOTE(review): all visible setup happens in the
    // constructor — confirm this interface hook really needs no work.
}

// When tilt-angle checking is enabled, measure the tilt of the given binary
// image and cache it in currenttiltAngle_.
void CvAlgEdgeDetection::counttiltAngle(const cv::Mat &daryimg)
{
    if(m_tiltAngleEnable)
    {
        currenttiltAngle_ = fitAngleLimited(daryimg);
        LogTrack << "CvAlgEdgeDetection::counttiltAngle:" << currenttiltAngle_;
    }
}

/// Crop `image` to the horizontal extent of `roi`, while deliberately using
/// the image's FULL vertical extent (the ROI's y/height are overridden).
/// Returns an empty Mat if the requested ROI lies outside the image.
cv::Mat CvAlgEdgeDetection::cropBoundaryAndAdjustROI(const cv::Mat& image, cv::Rect roi)
{
    if (roi.x < 0 || roi.y < 0 || roi.x + roi.width > image.cols || roi.y + roi.height > image.rows) {
        std::cerr << "Invalid ROI: ROI is out of image bounds." << std::endl;
        return cv::Mat();
    }

    // Expand the ROI vertically to cover the entire image height.
    roi.y = 0;
    roi.height = image.rows;

    // (Removed the original's second bounds check: after the validation
    // above, roi.x + roi.width <= cols already holds and roi.height == rows,
    // so the adjusted ROI can never be out of range.)
    return image(roi);
}
/// Run Canny edge detection on `image` with the given hysteresis thresholds
/// and return the resulting binary edge map.
cv::Mat CvAlgEdgeDetection::edgeDetectionAndBinarize(const cv::Mat& image, int lowThreshold, int highThreshold) {
    LogDebug << "Attemting CvAlgEdgeDetection::edgeDetectionAndBinarize : " << lowThreshold << "Highe: "<< highThreshold;
    cv::Mat edges;
    cv::Canny(image, edges, lowThreshold, highThreshold);
    return edges;
}

// Binarize a grayscale image. In auto mode Otsu's method chooses the
// threshold, and the chosen value is published via boundaryExtraction1_
// (with a change signal) so the UI can display it. In manual mode the
// supplied threshold is used as-is.
cv::Mat CvAlgEdgeDetection::binarizeImage(const cv::Mat& image,int manualThreshold ,bool isAuto) {

    LogTrack << "Attemting CvAlgEdgeDetection::binarizeImage" << manualThreshold;
    cv::Mat binaryImage;
    // Grayscale conversion is assumed to have happened upstream:
    // if (image.channels() == 3) {
    //     cv::cvtColor(image, grayImage, cv::COLOR_BGR2GRAY);
    // } else {
    //     grayImage = image.clone();
    // }

    if (isAuto) {
        // Automatic thresholding using Otsu's method; cv::threshold returns
        // the threshold it picked.
        int boundaryExtraction = cv::threshold(image, binaryImage, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
        // Only emit when the value actually changed.
        if (boundaryExtraction1_ != boundaryExtraction) {
            boundaryExtraction1_ = boundaryExtraction ;
            emit boundaryExtraction1Change();
        }
    } else  {
        // Manual thresholding with the caller-supplied value.
        cv::threshold(image, binaryImage, manualThreshold, 255, cv::THRESH_BINARY);
    }

    return binaryImage;
}

/// Normalise the polarity of a binary image using its border rows: if white
/// (non-zero) pixels dominate BOTH the first and last rows, foreground and
/// background are considered swapped and a bitwise-NOT copy is returned;
/// otherwise the input is returned unchanged.
cv::Mat CvAlgEdgeDetection::reverseImageIfNeeded(const cv::Mat& inputImage) {
    const int cols = inputImage.cols;
    const int lastRow = inputImage.rows - 1;

    // White-pixel counts of the two border rows.
    const int topWhite = cv::countNonZero(inputImage.row(0));
    const int bottomWhite = cv::countNonZero(inputImage.row(lastRow));

    // "Mostly white" = more 255s than 0s in the row.
    const bool topMostlyWhite = topWhite > cols - topWhite;
    const bool bottomMostlyWhite = bottomWhite > cols - bottomWhite;

    if (topMostlyWhite && bottomMostlyWhite) {
        cv::Mat inverted;
        cv::bitwise_not(inputImage, inverted);
        return inverted;
    }
    return inputImage;
}

/// Scan each column of a binary image for the first (top) and last (bottom)
/// white pixel, fit a line through each edge, and report the straightness
/// residual of both edges. Columns with extreme deviation beyond
/// `maxDeviation` are returned as defect point pairs (max, min) per edge.
/// Outputs: `defects`, `topPoints`, `bottomPoints`, the two fitted lines and
/// the two residuals.
void CvAlgEdgeDetection::detectEdgeDefects(const cv::Mat& image,
                                           int maxDeviation,
                                           std::vector<cv::Point> &defects,
                                           std::vector<cv::Point> &topPoints,
                                           std::vector<cv::Point> &bottomPoints,
                                           cv::Vec4f& topFitLine,
                                           cv::Vec4f & bottomFitLine,
                                           double &topdiff,double &bottomdiff) {
    // (Removed unused locals `grayImage`, `topEdges`, `bottomEdges` from the
    // original implementation.)
    int maxTopY = -1, minTopY = image.rows;
    int maxBottomY = -1, minBottomY = image.rows;
    cv::Point maxTopPoint, minTopPoint, maxBottomPoint, minBottomPoint;

    topPoints.clear();
    bottomPoints.clear();
    for (int x = 0; x < image.cols; ++x) {
        // Top edge: first white pixel scanning down from the top.
        for (int y = 0; y < image.rows; ++y) {
            if (image.at<uchar>(y, x) == 255) {
                topPoints.push_back(cv::Point(x, y));
                if (y > maxTopY) {
                    maxTopY = y;
                    maxTopPoint = cv::Point(x, y);
                }
                if (y < minTopY) {
                    minTopY = y;
                    minTopPoint = cv::Point(x, y);
                }
                break;
            }
        }
        // Bottom edge: first white pixel scanning up from the bottom.
        for (int y = image.rows - 1; y >= 0; --y) {
            if (image.at<uchar>(y, x) == 255) {
                bottomPoints.push_back(cv::Point(x, y));
                if (y > maxBottomY) {
                    maxBottomY = y;
                    maxBottomPoint = cv::Point(x, y);
                }
                if (y < minBottomY) {
                    minBottomY = y;
                    // BUG FIX: the original assigned maxBottomPoint here, so
                    // the minimum bottom-edge point was never recorded and
                    // the reported defect pair for the bottom edge was wrong.
                    minBottomPoint = cv::Point(x, y);
                }
                break;
            }
        }
    }

    // Fit a least-squares line through each detected edge.
    if (!topPoints.empty()) {
        cv::fitLine(topPoints, topFitLine, cv::DIST_L2, 0, 0.01, 0.01);
    }
    if (!bottomPoints.empty()) {
        cv::fitLine(bottomPoints, bottomFitLine, cv::DIST_L2, 0, 0.01, 0.01);
    }

    // Straightness residual of each edge relative to its fitted line.
    topdiff = calculateStraightness(topPoints, topFitLine, maxTopPoint, minTopPoint);
    bottomdiff = calculateStraightness(bottomPoints, bottomFitLine, maxBottomPoint, minBottomPoint);

    defects.clear();
    if (bottomdiff > maxDeviation)
    {
        defects.push_back(maxBottomPoint);
        defects.push_back(minBottomPoint);
    }
    if (topdiff > maxDeviation)
    {
        defects.push_back(maxTopPoint);
        defects.push_back(minTopPoint);
    }
    LogTrack << "CvAlgEdgeDetection::detectEdgeDefects " << topPoints.size();
}

// Compare one emphasis region against its reference crop: both images are
// histogram-equalized, thresholded with the region's configured value
// (optionally inverted), small blobs are removed, the live image is dilated
// for positional tolerance, and the result is base - dilated (pixels present
// in the reference but missing live).
// NOTE(review): cv::equalizeHist writes in place, and cv::Mat is passed by
// value but SHARES pixel data — the caller's buffers are modified here;
// confirm callers pass clones.
cv::Mat CvAlgEdgeDetection::emphasePreprocess(cv::Mat image , cv::Mat imageBase , bool isShowMain)
{
    // Guard against a stale/invalid region index.
    if( emphasizeIndex()  < 0  || emphasizeIndex() >= emphasizeAreaList().size())
    {
        qWarning() <<  " emphasizeIndex() : " << emphasizeIndex()  << " emphasizeAreaList().size():" << emphasizeAreaList().size();
        return cv::Mat();
    }
    cv::Mat alignedBinary ,  emphaseBaseBinary;
    std::vector<std::vector<cv::Point>> contours;
    // Live image: equalize -> threshold (-> optional invert) -> drop small contours.
    cv::equalizeHist(image , image);
    cv::threshold(image, alignedBinary, emphasizeBinaryList()[emphasizeIndex()].toInt(), 255, cv::THRESH_BINARY);
    if(emphasizeBinaryliserveList()[emphasizeIndex()].toBool())
        cv::bitwise_not(alignedBinary, alignedBinary);
    cv::findContours(alignedBinary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); ++i) {
        if (cv::contourArea(contours[i]) < m_emphasizeBinaryfilter) {
            cv::drawContours(alignedBinary, contours, static_cast<int>(i), cv::Scalar(0), cv::FILLED);
        }
    }

    contours.clear();
    // Reference crop: identical pipeline.
    cv::equalizeHist(imageBase , imageBase);
    cv::threshold(imageBase, emphaseBaseBinary, emphasizeBinaryList()[emphasizeIndex()].toInt(), 255, cv::THRESH_BINARY);
    if(emphasizeBinaryliserveList()[emphasizeIndex()].toBool())
        cv::bitwise_not(emphaseBaseBinary, emphaseBaseBinary);
    cv::findContours(emphaseBaseBinary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); ++i) {
        if (cv::contourArea(contours[i]) <  m_emphasizeBinaryfilter) {
            cv::drawContours(emphaseBaseBinary, contours, static_cast<int>(i), cv::Scalar(0), cv::FILLED);
        }
    }
    // Dilation kernel must be odd-sized.
    int kernelInt = m_emphasizeBinaryKernel;
    if(kernelInt%2 == 0)
    {
        kernelInt += 1 ;
    }
    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(kernelInt, kernelInt)); // size is configurable

    cv::Mat result , dilatedImage;
    cv::dilate(alignedBinary, dilatedImage, kernel);

    if (emphaseBaseBinary.size() != dilatedImage.size() ||
        emphaseBaseBinary.type() != dilatedImage.type()) {
        // Log the mismatching geometry/type before bailing out.
        qWarning() << "Size or type mismatch detected:";
        qWarning() << "emphaseBaseBinary - Size:" << emphaseBaseBinary.size().width << "x"
                   << emphaseBaseBinary.size().height << ", Type:" << emphaseBaseBinary.type();
        qWarning() << "dilatedImage - Size:" << dilatedImage.size().width << "x"
                   << dilatedImage.size().height << ", Type:" << dilatedImage.type();
        return cv::Mat();
    }
    cv::subtract(emphaseBaseBinary, dilatedImage , result);

    // Optionally mirror intermediate results to the QML views while on the
    // character-emphasis configuration page.
    if(isShowMain  && ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv && cvSelection_ == characterEmphasis)
    {
        ImageProcessor::getInstance().setcaptureImgMode(ImageProcessor::CapPause);
        ImageProvider::getInstance().image2qml(result,"Box2");
        ImageProvider::getInstance().image2qml(image,"Box1");
        ImageProvider::getInstance().image2qml(alignedBinary, "Main");
    }
    return result;
}

bool CvAlgEdgeDetection::process(ImageRecord &imgcard)
{

    bool isDrawOutput = (ImageProcessor::getInstance().selectedStep() == ImageProcessor::MeauCv);
    QString outtext;
    outtext = "数理分析:\n";
    cv::Mat imgCapGray= imgcard.getCaptureImageGray().clone();
    cv::Mat imgCap_ = imgcard.getStepImage(Capture).clone();
    cv::Mat imgDraw = imgcard.getdrawPreoProcessImage();
    cv::Mat imgAi = imgcard.getStepImage(Detect);
    cv::Rect2d monitorRect = imgcard.getMonitorRoi();
    cv::Mat imgCapDraw = cropBoundaryAndAdjustROI(imgCap_, monitorRect);
    cv::Mat imgPreProcess_ = imgcard.getStepImage(Reprogress).clone();


    if (imgCapGray.empty() || imgDraw.empty()) {
        outtext += "没有启动[未检测到灰度图]\n";
        ImageProcessor::getInstance().setdetectDetail(outtext);
        LogError << "CvAlgEdgeDetection imgCapGray ||imgDrawGray   image is empty";
        imgcard.setStepImage(StepProcess::Cv, imgDraw);
        //imgcard.setState(ImageRecord::Fail);
        return false;
    }
    imgcard.setStepImage(StepProcess::Cv, imgDraw);

    if(isDrawOutput)
    {
        imgCapcache = imgCapGray.clone();
        monitorRectCache = monitorRect;
    }
    ParamValue.getParameter("cvInspection", "boundaryMethods").toInt();

    outtext += QString("图像二值化算法[%1]\n")
                   .arg(binarayMethonName[ParamValue.getParameter("cvInspection", "boundaryMethods").toInt()].toString());
    cv::Mat boundaryBinaryImg = preprocessImage(imgCapGray, monitorRect);

    // 定义缺陷点、边缘点和拟合线的容器
    std::vector<cv::Point> defects, topPoints, bottomPoints;
    cv::Vec4f topFitLine, bottomFitLine;
    double topdiff,bottomdiff;
    int maxDeviation = ParamValue.getParameter("cvInspection","boundaryThreshold").toInt();
    CvAlgEdgeDetection::detectEdgeDefects(boundaryBinaryImg, maxDeviation, defects,
                                          topPoints, bottomPoints, topFitLine,
                                          bottomFitLine,topdiff,bottomdiff);
    outtext += QString("边缘平展度 上：%1,下：%2 [%3]\n")
                   .arg(int(topdiff))
                   .arg(int(bottomdiff))
                   .arg(boundaryThreshold_);

    LogTrack << "CvAlgEdgeDetection defects num: " << defects.size();


    for (const auto& point : defects) {
        if (isDrawOutput && cvSelection_ == EdgeDetection) {
            cv::circle(imgCapDraw, point, 20, cv::Scalar(0, 0, 255), 2);
        } else {
            cv::Point point_Temp(point);
            point_Temp.y = point.y - monitorRect.y;
            cv::circle(imgCapDraw, point_Temp, 20, cv::Scalar(0, 0, 255), 2);
        }
    }
    if (isDrawOutput) {
        if(topdiff > bottomdiff)
            changecurentBoundaryValue(topdiff);
        else
            changecurentBoundaryValue(bottomdiff);

        if(cvSelection_ == EdgeDetection)
            drawDefectsAndEdges(imgCapDraw, defects, topPoints, bottomPoints, topFitLine,
                                bottomFitLine, isDrawOutput, monitorRect);
        if(cvSelection_ == LargeAsmallDetection)
            drawDefectsAndEdges(imgDraw, defects, topPoints, bottomPoints, topFitLine,
                                bottomFitLine, isDrawOutput, monitorRect);
    }
    else
    {
        drawDefectsAndEdges(imgAi, defects, topPoints, bottomPoints, topFitLine,
                            bottomFitLine, isDrawOutput, monitorRect);
    }


    if(get_edgeEnable() && (topdiff > boundaryThreshold_ || bottomdiff > boundaryThreshold_))
    {
        imgcard.setDefect(29);
    }


    if(isDrawOutput && cvSelection_ == EdgeDetection)
    {

        ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
        ImageProcessor::getInstance().setdetectDetail(outtext);
        LogTrack << "CvAlgEdgeDetection draw" << boundaryBinaryImg.size;
        LogTrack << "CvAlgEdgeDetection imgCapDraw " << imgCapDraw.size;
        ImageProvider::getInstance().image2qml(boundaryBinaryImg,"Box1");
        //ImageProvider::getInstance().image2qml(img_src,"Box2");
        ImageProvider::getInstance().image2qml(imgCapDraw, "Main");
        imgcard.setIsSkipDisplay(true);
        return true;
    }


    if(isDrawOutput)
        //processROI(imgDraw, imgcard, topFitLine, bottomFitLine, monitorRect);
        processROI(imgDraw, imgcard, topFitLine, bottomFitLine, monitorRect ,
                   largeSmallUpperMax_ - LargeSmallLastTop_,
                   largeSmallUpperMin_ - LargeSmallLastTop_ ,
                   largeSmallLowerMax_ - LargeSmallLastBottom_,
                   largeSmallLowerMin_ - LargeSmallLastBottom_ , true);
    else
        processROI(imgAi, imgcard, topFitLine, bottomFitLine, monitorRect ,
                   largeSmallUpperMax_ - LargeSmallLastTop_,
                   largeSmallUpperMin_ - LargeSmallLastTop_ ,
                   largeSmallLowerMax_ - LargeSmallLastBottom_,
                   largeSmallLowerMin_ - LargeSmallLastBottom_);
    float totalLargeSmall  = LargeSmallLastTop_ + LargeSmallLastBottom_;
    LargeSmallLastTop_ = (LargeSmallLastTop_ / totalLargeSmall)*100;
    LargeSmallLastBottom_ = (LargeSmallLastBottom_ / totalLargeSmall)*100;
    outtext += QString("大小边 -上：%1,[阈值%2~%3]\n")
                   .arg(LargeSmallLastTop_)
                   .arg(largeSmallUpperMin_)
                   .arg(largeSmallUpperMax_);
    outtext += QString("大小边 -下：%1,[阈值%2~%3]\n")
                   .arg(LargeSmallLastBottom_)
                   .arg(largeSmallLowerMin_)
                   .arg(largeSmallLowerMax_);
    if(get_largeSmallEnable())
    {
        if(LargeSmallLastTop_ > largeSmallUpperMax_)
            imgcard.setDefect(30);
        if(LargeSmallLastTop_ < largeSmallUpperMin_)
            imgcard.setDefect(31);
        if(LargeSmallLastBottom_ > largeSmallLowerMax_)
            imgcard.setDefect(32);
        if(LargeSmallLastBottom_ < largeSmallLowerMin_)
            imgcard.setDefect(33);
    }
    imgcard.largeSmallLastTop = LargeSmallLastTop_;
    imgcard.largeSmallLastBottom = LargeSmallLastBottom_;
    if(saveLargeSmallLastTop_ == 0)
        saveLargeSmallLastTop_ = LargeSmallLastTop_;
    if(saveLargeSmallLastBottom_ == 0)
        saveLargeSmallLastBottom_ = LargeSmallLastBottom_;

    if (isDrawOutput && cvSelection_ == LargeAsmallDetection) {
        saveLargeSmallLastTop_ = LargeSmallLastTop_;
        saveLargeSmallLastBottom_ = LargeSmallLastBottom_;
        ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
        ImageProcessor::getInstance().setdetectDetail(outtext);
        LogTrack << "CvAlgEdgeDetection draw " << boundaryBinaryImg.size;
        ImageProvider::getInstance().image2qml(imgDraw, "Main");
        imgcard.setIsSkipDisplay(true);
        return true;
    }

    //旋转识别图像
    cv::Mat imageRote = rotateImageByFitLine(imgPreProcess_,topFitLine);
    imgcard.imageRote = imageRote.clone();
    cv::Mat imageRoteDraw  = imageRote.clone();
    cv::Mat imgPrecessRolray;
    LastbaseImage = imageRote.clone();
    cv::cvtColor(imageRote, imgPrecessRolray, cv::COLOR_BGR2GRAY);
    imageRoteBase = imgPrecessRolray;
    //字符强调
    {
        QMutexLocker locker(&emphasmutex);
        int emphaseDefect = -1;
        cv::Mat emphaseShowImage ;
        cv::Mat emphaseShowImage2 ;
        cv::Mat emphaseImage;
        int emphasizeMax = 0;

        QVector<cv::Mat> imageList;
        QVector<QPoint> imageDrawROI;
        QVector<cv::Mat> binaryList;
        binaryList.clear();
        if(m_emphasizeEnable &&  emphasizeImgaeList.size() )
        {
            for(int i=0 ;i < emphasizeImgaeList.size();i++)
            {
                QVariantList roiList = m_emphasizeAreaList[i].toList();
                cv::Rect roi(roiList[0].toInt(), roiList[1].toInt(),
                             roiList[2].toInt(), roiList[3].toInt());
                //cv::Mat  emphaseImage = imgPrecessRolray(roi);

                cv::Mat  emphaseBase = emphasizeImgaeList[i];

                //搜索最接近框
                int searchRange = get_alignmentRange();
                cv::Mat result , preresult;
                cv::Rect searchArea(std::max(roi.x - searchRange, 0),
                                    std::max(roi.y - searchRange, 0),
                                    std::min(roi.width + 2 * searchRange, imgPrecessRolray.cols - roi.x),
                                    std::min(roi.height + 2 * searchRange, imgPrecessRolray.rows - roi.y));
                cv::matchTemplate(imgPrecessRolray(searchArea), emphaseBase, result, cv::TM_SQDIFF);

                double minVal, maxVal;
                cv::Point minLoc, maxLoc;
                cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
                // 计算最佳ROI
                cv::Rect bestRoi(searchArea.x + minLoc.x, searchArea.y + minLoc.y, roi.width, roi.height);
                // 使用最佳roi进行后续处理
                emphaseImage = imgPrecessRolray(bestRoi);
                //差异检测
                if(isDrawOutput)
                {
                    if (i >= 0) {
                        if (i < emphaseImageLast.size()) {
                            // 索引 i 已存在，进行修改
                            emphaseImageLast[i] = emphaseImage.clone();
                        } else {
                            // 索引 i 不存在，扩展 QVector 并添加新元素
                            emphaseImageLast.resize(i + 1); // 扩展 QVector 大小
                            emphaseImageLast[i] = emphaseImage.clone(); // 新建并添加
                        }
                    } else {
                        qWarning() << "Invalid index:" << i;
                    }
                }
                // 第一步：使用ECC（增强相关系数）方法进行图像配准
                cv::Mat alignedImage;
                if( get_perspectiveAlignment())
                {
                    cv::Mat warp_matrix = cv::Mat::eye(2, 3, CV_32F); // 仿射变换矩阵
                    try {
                        cv::findTransformECC(emphaseBase, emphaseImage, warp_matrix, cv::MOTION_AFFINE);
                        // 使用计算出的变换矩阵对图像进行仿射变换
                        cv::warpAffine(emphaseImage, alignedImage, warp_matrix, emphaseBase.size(), cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
                    }catch (const cv::Exception& e){
                        alignedImage = emphaseImage;
                    }
                }else
                {
                    alignedImage = emphaseImage;
                }
                imageDrawROI.append(QPoint(searchArea.x + minLoc.x , searchArea.y + minLoc.y));
                imageList.append(alignedImage.clone());

                if(m_emphasizeMode == 0)
                {
                    // 第二步：在配准后计算绝对差异
                    cv::Mat diff;
                    cv::absdiff(alignedImage, emphaseBase, diff);

                    int borderSize = 5; // 定义边框大小
                    cv::Rect innerRoi(borderSize, borderSize, diff.cols - 2 * borderSize, diff.rows - 2 * borderSize);
                    cv::Mat diffCropped = diff(innerRoi);

                    // 创建一个与原始图像相同大小的零矩阵
                    cv::Mat diffPadded = cv::Mat::zeros(diff.size(), diff.type());

                    int xOffset = (diffPadded.cols - diffCropped.cols) / 2;
                    int yOffset = (diffPadded.rows - diffCropped.rows) / 2;

                    // 将裁剪后的图像放置在中心位置
                    diffCropped.copyTo(diffPadded(cv::Rect(xOffset, yOffset, diffCropped.cols, diffCropped.rows)));

                    cv::Mat gradMag ,  preresult;
                    if(get_gradientVariation())
                    {
                        // 第四步：应用阈值以获取二值图像（根据需要调整阈值）
                        cv::Mat gradX, gradY;
                        cv::Sobel(diffCropped, gradX, CV_32F, 1, 0, 3); // X方向梯度
                        cv::Sobel(diffCropped, gradY, CV_32F, 0, 1, 3); // Y方向梯度
                        // 计算梯度幅值

                        cv::magnitude(gradX, gradY, gradMag);
                        gradMag.convertTo(gradMag, CV_8U);
                    }else
                    {
                        gradMag = diffCropped;
                    }
                    //cv::normalize(gradMag, emphaseShowImage2, 0, 255, cv::NORM_MINMAX, CV_8U);
                    emphaseShowImage2 = gradMag;

                    // 根据梯度幅值进行阈值操作（可选，根据需要设置阈值）
                    cv::Mat thresholded;
                    cv::threshold(gradMag, thresholded, m_emphasizeRecthreshold, 255, cv::THRESH_BINARY);


                    // cv::threshold(diffCropped, thresholded, m_emphasizeRecthreshold, 255, cv::THRESH_BINARY);

                    // // 第五步：进行形态学操作（开运算以去除噪声）
                    // cv::Mat morph;
                    // cv::morphologyEx(thresholded, morph, cv::MORPH_OPEN, cv::Mat::ones(m_emphasizeOpen, m_emphasizeOpen, CV_8U));

                    // 第六步：进行腐蚀操作以进一步精炼感兴趣区域

                    cv::erode(thresholded, preresult, cv::Mat::ones(m_emphasizeErode, m_emphasizeErode, CV_8U));
                }else if(m_emphasizeMode == 1)
                {    // alignedImage, emphaseBase
                    cv::Mat alignedBinary ,  emphaseBaseBinary;
                    if(emphasizeAreaList().size() !=  emphasizeImgaeList.size() )
                    {
                        qWarning() << "cv process :  emphasizeAreaList().size():" << emphasizeAreaList().size()
                        << " emphasizeImgaeList.size() : "<<  emphasizeImgaeList.size();
                    }
                    if(emPhasizeBinaryList.size() != emphasizeImgaeList.size())
                    {
                        qWarning() << "emPhasizeBinaryList.size() != emphasizeImgaeList.size() retyr set1";
                        resetEmPhasizeBinaryList();
                    }
                    emphaseBaseBinary = emPhasizeBinaryList[i];
                    std::vector<std::vector<cv::Point>> contours;
                    cv::equalizeHist(alignedImage , alignedImage);
                    cv::threshold(alignedImage, alignedBinary, emphasizeBinaryList()[i].toInt(), 255, cv::THRESH_BINARY);
                    if(emphasizeBinaryliserveList()[i].toBool())
                        cv::bitwise_not(alignedBinary, alignedBinary);

                    cv::findContours(alignedBinary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
                    for (size_t j = 0; j < contours.size(); ++j) {
                        if (cv::contourArea(contours[j]) <  m_emphasizeBinaryfilter) {
                            cv::drawContours(alignedBinary, contours, static_cast<int>(j), cv::Scalar(0), cv::FILLED);
                        }
                    }
                    int kernelInt = m_emphasizeBinaryKernel;
                    if(kernelInt%2 == 0)
                    {
                        kernelInt += 1 ;
                    }
                    cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(kernelInt, kernelInt)); // 可以根据需要调整大小

                    cv::Mat result , dilatedImage;
                    cv::dilate(alignedBinary, dilatedImage, kernel);
                    cv::subtract(emphaseBaseBinary, dilatedImage , preresult);
                    if(i == emphasizeIndex())
                    {
                        emphaseShowImage2 = emphaseBaseBinary; //显示数据
                        emphaseShowImage = dilatedImage; //显示数据
                    }
                    //preresult = emphasePreprocess(alignedImage , emphaseBase , false);
                }


                int closeKernel = 3;
                cv::Mat combined;
                if(get_hvdirections())
                {
                    if(roi.width < roi.height)
                        closeKernel = roi.width*0.4;
                    else
                        closeKernel = roi.height*0.4;
                    if(closeKernel%2==0)
                        closeKernel += 1;
                    cv::Mat horizontalKernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(closeKernel, 1));
                    // 对图像进行闭运算，连接水平方向的断点
                    cv::Mat closedX;
                    cv::morphologyEx(preresult, closedX, cv::MORPH_CLOSE, horizontalKernel);

                    cv::Mat verticalKernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(1, closeKernel)); // m_verticalSize 是核的高度

                    // 对图像进行闭运算，连接垂直方向的断点
                    cv::Mat closedY;
                    cv::morphologyEx(preresult, closedY, cv::MORPH_CLOSE, verticalKernel);
                    cv::bitwise_or(closedX, closedY, combined);
                }else{
                    combined = preresult;
                }
                //emphaseShowImage = combined;

                // 第七步：查找不同区域的轮廓
                std::vector<std::vector<cv::Point>> contours;
                cv::findContours(combined, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);



                // Draw the contours on the original image
                for (const auto& contour : contours) {
                    double area = cv::contourArea(contour);
                    if(emphasizeMax < area)
                    {
                        emphasizeMax = area;
                    }

                    if (area >= m_emphasizethreshold) {  // 3x3 实心区域的最小面积是 9 像素
                        std::vector<cv::Point> offsetContour;
                        for (const auto& point : contour) {
                            offsetContour.push_back(cv::Point(searchArea.x + minLoc.x + point.x ,searchArea.y + minLoc.y + point.y));
                        }
                        if(isDrawOutput)
                        {
                            cv::drawContours(imageRoteDraw, std::vector<std::vector<cv::Point>>{offsetContour}, -1, cv::Scalar(255, 0, 255), 4);
                        }else
                        {
                            cv::drawContours(imgAi, std::vector<std::vector<cv::Point>>{offsetContour}, -1, cv::Scalar(255, 0, 255), 4);
                        }
                        emphaseDefect = i;
                    }
                }
                if(!isDrawOutput)
                {
                    if(emphaseDefect != -1 )
                    {
                        ImageSaveManager::getInstance().saveImage(emphaseImage , i , 1.0);
                    }else
                    {
                        ImageSaveManager::getInstance().saveImage(emphaseImage , i );
                    }
                }
            }
        }else
        {
            outtext +=  QString("未启用强调检测\n");
        }
        if(emphaseDefect!=-1)
        {
            imgcard.setDefect(37);
            outtext +=  QString("发现字符缺块\n");

        }
        imgcard.emphasizeCurentList = imageList;
        imgcard.emphasizeCurentPointList = imageDrawROI;
        setemphasizeMaxarea(emphasizeMax);

        if(isDrawOutput && cvSelection_ == characterEmphasis)
        {
            emphasizeImgaeListLast = imageList;
            for (const auto &var : m_emphasizeAreaList) {
                QVariantList item = var.toList();
                if (item.size() != 4) continue;
                int x = item[0].toInt();
                int y = item[1].toInt();
                int width = item[2].toInt();
                int height = item[3].toInt();
                drawDashedRectangle(imageRoteDraw, x, y, width, height,1);
            }
            ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
            ImageProcessor::getInstance().setdetectDetail(outtext);
            //if(emphaseDefect>-1 && emphaseDefect < emphasizeImgaeList.size())
            ImageProvider::getInstance().image2qml(emphaseShowImage2,"Box1");
            ImageProvider::getInstance().image2qml(emphaseShowImage,"Box2");
            ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
            imgcard.setIsSkipDisplay(true);
            return true;
        }
    }

    //分码检测
    {
        QMutexLocker locker(&logoSizeMutex);
        cv::Mat drawOut;
        if(!logoSizeBase.isEmpty() && !logoSizeBinaryBase.empty())
        {

            double contorsCursimilarity ;
            cv::Rect curBox ;
            cv::Mat curbinary = extractLogoSizeFeatures(imgPrecessRolray , logoSizeBase , curBox);

            double widthHeightRatioDiff = 0.0;
            double shapeSimilarity = 0.0;
            double overlapRatio = 0.0;
            bool result ;


            if(isDrawOutput)
                result = logoSizeisSameStyle(curbinary, logoSizeBinaryBase , imageRoteDraw , curBox);
            else
                result = logoSizeisSameStyle(curbinary, logoSizeBinaryBase , imgAi , curBox);
            /*logoSizeisSameStyle(curbinary, logoSizeBase, curBox,
                                              logoSizeBox, widthHeightRatioDiff,
                                              shapeSimilarity, overlapRatio ,m_logoSizetolerance,
                                              m_logoSizeshapeTolerance, m_logoSizeoverlapThreshold);*/

            if(result)
            {
                logosizeisTrain = false;
                outtext +=  QString("码数正常\n");
                m_logoSizeErrNum = get_logoSizeContinuous();
                m_logoSizeErrid = -1;
            }else
            {
                if(ImageProcessor::getInstance().selectedStep() == ImageProcessor::MeauAutoDetect
                    && get_logoSizeRefuelling() == true)
                {
                    imgcard.setDefect(26);
                    if(m_logoSizeErrNum > 0 )
                    {
                        if(m_logoSizeErrid == -1)
                        {
                            m_logoSizeErrid = imgcard.getID();
                        }

                        m_logoSizeErrNum -- ;
                        if(m_logoSizeErrNum == 0)
                        {
                            m_logoSizedualErrid = m_logoSizeErrid;
                            dualisTrain = true;
                            logosizeisTrain = true;
                            qInfo() << "add logsieze << " << m_logoSizeErrid;
                            m_logoSizeErridList.append(m_logoSizeErrid);

                        }
                    }
                }
                else
                    imgcard.setDefect(27);
                outtext +=  QString("疑似出现错码\n");
                // if(m_logoSizeRefuelling && ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauAutoDetect)
                // {

                    //     logoSizeBase = extractLogoSizeFeatures(imageRoteBase , logoSizeROI , logoSizeBox).clone();
                    // }
            }

            if(logosizeisTrain)
            {
                //m_logoSizeGone = true;
                logoSizeBase.setImage(imageRoteBase(logoSizeBase.getRoi()));
                cv::Rect box;
                logoSizeBinaryBase = extractLogoSizeFeatures(imageRoteBase , logoSizeBase , box).clone();
                //m_logoSizeErrid = -1;
            }

            // int x = logoSizeROI[0].toInt();
            // int y = logoSizeROI[1].toInt();
            // int width = logoSizeROI[2].toInt();
            // int height = logoSizeROI[3].toInt();
            cv::Rect logoRect = logoSizeBase.getRoi();


            // 将 curBox 从相对于 logoSizeROI 的坐标转换为原图坐标
            // cv::Rect adjustedCurBox(
            //     logoRect.x + curBox.x, // 在原图上的 x 坐标
            //     logoRect.y + curBox.y, // 在原图上的 y 坐标
            //     curBox.width, curBox.height
            //     );
            if(isDrawOutput && cvSelection_ == LogoSize)
            {
                updateoverlapRatio(overlapRatio);
                updatewidthHeightRatioDiff(widthHeightRatioDiff);
                updateshapeSimilarity(shapeSimilarity);
                cv::rectangle(imageRoteDraw, logoRect, cv::Scalar(0, 255, 0), 2); // 绿色边框表示 logoSizeROI
                cv::rectangle(imageRoteDraw, curBox, cv::Scalar(255, 0, 0), 2); // 蓝色边框表示 curBox
                ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
                ImageProcessor::getInstance().setdetectDetail(outtext);
                //if(emphaseDefect>-1 && emphaseDefect < emphasizeImgaeList.size())
                ImageProvider::getInstance().image2qml(curbinary,"Box1");
                ImageProvider::getInstance().image2qml(logoSizeBinaryBase,"Box2");
                ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
                imgcard.setIsSkipDisplay(true);
                return true;
            }else
            {
                cv::rectangle(imgAi, curBox, cv::Scalar(255, 0, 0), 2); // 蓝色边框表示 curBox
            }
        }
        if(isDrawOutput && cvSelection_ == LogoSize)
        {
            ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
            imgcard.setIsSkipDisplay(true);
            return true;
        }
    }
    //斜麦检测
    if(m_tiltAngleEnable && currenttiltAngle_!=0)
    {

        float vx = topFitLine[0];
        float vy = topFitLine[1];

        // 计算 topFitLine 的角度
        double lineAngle = atan2(vy, vx) * 180.0 / CV_PI;
        if (lineAngle > 45) {
            lineAngle -= 90;
        } else if (lineAngle < -45) {
            lineAngle += 90;
        }
        // 计算角度差异
        double angleDiff = currenttiltAngle_ - lineAngle;
        settiltAngle(angleDiff);

        // 将差值限制在 -180 到 180 度之间
        if (angleDiff > 180) {
            angleDiff -= 360;
        } else if (angleDiff < -180) {
            angleDiff += 360;
        }
        if(angleDiff < m_tiltAngleMin || angleDiff > m_tiltAngleMax)
        {
            imgcard.setDefect(36);
        }
        outtext +=  QString("主方向:%1，标签方向:%2 \n").arg(currenttiltAngle_).arg(lineAngle);
        outtext +=  QString("当前斜唛角度:%1 阈值[%2-%3]\n")
                       .arg(angleDiff)
                       .arg(m_tiltAngleMin)
                       .arg(m_tiltAngleMax);

        if(isDrawOutput && cvSelection_ == TiltDetection)
        {
            saveTiltAngle_ = angleDiff;
            ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
            ImageProcessor::getInstance().setdetectDetail(outtext);
            ImageProvider::getInstance().image2qml(boundaryBinaryImg,"Box1");
            //ImageProvider::getInstance().image2qml(img_src,"Box2");
            ImageProvider::getInstance().image2qml(imgCapDraw, "Main");
            imgcard.setIsSkipDisplay(true);
            return true;
        }

    }else{
        outtext += "斜唛检测失败";
        if(isDrawOutput && cvSelection_ == TiltDetection)
        {
            ImageProvider::getInstance().image2qml(imgCapDraw, "Main");
            imgcard.setIsSkipDisplay(true);
            return true;
        }
    }

    //测量长度
    if(m_lengthDetectionEnable)
    {
        QMutexLocker locker(&lenthmutex);
        int leftX,rightX;
        double xDistacne = findLineDistanceV2(imgPrecessRolray,
                                              m_LengthROI,
                                              m_lengthDetectionThreshold,
                                              m_lengthDetectionThresholdRight,leftX,rightX,
                                              m_lengthRedundancy) * px2mm();
        outtext +=  QString("长度检测值:%1 [%2-%3]\n").arg(xDistacne)
                       .arg(m_lengthDetectionMax)
                       .arg(m_lengthDetectionMin);



        if(xDistacne > m_lengthDetectionMax || xDistacne < m_lengthDetectionMin)
        {
            imgcard.setDefect(35);
        }
        imgcard.lenthDistance =  xDistacne;

        if(saveLengthDistance == 0)
        {
            saveLengthDistance = xDistacne;
        }

        if(isDrawOutput && cvSelection_ == LengthDetection)
        {

            drawROIByQList(imageRoteDraw , m_LengthROI);
            saveLengthDistance = xDistacne;
            setlengthDistance(xDistacne);
            drawLinesOnImage(imageRoteDraw, leftX, rightX);
            drawHelperLinesOnImage(imageRoteDraw,
                                   leftX + m_lengthDetectionMax / px2mm(),
                                   leftX + m_lengthDetectionMin / px2mm());
            ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
            ImageProcessor::getInstance().setdetectDetail(outtext);
            ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
            ImageProvider::getInstance().image2qml(imgPreProcess_,"Box1");
            imgcard.setIsSkipDisplay(true);
            return true;
        }else
        {
            drawLinesOnImage(imgAi, leftX, rightX);
            drawHelperLinesOnImage(imgAi,leftX+m_lengthDetectionMax / px2mm() ,leftX+m_lengthDetectionMin/ px2mm());
        }
    }else {
        if(isDrawOutput && cvSelection_ == LengthDetection)
        {
            ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
            imgcard.setIsSkipDisplay(true);
            return true;
        }
        outtext += "未激活直线检测\n";
    }
    //特征差异V2
    {
        QReadLocker locker(&specialV2mutex);
        cv::Mat diff;
        cv::Mat eroded;
        cv::Mat thresholded;
        if(isSpecialDiagnosisV2())
        {
            if(specialV2BaseMatGray.empty() || specialV2BaseMat.empty())
            {
                outtext += "激活特征差异V2但是未设置标准\n";
            }
            else
            {
                cv::Mat imageRoteScale;
                cv::resize(imageRote , imageRoteScale, cv::Size(), isSpecialV2Scale()
                           , isSpecialV2Scale() , cv::INTER_LINEAR);
                diff = getSpecialDiff(imageRoteScale);
                if(!diff.empty())
                {
                    cv::Rect roi = specialRoi;

                    roi = adjustROI(roi, diff.size());
                    qInfo() << "roi:" << roi.width;
                    if (!specialV2BaseBac.empty() && specialV2BaseBac.size() == diff.size()) {
                        diff -= specialV2BaseBac;
                    }
                    else
                    {
                        qWarning() << "!specialV2BaseBac.empty() && specialV2BaseBac.size() == diff.size()";
                    }
                    qInfo() << "roi:" << diff.cols;
                    diff = diff(roi);

                    qInfo() << "111";

                    if (!specialV2BaseEdge.empty() && diff.size() == specialV2BaseEdge.size()) {
                        cv::subtract(diff , specialV2BaseEdge , diff);
                    }else
                    {
                        qWarning() << "Image sizes do not match:";
                        qWarning() << "diff size: width =" << diff.cols << ", height =" << diff.rows;
                        qWarning() << "specialV2BaseEdge size: width =" << specialV2BaseEdge.cols << ", height =" << specialV2BaseEdge.rows;

                    }
                    float scale = isSpecialV2Scale();

                    cv::Mat filtered;
                    cv::medianBlur(diff, filtered, 5);
                    int redundancy = isSpecialV2redundancy();
                    if(redundancy % 2 ==0)
                    {
                        setisSpecialV2redundancy(redundancy +1);
                    }
                    // 腐蚀操作
                    cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(redundancy, redundancy));
                    cv::erode(filtered, eroded, element);
                     qInfo() << "222";
                    // 找到图像中大于一定阈值的区域

                    cv::threshold(eroded, thresholded, isSpecialV2Detection(), 255, cv::THRESH_BINARY);

                    // 在图中标记出大于阈值的区域
                    std::vector<std::vector<cv::Point>> contours;
                    cv::findContours(thresholded, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

                    for (auto& contour : contours) {
                        for (auto& point : contour) {
                            point.x = static_cast<int>((point.x + roi.x) / isSpecialV2Scale()); // 还原 X 坐标
                            point.y = static_cast<int>((point.y + roi.y) / isSpecialV2Scale()); // 还原 Y 坐标
                        }
                    }
                    cv::Rect enlargedRegion(
                        static_cast<int>(roi.x / scale),
                        static_cast<int>(roi.y / scale),
                        static_cast<int>(roi.width / scale),
                        static_cast<int>(roi.height / scale)
                        );

                    // 在原始图像上绘制放大的矩形框
                    cv::rectangle(imageRoteDraw, enlargedRegion, cv::Scalar(255, 0, 0), 2); // 蓝色框，线宽2

                    cv::drawContours(imageRoteDraw, contours, -1, cv::Scalar(0, 0, 255), 2);
                    if(!contours.empty())
                    {
                        imgcard.setDefect(25);
                    }

                }else
                {
                    imgcard.setDefect(25);
                    outtext += "chafentu\n";
                }
            }
        }
        else
        {
            outtext += "未激活特征差异V2\n";
        }
        if(isDrawOutput && cvSelection_ == SpecialDiagnosisV2)
        {
            ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
            ImageProcessor::getInstance().setdetectDetail(outtext);
            ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
            ImageProvider::getInstance().image2qml(thresholded, "Box1");
            ImageProvider::getInstance().image2qml(diff, "Box2");
            imgcard.setIsSkipDisplay(true);
            return true;
        }

    }

    {
        if(diffEable())// && imgcard.isInROI)
        {

            if(dualisTrain == true)
            {
                setembedding();
            }
            float scale_;
            qint64 elapsed;
            cv::Mat result = resdiff.detect(imageRote,pred_score_,elapsed,scale_).clone();
            if(result.empty())
            {
                outtext += "特征识别失败请检查模型和基准" ;
            }else{
                emit currentpredscoreChanged();
                outtext += QString("特征检测a耗费时间:%1 \n").arg(elapsed / 1000) ;
                outtext += QString("差异:%1 阈值[%2] \n").arg(pred_score_).arg(resthreshold_) ;
                cv::Mat binary;
                cv::threshold(result, binary, resthreshold_*scale_, 255, cv::THRESH_BINARY);
                std::vector<std::vector<cv::Point>> contours;
                contours.clear();
                cv::findContours(binary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
                float scaleX = static_cast<float>(imgDraw.cols) / result.cols;
                float scaleY = static_cast<float>(imgDraw.rows) / result.rows;
                bool defect = false;
                for (auto &contour : contours) {
                    for (auto &point : contour) {
                        defect = true;
                        point.x = static_cast<int>(point.x * scaleX);
                        point.y = static_cast<int>(point.y * scaleY);
                    }
                }

                if(pred_score_ > resthreshold_)
                {
                    imgcard.setDefect(34);
                }else
                {
                    dualisTrain = false;
                    if(m_logoSizedualErrid)
                    {
                        DetectCacheModel::instance().inPlaceadjustCacheRemove34(
                            m_logoSizedualErrid + 2, 5);
                        AfterDetect::getInstance().inPlaceadjustCacheremove34(
                            m_logoSizedualErrid + 2, 5);
                        m_logoSizedualErrid = 0;
                    }
                }
                if(isDrawOutput && cvSelection_ == SpecialDiagnosis)
                    cv::drawContours(imageRoteDraw, contours, -1, cv::Scalar(255, 147, 0), 2);
                else
                    cv::drawContours(imgAi, contours, -1, cv::Scalar(255, 147, 0), 2);
            }

            if (isDrawOutput && cvSelection_ == SpecialDiagnosis) {
                ImageProcessor::getInstance().displayOKNGReson(imgcard.getNgCard());
                ImageProcessor::getInstance().setdetectDetail(outtext);
                ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
                ImageProvider::getInstance().image2qml(result,"Box1");
                imgcard.setIsSkipDisplay(true);
                return true;
            }
        }else
        {
            if (isDrawOutput && cvSelection_ == SpecialDiagnosis) {
                ImageProvider::getInstance().image2qml(imageRoteDraw, "Main");
                imgcard.setIsSkipDisplay(true);
                return true;
            }
        }
    }

    {
        //特征差异V2




    }

    imgcard.setDetectStepTime(Cv,outtext);
    if(!imgAi.empty())
        imgcard.setStepImage(Cv, imgAi);
    return true;
#if 0
    // 显示结果图像并标记缺陷点
    //cv::Mat result = inputImage.clone();
    for (const auto& point : defects) {
        if(isDrawOutput && cvSelection_ == EdgeDetection)
        {
            cv::circle(imgCapDraw, point, 30, cv::Scalar(0, 0, 255), -1); // 红色标记缺陷
        }
        else
        {
            cv::Point point_Temp(point);
            point_Temp.y = point.y - monitorRect.y;
            cv::circle(imgDraw, point_Temp, 30, cv::Scalar(0, 0, 255), -1); // 红色标记缺陷
        }
        imgcard.setDefect(6);
    }
    if(isDrawOutput)
    {
        for (const auto& point : topPoints) {
            cv::circle(imgCapDraw, point, 5, cv::Scalar(0, 255, 0), -1); // 绿色点表示顶部边缘
        }
        for (const auto& point : bottomPoints) {
            cv::circle(imgCapDraw, point, 5, cv::Scalar(255, 0, 0), -1); // 蓝色点表示底部边缘
        }
    }

    if (!topPoints.empty()) {
        if(isDrawOutput && cvSelection_ == EdgeDetection)
            drawFitLine(imgCapDraw, topFitLine, cv::Scalar(255, 125, 0)); // 蓝色拟合直线
        else
            drawFitLine(imgDraw, topFitLine, cv::Scalar(255, 125, 0),monitorRect.y);
    }
    if (!bottomPoints.empty()) {
        if(isDrawOutput && cvSelection_ == EdgeDetection)
            drawFitLine(imgCapDraw, bottomFitLine, cv::Scalar(255, 125, 0)); // 蓝色拟合直线
        else
            drawFitLine(imgDraw, bottomFitLine, cv::Scalar(255, 125, 0),monitorRect.y);
    }

    LogDebug << "CvAlgEdgeDetection::detectEdgeDefects Succeesful";

    if(isDrawOutput && cvSelection_ == EdgeDetection)
    {
        LogDebug << "CvAlgEdgeDetection draw" << boundaryBinaryImg.size;
        LogDebug << "CvAlgEdgeDetection imgCapDraw " << imgCapDraw.size;
        ImageProvider::getInstance().image2qml(boundaryBinaryImg,"Box1");
        //ImageProvider::getInstance().image2qml(img_src,"Box2");
        ImageProvider::getInstance().image2qml(imgCapDraw, "Main");
        imgcard.setIsSkipDisplay(true);
        return true;
    }
    // ___------------------------大小边检测-------------------------

    // if(isDrawOutput)
    // {
    //     ImageProvider::getInstance().image2qml(boundaryBinaryImg,"Box1");
    //     //ImageProvider::getInstance().image2qml(img_src,"Box2");
    //     ImageProvider::getInstance().image2qml(imgCapDraw, "Main");
    //     imgcard.setIsSkipDisplay(true);
    //     return true;
    // }

    cv::Rect2d identRoi = imgcard.getIdentROi();
    offsetLineY(bottomFitLine,monitorRect.y);
    offsetLineY(topFitLine,monitorRect.y);
    // 计算ROI中心点
    cv::Point2d roiCenter(identRoi.x + identRoi.width / 2.0, identRoi.y + identRoi.height / 2.0);
    LogDebug << "CvAlgEdgeDetection roiCenter"<< roiCenter;
    double distanceToTop = pointToLineDistance(roiCenter, topFitLine);
    double distanceToBottom = pointToLineDistance(roiCenter, bottomFitLine);
    LogDebug << "CvAlgEdgeDetection distanceToTop:"<< distanceToTop <<
        " distanceToBottom: "<< distanceToBottom;

    cv::circle(imgDraw, roiCenter, 5, cv::Scalar(0, 255, 0), cv::FILLED); // 绿色点
    cv::rectangle(imgDraw, identRoi, cv::Scalar(100, 122, 125), 5);

    // 绘制两条线
    cv::line(imgDraw,cv::Point(topFitLine[2] - topFitLine[0] * 1000,
                                topFitLine[3] - topFitLine[1] * 1000),
             cv::Point(topFitLine[2] + topFitLine[0] * 1000,
                       topFitLine[3] + topFitLine[1] * 1000),
             cv::Scalar(0, 255, 0), 2);
    cv::line(imgDraw,cv::Point(bottomFitLine[2] - bottomFitLine[0] * 1000,
                                bottomFitLine[3] - bottomFitLine[1] * 1000),
             cv::Point(bottomFitLine[2] + bottomFitLine[0] * 1000,
                       bottomFitLine[3] + bottomFitLine[1] * 1000),
             cv::Scalar(0, 255, 0), 2);

    // 计算并绘制从中心点到边缘线的连线
    cv::Point2d topPoint(roiCenter.x - topFitLine[1] * distanceToTop,
                         roiCenter.y - topFitLine[0] * distanceToTop);
    cv::Point2d bottomPoint(roiCenter.x + bottomFitLine[1] * distanceToBottom,
                            roiCenter.y + bottomFitLine[0] * distanceToBottom);
    cv::line(imgDraw, roiCenter, topPoint, cv::Scalar(255, 0, 0), 4); // 蓝色线条
    cv::line(imgDraw, roiCenter, bottomPoint, cv::Scalar(255, 0, 0), 4); // 蓝色线条

    std::string textTop = "top: " + std::to_string(distanceToTop);
    std::string textBottom = "bottom: " + std::to_string(distanceToBottom);
    imgcard.setStepImage(StepProcess::Cv, imgDraw);
    int fontFace = cv::FONT_HERSHEY_SIMPLEX;
    double fontScale = 0.5;
    int thickness = 1;

    // 计算文本大小和背景矩形
    int baseline = 0;
    cv::Size textSizeTop = cv::getTextSize(textTop, fontFace, fontScale,
                                           thickness, &baseline);
    cv::Size textSizeBottom = cv::getTextSize(textBottom, fontFace, fontScale,
                                              thickness, &baseline);
    cv::Point textOrgTop(identRoi.x, identRoi.y - 10);
    cv::Point textOrgBottom(identRoi.x, identRoi.y + identRoi.height + textSizeBottom.height + 10);

    cv::rectangle(imgDraw, textOrgTop + cv::Point(0, baseline),
                  textOrgTop + cv::Point(textSizeTop.width, -textSizeTop.height),
                  cv::Scalar(0, 0, 0), cv::FILLED);
    cv::rectangle(imgDraw, textOrgBottom + cv::Point(0, baseline),
                  textOrgBottom + cv::Point(textSizeBottom.width, -textSizeBottom.height),
                  cv::Scalar(0, 0, 0), cv::FILLED);

    // 绘制文本
    cv::putText(imgDraw, textTop, textOrgTop, fontFace, fontScale,
                cv::Scalar(255, 255, 255), thickness);
    cv::putText(imgDraw, textBottom, textOrgBottom, fontFace, fontScale,
                cv::Scalar(255, 255, 255), thickness);


    if(isDrawOutput && cvSelection_ == LargeAsmallDetection)
    {
        LogDebug << "CvAlgEdgeDetection draw" << boundaryBinaryImg.size;
        LogDebug << "CvAlgEdgeDetection imgCapDraw " << imgCapDraw.size;
        //ImageProvider::getInstance().image2qml(imgDraw,"Box1");
        //ImageProvider::getInstance().image2qml(img_src,"Box2");
        ImageProvider::getInstance().image2qml(imgDraw, "Main");
        imgcard.setIsSkipDisplay(true);
        return true;
    }
    imgcard.setStepImage(Cv,imgDraw);
    return true;
#endif
}

// Shift a fitted line (cv::fitLine layout: [vx, vy, x0, y0]) vertically by
// -offsety, mapping a line computed inside a cropped monitor ROI back into
// the parent image's coordinate frame. Only the anchor point's Y (line[3])
// moves; the direction vector is untouched.
void CvAlgEdgeDetection::offsetLineY(cv::Vec4f& line, int offsety)
{
    line[3] -= static_cast<float>(offsety);
}


/**
 * @brief Draw a fitted line (cv::fitLine result [vx, vy, x0, y0]) onto @p image.
 *
 * The anchor point's Y is shifted by -offsety first, so a line fitted inside a
 * cropped ROI can be rendered on the parent image.
 *
 * @param image   target image drawn on in place.
 * @param line    direction (vx, vy) and anchor (x0, y0) of the fitted line.
 * @param color   BGR line color.
 * @param offsety vertical offset subtracted from the anchor (default 0).
 *
 * Fix: removed the unused locals `cols`/`rows` (dead code triggering
 * -Wunused-variable); behavior is unchanged.
 */
void CvAlgEdgeDetection::drawFitLine(cv::Mat& image, const cv::Vec4f& line, const cv::Scalar& color, int offsety) {
    const float vx = line[0], vy = line[1];
    const float x = line[2], y = line[3] - offsety;

    // Extend 1000 px either side of the anchor; cv::line clips to the image.
    cv::Point pt1(cvRound(x - vx * 1000), cvRound(y - vy * 1000));
    cv::Point pt2(cvRound(x + vx * 1000), cvRound(y + vy * 1000));

    cv::line(image, pt1, pt2, color, 2);
}
// Crop the boundary region out of the grayscale capture, binarize it with the
// configured method, optionally adjust/invert the result, and suppress small
// blobs. Returns the cleaned binary image used for edge fitting.
//
// Parameters:
//   imgCapGray  - full grayscale capture.
//   monitorRect - ROI describing the boundary band to inspect.
// Returns: single-channel binary image of the boundary band.
cv::Mat CvAlgEdgeDetection::preprocessImage(const cv::Mat& imgCapGray, const cv::Rect2d& monitorRect)
{
    cv::Mat boundaryGrayImg = cropBoundaryAndAdjustROI(imgCapGray, monitorRect);
    LogTrack << "CvAlgEdgeDetection boundaryGrayImg size: " << boundaryGrayImg.size;

    // Binarization method is selected by the persisted "boundaryMethods" value:
    // 0 = plain threshold (optionally auto), 1 = edge-detect then binarize.
    // NOTE(review): any other value leaves boundaryBinaryImg empty and the
    // findContours below would run on an empty Mat — confirm the config can
    // only hold 0/1.
    cv::Mat boundaryBinaryImg;
    switch (ParamValue.getParameter("cvInspection", "boundaryMethods").toInt()) {
    case 0:
        boundaryBinaryImg = binarizeImage(boundaryGrayImg, boundaryExtraction1_,boundaryExtractionAuto_);
        break;
    case 1:
        boundaryBinaryImg = edgeDetectionAndBinarize(boundaryGrayImg, boundaryExtraction1_, boundaryExtraction2_);
        break;
    default:
        break;
    }
    // overturn_ selects post-processing: 0 = structural adjustment,
    // 2 = invert, anything else = pass through. The default copy shares the
    // underlying buffer with boundaryBinaryImg (cv::Mat shallow copy).
    cv::Mat AdjustboundaryBinaryImg = boundaryBinaryImg;
    if(overturn_ == 0)
    {
        AdjustboundaryBinaryImg = adjustBoundaryBinaryImage(boundaryBinaryImg);
    }else if(overturn_ == 2)
        cv::bitwise_not(boundaryBinaryImg, AdjustboundaryBinaryImg);

    LogTrack << "edgeSmoothing_:" <<edgeSmoothing_;
    // int morphSize = edgeSmoothing_/10;
    // cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT,
    //                                             cv::Size(2 * morphSize + 1, 2 * morphSize + 1),
    //                                             cv::Point(morphSize, morphSize));
    // Perform a closing operation (disabled; small-blob removal below is used instead).
    //cv::morphologyEx(AdjustboundaryBinaryImg, AdjustboundaryBinaryImg, cv::MORPH_OPEN, element);


    // Remove speckle: blank every outer contour whose area is below the
    // edgeSmoothing_ threshold.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(AdjustboundaryBinaryImg, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); ++i) {
        if (cv::contourArea(contours[i]) < edgeSmoothing_) {
            cv::drawContours(AdjustboundaryBinaryImg, contours, static_cast<int>(i), cv::Scalar(0), cv::FILLED);
        }
    }
    LogTrack << "finish binarizeImage";
    return AdjustboundaryBinaryImg;
}


// Measure where the identification ROI's centre sits between the fitted top
// and bottom boundary lines, draw the measurement overlay (centre point, fit
// lines, tolerance band lines, distance rays, optional percentage labels),
// and store the normalized distances on the image record.
//
// NOTE(review): topMax/topmin/btmmax/btmMin are never read in this body —
// the tolerance bands come from the largeSmall* members instead; confirm the
// parameters are intentionally unused.
void CvAlgEdgeDetection::processROI(cv::Mat& imgDraw, ImageRecord& imgcard, cv::Vec4f& topFitLine,
                                    cv::Vec4f& bottomFitLine, const cv::Rect2d& monitorRect, float topMax, float topmin,
                                    float btmmax, float btmMin , bool isText)
{
    cv::Rect2d identRoi = imgcard.getIdentROi();
    // Map both fit lines from monitor-ROI coordinates back into imgDraw space.
    offsetLineY(bottomFitLine, monitorRect.y);
    offsetLineY(topFitLine, monitorRect.y);

    cv::Point2d roiCenter(identRoi.x + identRoi.width / 2.0, identRoi.y + identRoi.height / 2.0);
    LogTrack << "CvAlgEdgeDetection roiCenter" << roiCenter;

    double distanceToTop = pointToLineDistance(roiCenter, topFitLine);
    double distanceToBottom = pointToLineDistance(roiCenter, bottomFitLine);

    // Cache the latest raw (pixel) distances; also exposed as properties.
    setLargeSmallLastTop(distanceToTop);
    setLargeSmallLastBottom(distanceToBottom);
    LogTrack << "CvAlgEdgeDetection distanceToTop:" << distanceToTop <<
        " distanceToBottom: " << distanceToBottom;

    cv::circle(imgDraw, roiCenter, 5, cv::Scalar(0, 255, 0), cv::FILLED);
    cv::rectangle(imgDraw, identRoi, cv::Scalar(100, 122, 125), 5);

    drawLine(imgDraw, topFitLine, cv::Scalar(0, 255, 0) , 4);
    drawLine(imgDraw, bottomFitLine,cv::Scalar(0, 255, 0) , 4 );

    drawDistanceLine(imgDraw, roiCenter, topFitLine, distanceToTop);
    drawDistanceLine(imgDraw, roiCenter, bottomFitLine, distanceToBottom);




    cv::Vec4f topFitLineMax = topFitLine;

    // Total top+bottom gap; tolerance limits below are percentages of it.
    float total = LargeSmallLastTop_ + LargeSmallLastBottom_;


    // Shift copies of the fit lines to visualize the allowed upper band
    // [largeSmallUpperMin_%, largeSmallUpperMax_%] of the total gap.
    topFitLineMax[3] -= (total * largeSmallUpperMax_ / 100 ) - LargeSmallLastTop_  ;
    cv::Vec4f topFitLineMin = topFitLine;
    topFitLineMin[3] -=  (total * largeSmallUpperMin_  / 100) - LargeSmallLastTop_; // colors below are BGR, not RGB
    drawLine(imgDraw, topFitLineMax, cv::Scalar(51, 87, 255) , 1);
    drawLine(imgDraw, topFitLineMin, cv::Scalar(245, 32, 172) , 1);

    // Same for the lower band [largeSmallLowerMin_%, largeSmallLowerMax_%].
    cv::Vec4f bottomFitLineMax = bottomFitLine;
    bottomFitLineMax[3] +=  (total * largeSmallLowerMax_ / 100) - LargeSmallLastBottom_;
    cv::Vec4f bottomFitLineMin = bottomFitLine;
    bottomFitLineMin[3] +=  (total * largeSmallLowerMin_ / 100 ) - LargeSmallLastBottom_;
    drawLine(imgDraw, bottomFitLineMax, cv::Scalar(51, 87, 255) , 1);
    drawLine(imgDraw, bottomFitLineMin, cv::Scalar(245, 32, 172) , 1);
    // Normalize distances to percentages of the total gap.
    distanceToTop = distanceToTop / total * 100;
    distanceToBottom = distanceToBottom / total * 100;
    if(isText) {
        drawText(imgDraw, "t:" + to_string_with_precision(distanceToTop, 1) + "%", cv::Point(identRoi.x, identRoi.y - 10));
        drawText(imgDraw, "b:" + to_string_with_precision(distanceToBottom, 1) + "%", cv::Point(identRoi.x, identRoi.y + identRoi.height + 20));
    }

    imgcard.setdistanceLargeSmall(distanceToTop,distanceToBottom);


    //imgcard.setStepImage(StepProcess::Cv, imgDraw);
}

// Render a long line through the fit-line anchor (line[2], line[3]) along
// direction (line[0], line[1]); both endpoints are extended 1000 px from the
// anchor and cv::line clips them to the image.
void CvAlgEdgeDetection::drawLine(cv::Mat& img, const cv::Vec4f& line, const cv::Scalar& color , int thickness)
{
    const cv::Point start(line[2] - line[0] * 1000, line[3] - line[1] * 1000);
    const cv::Point stop(line[2] + line[0] * 1000, line[3] + line[1] * 1000);
    cv::line(img, start, stop, color, thickness);
}

// Draw a blue ray from the ROI centre toward the fitted line, with length
// equal to the measured centre-to-line distance. The ray points up when the
// line's anchor Y sits above the centre, down otherwise.
void CvAlgEdgeDetection::drawDistanceLine(cv::Mat& img, const cv::Point2d& roiCenter, const cv::Vec4f& line, double distance)
{

    LogTrack << "line[3] " << line[3] << "roiCenter.y:"<< roiCenter.y;
    const double dir = (line[3] < roiCenter.y) ? -1.0 : 1.0;
    const cv::Point2d endPoint(roiCenter.x + dir * line[1] * distance,
                               roiCenter.y + dir * line[0] * distance);
    cv::line(img, roiCenter, endPoint, cv::Scalar(255, 0, 0), 4);
}

// Draw white label text over a filled black background rectangle at org so it
// stays legible regardless of the underlying image content.
void CvAlgEdgeDetection::drawText(cv::Mat& img, const std::string& text, const cv::Point& org)
{
    const int face = cv::FONT_HERSHEY_SIMPLEX;
    const double scale = 0.5;
    const int weight = 1;

    int baseline = 0;
    const cv::Size extent = cv::getTextSize(text, face, scale, weight, &baseline);

    // Background box spans from the baseline up to the text's cap height.
    cv::rectangle(img,
                  org + cv::Point(0, baseline),
                  org + cv::Point(extent.width, -extent.height),
                  cv::Scalar(0, 0, 0), cv::FILLED);
    cv::putText(img, text, org, face, scale, cv::Scalar(255, 255, 255), weight);
}


// Overlay edge-detection debug graphics on the capture: green dots for top
// edge samples, blue dots for bottom edge samples, and the two fitted lines.
// Only active in edge-detection preview mode; otherwise a no-op.
// (defects and monitorRect are accepted for interface compatibility but not
// used by the current drawing.)
void CvAlgEdgeDetection::drawDefectsAndEdges(cv::Mat& imgCapDraw, const std::vector<cv::Point>& defects,
                                             const std::vector<cv::Point>& topPoints, const std::vector<cv::Point>& bottomPoints,
                                             const cv::Vec4f& topFitLine, const cv::Vec4f& bottomFitLine,
                                             bool isDrawOutput, const cv::Rect2d& monitorRect)
{
    if (!isDrawOutput || cvSelection_ != EdgeDetection)
        return;

    for (const auto& pt : topPoints)
        cv::circle(imgCapDraw, pt, 2, cv::Scalar(0, 255, 0), -1);
    for (const auto& pt : bottomPoints)
        cv::circle(imgCapDraw, pt, 2, cv::Scalar(255, 0, 0), -1);

    if (!topPoints.empty())
        drawFitLine(imgCapDraw, topFitLine, cv::Scalar(255, 125, 0));
    if (!bottomPoints.empty())
        drawFitLine(imgCapDraw, bottomFitLine, cv::Scalar(255, 125, 0));
}

// Perpendicular distance from pt to a line given in cv::fitLine form
// [vx, vy, x0, y0]. The implicit equation a*x + b*y + c = 0 is built with
// a = vy, b = -vx, c = vx*y0 - vy*x0. Returns 0 (with a warning) if the
// direction vector is degenerate.
double CvAlgEdgeDetection::pointToLineDistance(const cv::Point2d& pt, const cv::Vec4f& line) {
    const double a = line[1];
    const double b = -line[0];
    const double c = line[0] * line[3] - line[1] * line[2];

    const double norm = std::sqrt(a * a + b * b);
    if (norm < 1e-9) {
        // Near-zero direction vector: the line is ill-defined.
        LogWarning << "Denominator is too small, cannot calculate point-to-line distance.";
        return 0;
    }

    return std::abs(a * pt.x + b * pt.y + c) / norm;
}

/// Identifier of this algorithm, used by callers to label/select it.
QString CvAlgEdgeDetection::getName() const
{
    return QStringLiteral("EdgeDetectionAlgorithm");
}


/// Slot for monitor-image updates. Intentionally a no-op for this algorithm;
/// the empty body is kept to satisfy the slot's signature.
void CvAlgEdgeDetection::monitorImgChangeSlots(cv::Mat &img)
{
    Q_UNUSED(img); // fix: silences -Wunused-parameter on the deliberately empty slot
}

void CvAlgEdgeDetection::configDataChanged(const QString &category, const QString &key)
{

}

// Reload every tunable inspection parameter from the persisted ParamValue
// store into the corresponding members, re-emit all change notifications so
// QML bindings refresh, and reset the cached large/small distances.
// NOTE(review): boundaryExtractionAuto_ is not reloaded here — confirm
// whether that is intentional.
void CvAlgEdgeDetection::valueTableLoad()
{
    LogDebug << "Attempting to CvAlgEdgeDetection::valueTableLoad";
    // Boundary-extraction parameters.
    boundaryExtraction1_ = ParamValue.getParameter("cvInspection", "boundaryExtraction1").toInt();
    boundaryExtraction2_ =ParamValue.getParameter("cvInspection", "boundaryExtraction2").toInt();
    boundaryThreshold_ = ParamValue.getParameter("cvInspection", "boundaryThreshold").toInt();
    emit boundaryExtraction1Change();
    emit boundaryExtraction2Change();
    emit boundaryThresholdChange();

    // Large/small-edge tolerance bands.
    largeSmallUpperMax_ = ParamValue.getParameter("cvInspection", "largeSmallUpperMax").toFloat();
    largeSmallUpperMin_ = ParamValue.getParameter("cvInspection", "largeSmallUpperMin").toFloat();
    largeSmallLowerMax_ = ParamValue.getParameter("cvInspection", "largeSmallLowerMax").toFloat();
    largeSmallLowerMin_ = ParamValue.getParameter("cvInspection", "largeSmallLowerMin").toFloat();
    emit largeSmallUpperMaxChanged();
    emit largeSmallUpperMinChanged();
    emit largeSmallLowerMaxChanged();
    emit largeSmallLowerMinChanged();

    // Feature-difference threshold and binary post-processing settings.
    resthreshold_ = ParamValue.getParameter("cvInspection", "resthreshold").toFloat();
    overturn_ = ParamValue.getParameter("cvInspection", "overturn").toInt();
    edgeSmoothing_ = ParamValue.getParameter("cvInspection", "edgeSmoothing").toInt();
    emit resthresholdChanged();
    emit edgeSmoothingChanged();
    emit overturnchanged();

    // Clear the last measured distances so stale values don't linger.
    setLargeSmallLastTop(0);
    setLargeSmallLastBottom(0);
    LogDebug << "finish to CvAlgEdgeDetection::valueTableLoad";
}


/**
 * @brief Set the primary boundary-extraction parameter.
 *
 * Persists the value, switches auto-extraction off (a manual edit overrides
 * auto mode), and — when the UI is on the CV inspection step — re-runs a
 * preview on the cached capture.
 *
 * Fix: the original emitted boundaryExtraction1Change() BEFORE assigning
 * boundaryExtraction1_, so slots reading the property during the signal saw
 * the stale value. The notification now follows the assignment, matching
 * setBoundaryExtraction2()/setBoundaryThreshold().
 */
void CvAlgEdgeDetection::setboundaryExtraction1(int boundaryExtraction1) {
    if (boundaryExtraction1_ != boundaryExtraction1) {
        boundaryExtraction1_ = boundaryExtraction1;
        ParamValue.setParameter("cvInspection", "boundaryExtraction1", boundaryExtraction1_);
        LogDebug << "CvAlgEdgeDetection::setBoundaryExtraction1:" << boundaryExtraction1_;
        emit boundaryExtraction1Change();
        setboundaryExtractionAuto(false);
        if(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv)
        {
            // Preview needs a cached capture to re-threshold.
            if(imgCapcache.empty())
            {
                ErrorDialogManager::instance().showNonBlockingError("预览阈值错误", "请现进行一次检测");
                LogWarning << "CvAlgEdgeDetection::setboundaryExtraction1 imgCapcache.empty";
                return;
            }
            // Pause live capture so the preview renders on a stable frame.
            ImageProcessor::getInstance().setcaptureImgMode(ImageProcessor::CapPause);
            testBoundaryExtraction1(false);
        }
    }
}

// 实现setBoundaryExtraction2方法
// Setter for the secondary boundary-extraction parameter: persists the value
// to the "cvInspection" config section and notifies bindings on change.
void CvAlgEdgeDetection::setBoundaryExtraction2(int boundaryExtraction2) {
    if (boundaryExtraction2 == boundaryExtraction2_)
        return; // unchanged — skip redundant write/signal

    boundaryExtraction2_ = boundaryExtraction2;
    ParamValue.setParameter<int>("cvInspection", "boundaryExtraction2", boundaryExtraction2_);
    qDebug() << "CvAlgEdgeDetection::setBoundaryExtraction2:" << boundaryExtraction2_;
    emit boundaryExtraction2Change();
}

// 实现setBoundaryThreshold方法
// Setter for the boundary deviation threshold: persists the value to the
// "cvInspection" config section and notifies bindings on change.
void CvAlgEdgeDetection::setBoundaryThreshold(int boundaryThreshold) {
    if (boundaryThreshold == boundaryThreshold_)
        return; // unchanged — skip redundant write/signal

    boundaryThreshold_ = boundaryThreshold;
    ParamValue.setParameter<int>("cvInspection", "boundaryThreshold", boundaryThreshold_);
    qDebug() << "CvAlgEdgeDetection::setBoundaryThreshold:" << boundaryThreshold_;
    emit boundaryThresholdChange();
}

// Select which CV inspection mode is active (edge detection, large/small,
// special diagnosis, ...). Not persisted; notifies bindings on change.
void CvAlgEdgeDetection::setcvSelection(CVSelectionEnum cvSelection)
{
    if (cvSelection == cvSelection_)
        return;

    cvSelection_ = cvSelection;
    LogDebug << "CvAlgEdgeDetection::setcvSelection:" << cvSelection;
    emit cvSelectionChanged();
}

// Update the cached current boundary value and notify bindings on change.
void CvAlgEdgeDetection::changecurentBoundaryValue(int curentBoundaryValue)
{
    if (curentBoundaryValue == curentBoundaryValue_)
        return;

    curentBoundaryValue_ = curentBoundaryValue;
    emit curentBoundaryValueChanged();
}

// Toggle automatic boundary-extraction mode; persists the flag and notifies
// bindings on change.
// NOTE(review): the config key written here is "setboundaryExtractionAuto"
// (with the "set" prefix), unlike every sibling setter which uses the bare
// property name — looks like a copy-paste slip, but the reader's key is not
// visible in this file, so changing it could orphan persisted values. Confirm
// against the code that loads this flag before renaming the key.
void CvAlgEdgeDetection::setboundaryExtractionAuto(bool boundaryExtractionAuto)
{
    if (boundaryExtractionAuto_ != boundaryExtractionAuto) {
        boundaryExtractionAuto_ = boundaryExtractionAuto;
        qDebug() << "CvAlgEdgeDetection::setboundaryExtractionAuto:" << boundaryExtractionAuto_;
        ParamValue.setParameter("cvInspection", "setboundaryExtractionAuto",boundaryExtractionAuto_);
        emit boundaryExtractionAutoChange();
    }
}

// Cache the most recent centre-to-top-line distance measurement and notify
// bindings on change. Not persisted to config.
void CvAlgEdgeDetection::setLargeSmallLastTop(float setLargeSmallLastTop)
{
    if (LargeSmallLastTop_ == setLargeSmallLastTop)
        return;

    LargeSmallLastTop_ = setLargeSmallLastTop;
    emit LargeSmallLastTopChanged();
}
// Cache the most recent centre-to-bottom-line distance measurement and notify
// bindings on change. Not persisted to config.
void CvAlgEdgeDetection::setLargeSmallLastBottom(float LargeSmallLastBottom)
{
    if (LargeSmallLastBottom_ == LargeSmallLastBottom)
        return;

    LargeSmallLastBottom_ = LargeSmallLastBottom;
    emit LargeSmallLastBottomChanged();
}

/**
 * @brief Set the feature-difference detection threshold; persists the value
 *        and notifies bindings on change.
 *
 * Fix: the debug log message was copy-pasted from setLargeSmallLastBottom and
 * named the wrong function; it now reports setresthreshold.
 */
void CvAlgEdgeDetection::setresthreshold(float resthreshold)
{
    LogDebug  << "Attempting to  CvAlgEdgeDetection::setresthreshold:" << resthreshold;
    if(resthreshold!=resthreshold_)
    {
        resthreshold_ = resthreshold;
        ParamValue.setParameter("cvInspection", "resthreshold",resthreshold_);
        emit resthresholdChanged();
    }
}

/**
 * @brief Set the binary post-processing mode (0 = adjust, 2 = invert, other =
 *        pass-through; see preprocessImage). Persists the value and, when the
 *        UI is on the CV inspection step, re-runs a preview on the cached
 *        capture.
 *
 * Fixes: (1) the change signal was emitted BEFORE the member was assigned, so
 * slots saw the stale value — it now follows the assignment; (2) the debug
 * log said "setroverturn" and the warning log named setboundaryExtraction1
 * (copy-paste) — both now report setoverturn.
 */
void CvAlgEdgeDetection::setoverturn(int overturn)
{
    if (overturn_ != overturn) {
        overturn_ = overturn;
        ParamValue.setParameter("cvInspection", "overturn", overturn_);
        LogDebug << "CvAlgEdgeDetection::setoverturn:" << overturn_;
        emit overturnchanged();
        if(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv)
        {
            // Preview needs a cached capture to re-threshold.
            if(imgCapcache.empty())
            {
                ErrorDialogManager::instance().showNonBlockingError("预览阈值错误", "请现进行一次检测");
                LogWarning << "CvAlgEdgeDetection::setoverturn imgCapcache.empty";
                return;
            }
            // Pause live capture so the preview renders on a stable frame.
            ImageProcessor::getInstance().setcaptureImgMode(ImageProcessor::CapPause);
            testBoundaryExtraction1(false);
        }
    }
}

/**
 * @brief Set the small-blob area filter used when cleaning the boundary
 *        binary image (see preprocessImage). Persists the value and, when the
 *        UI is on the CV inspection step, re-runs a preview on the cached
 *        capture.
 *
 * Fixes: (1) the change signal was emitted BEFORE the member was assigned, so
 * slots saw the stale value — it now follows the assignment; (2) the warning
 * log named setboundaryExtraction1 (copy-paste) — it now reports
 * setedgeSmoothing.
 */
void CvAlgEdgeDetection::setedgeSmoothing(int edgeSmoothing)
{
    if (edgeSmoothing_ != edgeSmoothing) {
        edgeSmoothing_ = edgeSmoothing;
        ParamValue.setParameter("cvInspection", "edgeSmoothing", edgeSmoothing_);
        LogDebug << "CvAlgEdgeDetection::edgeSmoothing:" << edgeSmoothing_;
        emit edgeSmoothingChanged();
        if(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv)
        {
            // Preview needs a cached capture to re-threshold.
            if(imgCapcache.empty())
            {
                ErrorDialogManager::instance().showNonBlockingError("预览阈值错误", "请现进行一次检测");
                LogWarning << "CvAlgEdgeDetection::setedgeSmoothing imgCapcache.empty";
                return;
            }
            // Pause live capture so the preview renders on a stable frame.
            ImageProcessor::getInstance().setcaptureImgMode(ImageProcessor::CapPause);
            testBoundaryExtraction1(false);
        }
    }
}


// Re-run edge-defect detection on the cached capture and push the preview to
// the QML image views ("Box1" = raw cache, "Main" = annotated binary). Only
// runs while the UI is on the CV step in edge-detection mode.
// NOTE(review): the isAuto parameter is never read in this body — confirm it
// is intentionally unused.
void CvAlgEdgeDetection::testBoundaryExtraction1(bool isAuto)
{
    if(ImageProcessor::getInstance().selectedStep()==ImageProcessor::MeauCv && cvSelection_ == EdgeDetection)
    {

        cv::Mat boundaryBinaryImg = preprocessImage(imgCapcache,monitorRectCache);

        std::vector<cv::Point> defects, topPoints, bottomPoints;
        cv::Vec4f topFitLine, bottomFitLine;
        double topdiff,bottomdiff;
        int maxDeviation = ParamValue.getParameter("cvInspection","boundaryThreshold").toInt();
        CvAlgEdgeDetection::detectEdgeDefects(boundaryBinaryImg, maxDeviation, defects,
                                              topPoints, bottomPoints, topFitLine,
                                              bottomFitLine,topdiff,bottomdiff);
        cv::Mat imgDraw ;
        cv::cvtColor(boundaryBinaryImg, imgDraw, cv::COLOR_GRAY2RGB);
        // NOTE(review): these circles target boundaryBinaryImg AFTER imgDraw
        // was converted from it, so they never appear in the displayed
        // imgDraw (and a (0,0,255) scalar on a single-channel image draws
        // value 0). Looks like they were meant for imgDraw — confirm intent.
        for (const auto& point : defects) {
            cv::circle(boundaryBinaryImg, point, 20, cv::Scalar(0, 0, 255), 2);

        }


        if(cvSelection_ == EdgeDetection)
            drawDefectsAndEdges(imgDraw, defects, topPoints, bottomPoints, topFitLine,
                                bottomFitLine, true, monitorRectCache);

        // Report the larger of the two deviations to the UI.
        // NOTE(review): topdiff/bottomdiff are doubles narrowed implicitly to
        // the int parameter of changecurentBoundaryValue — truncation assumed
        // acceptable here.
        if(topdiff > bottomdiff)
            changecurentBoundaryValue(topdiff);
        else
            changecurentBoundaryValue(bottomdiff);

        ImageProvider::getInstance().image2qml(imgCapcache,"Box1");
        ImageProvider::getInstance().image2qml(imgDraw, "Main");
    }
}

// Setter for the upper-band maximum tolerance used in large/small-edge
// judgement; persists the value and notifies bindings on change.
void CvAlgEdgeDetection::setlargeSmallUpperMax(float largeSmallUpperMax)
{
    if (largeSmallUpperMax == largeSmallUpperMax_)
        return; // unchanged — skip redundant write/signal

    largeSmallUpperMax_ = largeSmallUpperMax;
    ParamValue.setParameter<float>("cvInspection","largeSmallUpperMax",largeSmallUpperMax_);
    LogDebug << "CvAlgEdgeDetection::setlargeSmallUpperMax: " << largeSmallUpperMax_;
    emit largeSmallUpperMaxChanged();
}

// Setter for the upper-band minimum tolerance used in large/small-edge
// judgement; persists the value and notifies bindings on change.
void CvAlgEdgeDetection::setlargeSmallUpperMin(float largeSmallUpperMin)
{
    if (largeSmallUpperMin == largeSmallUpperMin_)
        return; // unchanged — skip redundant write/signal

    largeSmallUpperMin_ = largeSmallUpperMin;
    ParamValue.setParameter<float>("cvInspection","largeSmallUpperMin",largeSmallUpperMin_);
    LogDebug << "CvAlgEdgeDetection::setlargeSmallUpperMin: " << largeSmallUpperMin_;
    emit largeSmallUpperMinChanged();
}


// Setter for the lower-band maximum tolerance used in large/small-edge
// judgement; persists the value and notifies bindings on change.
void CvAlgEdgeDetection::setlargeSmallLowerMax(float largeSmallLowerMax)
{
    if (largeSmallLowerMax == largeSmallLowerMax_)
        return; // unchanged — skip redundant write/signal

    largeSmallLowerMax_ = largeSmallLowerMax;
    ParamValue.setParameter<float>("cvInspection","largeSmallLowerMax",largeSmallLowerMax_);
    LogDebug << "CvAlgEdgeDetection::setlargeSmallLowerMax: " << largeSmallLowerMax_;
    emit largeSmallLowerMaxChanged();
}

/**
 * @brief Set the lower-band minimum tolerance used in large/small-edge
 *        judgement; persists the value and notifies bindings on change.
 *
 * Fix: the debug log labeled the message with the signal name
 * "largeSmallLowerMinChanged" instead of this setter; it now reports
 * setlargeSmallLowerMin, matching the sibling setters.
 */
void CvAlgEdgeDetection::setlargeSmallLowerMin(float largeSmallLowerMin)
{
    if(largeSmallLowerMin != largeSmallLowerMin_)
    {
        largeSmallLowerMin_ = largeSmallLowerMin;
        ParamValue.setParameter<float>("cvInspection","largeSmallLowerMin",largeSmallLowerMin_);
        LogDebug << "CvAlgEdgeDetection::setlargeSmallLowerMin: " << largeSmallLowerMin_;
        emit largeSmallLowerMinChanged();
    }

}

// Build an emphasize-region binary mask from a grayscale image: equalize the
// histogram, threshold at emphasizeThreshold, optionally invert, then blank
// every connected region smaller than the m_emphasizeBinaryfilter area.
void CvAlgEdgeDetection::emphasizeprocessImage(const cv::Mat& inputImage, cv::Mat& outputImage, int emphasizeThreshold, bool isInvert )
{
    // Normalize contrast, then binarize at the caller-supplied threshold.
    cv::equalizeHist(inputImage, outputImage);
    cv::threshold(outputImage, outputImage, emphasizeThreshold, 255, cv::THRESH_BINARY);

    if (isInvert)
        cv::bitwise_not(outputImage, outputImage);

    // Speckle suppression: fill small outer contours with black.
    std::vector<std::vector<cv::Point>> regions;
    cv::findContours(outputImage, regions, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    for (size_t idx = 0; idx < regions.size(); ++idx) {
        if (cv::contourArea(regions[idx]) < m_emphasizeBinaryfilter)
            cv::drawContours(outputImage, regions, static_cast<int>(idx), cv::Scalar(0), cv::FILLED);
    }
}

/**
 * @brief Tighten each cached emphasize mask by intersecting it with a freshly
 *        processed mask from the corresponding training image.
 *
 * Each image is binarized via emphasizeprocessImage() using that region's
 * configured threshold/invert flag, then emPhasizeBinaryList[i] is replaced
 * with the pixels set in BOTH the cached mask and the new mask.
 *
 * @param ImageList one grayscale image per cached emphasize region.
 * @return false (with a user-facing error dialog) on count mismatch or
 *         size/type mismatch; true on success.
 *
 * Fix: both log messages named recognizeEmphasizeMaxBlend (copy-paste from
 * the sibling function); they now report trainEmphasizeMaxBlend.
 */
bool CvAlgEdgeDetection::trainEmphasizeMaxBlend(QVector<cv::Mat> ImageList)
{
    if (ImageList.size() != emPhasizeBinaryList.size())
    {
        ErrorDialogManager::instance().showNonBlockingError("无法检测添加强调区", "请先识别或者预览");
        qWarning() << "Fail trainEmphasizeMaxBlend !=";
        return false;
    }

    for (int i = 0; i < emPhasizeBinaryList.size(); i++)
    {
        cv::Mat image = ImageList[i];

        // Binarize with this region's configured threshold / invert flag.
        cv::Mat processedImage;
        emphasizeprocessImage(image, processedImage, emphasizeBinaryList()[i].toInt(), emphasizeBinaryliserveList()[i].toBool());

        // Cached and fresh masks must agree in geometry and depth.
        cv::Mat img1 = emPhasizeBinaryList[i];
        if (img1.size() != processedImage.size() || img1.type() != processedImage.type()) {
            qWarning() << "Images must be of the same size and type.";
            ErrorDialogManager::instance().showNonBlockingError("无法检测添加强调区", "格式错误");
            return false;
        }

        // Keep only pixels present in BOTH masks (logical AND as a mask fill).
        cv::Mat result = cv::Mat::zeros(img1.size(), img1.type());
        cv::Mat mask;
        cv::bitwise_and(img1, processedImage, mask);
        result.setTo(255, mask);

        // Replace the cached mask with the tightened one.
        emPhasizeBinaryList[i] = result;
    }

    qInfo() << "Successful in CvAlgEdgeDetection::trainEmphasizeMaxBlend(QVector<cv::Mat> ImageList)";
    return true;
}

bool CvAlgEdgeDetection::recognizeEmphasizeMaxBlend(QVector<cv::Mat> ImageList ,
                                                    QVector<QPoint> ImagePointList ,
                                                    cv::Mat &draw)
{
    // Check each live emphasis region against the trained baseline mask.
    // Baseline pixels that are missing from the (dilated) live mask form the
    // residual; any residual contour whose area reaches m_emphasizethreshold
    // is treated as a defect: it is drawn on `draw` (offset back into
    // full-image coordinates via ImagePointList) and false is returned.
    if (ImageList.size() != emPhasizeBinaryList.size())
    {
        ErrorDialogManager::instance().showNonBlockingError("无法检测添加强调区", "请先识别或者预览");
        // BUG FIX: this log line previously named the sibling
        // trainEmphasizeMaxBlend(), making failures hard to trace.
        qWarning() << "Fail recognizeEmphasizeMaxBlend !=";
        return false;
    }
    qDebug() << "ImageList.size :" << ImageList.size() <<
        " ImagePointList.size:" << ImagePointList.size();
    for (int i = 0; i < emPhasizeBinaryList.size(); i++)
    {
        cv::Mat image = ImageList[i];

        // Shared preprocessing: equalize -> threshold -> optional invert ->
        // drop small blobs (same pipeline as training).
        cv::Mat processedImage;
        emphasizeprocessImage(image, processedImage, emphasizeBinaryList()[i].toInt(), emphasizeBinaryliserveList()[i].toBool());

        // Baseline and live mask must be directly comparable.
        cv::Mat img1 = emPhasizeBinaryList[i];
        if (img1.size() != processedImage.size() || img1.type() != processedImage.type()) {
            qWarning() << "Images must be of the same size and type.";
            ErrorDialogManager::instance().showNonBlockingError("无法检测添加强调区", "格式错误");
            return false;
        }

        // Dilate the live mask so small positional jitter is tolerated.
        // The structuring element must have an odd side length.
        int kernelInt = m_emphasizeBinaryKernel;
        if (kernelInt % 2 == 0) {
            kernelInt += 1;
        }
        cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(kernelInt, kernelInt));
        cv::Mat dilatedImage;
        cv::dilate(processedImage, dilatedImage, kernel);

        // Residual = baseline minus dilated live mask (saturating subtract).
        cv::Mat result;
        cv::subtract(img1, dilatedImage, result);
        std::vector<std::vector<cv::Point>> contours;
        cv::findContours(result, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

        // Any sufficiently large residual contour is a defect.
        for (const auto& contour : contours) {
            double area = cv::contourArea(contour);
            if (area >= m_emphasizethreshold) {
                // Shift contour from region-local to full-image coordinates.
                std::vector<cv::Point> offsetContour;
                for (const auto& point : contour) {
                    offsetContour.push_back(cv::Point(ImagePointList[i].x() + point.x ,ImagePointList[i].y() + point.y));
                }

                cv::drawContours(draw, std::vector<std::vector<cv::Point>>{offsetContour}, -1, cv::Scalar(255, 0, 255), 4);
                return false;  // first defect aborts the whole check
            }
        }
    }

    // BUG FIX: success log previously named trainEmphasizeMaxBlend().
    qInfo() << "Successful in CvAlgEdgeDetection::recognizeEmphasizeMaxBlend(QVector<cv::Mat> ImageList)";
    return true;
}




// Preview entry point for the emphasis-binary feature: validates that the
// feature is enabled and that a previewed frame exists for the currently
// selected emphasis region, rebuilds the baseline masks, then runs the
// preprocessing/preview pass on the selected region.
void CvAlgEdgeDetection::emphasizeBinaryTest()
{
    // Serialize against concurrent emphasis processing.
    QMutexLocker locker(&emphasmutex);
    qInfo() << " CvAlgEdgeDetection::emphasizeBinaryTest() emphasizeImgaeList:" <<emphasizeImgaeList.size() << "index: " << emphasizeIndex()   ;
    // Feature must be enabled and the selected index must exist in the
    // configured region list.
    if(!m_emphasizeEnable || emphasizeImgaeList.size() <= emphasizeIndex())
    {
        ErrorDialogManager::instance().showNonBlockingError("无法预览", "请先启动强调区,并设置强调区域");
        return;
    }
    // A previously previewed/captured frame must exist for this index
    // (also rejects a negative index before it is used below).
    if(emphasizeImgaeListLast.size() <= emphasizeIndex() || emphasizeIndex() < 0)
    {
        ErrorDialogManager::instance().showNonBlockingError("无法预览", "请先预览,或者单次");
        return;
    }
    // NOTE: shallow cv::Mat copy — emphasePreprocess() may see shared data.
    cv::Mat  emphaseBase = emphasizeImgaeList[emphasizeIndex()];

    // Rebuild the per-region baseline binary masks from the current images.
    resetEmPhasizeBinaryList();


    // NOTE(review): the bounds check above is on emphasizeImgaeListLast, but
    // this call indexes emphaseImageLast — confirm the two lists are always
    // kept the same length, otherwise this can index out of range.
    emphasePreprocess(emphaseImageLast[emphasizeIndex()] , emphaseBase , true);
}
void CvAlgEdgeDetection::resetEmPhasizeBinaryList()
{
    // Rebuild the baseline binary mask of every emphasis region from the
    // current emphasizeImgaeList, using the same pipeline as the
    // recognition path (emphasizeprocessImage): equalize -> threshold ->
    // optional invert -> drop small connected components.
    emPhasizeBinaryList.clear();
    for(int k=0 ; k < emphasizeImgaeList.size() ; k++ )
    {
        cv::Mat emphaseBaseBinary;
        // BUG FIX: a shallow copy shared the stored image's pixel buffer, so
        // the in-place equalizeHist below mutated emphasizeImgaeList[k] —
        // repeated calls progressively re-equalized the stored baseline.
        cv::Mat emphaseBase = emphasizeImgaeList[k].clone();
        std::vector<std::vector<cv::Point>> contours;

        // BUG FIX: equalizeHist() was accidentally applied twice here, while
        // emphasizeprocessImage() equalizes once; baseline and live masks
        // must use identical preprocessing to be comparable.
        cv::equalizeHist(emphaseBase , emphaseBase);
        cv::threshold(emphaseBase, emphaseBaseBinary,emphasizeBinaryList()[k].toInt(), 255, cv::THRESH_BINARY);
        if(emphasizeBinaryliserveList()[k].toBool())
            cv::bitwise_not(emphaseBaseBinary, emphaseBaseBinary);

        // Remove connected components smaller than the configured filter area.
        cv::findContours(emphaseBaseBinary, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
        for (size_t j = 0; j < contours.size(); ++j) {
            if (cv::contourArea(contours[j]) < m_emphasizeBinaryfilter) {
                cv::drawContours(emphaseBaseBinary, contours, static_cast<int>(j), cv::Scalar(0), cv::FILLED);
            }
        }
        emPhasizeBinaryList.append(emphaseBaseBinary.clone());
    }
}

void CvAlgEdgeDetection::addCurentemphasizemaxBlendByCache(int id)
{
    qInfo() << "addCurentemphasizemaxBlendByCache : " << id ;
    FaultCacheModel::FaultImageCacheItem item = FaultCacheModel::instance().getImageEmListBy(id);
    AfterDetect::getInstance().removeCacheDetect(item.id , 37);
    trainEmphasizeMaxBlend(item.imageList);
    FaultCacheModel::instance().removeRowByIndex(id);

}

void CvAlgEdgeDetection::addCurentemphasizemaxBlendByAuto(bool isDefect)
{
    // Automatic follow-up after a detection cycle: when the current frame is
    // judged OK (isDefect == false), fold its emphasis masks into the trained
    // baseline, then re-check every still-untrained cached fault that carries
    // defect code 37 against the updated baseline.
    if(currentProcess == -1)
    {
        return;
    }
    qInfo() << "addCurentemphasizemaxBlendByAuto : " << currentProcess << " isDefect:" << isDefect;
    if(!isDefect)
    {
        // Train the baseline with the current frame and remove it from caches.
        FaultCacheModel::FaultImageCacheItem item = FaultCacheModel::instance().getImageEmListBy(currentProcess);
        AfterDetect::getInstance().removeCacheDetect(item.id , 37);
        trainEmphasizeMaxBlend(item.imageList);
        FaultCacheModel::instance().removeRowByIndex(currentProcess);

        // Drop defect 37 from the current item's NG list and publish the
        // updated description.
        std::vector<int> vec = item.ngcard.getDefects();
        vec.erase(std::remove(vec.begin(), vec.end(), 37), vec.end());
        DetectCacheModel::instance().changeItemByID(item.id , NGTable::getInstance().parseNGListToDescription(vec));

        // Re-check all cached, untrained fault items against the new baseline.
        // NOTE(review): removeRowByIndex() below may mutate cacheList while we
        // iterate it by reference — confirm the model detaches/copies,
        // otherwise this loop should run over a snapshot of ids.
        QVector<FaultCacheModel::FaultImageCacheItem>& cacheList = FaultCacheModel::instance().getCacheList();
        for (int i = 0; i < cacheList.size(); ++i) {
            FaultCacheModel::FaultImageCacheItem& currentItem = cacheList[i];
            // Skip items already folded into the baseline.
            if (!currentItem.isTrain)
            {
                // Renamed from `vec` to stop shadowing the outer variable.
                std::vector<int> defects = currentItem.ngcard.getDefects();
                // Only items flagged with defect 37 are re-checked here.
                if (std::find(defects.begin(), defects.end(), 37) != defects.end())
                {
                    cv::Mat imgDraw = currentItem.aiImage.clone();
                    qInfo() << "into currentItem :" ;
                    if(recognizeEmphasizeMaxBlend(currentItem.imageList , currentItem.imagePointList , imgDraw) == true)
                    {
                        qInfo() << "["<< currentItem.id << "] has been train";
                        AfterDetect::getInstance().removeCacheDetect(currentItem.id , 37);
                        // BUG FIX: previously removed `currentProcess` (the
                        // frame already removed above) instead of the cached
                        // item that just passed re-checking.
                        FaultCacheModel::instance().removeRowByIndex(currentItem.id);
                        defects.erase(std::remove(defects.begin(), defects.end(), 37), defects.end());
                        // BUG FIX: previously updated `item.id` (the current
                        // frame) instead of the cached item being re-checked.
                        DetectCacheModel::instance().changeItemByID(currentItem.id , NGTable::getInstance().parseNGListToDescription(defects));
                    }else
                    {
                        qInfo() << "["<< currentItem.id << "] has No train";
                        currentItem.resultImage = imgDraw;
                    }
                }
            }
        }
    }

    characterDefectTraining();
}



// Convenience overload: train the emphasis baseline with the most recently
// previewed/captured image set (emphasizeImgaeListLast).
void CvAlgEdgeDetection::addCurentemphasizemaxBlend()
{
    trainEmphasizeMaxBlend(emphasizeImgaeListLast);
}

// Compare two equally-sized binary logo crops. They count as the "same
// style" (returns true) unless the eroded absolute-difference image contains
// at least one fully white blockSize x blockSize window; each such window is
// outlined in red on `out` (offset by `Box` into full-image coordinates).
bool CvAlgEdgeDetection::logoSizeisSameStyle(cv::Mat binaryImage1,
                                             cv::Mat binaryImage2 , cv::Mat &out , cv::Rect Box)
{
    bool isSame = true;
    // The two crops must match in size to be comparable.
    if (binaryImage1.size() != binaryImage2.size())
    {
        return false; // size mismatch: treat as different styles
    }

    // Per-pixel absolute difference between the two binary images.
    cv::Mat diffImage;
    cv::absdiff(binaryImage1, binaryImage2, diffImage);

    // Erode with a logoSizeRedundancy-sized square kernel so thin, isolated
    // differences are discarded (kernel size is configurable, not a fixed
    // 3x3 as an earlier comment claimed).
    cv::Mat erodedImage;
    cv::Mat kernel = cv::Mat::ones(get_logoSizeRedundancy(), get_logoSizeRedundancy(), CV_8U);  // square structuring element
    cv::erode(diffImage, erodedImage, kernel);
    // Integral image lets each window sum be computed in O(1).
    cv::Mat integralImage;
    cv::integral(erodedImage, integralImage, CV_32S); // summed-area table

    // Slide a blockSize x blockSize window over the eroded difference image
    // and flag every window that is entirely white.
    int blockSize = get_logoSizethreshold();
    for (int i = 0; i <= erodedImage.rows - blockSize; ++i)
    {
        for (int j = 0; j <= erodedImage.cols - blockSize; ++j)
        {
            // Sum of the current window, from the integral image.
            int sum = integralImage.at<int>(i + blockSize, j + blockSize)
                      - integralImage.at<int>(i, j + blockSize)
                      - integralImage.at<int>(i + blockSize, j)
                      + integralImage.at<int>(i, j);

            if (sum == blockSize * blockSize * 255) // every pixel in the window is 255
            {
                isSame =  false; // styles differ; keep scanning so all windows get drawn
                // qDebug() << "Test" << QPoint(Box.x + j, Box.y + i) << "Point2:" <<  QPoint(Box.x + j + blockSize, Box.y + i + blockSize);
                cv::rectangle(out, cv::Point(Box.x + j, Box.y + i), cv::Point(Box.x + j + blockSize, Box.y + i + blockSize), cv::Scalar(0, 0, 255),  2);
            }
        }
    }
    // true  -> no fully-white window found (same style)
    // false -> at least one difference window was found and outlined
    return isSame;
}




// Re-locate the stored logo template inside `greayImage` by template
// matching within a search window around the saved ROI, then return a
// thresholded (optionally inverted) binary crop of the refined region.
// `boundingBox` receives the refined logo position. Returns an empty Mat
// when the item is empty or matching is impossible.
cv::Mat CvAlgEdgeDetection::extractLogoSizeFeatures(cv::Mat greayImage, IdentifiImg &logosizeItem,
                                                    cv::Rect &boundingBox, bool isShow)
{
    if (logosizeItem.isEmpty())
    {
        return cv::Mat();
    }

    // Stored ROI position and size.
    int x = logosizeItem.getRoi().x;
    int y = logosizeItem.getRoi().y;
    int width = logosizeItem.getRoi().width;
    int height = logosizeItem.getRoi().height;

    // Search-window margin around the stored ROI, in pixels
    // (fixed earlier comment that claimed 50).
    int offsetX = 20;

    // Expand the ROI by the margin, clamped to the image bounds.
    int searchX = std::max(0, x - offsetX);
    int searchY = std::max(0, y - offsetX);
    int searchWidth = std::min(greayImage.cols - searchX, width + 2 * offsetX);
    int searchHeight = std::min(greayImage.rows - searchY, height + 2 * offsetX);

    // Search window containing the offset region.
    cv::Rect searchRegion(searchX, searchY, searchWidth, searchHeight);
    cv::Mat searchImage = greayImage(searchRegion);

    // Template image (the reference logo).
    cv::Mat baseImage = logosizeItem.getImage();

    // ROBUSTNESS FIX: matchTemplate asserts when the template is larger than
    // the search image — bail out instead of crashing.
    if (baseImage.empty() ||
        searchImage.cols < baseImage.cols || searchImage.rows < baseImage.rows)
    {
        qWarning() << "extractLogoSizeFeatures: search window smaller than template";
        return cv::Mat();
    }

    // Template matching over the search window.
    cv::Mat result;
    cv::matchTemplate(searchImage, baseImage, result, cv::TM_CCOEFF_NORMED);

    // Best-match location within the search window.
    cv::Point maxLoc;
    cv::minMaxLoc(result, 0, 0, 0, &maxLoc);

    // Best-match position relative to the full image.
    int newX = searchX + maxLoc.x;
    int newY = searchY + maxLoc.y;

    // ROBUSTNESS FIX: intersect the refined ROI with the image rectangle so
    // greayImage(accurateRegion) cannot throw when the stored width/height
    // reaches past the image border.
    cv::Rect accurateRegion = cv::Rect(newX, newY, width, height)
                              & cv::Rect(0, 0, greayImage.cols, greayImage.rows);
    boundingBox = accurateRegion;
    // Crop the refined region.
    cv::Mat accurateRoiImage = greayImage(accurateRegion);

    // Median filter to suppress salt-and-pepper noise.
    cv::Mat filteredImage;
    cv::medianBlur(accurateRoiImage, filteredImage, 3);

    // Histogram equalization is currently disabled for LogoSize.
    cv::Mat equalizedImage =filteredImage ;
    //cv::equalizeHist(filteredImage, equalizedImage);

    // Binarize at the configured threshold.
    cv::Mat binaryImage;
    cv::threshold(equalizedImage, binaryImage, m_logoSizeBinary, 255, cv::THRESH_BINARY);

    // Optionally invert the binary image.
    if (m_logoSizeisFlip)
    {
        cv::bitwise_not(binaryImage, binaryImage);
    }

    //boundingBox = cv::boundingRect(binaryImage);

    // Optional preview: push the binary image to the UI when the LogoSize
    // inspection mode is active.
    if (isShow && cvSelection_ == LogoSize)
    {
        //cv::Mat rgbImage;
        //cv::cvtColor(binaryImage, rgbImage, cv::COLOR_GRAY2RGB);
        //cv::rectangle(rgbImage, boundingBox, cv::Scalar(0, 0, 255), 2);
        ImageProcessor::getInstance().setcaptureImgMode(ImageProcessor::CapPause);
        //ImageProvider::getInstance().image2qml(result,"Box2");
        //ImageProvider::getInstance().image2qml(binaryImage, "Box1");
        ImageProvider::getInstance().image2qml(binaryImage, "Main");
    }

    return binaryImage;
}

// Three-stage "same style" test between a current and a baseline binary logo:
//  1. aspect ratio and absolute width/height deviation within `tolerance`,
//  2. Hu-moment shape similarity of the largest contours within
//     `shapeTolerance`,
//  3. pixel overlap ratio (on 100x100 normalized images) of at least
//     `overlapThreshold`.
// The three metric out-params are filled as far as the stages progress.
bool CvAlgEdgeDetection::logoSizeisSameStyle(cv::Mat curBinary, cv::Mat baseBinary, cv::Rect curBox, cv::Rect baseBox,
                                             double& widthHeightRatioDiff, double& shapeSimilarity, double& overlapRatio,
                                             double tolerance , double shapeTolerance ,double overlapThreshold )
{
    // ROBUSTNESS FIX: degenerate boxes would divide by zero below.
    if (curBox.width <= 0 || curBox.height <= 0 || baseBox.width <= 0 || baseBox.height <= 0) {
        return false;
    }

    // Step 1: aspect-ratio and size comparison.
    double ratioCur = static_cast<double>(curBox.width) / curBox.height;
    double ratioBase = static_cast<double>(baseBox.width) / baseBox.height;
    widthHeightRatioDiff = std::abs(ratioCur - ratioBase);

    // BUG FIX: the width/height deviation terms used integer division, which
    // truncated to 0 for any deviation smaller than the base dimension —
    // effectively disabling those two checks.
    if (widthHeightRatioDiff > tolerance ||
        std::abs(curBox.width - baseBox.width) / static_cast<double>(baseBox.width) > tolerance ||
        std::abs(curBox.height - baseBox.height) / static_cast<double>(baseBox.height) > tolerance) {
        return false;
    }

    // Step 2: shape-similarity check on the largest external contour of each
    // (already-cropped) binary image.
    cv::Mat curCropped = curBinary;
    cv::Mat baseCropped = baseBinary;

    std::vector<std::vector<cv::Point>> contoursCur, contoursBase;
    cv::findContours(curCropped, contoursCur, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::findContours(baseCropped, contoursBase, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    if (!contoursCur.empty() && !contoursBase.empty()) {
        auto maxContourCur = *std::max_element(contoursCur.begin(), contoursCur.end(),
                                               [](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {
                                                   return cv::contourArea(a) < cv::contourArea(b);
                                               });
        auto maxContourBase = *std::max_element(contoursBase.begin(), contoursBase.end(),
                                                [](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {
                                                    return cv::contourArea(a) < cv::contourArea(b);
                                                });

        shapeSimilarity = cv::matchShapes(maxContourCur, maxContourBase, cv::CONTOURS_MATCH_I1, 0);
        if (shapeSimilarity > shapeTolerance) {
            return false;
        }
    } else {
        shapeSimilarity = 1.0; // no usable contour: report maximal dissimilarity
        return false;
    }

    // Step 3: pixel-overlap check on size-normalized (100x100) images.
    cv::Mat resizedCur, resizedBase;
    cv::resize(curCropped, resizedCur, cv::Size(100, 100));
    cv::resize(baseCropped, resizedBase, cv::Size(100, 100));

    // Overlap = AND of the two normalized masks.
    cv::Mat overlap;
    cv::bitwise_and(resizedCur, resizedBase, overlap);
    // ROBUSTNESS FIX: a blank current image previously produced a 0/0
    // division (NaN overlapRatio).
    int curArea = cv::countNonZero(resizedCur);
    if (curArea == 0) {
        overlapRatio = 0.0;
        return false;
    }
    overlapRatio = static_cast<double>(cv::countNonZero(overlap)) / curArea;
    if (overlapRatio < overlapThreshold) {
        return false;
    }

    return true;
}

// Measure the part length on the rotated base image: locate the left and
// right edge X coordinates inside the configured length ROI and log them.
// Does nothing when no rotated base image has been captured yet.
void CvAlgEdgeDetection::lenthTest(int isShow)
{
    qInfo() << "CvAlgEdgeDetection::lenthTest " << isShow;
    // ROBUSTNESS FIX: initialize the outputs so indeterminate values are
    // never logged if findLineDistanceV2() leaves one untouched.
    int leftX = 0, rightX = 0;
    if(imageRoteBase.empty())
    {
        return;
    }
    findLineDistanceV2(imageRoteBase,
                       m_LengthROI,
                       m_lengthDetectionThreshold,
                       m_lengthDetectionThresholdRight,leftX,rightX,
                       m_lengthRedundancy, isShow);
    qInfo() << "CvAlgEdgeDetection::lenthTest leftX:" <<
        leftX << " rightX:" << rightX;
}



// 计算给定图像中每个颜色通道的比例
// Fraction of `img`'s pixels whose HSV value (in `hsv_img`) falls inside
// [lower, upper]. Returns a value in [0, 1]; 0 for an empty image.
float CvAlgEdgeDetection::calculateColorPercentage(const cv::Mat& img, const cv::Mat& hsv_img, cv::Scalar lower, cv::Scalar upper) {
    // ROBUSTNESS FIX: an empty image would make the denominator zero.
    if (img.empty()) {
        return 0.0f;
    }
    cv::Mat mask;
    cv::inRange(hsv_img, lower, upper, mask);  // select pixels in the HSV range
    float color_area = static_cast<float>(cv::countNonZero(mask));  // matching pixel count
    return color_area / static_cast<float>(img.rows * img.cols);  // ratio over all pixels
}

// 加载基准图并计算颜色比例
// Compute and cache, for every configured colour range, the fraction of
// baseline-image pixels whose HSV value falls inside that range. Results are
// stored in baselineColorRatios keyed by the colour name.
void CvAlgEdgeDetection::loadBaselineColorRatios(const cv::Mat& baselineImg, const cv::Mat& baselineHSV) {
    for (const auto& range : colorRanges) {
        // range.first: colour name; range.second: {lower, upper} HSV bounds.
        const float ratio = calculateColorPercentage(baselineImg, baselineHSV,
                                                     range.second.first,
                                                     range.second.second);
        baselineColorRatios.insert(QString::fromStdString(range.first), ratio);
    }
}

// 比较待测图的颜色比例变化
void CvAlgEdgeDetection::compareColorChange(const cv::Mat& baselineImg, const cv::Mat& testImg) {

    cv::Mat baselineHSV, testHSV;
    cv::cvtColor(baselineImg, baselineHSV, cv::COLOR_BGR2HSV);  // 转换基准图到HSV格式
    cv::cvtColor(testImg, testHSV, cv::COLOR_BGR2HSV);  // 转换待测图到HSV格式

    // 加载并计算基准图的颜色比例
    loadBaselineColorRatios(baselineImg, baselineHSV);

    // 遍历每个颜色范围并计算其在待测图中的比例
    for (const auto& color : colorRanges) {
        const std::string& colorName = color.first;
        const cv::Scalar& lower = color.second.first;
        const cv::Scalar& upper = color.second.second;

        // 获取基准图中的颜色比例
        float baselinePercentage = baselineColorRatios.value(QString::fromStdString(colorName), -1);

        if (baselinePercentage != -1) {
            // 计算待测图中的颜色比例
            float testPercentage = calculateColorPercentage(testImg, testHSV, lower, upper);
            float difference = std::abs(testPercentage - baselinePercentage);

            // 使用 qDebug 输出日志
            qDebug() << QString::fromStdString(colorName)
                     << "color percentage change:"
                     << "Baseline:" << baselinePercentage * 100 << "%"
                     << "Test:" << testPercentage * 100 << "%";

            // 判断颜色比例变化是否显著
            if (difference > 0.05f) {  // 如果变化超过5%，则视为显著变化
                qDebug() << QString::fromStdString(colorName) << "color has changed significantly.";
            }
        }
    }
}

// Estimate the homography mapping image1 onto base: ORB features, 2-NN
// matching with Lowe's ratio test, a spatial filter (bounded positional
// offset, minimum spacing between chosen points), then RANSAC.
// Returns an empty cv::Mat on any failure.
cv::Mat CvAlgEdgeDetection::getHomographyMatrix(const cv::Mat& image1, const cv::Mat& base) {
    QElapsedTimer timer;
    timer.start();  // wall-clock timing for the debug logs below

    if (image1.empty() || base.empty()) {
        qWarning() << "getHomographyMatrix 无法读取图像" ;
        return cv::Mat();
    }

    // Convert to grayscale when a 3-channel image is supplied.
    cv::Mat gray_image1, gray_base;
    if (image1.channels() == 3) {
        cv::cvtColor(image1, gray_image1, cv::COLOR_BGR2GRAY);
    } else {
        gray_image1 = image1;
    }

    if (base.channels() == 3) {
        cv::cvtColor(base, gray_base, cv::COLOR_BGR2GRAY);
    } else {
        gray_base = base;
    }
    // ORB detector (feature count is configurable).
    cv::Ptr<cv::ORB> orb = cv::ORB::create(isSpecialV2orbNum(), 1.5,  8, 0, 0, 2, cv::ORB::FAST_SCORE);

    // Keypoints and descriptors for both images.
    std::vector<cv::KeyPoint> keypoints1, keypoints2;
    cv::Mat descriptors1, descriptors2;
    orb->detectAndCompute(gray_image1, cv::noArray(), keypoints1, descriptors1);
    orb->detectAndCompute(gray_base, cv::noArray(), keypoints2, descriptors2);

    // ROBUSTNESS FIX: knnMatch throws on an empty descriptor set (e.g. a
    // featureless image); fail gracefully instead.
    if (descriptors1.empty() || descriptors2.empty()) {
        qWarning() << "getHomographyMatrix: no ORB descriptors detected";
        return cv::Mat();
    }

    qint64 elapsed = timer.elapsed();  // milliseconds
    qDebug() << "Elapsed time 2:" << elapsed << "ms";

    // Brute-force Hamming matcher; crossCheck off because we ratio-test.
    cv::BFMatcher matcher(cv::NORM_HAMMING, false);
    std::vector<cv::DMatch> matches;

    std::vector<std::vector<cv::DMatch>> knn_matches;
    matcher.knnMatch(descriptors1, descriptors2, knn_matches, 2);

    // Lowe's ratio test.
    const float ratio_thresh = 0.7f;
    for (size_t i = 0; i < knn_matches.size(); i++) {
        // ROBUSTNESS FIX: a query can receive fewer than 2 neighbours, in
        // which case [1] was an out-of-range access.
        if (knn_matches[i].size() < 2) {
            continue;
        }
        if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance) {
            matches.push_back(knn_matches[i][0]);
        }
    }

    // Best matches first, so the spatial filter below keeps the strongest.
    std::sort(matches.begin(), matches.end(), [](const cv::DMatch& a, const cv::DMatch& b) {
        return a.distance < b.distance;
    });

    elapsed = timer.elapsed();
    qDebug() << "Elapsed time 3:" << elapsed << "ms";

    const float max_position_diff = 20.0;  // max allowed x/y offset between matched points
    const float min_distance = 3.0;        // min spacing between accepted points
    std::vector<cv::DMatch> good_matches;
    std::vector<cv::Point2f> used_points_image1;
    std::vector<cv::Point2f> used_points_image2;

    // Spatial filtering of the ratio-tested matches.
    for (const auto& match : matches) {
        cv::Point2f pt1 = keypoints1[match.queryIdx].pt;
        cv::Point2f pt2 = keypoints2[match.trainIdx].pt;

        // Reject matches that moved further than max_position_diff.
        if (std::abs(pt1.x - pt2.x) <= max_position_diff && std::abs(pt1.y - pt2.y) <= max_position_diff) {
            // Enforce a minimum spacing to already-accepted points so the
            // homography is estimated from well-spread correspondences.
            bool valid = true;
            for (const auto& used_pt : used_points_image1) {
                if (cv::norm(pt1 - used_pt) <= min_distance) {
                    valid = false;
                    break;
                }
            }
            for (const auto& used_pt : used_points_image2) {
                if (cv::norm(pt2 - used_pt) <= min_distance) {
                    valid = false;
                    break;
                }
            }

            if (valid) {
                good_matches.push_back(match);
                used_points_image1.push_back(pt1);
                used_points_image2.push_back(pt2);
            }
        }
    }

    // elapsed = timer.elapsed();
    // qDebug() << "Elapsed time 4:" << elapsed << "ms";

    // Gather the surviving point correspondences.
    std::vector<cv::Point2f> src_points;
    std::vector<cv::Point2f> dst_points;
    for (const auto& match : good_matches) {
        src_points.push_back(keypoints1[match.queryIdx].pt);
        dst_points.push_back(keypoints2[match.trainIdx].pt);
    }
    elapsed = timer.elapsed();
    qDebug() << "Elapsed time 5:" << elapsed << "ms";

    // Project-level sanity check (e.g. enough points for findHomography).
    if (!validatePoints(src_points, dst_points)) {
        qWarning() << "Point validation failed. Exiting..." ;
        return cv::Mat();
    }

    // Homography via RANSAC (3 px reprojection threshold).
    cv::Mat H = cv::findHomography(src_points, dst_points, cv::RANSAC, 3.0);
    qDebug() << "get H :" << src_points.size();
    return H;
}
