#pragma execution_character_set("utf-8")
#include "jointimage.h"
#include <cmath>
#include <iostream>
#include <QDebug>
#include <QMessageBox>
using namespace std;
using namespace cv;
// Trivial constructor: just forwards the parent to QObject.
// NOTE(review): pointer members (mp_srcImg, mp_greyImg, ...) are not
// initialised here — confirm they are initialised in the class header,
// otherwise they start out indeterminate.
jointImage::jointImage(QObject *parent) : QObject(parent)
{
}
//szDirPath = "E:/jointImg/dstImage"  (directory for intermediate/result images)
//szPic: full path of the image to analyse
// Extracts the dominant (tower) contours from the image:
// threshold -> morphological open -> contour filtering -> Canny edges.
// Every intermediate stage is written to m_resultImageDir for inspection.
void jointImage::getTowerLines(QString szDirPath, QString szPic)
{
    m_resultImageDir = szDirPath;
    m_srcImagePath = szPic;
    std::string imagePath = szPic.toLocal8Bit().toStdString();
// 1. Load the image.
    cv::Mat src = cv::imread(imagePath);
    if (src.empty()) {
        cout << "Images loaded failed..." << endl;
        return ;
    } else {
        cout << "Images loaded success..." << endl;
    }
    int nCols = src.cols;
    int nRows = src.rows;
    cv::Mat srcGray;
// imread() delivers BGR data, so BGR2GRAY is the correct conversion code
// (the previous RGB2GRAY swapped the red/blue weights in the luma formula).
    cv::cvtColor(src, srcGray, cv::COLOR_BGR2GRAY);
    Mat dst = Mat::zeros(nRows, nCols, CV_8UC3);
    Mat dst2 = Mat::zeros(nRows, nCols, CV_8UC3);
// 2. Binarise: pixels brighter than 120 become 255, everything else 0.
    threshold(srcGray, dst, 120, 255, THRESH_BINARY);
    string szPath = m_resultImageDir.toLocal8Bit().toStdString();
    std::string szThresholdPicPath = szPath + "\\threshold11.jpg";
    imwrite(szThresholdPicPath, dst);
// 3. Morphological opening (erode then dilate) removes small white specks.
    Mat element = getStructuringElement(MORPH_RECT, Size(6, 6));// rectangular kernel, anchor at centre
    morphologyEx(dst, dst2, MORPH_OPEN, element, Point(-1, -1), 1);
    std::string szmorphologyPicPath = szPath + "\\morphology11.jpg";
    imwrite(szmorphologyPicPath, dst2);
// 4. Find all contours of the cleaned binary image.
    std::vector<std::vector<cv::Point>> contours;
    vector<Vec4i>hierarchy;
    this->FindContours(dst2, contours, hierarchy, CV_RETR_LIST, CHAIN_APPROX_SIMPLE, Point());
    if (contours.empty()) { // guard: the old code divided by a zero contour count
        cout << "no contours found..." << endl;
        return;
    }
    double MaxArea = 0;
    double fTotalArea = 0;// total contour area
    for (size_t k = 0; k < contours.size(); ++k) {
        double fArea = contourArea(contours[k]);
        fTotalArea += fArea;
        if (fArea >= MaxArea) {
            MaxArea = fArea;
        }
    }
// average contour area, used to reject small (noise) contours
    double fAverageArea = fTotalArea / double(contours.size());
// 5. Drop contours that are smaller than average or whose first point is
//    too high up (y < 300) — those are treated as noise, not the tower.
    std::vector<std::vector<cv::Point>>::iterator itr = contours.begin();
    while (itr != contours.end()) {
        cv::Point forstPt = (*itr)[0];
        double g_dConArea = contourArea(*itr);
        if ( (g_dConArea < fAverageArea) || (forstPt.y < 300)) {
            itr = contours.erase(itr);
        } else {
            ++itr;
        }
    }
// 6. Draw the surviving contours filled white on black.  The hierarchy no
//    longer matches the erased list, so draw WITHOUT it (the old code
//    indexed the stale hierarchy, which was out of sync after erase()).
    Mat data = Mat::zeros(nRows, nCols, CV_8UC3);
    for (int nIndex = 0; nIndex < int(contours.size()); ++nIndex) {
        drawContours(data, contours, nIndex, Scalar(255, 255, 255), -1, 8);
    }
    string szContourPicPath = szPath + "\\contour1.jpg";
    imwrite(szContourPicPath, data);
// 7. Edge image of the contour mask.
    cv::Mat cannyImg;
    cv::Canny(data, cannyImg, 120, 180);
    std::string szCannyPicPath = szPath + "\\canny1.jpg";
    imwrite(szCannyPicPath, cannyImg);
}
//static int nPixelCutLen = -20;// previous tuning value for the slant cut
static int nPixelCutLen = 200;// extra rows kept above the slanted edge when cutting
static int nXCutExtend = 1;// extra columns kept beyond the left/right base points
QString SECONDIMG = "02.jpg"; // file name that selects the alternative Hough parameters
int jointImage::getPicTowerPart(std::string szPic)
{
    string szPath = m_resultImageDir.toLocal8Bit().toStdString();
// 1、 读入图像
    m_src = cv::imread(szPic);
    int nCols = m_src.cols;
    int nRows = m_src.rows;
    cv::Mat srcGray;
    cv::cvtColor(m_src, srcGray, cv::COLOR_RGB2GRAY);
    int nLineCount = 0;
    std::vector<cv::Vec4i> savedLines;
    std::vector<double> linesAngle;
    cv::Mat drawImg(nRows, nCols, CV_8U, cv::Scalar(255));
    std::vector<cv::Vec4i> lines;//检测图像中的直线
//二号图像使用这个参数CV_PI / 180, 60, 100, 60
    QFileInfo fileinfo(m_srcImagePath);
    QString fileName2 = fileinfo.fileName();
    if(fileName2 == SECONDIMG) {
        cv::HoughLinesP(srcGray, lines, 1, CV_PI / 180, 60, 100, 60);
    } else {
        cv::HoughLinesP(srcGray, lines, 1, CV_PI / 180, 60, 100, 5);
    }
//遍历所有线段，找出
    std::vector<cv::Vec4i>::const_iterator it = lines.begin();
    while (it != lines.end()) {
        cv::Point point1((*it)[0], (*it)[1]);
        cv::Point point2((*it)[2], (*it)[3]);
        int xLen = abs(point2.x - point1.x);
        int yLen = abs(point2.y - point1.y);
        int nBigger = 0;
        int nSmall = 0;
        if (xLen > yLen) {
            nBigger = xLen;
            nSmall = yLen;
        } else {
            nBigger = yLen;
            nSmall = xLen;
        }
        if (nSmall > 0) {
            double fPercent = double(nSmall) / double(nBigger);
            if (fPercent > 0.25 ) {//选取符合偏移角度的线段
                line(drawImg, point1, point2, cv::Scalar(0, 0, 255), 1);
                nLineCount++;
                savedLines.push_back(cv::Vec4i(point1.x, point1.y, point2.x, point2.y));
                linesAngle.push_back(fPercent);
            }
        }
        ++it;
    }
    cout << "Line Count==" << nLineCount << endl;
    std::string szLinesPicPath = szPath + "\\lines1.jpg";
    imwrite(szLinesPicPath, drawImg);
// 合并同角度线段，找到最长两个线段并在图像中画出来:bug
    std::vector<cv::Vec4i> validLines = getMergeLines(savedLines, linesAngle);
    cv::Mat linesImg(nRows, nCols, CV_8U, cv::Scalar(255));
    it = validLines.begin();
    while (it != validLines.end()) {
        cv::Point point1((*it)[0], (*it)[1]);
        cv::Point point2((*it)[2], (*it)[3]);
        line(linesImg, point1, point2, cv::Scalar(0, 0, 255), 1);
        ++it;
    }
    std::string szLinesLastPath = szPath + "\\linesLast.jpg";
    imwrite(szLinesLastPath, linesImg);
// 3. 找到两个方程和三个点
    cv::Vec4i line1 = validLines[0];
    cv::Vec4i line2 = validLines[1];
    double a1 = double(line1[3] - line1[1]) / double(line1[2] - line1[0]);//斜率
    double b1 = double(line1[2] * line1[1] - line1[3] * line1[0]) / double(line1[2] - line1[0]);//截距
    double a2 = double(line2[3] - line2[1]) / double(line2[2] - line2[0]);
    double b2 = double(line2[2] * line2[1] - line2[3] * line2[0]) / double(line2[2] - line2[0]);
//根据两个直线的斜率和截距计算出交点的X坐标
    int nJointX = int((b1 - b2) / (a2 - a1));
//根据两个直线的斜率和截距计算出两条直线在图像中的位置
    int x1Loc = int( (nRows - 1 - b1) / a1); //当y=1080时，x的大小
    int x2Loc = int((nRows - 1 - b2) / a2);
//边界检测
    if (x1Loc < 0) {
        x1Loc = 0;
    }
    if (x1Loc >= nCols) {
        x1Loc = nCols - 1;
    }
    if (x2Loc < 0) {
        x2Loc = 0;
    }
    if (x2Loc >= nCols) {
        x2Loc = nCols - 1;
    }
    mp_srcImg = new QImage();
    if(! ( mp_srcImg->load(m_srcImagePath) ) ) { //加载图像到内存
        cout << "load pic failed" << endl;
        delete mp_srcImg;
        return 1;
    }
    int nXSize = mp_srcImg->width();
    int nYSize = mp_srcImg->height();
    QImage splitImg(nXSize, nYSize, QImage::Format_RGB888);
    splitImg.fill(Qt::black); // 填成黑色的
// 截图：将原始图像的像素点挨个取出，拷贝到纯黑图像对应像素点
    if (x1Loc < x2Loc) { // 1在左，2在右
        for (int i = 0; i < nXSize; i++) {
            for (int j = 0; j < nYSize; j++) {
                if ((i >= (x1Loc - nXCutExtend)) && (i < nJointX)) { //判断的是x1往左多截一点nXCutExtend
                    int nYloc = int(a1 * i + b1);
                    if (j >= (nYloc - nPixelCutLen)) { //判断的是斜边往上多截nPixelCutLen
                        QColor aSrcColor = mp_srcImg->pixelColor(i, j);
                        splitImg.setPixelColor(i, j, aSrcColor);
                    }
                } else if ((i >= nJointX) && (i <= (x2Loc + nXCutExtend)) ) {
                    int nYloc = int(a2 * i + b2);
                    if (j >= (nYloc - nPixelCutLen)) {
                        QColor aSrcColor = mp_srcImg->pixelColor(i, j);
                        splitImg.setPixelColor(i, j, aSrcColor);
                    }
                }
            }
        }
    } else { // 2在左，1在右
        for (int i = 0; i < nXSize; i++) {
            for (int j = 0; j < nYSize; j++) {
                if ((i >= (x2Loc - nXCutExtend)) && (i < nJointX)) {
                    int nYloc = int(a2 * i + b2);
                    if (j >= (nYloc - nPixelCutLen)) {
                        QColor aSrcColor = mp_srcImg->pixelColor(i, j);
                        splitImg.setPixelColor(i, j, aSrcColor);
                    }
                } else if ((i >= nJointX) && (i <= (x1Loc + nXCutExtend))) {
                    int nYloc = int(a1 * i + b1);
                    if (j >= (nYloc - nPixelCutLen)) {
                        QColor aSrcColor = mp_srcImg->pixelColor(i, j);
                        splitImg.setPixelColor(i, j, aSrcColor);
                    }
                }
            }
        }
    }
    QString filename = m_resultImageDir + "/split.jpg";
    bool flag = splitImg.save(filename);
    if(flag) {
        std::cout << "Img save success ...";
    }
    QByteArray path = filename.toLocal8Bit();
    std::string pathimg(path);
    Mat tmpImg = imread(pathimg);
    Mat result, warpMat;
    float diff = 0.0;
//先平移将图像放置在中间
    if(x1Loc > x2Loc) {
        int offsetR = nCols - (x1Loc + nXCutExtend);
        int offsetL = x2Loc - nXCutExtend;
        int avg = (offsetL + offsetR) / 2;
        diff = avg - offsetL;
        float warpMatValues[] = { 1.0, 0.0, diff,
                                  0.0, 1.0, 0
                                };
        warpMat = Mat(2, 3, CV_32F, warpMatValues);
        Size outDim = tmpImg.size();//得到图像尺寸
        warpAffine(tmpImg, result, warpMat, outDim);
        std::string movePath = szPath + "\\pingyi.jpg";
        imwrite(movePath, result);
    } else { //x1Loc<x2Loc
        int offsetR = nCols - (x2Loc + nXCutExtend);
        int offsetL = x1Loc - nXCutExtend;
        int avg = (offsetL + offsetR) / 2;
        diff = avg - offsetL;
        float warpMatValues[] = { 1.0, 0.0, diff,
                                  0.0, 1.0, 0
                                };
        warpMat = Mat(2, 3, CV_32F, warpMatValues);
        Size outDim = tmpImg.size();//得到图像尺寸
        warpAffine(tmpImg, result, warpMat, outDim);
        std::string movePath = szPath + "\\pingyi.jpg";
        imwrite(movePath, result);
    }
//进行图像仿射变化
//根据定义的三个点进行仿射变换
    Point2f src_points[3];
    Point2f dst_points[3];
//Point2f pda(nJointX, ((nJointX) * a1 + b1));
    src_points[0] = Point2f(nJointX + diff, ((nJointX) * a1 + b1) - nPixelCutLen); //原始三角中的三个点
    src_points[1] = Point2f(x1Loc + diff, (float)(nRows - 1));
    src_points[2] = Point2f(x2Loc + diff, (float)(nRows - 1));
    Mat rotation1, img_warp1;
//放射变换后图像中的三个点
    dst_points[0] = Point2f((nCols - 1) / 2.0, 0);
    if(x1Loc < x2Loc) { //针对图3,4仿射变换
        dst_points[2] = Point2f(nCols - 1, nRows - 1);
        dst_points[1] = Point2f(0, nRows - 1);
    } else {
        dst_points[1] = Point2f(nCols - 1, nRows - 1);
        dst_points[2] = Point2f(0, nRows - 1);
    }
    rotation1 = getAffineTransform(src_points, dst_points); //根据对应点求取仿射变换矩阵
    warpAffine(result, img_warp1, rotation1, tmpImg.size()); //进行仿射变换
    std::string fangshePath = szPath + "\\fangshe.jpg";
    imwrite(fangshePath, img_warp1);
    return 0;
}
// Groups the Hough segments by slope (tolerance 0.01), merges each group into
// one segment spanning its leftmost..rightmost endpoints, and returns the two
// longest merged segments (longest first).
// linesAngle entries are set to -1 as their segments are consumed (in/out).
// Returns an EMPTY vector when no segments were supplied — callers must check
// the size (the old code indexed retnLines[nMaxLoc] on an empty vector).
std::vector<cv::Vec4i> jointImage::getMergeLines(std::vector<cv::Vec4i> &savedLines, std::vector<double> &linesAngle)
{
    std::vector<cv::Vec4i> retn2Lines;
    if (savedLines.empty() || linesAngle.size() != savedLines.size()) {
        return retn2Lines; // guard: nothing to merge
    }
// 1. Group segments whose true slope differs by less than 0.01.
    std::vector< std::vector<cv::Vec4i>> needMergeLines;
    for (size_t i = 0; i < linesAngle.size(); ++i) {
        if (linesAngle[i] <= 0) {
            continue; // already consumed by an earlier group
        }
        cv::Vec4i curntLine = savedLines[i];
        double curntRealAngle = double(curntLine[3] - curntLine[1]) / double(curntLine[2] - curntLine[0]);
        std::vector<cv::Vec4i> tmpLines;
        tmpLines.push_back(curntLine);
        for (size_t j = i + 1; j < linesAngle.size(); ++j) {
            cv::Vec4i inLine = savedLines[j];
            double inRealAngle = double(inLine[3] - inLine[1]) / double(inLine[2] - inLine[0]);
            if (std::fabs(inRealAngle - curntRealAngle) < 0.01) {
                tmpLines.push_back(inLine);
                linesAngle[j] = -1; // mark as consumed
            }
        }
        needMergeLines.push_back(tmpLines);
    }
    if (needMergeLines.empty()) {
        return retn2Lines;
    }
// 2. Merge each group into a single segment between its extreme-x endpoints
//    and remember each merged segment's length.
    double fMaxLen = 0;
    size_t nMaxLoc = 0;
    std::vector<cv::Vec4i> retnLines;
    std::vector<double> fLinesLen;
    for (size_t i = 0; i < needMergeLines.size(); ++i) {
        const std::vector<cv::Vec4i> &group = needMergeLines[i];
        cv::Point minPt, maxPt;
        maxPt.x = 0;
        minPt.x = 100000;
        for (size_t j = 0; j < group.size(); ++j) {
            cv::Vec4i aLine = group[j];
            if (aLine[0] < minPt.x) {
                minPt.x = aLine[0];
                minPt.y = aLine[1];
            }
            if (aLine[0] > maxPt.x) {
                maxPt.x = aLine[0];
                maxPt.y = aLine[1];
            }
            if (aLine[2] < minPt.x) {
                minPt.x = aLine[2];
                minPt.y = aLine[3];
            }
            if (aLine[2] > maxPt.x) {
                maxPt.x = aLine[2];
                maxPt.y = aLine[3];
            }
        }
        cv::Vec4i tmp = { minPt.x, minPt.y, maxPt.x, maxPt.y };
        retnLines.push_back(tmp);
        double fDist = getDistance(minPt, maxPt);
        if (fDist > fMaxLen) {
            fMaxLen = fDist;
            nMaxLoc = i;
        }
        fLinesLen.push_back(fDist);
    }
// 3. Longest segment first, then the second longest.
//    NOTE: with a single group the same segment is returned twice, which
//    matches the original behaviour (callers guard against a1 == a2).
    retn2Lines.push_back(retnLines[nMaxLoc]);
    fLinesLen[nMaxLoc] = 0;
    double fsendMaxLen = 0;
    size_t nsendMaxLoc = 0;
    for (size_t i = 0; i < fLinesLen.size(); ++i) {
        if (fLinesLen[i] > fsendMaxLen) {
            fsendMaxLen = fLinesLen[i];
            nsendMaxLoc = i;
        }
    }
    retn2Lines.push_back(retnLines[nsendMaxLoc]);
    return retn2Lines;
}
// Wrapper around the legacy C API cvFindContours: fills 'contours' with the
// contour points and 'hierarchy' with the next/prev/child/parent indices,
// mirroring cv::findContours' output layout.
// Returns 0 on success, 1 when no contours were found.
int jointImage::FindContours(cv::Mat &src, std::vector< std::vector<cv::Point>> &contours, std::vector<cv::Vec4i> &hierarchy,
                             int retr, int method, cv::Point offset)
{
    // Wrap the cv::Mat header for the legacy C API (no pixel copy).
    CvMat c_image = src;
    // Memory storage owning all contour sequences found below (RAII wrapper).
    MemStorage storage(cvCreateMemStorage());
    CvSeq *_ccontours = nullptr;
    // Find contours in the binary image; results live in 'storage' and are
    // reached through _ccontours.
    cvFindContours(&c_image, storage, &_ccontours, sizeof(CvContour), retr, method, CvPoint(cvPoint(offset.x, offset.y)));
    if (!_ccontours) {
        contours.clear();
        return 1;
    }
    // Flatten the contour tree into a linear sequence of CvSeq*.
    Seq<CvSeq *> all_contours(cvTreeToNodeSeq(_ccontours, sizeof(CvSeq), storage));
    int total = static_cast<int>(all_contours.size());
    contours.resize(static_cast<size_t>(total));
    SeqIterator<CvSeq *> it = all_contours.begin();
    for (int i = 0; i < total; i++, ++it) {
        CvSeq *c = *it;
        // Stash the linear index in 'color' so the hierarchy links can be
        // resolved to indices in the second pass below.
        (reinterpret_cast<CvContour *>(c))->color = static_cast<int>(i);
        int count = static_cast<int>(c->total); // number of points in this contour
        if (count <= 0) {
            continue; // nothing to copy for an empty sequence
        }
        // std::vector instead of raw new[]/delete[]: exception-safe, no leak
        // if push_back throws. Layout: x0,y0,x1,y1,...
        std::vector<int> data(static_cast<size_t>(count) * 2);
        cvCvtSeqToArray(c, data.data());
        contours[static_cast<size_t>(i)].reserve(static_cast<size_t>(count));
        for (int j = 0; j < count; j++) {
            contours[static_cast<size_t>(i)].push_back(Point(data[j * 2], data[j * 2 + 1]));
        }
    }
    hierarchy.resize(static_cast<size_t>(total));
    it = all_contours.begin();
    // For every contour record the linear indices of its four neighbours
    // (next/prev sibling, first child, parent); -1 means "none".
    for (int i = 0; i < total; i++, ++it) {
        CvSeq *c = *it;
        int h_next = c->h_next ? (reinterpret_cast<CvContour *>(c->h_next))->color : -1;
        int h_prev = c->h_prev ? (reinterpret_cast<CvContour *>(c->h_prev))->color : -1;
        int v_next = c->v_next ? (reinterpret_cast<CvContour *>(c->v_next))->color : -1;
        int v_prev = c->v_prev ? (reinterpret_cast<CvContour *>(c->v_prev))->color : -1;
        hierarchy[static_cast<size_t>(i)] = Vec4i(h_next, h_prev, v_next, v_prev);
    }
    return 0;
}
// Canny-style edge extraction pipeline for pSrcImg; every stage writes its
// intermediate image into dstImageDir.  The stages communicate through the
// per-pixel member buffers, so the call order below is fixed.
// Always returns 0.
int jointImage::genEdgeImg(QImage *pSrcImg, QImage *pDstImg,
                           QString dstImageDir)
{
    mp_srcImg = pSrcImg;
    mp_dstImg = pDstImg;
    m_resultImageDir = dstImageDir;
// 1. Allocate/reset the per-pixel working buffers.
    alloBuffers4Edge();
// 2. Grey-scale conversion.
    generateGreyValue();
// 3. Gaussian smoothing (5x5 kernel; genfilterValue3308() was the 3x3 variant).
    genfilterValue5514();
// 4. Sobel gradient magnitude and direction.
    sobelGradDirction();
// 5. Non-maximum suppression of the gradient magnitude.
    localMaxValue();
// 6. Double-threshold classification (strong/weak edge pixels).
    doubleThreshhold();
// 7. Hysteresis: link weak edges that touch strong ones.
    doubleThreshhdLink();
    return 0;
}
// 生成灰度值
void jointImage::generateGreyValue()
{
// 先判断一下图的格式
    mp_greyImg = new QImage(*mp_srcImg);
    int nXSize = mp_greyImg->width();
    int nYSize = mp_greyImg->height();
    for (int i = 0; i < nXSize; i++)
        for (int j = 0; j < nYSize; j++) {
            QColor aColor = mp_srcImg->pixelColor(i, j);
// R ?0.3 + G ?0.59 + B ?0.11
            int greyVal = int(0.3 * aColor.red() + 0.59 * aColor.green() + 0.11 * aColor.blue());
            QColor greyColor(greyVal, greyVal, greyVal);
            mp_greyImg->setPixelColor(i, j, greyColor);
        }
    QString greyImgPath = m_resultImageDir + "/Grey.jpg";
    mp_greyImg->save(greyImgPath);
}
// 5x5 neighbourhood offsets (dx, dy), row by row; the centre is index 12.
// const: these tables are read-only lookup data (only indexed, never written).
static const int Loc5x5[25][2] = { {-2, -2}, { -1, -2 }, { 0, -2}, { 1, -2 }, { 2, -2},
    {-2, -1}, { -1, -1 }, { 0, -1}, { 1, -1 }, { 2, -1 },
    {-2, 0 }, {-1, 0 }, { 0, 0 }, { 1, 0 }, { 2, 0 },
    {-2, 1 }, { -1, 1 }, { 0, 1 }, { 1, 1 }, { 2, 1 },
    {-2, 2}, { -1, 2 }, { 0, 2 }, { 1, 2 }, { 2, 2 }
};
// Integer 5x5 Gaussian kernel weights (sum = 273), same ordering as Loc5x5.
static const int Gausi5x5[25] = { 1, 4, 7, 4, 1,
                                  4, 16, 26, 16, 4,
                                  7, 26, 41, 26, 7,
                                  4, 16, 26, 16, 4,
                                  1, 4, 7, 4, 1
                                };
//static int Gausi5x5Total = 273;
// Gaussian smoothing of the grey image with the 5x5 integer kernel Gausi5x5.
// Writes the result into the member buffer filteredImg (used by the Sobel
// stage) and saves it as <dir>/Filt.jpg.  Border pixels renormalise by the
// sum of the in-range weights (nCount), so no padding is needed.
void jointImage::genfilterValue5514()
{
    QImage filterImg(*mp_greyImg);
    int nXSize = mp_srcImg->width();
    int nYSize = mp_srcImg->height();
    for (int i = 0; i < nXSize; i++)
        for (int j = 0; j < nYSize; j++) { // every pixel
            int nGreyCount = 0;
            int nCount = 0;
            for (int n = 0; n < 25; n++) {
                int xLoc = i + Loc5x5[n][0];
                int yLoc = j + Loc5x5[n][1];
                if (xLoc >= 0 && xLoc < nXSize && yLoc >= 0 && yLoc < nYSize) {
                    // Bugfix: sample the GREY image.  The old code read
                    // mp_srcImg (the colour image) and used its red channel,
                    // so the filter smoothed red data, not luminance.
                    QColor neightColor = mp_greyImg->pixelColor(xLoc, yLoc);
                    int neighborGrey = neightColor.red(); // R==G==B in the grey image
                    nGreyCount += neighborGrey * Gausi5x5[n];
                    nCount += Gausi5x5[n];
                }
            }
            // The centre weight is always in range, so nCount > 0 here.
            int nFileterVal = int (nGreyCount / nCount);
            filteredImg[i][j] = nFileterVal;
            QColor aCOlor(nFileterVal, nFileterVal, nFileterVal);
            filterImg.setPixelColor(i, j, aCOlor);
        }
    QString filterImgPath = m_resultImageDir + "/Filt.jpg";
    filterImg.save(filterImgPath);
}
// Sobel gradients over the filtered grey buffer: fills GxImg/GyImg with the
// raw gradients, GImg with the magnitude and PointDirection with the gradient
// angle in degrees (range -90..90; a zero gradient maps to -90, matching the
// sign test below).
void jointImage::sobelGradDirction()
{
    const int width = mp_srcImg->width();
    const int height = mp_srcImg->height();
    for (int x = 1; x < (width - 1); x++) {
        for (int y = 1; y < (height - 1); y++) { // skip the 1-pixel border
            // Horizontal Sobel kernel: right column minus left column.
            int gradX = filteredImg[x + 1][y - 1] - filteredImg[x - 1][y - 1] +
                        2 * filteredImg[x + 1][y] - 2 * filteredImg[x - 1][y] +
                        filteredImg[x + 1][y + 1] - filteredImg[x - 1][y + 1];
            // Vertical Sobel kernel: top row minus bottom row.
            int gradY = filteredImg[x - 1][y - 1] - filteredImg[x - 1][y + 1] +
                        2 * filteredImg[x][y - 1] - 2 * filteredImg[x][y + 1] +
                        filteredImg[x + 1][y - 1] - filteredImg[x + 1][y + 1];
            GxImg[x][y] = gradX;
            GyImg[x][y] = gradY;
            GImg[x][y] = float(sqrt(gradX * gradX + gradY * gradY));
            float degrees;
            if (gradX != 0) {
                // atan yields radians; 57.3 ~= 180/pi converts to degrees
                degrees = atan(float(gradY) / float(gradX)) * float(57.3);
            } else {
                degrees = (gradY > 0) ? 90.0f : -90.0f;
            }
            PointDirection[x][y] = degrees;
        }
    }
}
// Non-maximum suppression of the gradient magnitude: a pixel survives only if
// its magnitude beats the two magnitudes interpolated along its gradient
// direction. Survivors get localMaxVal[i][j] = 1 and are drawn white into
// m_localMaxValImg, which is saved as <dir>/LMV.jpg.
void jointImage::localMaxValue()
{
    m_localMaxValImg = (*mp_srcImg);
    m_localMaxValImg.fill(Qt::black);
    int nXSize = mp_srcImg->width();
    int nYSize = mp_srcImg->height();
    for (int i = 1; i < (nXSize - 1); i++)
        for (int j = 1; j < (nYSize - 1); j++) { // every pixel except the 1px border
            float fCurentVal = GImg[i][j];
            float fAngle = PointDirection[i][j]; // gradient direction in degrees
            int nAngle;
            // round to the nearest whole degree (towards +/- infinity)
            if (fAngle > 0) {
                nAngle = int(fAngle + 0.5f);
            } else {
                nAngle = int(fAngle - 0.5f);
            }
            float tanAngl = fAngle / float(57.3f); // degrees back to radians (57.3 ~= 180/pi)
            float Gp1, Gp2;
            // Gp1/Gp2 are the gradient magnitudes interpolated at the two
            // points where the gradient direction crosses the 3x3 ring.
            if (nAngle >= 0 && nAngle < 45) {
                // shallow positive slope: blend horizontal and diagonal neighbours
                float tanVal = tan(tanAngl);
                Gp1 = tanVal * GImg[i + 1][j - 1] + (1 - tanVal) * GImg[i + 1][j];
                Gp2 = tanVal * GImg[i - 1][j + 1] + (1 - tanVal) * GImg[i - 1][j];
            } else if (nAngle >= -45 && nAngle < 0) {
                // shallow negative slope (tanVal < 0, hence the sign flips)
                float tanVal = tan(tanAngl);
                Gp1 = -tanVal * GImg[i + 1][j + 1] + (1 + tanVal) * GImg[i + 1][j];
                Gp2 = -tanVal * GImg[i - 1][j - 1] + (1 + tanVal) * GImg[i - 1][j];
            } else if (nAngle >= 45 && nAngle <= 90) {
                // steep positive slope: blend vertical and diagonal neighbours
                float ctanAngl = (90 - fAngle) / float(57.3f); // complement angle, radians
                float ctanVal = tan(ctanAngl);
                Gp1 = ctanVal * GImg[i + 1][j - 1] + (1 - ctanVal) * GImg[i][j - 1];
                Gp2 = ctanVal * GImg[i - 1][j + 1] + (1 - ctanVal) * GImg[i][j + 1];
            } else { // nAngle >= -90 && nAngle < -45: steep negative slope
                float ctanAngl = (-90 - fAngle) / float(57.3f);
                float ctanVal = tan(ctanAngl);
                Gp1 = -ctanVal * GImg[i + 1][j + 1] + (1 + ctanVal) * GImg[i][j + 1];
                Gp2 = -ctanVal * GImg[i - 1][j - 1] + (1 + ctanVal) * GImg[i][j - 1];
            }
            // keep the pixel only if it strictly beats both interpolated values
            if ( (fCurentVal > Gp1) && (fCurentVal > Gp2) ) {
                localMaxVal[i][j] = 1;
                m_localMaxValImg.setPixelColor(i, j, Qt::white);
            }
        }
    QString localMaxValuePath = m_resultImageDir + "/LMV.jpg";
    m_localMaxValImg.save(localMaxValuePath);
}
#define LOW_THRESHHOLD 60   // gradient magnitude above this: at least a weak edge
#define HIGH_THRESHHOLD 120 // gradient magnitude above this: a strong edge
// Double-threshold classification: low = 60, high = 120
// (the old comment claimed a high threshold of 100, which was stale).
// Classify the non-maximum-suppressed pixels by gradient magnitude:
// magnitude > HIGH_THRESHHOLD  -> strong edge (DublThreshold = 2, drawn white)
// magnitude > LOW_THRESHHOLD   -> weak edge   (DublThreshold = 1, not drawn)
// The strong-edge image is saved as <dir>/DbT.jpg.
void jointImage::doubleThreshhold()
{
    DoubleThresholdImg = m_localMaxValImg;
    DoubleThresholdImg.fill(Qt::black);
    const int width = DoubleThresholdImg.width();
    const int height = DoubleThresholdImg.height();
    for (int x = 1; x < (width - 1); x++) {
        for (int y = 1; y < (height - 1); y++) { // skip the 1-pixel border
            if (localMaxVal[x][y] <= 0) {
                continue; // not a local maximum: cannot be an edge pixel
            }
            const int magnitude = int(GImg[x][y]);
            if (magnitude > HIGH_THRESHHOLD) {
                DublThreshold[x][y] = 2; // strong edge pixel
                DoubleThresholdImg.setPixelColor(x, y, Qt::white);
            } else if (magnitude > LOW_THRESHHOLD) {
                DublThreshold[x][y] = 1; // weak edge pixel, resolved by hysteresis
            }
        }
    }
    QString doubleThreshholdPath = m_resultImageDir + "/DbT.jpg";
    DoubleThresholdImg.save(doubleThreshholdPath);
}
static int Neight[8][2] = { { -1, -1 }, { 0, -1}, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, {-1, 0} };
// Edge tracking by hysteresis: keep every strong edge pixel (DublThreshold == 2)
// and promote weak edge pixels (== 1) that touch at least one strong pixel in
// their 8-neighbourhood. The final edge map is saved as <dir>/DbTLk.jpg.
void jointImage::doubleThreshhdLink()
{
    DblThresholdLinkImg = DoubleThresholdImg;
    DblThresholdLinkImg.fill(Qt::black);
    int nXSize = DblThresholdLinkImg.width();
    int nYSize = DblThresholdLinkImg.height();
    for (int i = 1; i < (nXSize - 1); i++)
        for (int j = 1; j < (nYSize - 1); j++) { // every pixel except the 1px border
            if (DublThreshold[i][j] == 2) { // strong edges are always kept
                DublThresholdLink[i][j] = 1;
                DblThresholdLinkImg.setPixelColor(i, j, Qt::white);
            }
            if (DublThreshold[i][j] == 1) { // weak edge: look for a strong neighbour
                bool isStrong = false;
                for (int n = 0; n < 8; n++) {
                    int xLoc = i + Neight[n][0];
                    int yLoc = j + Neight[n][1];
                    if (xLoc >= 0 && xLoc < nXSize && yLoc >= 0 && yLoc < nYSize) {
                        // Bugfix: test the NEIGHBOUR, not the centre pixel.
                        // The old code re-tested DublThreshold[i][j], which is
                        // 1 in this branch, so weak edges were never linked.
                        if (DublThreshold[xLoc][yLoc] == 2) {
                            isStrong = true;
                            break;
                        }
                    }
                }
                if (isStrong) {
                    DublThresholdLink[i][j] = 1;
                    DblThresholdLinkImg.setPixelColor(i, j, Qt::white);
                }
            }
        }
    QString dblThreshholdLkPath = m_resultImageDir + "/DbTLk.jpg";
    DblThresholdLinkImg.save(dblThreshholdLkPath);
}
// Size every per-pixel working buffer to the source image dimensions and
// reset the pipeline state (gradients to 0, classification maps to -1).
// filteredImg is sized but not reset — it is fully overwritten by the filter
// stage, exactly as in the original flow.
void jointImage::alloBuffers4Edge()
{
    const int width = mp_srcImg->width();
    const int height = mp_srcImg->height();
    filteredImg.resize(width);
    GxImg.resize(width);
    GyImg.resize(width);
    GImg.resize(width);
    PointDirection.resize(width);
    localMaxVal.resize(width);
    DublThreshold.resize(width);
    DublThresholdLink.resize(width);
    for (int x = 0; x < width; x++) {
        filteredImg[x].resize(height);
        GxImg[x].resize(height);
        GyImg[x].resize(height);
        GImg[x].resize(height);
        PointDirection[x].resize(height);
        localMaxVal[x].resize(height);
        DublThreshold[x].resize(height);
        DublThresholdLink[x].resize(height);
        for (int y = 0; y < height; y++) {
            GxImg[x][y] = 0;
            GyImg[x][y] = 0;
            GImg[x][y] = 0;
            PointDirection[x][y] = 0;
            localMaxVal[x][y] = -1;      // -1 = not a local maximum
            DublThreshold[x][y] = -1;    // -1 = unclassified
            DublThresholdLink[x][y] = -1;
        }
    }
}
// Euclidean distance between two points.
// Computed entirely in double precision; the old version routed the squares
// through powf()/sqrtf(), truncating the result to float precision.
double jointImage::getDistance(CvPoint pointO, CvPoint pointA)
{
    const double dx = double(pointO.x - pointA.x);
    const double dy = double(pointO.y - pointA.y);
    return sqrt(dx * dx + dy * dy);
}
