﻿#include "SVCalculateModel.h"

SVCalculateModel::SVCalculateModel(QObject *parent)
    : QObject(parent)
{
    // Morphology kernels used by detectDifferenceRegions(): a small
    // rectangle to erase speckle noise (open) and a slightly larger one
    // to fill holes inside defect blobs (close).
    mOpenKernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
    mCloseKernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(8, 8));

    // ORB detector shared by feature extraction and template matching.
    mOrbMat = cv::ORB::create(PCB_TEMPLATE_FEATURES_ORB);
}

PCBTemplateFeatures SVCalculateModel::extractFeatures(const QImage &image)
{
    // Convert the frame through the shared memory pool; the resulting mat
    // is carried inside the returned PCBTemplateFeatures value.
    cv::Mat mat = MEMPOOL->convertQImageToCVImage(image);

    // detectAndCompute() replaces the keypoint vector wholesale, so start
    // from an empty vector. The previous code pre-filled it with
    // PCB_TEMPLATE_FEATURES_ORB default-constructed KeyPoints that were
    // constructed only to be thrown away.
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    mOrbMat->detectAndCompute(mat, cv::noArray(), keypoints, descriptors);

    return {mat, descriptors, keypoints};
}

void SVCalculateModel::matchPCBTemplate(const QImage &image,
                                        const std::vector<PCBTemplateFeatures> &templates)
{
    if (mStandardMat.data == nullptr) {
        SVScopedPool scope(MEMPOOL);

        cv::Mat mat = scope.convertQImageToCVImage(image);

        std::vector<cv::KeyPoint> keypoints(PCB_TEMPLATE_FEATURES_ORB);
        cv::Mat descriptors;
        mOrbMat->detectAndCompute(mat, cv::noArray(), keypoints, descriptors);

        QVector<double> scores(templates.size(), 1e9);
        cv::parallel_for_(cv::Range(0, templates.size()), [&](const cv::Range &range) {
            thread_local cv::BFMatcher matcher(cv::NORM_HAMMING);
            for (int i = range.start; i < range.end; ++i) {
                const auto &tpl = templates[i];
                std::vector<cv::DMatch> matches;
                matcher.match(tpl.descriptors, descriptors, matches);
                double score = 0.0;
                for (auto &match : matches) {
                    score += match.distance;
                }
                scores[i] = score / matches.size();
            }
        });

        int index = 0;
        double bestScore = scores[0];
        for (int i = 1; i < scores.size(); ++i) {
            if (scores[i] < bestScore) {
                bestScore = scores[i];
                index = i;
            }
        }

        cv::Mat M = calculatePerspectiveMatrix(templates[index].mat, templates[index].mat.size());
        cv::warpPerspective(templates[index].mat,
                            mStandardMat,
                            M,
                            templates[index].mat.size(),
                            cv::INTER_LINEAR,
                            cv::BORDER_CONSTANT);
    }
}

QList<QRect> SVCalculateModel::detectDifferenceRegions(const QImage &image)
{
    // Guard: a reference image must have been cached by matchPCBTemplate()
    // first; otherwise mStandardMat.size() is 0x0 and the pipeline below
    // would fail inside OpenCV.
    if (mStandardMat.empty())
        return {};

    SVScopedPool scope(MEMPOOL);

    cv::Mat mat = scope.convertQImageToCVImage(image);
    cv::Mat temp = scope.allocate(mStandardMat.size());

    // Warp the incoming frame onto the reference, keeping the inverse so
    // detected rectangles can be mapped back to input coordinates.
    cv::Mat M = calculatePerspectiveMatrix(mat, mStandardMat.size());
    cv::warpPerspective(mat, temp, M, mStandardMat.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT);
    M = M.inv();

    // NOTE: these per-pixel stages could be fused, and the channel
    // conversions introduce a few hidden copies (the mat/temp ping-pong
    // forces reallocation when the channel count changes).
    cv::absdiff(temp, mStandardMat, mat);
    cv::cvtColor(mat, temp, cv::COLOR_BGR2GRAY);
    cv::threshold(temp, mat, 20, 255, cv::THRESH_BINARY);

    // Open removes isolated noise pixels; close merges fragments of the
    // same defect into a single blob.
    cv::morphologyEx(mat, temp, cv::MORPH_OPEN, mOpenKernel);
    cv::morphologyEx(temp, mat, cv::MORPH_CLOSE, mCloseKernel);

    // Each remaining outer contour is one defect; report its bounding box
    // transformed back into the original image's coordinate system.
    std::vector<std::vector<cv::Point>> defects;
    cv::findContours(mat, defects, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    QList<QRect> defectRects;
    for (const auto &defect : defects) {
        cv::Rect rect = warpRectBack(cv::boundingRect(defect), M);
        defectRects.append(QRect(rect.x, rect.y, rect.width, rect.height));
    }
    return defectRects;
}

cv::Mat SVCalculateModel::calculatePerspectiveMatrix(const cv::Mat &mat, cv::Size size)
{
    // Locates the dominant region in the 35..85 HSV hue band (presumably
    // the green PCB solder mask — confirm against camera setup), fits a
    // minimum-area rotated rectangle around it, and returns the 3x3
    // perspective matrix mapping that quadrilateral onto an axis-aligned
    // `size` rectangle. Returns identity when no sufficiently large region
    // is found.
    SVScopedPool scope(MEMPOOL);

    // Work at a scaled resolution (SV_PERSPECTIVE_SCALE — presumably < 1,
    // i.e. downscaled; verify the constant) so the HSV threshold and
    // contour search stay cheap; corner coordinates are scaled back later.
    cv::Size targetSize(size.width * SV_PERSPECTIVE_SCALE, size.height * SV_PERSPECTIVE_SCALE);
    cv::Mat mask = scope.allocate(targetSize);
    cv::Mat temp = scope.allocate(targetSize);

    cv::resize(mat, mask, targetSize, 0, 0, cv::INTER_AREA);
    cv::cvtColor(mask, temp, cv::COLOR_BGR2HSV);
    // Single-channel view ALIASING mask's buffer: after the cvtColor above,
    // mask's pixel data is no longer needed, so its storage is reused for
    // the binary mask instead of taking a third pool allocation. Do not
    // touch `mask` after this point.
    cv::Mat maskSingleton(mask.size(), CV_8UC1, mask.data);
    cv::inRange(temp, cv::Scalar(35, 50, 50), cv::Scalar(85, 255, 255), maskSingleton);

    // The largest external contour is taken as the board outline.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(maskSingleton, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    double maxArea = 0.0;
    std::vector<cv::Point> maxContour;
    for (const auto &contour : contours) {
        double area = cv::contourArea(contour);
        if (area > maxArea) {
            maxArea = area;
            maxContour = contour;
        }
    }

    cv::Mat M;
    if (maxArea < targetSize.area() * 0.01) {
        // Region covers < 1% of the frame: treat detection as failed and
        // fall back to the identity transform (no rectification).
        // NOTE(review): identity is CV_32F while getPerspectiveTransform
        // below yields CV_64F — both depths are accepted downstream, but
        // the mismatch is worth unifying.
        M = cv::Mat::eye(3, 3, CV_32F);
    } else {
        // Corners of the min-area rectangle, mapped back to full-resolution
        // coordinates by undoing SV_PERSPECTIVE_SCALE.
        cv::Point2f rectPoints[4];
        cv::RotatedRect minRect = cv::minAreaRect(maxContour);
        minRect.points(rectPoints);
        for (auto &point : rectPoints) {
            point.x /= SV_PERSPECTIVE_SCALE;
            point.y /= SV_PERSPECTIVE_SCALE;
        }

        // Find the top-left corner (smallest x + y) ...
        int minIdx = 0;
        float minSum = rectPoints[0].x + rectPoints[0].y;
        for (int i = 1; i < 4; ++i) {
            float s = rectPoints[i].x + rectPoints[i].y;
            if (s < minSum) {
                minSum = s;
                minIdx = i;
            }
        }
        // ... then rotate the array so that corner comes first while
        // preserving the cyclic order produced by RotatedRect::points().
        for (int r = 0; r < minIdx; ++r) {
            cv::Point2f first = rectPoints[0];
            for (int i = 0; i < 3; ++i)
                rectPoints[i] = rectPoints[i + 1];
            rectPoints[3] = first;
        }

        // Map the ordered source corners onto the axis-aligned target
        // rectangle TL, TR, BR, BL.
        M = cv::getPerspectiveTransform(
            std::vector<cv::Point2f>(rectPoints, rectPoints + 4),
            std::vector<cv::Point2f>{{0.f, 0.f},
                                     {size.width - 1.f, 0.f},
                                     {size.width - 1.f, size.height - 1.f},
                                     {0.f, size.height - 1.f}});
    }
    return M;
}

cv::Rect SVCalculateModel::warpRectBack(const cv::Rect &rect, const cv::Mat &M)
{
    // Push all four corners of `rect` through the perspective matrix M and
    // return the axis-aligned bounding box of the resulting quadrilateral.
    const float left = static_cast<float>(rect.x);
    const float top = static_cast<float>(rect.y);
    const float right = static_cast<float>(rect.x + rect.width);
    const float bottom = static_cast<float>(rect.y + rect.height);

    std::vector<cv::Point2f> corners;
    corners.reserve(4);
    corners.emplace_back(left, top);
    corners.emplace_back(right, top);
    corners.emplace_back(right, bottom);
    corners.emplace_back(left, bottom);

    // In-place transform is supported by cv::perspectiveTransform.
    cv::perspectiveTransform(corners, corners, M);
    return cv::boundingRect(corners);
}
