import cv2
import numpy as np
import math
from numpy import array


def Harris_detect():
    """Detect Harris corners with non-maximum suppression and draw them.

    Shows three windows: the source image, the thresholded corner-response
    map, and the source image with surviving corner pixels drawn as small
    red circles.
    """
    # img = cv2.imread('imgs/chessboard.png')
    # img = cv2.imread('imgs/aaa.png')
    img = cv2.imread('imgs/home.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    cv2.imshow('img', img)

    # Raw Harris corner response: a float32 map with one strength per pixel.
    cornerStrength = cv2.cornerHarris(gray, 2, 3, 0.04)

    # Value range of the response map; maxVal is used to derive the threshold.
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(cornerStrength)
    # Dilate so every pixel holds the local maximum of its neighborhood.
    dilate = cv2.dilate(cornerStrength, None)
    # A pixel equal to its dilated value is a local maximum; this mask (uint8,
    # 0/255 from cv2.compare) implements the non-maximum-suppression test.
    localMax = cv2.compare(cornerStrength, dilate, cv2.CMP_EQ)

    # Binarize the response: keep pixels stronger than 1% of the global maximum.
    threshold = 0.01 * maxVal
    thre, cornerth = cv2.threshold(cornerStrength, threshold, 255, cv2.THRESH_BINARY)

    cv2.imshow('cornerth', cornerth)

    # Convert the thresholded map to 8-bit before masking: bitwise ops on
    # float32 act on raw bit patterns, which is only accidentally correct.
    # (The original also built an unused `result` copy — removed as dead code.)
    cornerMap = np.uint8(cornerth)
    # Non-maximum suppression: keep only thresholded pixels that are local maxima.
    cornerMap = cv2.bitwise_and(cornerMap, localMax)

    # (row, col) coordinates of every surviving corner pixel.
    points = list(zip(*np.nonzero(cornerMap)))

    print(len(points), points[0:10])

    img2 = np.copy(img)
    for x, y in points:
        # cv2.circle expects (x, y) = (col, row), hence the swap.
        cv2.circle(img2, (y, x), 1, (0, 0, 255), 1)

    cv2.imshow('img2', img2)


# SURF is a variant of SIFT that is more efficient.
def SURF_detect():
    """Match SURF features between two images and show the good matches.

    hessianThreshold is the curvature strength computed from the determinant
    of the Hessian matrix; higher values demand more distinctive keypoints
    (1000-2500 is the usual recommended range).
    """
    img1 = cv2.imread('imgs/home.jpg')
    img2 = cv2.imread('imgs/home2.png')
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    minThreshold = 7500
    # NOTE: SURF lives in opencv-contrib (xfeatures2d) and needs a nonfree build.
    surf = cv2.xfeatures2d_SURF.create(minThreshold)
    keypoints, descriptor = surf.detectAndCompute(gray1, None)
    keypoints2, descriptor2 = surf.detectAndCompute(gray2, None)

    bfMatcher = cv2.BFMatcher()
    matches = bfMatcher.knnMatch(descriptor, descriptor2, k=2)

    # Lowe's ratio test: keep a match only when it is clearly better than
    # the second-best candidate.
    good = [[m] for m, n in matches if m.distance < 0.5 * n.distance]
    # Bug fix: draw the filtered matches, not the raw knn matches —
    # the original computed `good` but never used it.
    img3 = cv2.drawMatchesKnn(img1, keypoints, img2, keypoints2, good, None, flags=2)
    cv2.imshow('img3', img3)


def ORB_detect():
    """Match ORB features between two images with a brute-force matcher."""
    image_a = cv2.imread('imgs/home.jpg')
    image_b = cv2.imread('imgs/home2.png')
    gray_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
    gray_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)

    # Maximum number of keypoints the detector retains per image.
    max_features = 100
    detector = cv2.ORB.create(max_features)

    kp1, descriptors1 = detector.detectAndCompute(gray_a, None)
    kp2, descriptors2 = detector.detectAndCompute(gray_b, None)

    matcher = cv2.DescriptorMatcher.create('BruteForce')
    # matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    found = matcher.match(descriptors1, descriptors2)
    canvas = cv2.drawMatches(image_a, kp1, image_b, kp2, found, None, flags=2)
    cv2.imshow('img3', canvas)


def Fast_detect():
    """Detect FAST keypoints and draw them in green over the source image."""
    source = cv2.imread('imgs/home.jpg')
    grayscale = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY)
    cv2.imshow('img', source)

    detector = cv2.FastFeatureDetector.create(40)
    found = detector.detect(grayscale)
    # print(type(found), found)

    cv2.drawKeypoints(
        image=source,
        keypoints=found,
        outImage=source,  # draw in place on the source image
        color=(0, 255, 0),
        flags=cv2.DrawMatchesFlags_DRAW_OVER_OUTIMG)
    cv2.imshow('outimg', source)


if __name__ == '__main__':
    # Uncomment exactly one detector demo to run it.
    # Harris_detect()
    # Fast_detect()
    SURF_detect()
    # ORB_detect()

    # Block until any key is pressed, then close all OpenCV windows.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
