import os

import cv2
import numpy as np

np.set_printoptions(suppress=True)

# https://www.bilibili.com/video/BV1s44y1M7bo

# MAX_FEATURES = 500  #default
# GOOD_MATCH_PERCENT = 0.15

def unpackOctave(keypoint):
    """Decode the packed ``octave`` field of an OpenCV SIFT keypoint.

    OpenCV stores the (signed) octave index in the low byte of
    ``keypoint.octave`` and the layer index in the next byte.

    Returns:
        tuple: ``(octave, layer, scale)`` where ``scale`` is the image
        scale factor for that octave (a ``numpy.float32``).
    """
    raw = keypoint.octave
    octave = raw & 255
    layer = (raw >> 8) & 255
    # The low byte is two's-complement: values >= 128 are negative octaves.
    if octave >= 128:
        octave |= -128
    if octave >= 0:
        scale = 1 / np.float32(1 << octave)
    else:
        scale = np.float32(1 << -octave)
    return octave, layer, scale

# Distance helper
def calculate(a, b):
    """Return the Euclidean distance between two 2-D points.

    Args:
        a: ``(x, y)`` sequence for point A.
        b: ``(x, y)`` sequence for point B.

    Returns:
        numpy floating scalar: the distance between A and B.

    Fix: the original wrapped the computation in ``while True`` with
    ``except: continue`` — a deterministic computation cannot succeed on
    retry, so any bad input caused an infinite loop that printed the same
    error forever.  Exceptions now propagate to the caller instead.
    """
    x_position = pow(a[0] - b[0], 2)  # squared x difference
    y_position = pow(a[1] - b[1], 2)  # squared y difference
    result = np.sqrt(x_position + y_position)
    # Same output order as the original success path (else-print, then
    # the finally-print that ran on the way out of the return).
    print(f'A到B点的距离为: {result}')
    print(f'\n{"-"*15}\ncalculate finished')
    return result

def stitchImage(imageA, imageB):
    """Stitch imageB onto imageA using SIFT feature matching.

    Detects SIFT features in masked regions of both grayscale images,
    matches them with a brute-force matcher + Lowe ratio test, estimates a
    homography mapping B into A's frame with RANSAC, re-validates matches
    against that homography, then warps B and composites A's left part on
    top.  Writes several debug images under ``out/`` and prints diagnostics.

    Args:
        imageA: left image (BGR ndarray); its right 2/3 is searched for features.
        imageB: right image (BGR ndarray); its left 2/3 is searched for features.

    Returns:
        BGR ndarray containing the stitched panorama.
    """
    # 2. Convert both inputs to grayscale
    gray1 = cv2.cvtColor(imageA,cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(imageB,cv2.COLOR_BGR2GRAY)
    # Approach 1: erase the left half of image 1 and the right half of image 2
    # — still exceeded the feature-count limit
    # cv2.rectangle(gray1, (0, 0), (gray1.shape[1]//2,gray1.shape[0]), (0, 0, 0), thickness=-1)
    # cv2.rectangle(gray2, (gray2.shape[1]//2, 0), (gray2.shape[1],gray2.shape[0]), (0, 0, 0), thickness=-1)
    # Approach 2: shrink then enlarge, i.e. a blur
    # gray1 = cv2.resize(gray1, None, fx=0.2, fy=0.2, interpolation=cv2.INTER_AREA)
    # gray1 = cv2.resize(gray1, None, fx=5, fy=5, interpolation=cv2.INTER_AREA)
    # gray2 = cv2.resize(gray2, None, fx=0.2, fy=0.2, interpolation=cv2.INTER_AREA)
    # gray2 = cv2.resize(gray2, None, fx=5, fy=5, interpolation=cv2.INTER_AREA)
    # Approach 3 (used below): restrict detection with a mask
    # mask1 = np.zeros(gray1.shape[0:2],dtype="uint8")
    # cv2.rectangle(mask1,(0,1400),(gray1.shape[1],gray1.shape[0]),255,-1)

    # 3. Build the detector and compute keypoints / descriptors
    finder = cv2.SIFT_create(5000,6)
    #finder = cv2.ORB_create(5000)

    maskLeft1 = np.zeros(imageA.shape[0:2],dtype="uint8")   #maskLeft
    cv2.rectangle(maskLeft1,(int(imageA.shape[1]/3),0),(imageA.shape[1],imageA.shape[0]),255,-1)

    # maskTop = np.zeros(imageA.shape[0:2],dtype="uint8")   #maskTop
    # cv2.rectangle(maskTop,(0,10),(imageA.shape[1],imageA.shape[0]),255,-1)

    # grayBlur1 = cv2.blur(gray1,(3,3))
    # ret, maskBW1 = cv2.threshold(grayBlur1, 50, 255, cv2.THRESH_BINARY)
    maskAll1 = maskLeft1#cv2.bitwise_and(maskTop,maskLeft1)
    cv2.imwrite('out/maskAll1.jpg', maskAll1)

    kpsA, despA = finder.detectAndCompute(gray1, maskAll1)    # keypoints and descriptors

    # Count keypoints per layer (diagnostic)
    counts1 = {}
    for kp in kpsA:
        kpr = unpackOctave(kp)
        counts1[kpr[1]] = counts1.get(kpr[1],0) + 1
    counts1 = sorted(counts1.items(),key=lambda x: x[0], reverse=False)
    print(counts1)

    #print(type(kp1)) list<cv2.KeyPoint>,numpy.ndarray
    # grayBlur2 = cv2.blur(gray2,(3,3))
    # ret, maskBW2 = cv2.threshold(grayBlur2, 50, 255, cv2.THRESH_BINARY)
    maskRight2 = np.zeros(imageB.shape[0:2],dtype="uint8")   #maskRight
    cv2.rectangle(maskRight2,(0,0),(int(imageB.shape[1]*2/3),imageB.shape[0]),255,-1)
    maskAll2 = maskRight2#cv2.bitwise_and(maskTop,maskRight2)
    cv2.imwrite('out/maskAll2.jpg', maskAll2)

    kpsB, despB = finder.detectAndCompute(gray2, maskAll2)

    # Count keypoints per layer (diagnostic)
    counts2 = {}
    for kp in kpsB:
        kpr = unpackOctave(kp)
        counts2[kpr[1]] = counts2.get(kpr[1],0) + 1
    counts2 = sorted(counts2.items(),key=lambda x: x[0], reverse=False)
    print(counts2)

    # 4. Brute-force matcher
    bf = cv2.BFMatcher() #crossCheck=True knnMatch(k=1)

    # 5. Match keypoints with knnMatch (two nearest neighbours per query)
    matches = bf.knnMatch(despA,despB,2)

    # 6. Lowe ratio test: drop unreliable matches by hand
    good_matches = []
    good_matches0 = []
    for m in matches:
        if len(m) == 2 and m[0].distance < 0.8 * m[1].distance: # d1 < 0.8*d2
            good_matches.append((m[0].queryIdx,m[0].trainIdx))
            good_matches0.append(m)
    print(len(good_matches))
    # Draw top matches (drawMatchesKnn draws both lines for each pair)
    imMatches = cv2.drawMatchesKnn(imageA, kpsA, imageB, kpsB, good_matches0[0:10], None)
    cv2.imwrite("out/matches.jpg", imMatches)

    # 7. Convert the reliable matches into coordinate arrays
    kps1 = np.float32([kp.pt for kp in kpsA])   # coordinates of every keypoint
    kps2 = np.float32([kp.pt for kp in kpsB])

    gkps1 = np.float32([kps1[a[0]] for a in good_matches])   # (x, y) of the good matches
    gkps2 = np.float32([kps2[a[1]] for a in good_matches])

    # 8. Estimate the homography (B -> A)
    (M, mask) = cv2.findHomography(gkps2, gkps1, cv2.RANSAC,4.0)  # reprojection error threshold (default 3.0)
    #print(M)
    for line in M:
        for cell in line:
            print(cell,end="\t")
        print()
    # Validate all knn candidates against the estimated M
    # NOTE(review): cv2.transform leaves the point in homogeneous form — the
    # x/y compared below are not divided by the third component.  For a
    # projective M, cv2.perspectiveTransform is the usual choice; confirm the
    # 16-px tolerance was tuned with this behaviour in mind.
    good_matches_check = []
    good_matches_check0 = []
    for m in matches:
        kpA = kpsA[m[0].queryIdx]
        kpB1 = kpsB[m[0].trainIdx]
        kpB2 = kpsB[m[1].trainIdx]
        ptB1 = cv2.transform(np.array([[[kpB1.pt[0], kpB1.pt[1], 1]]]),M)[0][0]
        ptB2 = cv2.transform(np.array([[[kpB2.pt[0], kpB2.pt[1], 1]]]),M)[0][0]
        if(abs(kpA.pt[0]-ptB1[0])<=16 and abs(kpA.pt[1]-ptB1[1])<=16): #abs(kpA.pt[0]-ptB1[0])<=16 and abs(kpA.pt[1]-ptB1[1]<=16) #calculate(kpA.pt,ptB1)<=16
            print(m[0].distance/m[1].distance,kpA.pt[0],kpA.pt[1],kpA.size,kpA.angle,kpA.response,unpackOctave(kpA),sep='\t')
            print("  ",kpB1.pt[0],kpB1.pt[1],kpB1.size,kpB1.angle,kpB1.response,unpackOctave(kpB1),sep='\t')
            good_matches_check.append((m[0].queryIdx,m[0].trainIdx))
            good_matches_check0.append(m[0])   # m still holds both candidates; drawing m would draw two lines
        # if(calculate(kpA.pt,ptB2)<=16): #abs(kpA.pt[0]-ptB2[0])<=16 and abs(kpA.pt[1]-ptB2[1])<=16
        #     print(m[0].distance/m[1].distance,kpA.pt[0],kpA.pt[1],kpA.size,kpA.angle,kpA.response,unpackOctave(kpA),sep='\t')
        #     print("  ",kpB2.pt[0],kpB2.pt[1],kpB2.size,kpB2.angle,kpB2.response,unpackOctave(kpB1),sep='\t')
        #     good_matches_check.append((m[0].queryIdx,m[1].trainIdx))
        #     good_matches_check0.append(m[1])
    print(len(good_matches_check0))
    gkps1 = np.float32([kps1[a[0]] for a in good_matches_check])   # (x, y) of the validated matches
    gkps2 = np.float32([kps2[a[1]] for a in good_matches_check])
    # Re-estimate the homography from the validated matches as a check
    (M, mask) = cv2.findHomography(gkps2, gkps1, cv2.RANSAC,4.0)  # reprojection error threshold (default 3.0)
    print(M)
    # Draw top matches
    imMatches = cv2.drawMatches(imageA, kpsA, imageB, kpsB, good_matches_check0[0:10], None)
    cv2.imwrite("out/matches2.jpg", imMatches)

    # 9. Composite the images
    # B originally started at x=0; it is shifted right by ~M[0][2] and scaled by M[0][0]
    widthNew = int((imageB.shape[1] + M[0][2])/M[0][0])
    print(widthNew)
    hightNew = imageA.shape[0] + int(abs(M[1][2]))

    result = cv2.warpPerspective(imageB, M, (widthNew, hightNew))
    # Split the overlap down the middle: x = (w + d) / 2
    widthANew = int((imageA.shape[1]+M[0][2])/2)
    imageA2=imageA[:,0:widthANew]
    # Copy imageA's left part into the result canvas
    result[0:imageA2.shape[0], 0:imageA2.shape[1]] = imageA2

    return result


if __name__ == "__main__":
    # 1.读入图片
    img1 = cv2.imread(r"D:\data\CUGW\test2.new\IMG_5314.jpg")
    img2 = cv2.imread(r"D:\data\CUGW\test2.new\IMG_5315.jpg")

    # 进行拼接
    result = stitchImage(img1, img2)

    # 结果图逆时针旋转90度
    #resultRotate = opencv_utils.rotateAntiClockWise90(result)
    cv2.imwrite('out/stitched.jpg', result)
    cv2.imshow('result', result)

    # 原图与结果图一起展示，易于对比
    #resultMerge = opencv_utils.merge3Image(img1, img2, stitched)
    #cv.imshow('result', resultMerge)
    #cv.imwrite('resultMerge.png', resultMerge)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
