import numpy as np
import cv2 as cv
# Import a self-written utility library (currently disabled)
#import sys
#sys.path.append("./")
#import opencv_utils
# https://blog.csdn.net/Apple_Coco/article/details/106480669/

# Print numpy arrays (e.g. the homography matrix below) without scientific notation.
np.set_printoptions(suppress=True)

MAX_FEATURES = 5000  # maximum number of ORB keypoints to detect (default)
GOOD_MATCH_PERCENT = 0.2  # fraction of best (lowest-distance) matches kept for homography

def stitchImage(img1, img2):
    """Stitch ``img2`` onto the right side of ``img1`` via ORB features + homography.

    Parameters
    ----------
    img1, img2 : numpy.ndarray
        BGR images; ``img1`` is the left/reference image. The hard-coded mask
        offsets (y >= 1400 / 1300) assume fairly tall images -- TODO confirm
        these thresholds for other input resolutions.

    Returns
    -------
    numpy.ndarray
        The stitched panorama in ``img1``'s coordinate frame.

    Raises
    ------
    ValueError
        If a homography cannot be estimated from the matched keypoints.

    Side effects: writes diagnostic images to ``img_out/`` and ``out/`` and
    prints match statistics and the homography matrix.
    """
    # 2. Convert to grayscale for feature detection.
    img_gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)
    img_gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)

    # 3. Detect ORB keypoints/descriptors, masked so only the presumed overlap
    # regions are searched: right 2/3 of img1, left 2/3 of img2 (below the
    # hard-coded vertical offsets).
    orb = cv.ORB_create(MAX_FEATURES)
    mask1 = np.zeros(img1.shape[0:2], dtype="uint8")
    cv.rectangle(mask1, (int(img1.shape[1]/3), 1400), (img1.shape[1], img1.shape[0]), 255, -1)
    cv.imwrite("img_out/mask1.jpg", mask1)
    kp1, des1 = orb.detectAndCompute(img_gray1, mask=mask1)
    # BUG FIX: mask2 must be sized from img2 (it is applied to img_gray2).
    # The original built it from img1.shape, which is wrong whenever the two
    # images differ in size.
    mask2 = np.zeros(img2.shape[0:2], dtype="uint8")
    cv.rectangle(mask2, (0, 1300), (int(img2.shape[1]*2/3), img2.shape[0]), 255, -1)
    cv.imwrite("img_out/mask2.jpg", mask2)
    kp2, des2 = orb.detectAndCompute(img_gray2, mask=mask2)

    # 4-5. Brute-force matching of binary descriptors (Hamming norm) with
    # cross-check enabled, so only mutually-best matches survive.
    matcher = cv.BFMatcher(cv.NORM_HAMMING, True)
    matches = matcher.match(des1, des2)

    # 6. Sort by descriptor distance (best first) and keep only the top
    # GOOD_MATCH_PERCENT fraction.
    matches = sorted(matches, key=lambda x: x.distance, reverse=False)
    # Diagnostic: count how many matched keypoints fall in each pyramid
    # octave of the second image.
    counts2 = {}
    for match in matches:
        kp = kp2[match.trainIdx]
        counts2[kp.octave] = counts2.get(kp.octave, 0) + 1
    counts2 = sorted(counts2.items(), key=lambda x: x[0], reverse=False)
    print(counts2)
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    good_matches = matches[:numGoodMatches]
    print(len(good_matches))

    # Draw the retained matches for visual inspection.
    imMatches = cv.drawMatches(img1, kp1, img2, kp2, good_matches, None)
    cv.imwrite("out/matches.jpg", imMatches)

    # 7. Extract the pixel coordinates of the good matches.
    points1 = np.zeros((len(good_matches), 2), dtype=np.float32)
    points2 = np.zeros((len(good_matches), 2), dtype=np.float32)
    for i, match in enumerate(good_matches):
        points1[i, :] = kp1[match.queryIdx].pt
        points2[i, :] = kp2[match.trainIdx].pt

    # 8. Estimate the homography mapping img2 coordinates into img1's frame.
    # RANSAC with a 4.0 px reprojection threshold (OpenCV default is 3.0).
    (M, mask) = cv.findHomography(points2, points1, cv.RANSAC, 4.0)
    # findHomography returns None on failure; fail fast with a clear message
    # instead of a cryptic TypeError on the arithmetic below.
    if M is None:
        raise ValueError("findHomography failed: not enough reliable matches")
    print(M)

    # 9. Warp img2 onto a canvas whose width is trimmed by the homography's
    # x-translation, so the result is roughly tight on the right edge.
    widthNew = img1.shape[1] + img2.shape[1] - int(M[0][2]/M[2][2])
    result = cv.warpPerspective(img2, M, (widthNew, img2.shape[0]))
    # Paste img1 over the left part; the seam is placed at the midpoint of the
    # estimated overlap region.
    widthDiff = (img1.shape[1] - M[0][2]) / 2
    img11 = img1[:, 0:int(img1.shape[1] - widthDiff)]
    result[0:img11.shape[0], 0:img11.shape[1]] = img11

    return result


if __name__ == "__main__":
    # 1.读入图片
    img1 = cv.imread(r"D:\data\CUGW\test4.bak\g2\IMG_5354.JPG")
    img2 = cv.imread(r"D:\data\CUGW\test4.bak\g2\IMG_5355.JPG")
    
    # 顺时针旋转90度
    #img1_90 = opencv_utils.rotateClockWise90(img1)
    #img2_90 = opencv_utils.rotateClockWise90(img2)

    # 进行拼接
    result = stitchImage(img1, img2)

    # 结果图逆时针旋转90度
    #resultRotate = opencv_utils.rotateAntiClockWise90(result)
    cv.imwrite(r"out/stitched.jpg", result)
    cv.imshow('result', result)

    # 原图与结果图一起展示，易于对比
    #resultMerge = opencv_utils.merge3Image(img1, img2, stitched)
    #cv.imshow('result', resultMerge)
    #cv.imwrite('resultMerge.png', resultMerge)

    cv.waitKey(0)
    cv.destroyAllWindows()
