import numpy as np
import cv2
import math

from numpy import ma
# Minimum number of ratio-test-surviving matches required before a
# homography is estimated in imagestitching().
MIN_MATCH_COUNT = 10
def getKeyPoints(query, train, output_name):
    """Match SIFT keypoints between two images and save a visualization.

    Detects SIFT keypoints/descriptors in both images, brute-force matches
    them with L2 distance and cross-checking, draws the 20 best (lowest
    distance) matches, shows the result in a window (blocks until a key is
    pressed), and writes it to ``output_name``.

    Args:
        query: Path of the query image file.
        train: Path of the train image file.
        output_name: Path where the match visualization is written.

    Raises:
        FileNotFoundError: If either image cannot be read.
    """
    sift = cv2.SIFT_create()

    img_query = cv2.imread(query)
    if img_query is None:
        # cv2.imread returns None on failure instead of raising; fail early
        # with a clear message rather than crashing inside cvtColor.
        raise FileNotFoundError("Could not read query image: %s" % query)
    gray_q = cv2.cvtColor(img_query, cv2.COLOR_BGR2GRAY)
    kps_q, des_q = sift.detectAndCompute(gray_q, None)

    img_train = cv2.imread(train)
    if img_train is None:
        raise FileNotFoundError("Could not read train image: %s" % train)
    gray_t = cv2.cvtColor(img_train, cv2.COLOR_BGR2GRAY)
    kps_t, des_t = sift.detectAndCompute(gray_t, None)

    # Cross-check keeps only mutually-best matches, so no ratio test needed.
    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = sorted(bf.match(des_q, des_t), key=lambda m: m.distance)

    # Draw only the 20 strongest matches to keep the image readable.
    img3 = cv2.drawMatches(img_query, kps_q, img_train, kps_t,
                           matches[:20], None, flags=2)
    cv2.imshow("show", img3)
    cv2.waitKey()
    cv2.imwrite(output_name, img3)

def imagestitching(query, train, output_name):
    """Stitch the query image onto the train image via a SIFT homography.

    Detects SIFT features in both images, matches them with FLANN + Lowe's
    ratio test, estimates a homography with RANSAC, warps the query image
    onto a 700x400 canvas, overlays the train image at the origin, shows the
    result (blocks until a key press) and writes it to ``output_name``.

    Args:
        query: Path of the image to be warped.
        train: Path of the reference image (pasted unwarped at the origin).
        output_name: Path where the stitched image is written.

    Raises:
        FileNotFoundError: If either image cannot be read.
    """
    sift = cv2.SIFT_create()

    img_query = cv2.imread(query)
    if img_query is None:
        raise FileNotFoundError("Could not read query image: %s" % query)
    gray_q = cv2.cvtColor(img_query, cv2.COLOR_BGR2GRAY)
    kps_q, des_q = sift.detectAndCompute(gray_q, None)

    img_train = cv2.imread(train)
    if img_train is None:
        raise FileNotFoundError("Could not read train image: %s" % train)
    gray_t = cv2.cvtColor(img_train, cv2.COLOR_BGR2GRAY)
    kps_t, des_t = sift.detectAndCompute(gray_t, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des_q, des_t, k=2)

    # Lowe's ratio test: keep a match only if it is clearly better than the
    # second-best candidate (0.7 is the threshold from the original paper).
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]

    if len(good) <= MIN_MATCH_COUNT:
        # Previously this case failed silently; report it instead.
        print("Not enough good matches: %d found, need more than %d"
              % (len(good), MIN_MATCH_COUNT))
        return

    # Estimate homography between template and scene.
    src_pts = np.float32([kps_q[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kps_t[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)

    M = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)[0]
    if M is None:
        # RANSAC can fail to find a consistent model even with many matches.
        print("Homography estimation failed.")
        return

    transformedImg = cv2.warpPerspective(img_query, M, (700, 400))
    # Paste the train image at the origin. A single NumPy slice assignment
    # replaces the original O(H*W) Python pixel loop; clipping to the canvas
    # size avoids an IndexError when img_train exceeds 700x400.
    h = min(img_train.shape[0], transformedImg.shape[0])
    w = min(img_train.shape[1], transformedImg.shape[1])
    transformedImg[:h, :w] = img_train[:h, :w]

    cv2.imshow("show", transformedImg)
    cv2.waitKey()
    cv2.imwrite(output_name, transformedImg)

def visionConversion(src_path='./SIFT_With_Ku/down.jpg',
                     out_path='./SIFT_With_Ku/visionConversion.jpg',
                     out_size=(600, 700)):
    """Apply a fixed perspective transform to an image and save the result.

    Maps four hand-picked source points to four destination points (a
    manually measured correspondence, in (x, y) pixel order), warps the
    image accordingly, and writes it to ``out_path``.

    Args:
        src_path: Path of the input image (default preserves old behavior).
        out_path: Path where the warped image is written.
        out_size: (width, height) of the output canvas.

    Raises:
        FileNotFoundError: If the input image cannot be read.
    """
    img = cv2.imread(src_path)
    if img is None:
        raise FileNotFoundError("Could not read image: %s" % src_path)
    # NOTE(review): points were measured by hand; the commented pairs below
    # are the same coordinates with x/y swapped, kept for reference.
    # pts1 = np.float32([[243,107],[247,282],[415,12],[431,383]])
    # pts2 = np.float32([[35,20],[38,364],[513,15],[518,369]])
    pts1 = np.float32([[107, 243], [282, 247], [12, 415], [383, 431]])
    pts2 = np.float32([[20, 35], [364, 38], [15, 513], [369, 518]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(img, M, out_size)
    cv2.imwrite(out_path, dst)
# Example usage:
# imagestitching("./SIFT_With_Ku/houser.png", "./SIFT_With_Ku/housel.png", "./SIFT_With_Ku/houseStitching.jpg")

# Guard the script entry point so importing this module has no side effects.
if __name__ == "__main__":
    visionConversion()