import colorsys
import os
import random

import cv2
import numpy as np

import random_color

# Report the OpenCV build in use — SIFT's location (main module vs. xfeatures2d)
# depends on the installed version.
print("Version of opencv:", cv2.__version__)

if __name__ == "__main__":

    # Paths of the two images to compare.
    image_path = "images"
    image_name1 = "gakki_eng_Page1.png"
    image_name2 = "gakki_eng_Page2.png"

    image1 = cv2.imread(os.path.join(image_path, image_name1))
    image2 = cv2.imread(os.path.join(image_path, image_name2))
    # cv2.imread returns None (no exception) for a missing/unreadable file;
    # fail fast here instead of crashing cryptically inside detectAndCompute.
    if image1 is None or image2 is None:
        raise FileNotFoundError(
            "Could not read input images: {} / {}".format(
                os.path.join(image_path, image_name1),
                os.path.join(image_path, image_name2)))

    MATCH_THRESHOLD = 0.5  # Lowe ratio-test threshold; smaller keeps fewer matches.

    # Create the SIFT detector. Since OpenCV 4.4 SIFT lives in the main
    # module (patent expired); older builds only expose it via xfeatures2d.
    try:
        sift = cv2.SIFT_create()
    except AttributeError:
        sift = cv2.xfeatures2d.SIFT_create()

    # Detect keypoints and compute their descriptors.
    kps1, des1 = sift.detectAndCompute(image1, None)
    kps2, des2 = sift.detectAndCompute(image2, None)
    print("Number of key points: Image1:{}, Image2:{}".format(len(kps1), len(kps2)))

    # Draw all keypoints, side by side.
    image3 = cv2.drawKeypoints(image1, kps1, None, color=[0, 0, 255])
    image4 = cv2.drawKeypoints(image2, kps2, None, color=[255, 0, 0])
    cv2.imwrite("Keypoints.png", np.hstack([image3, image4]))

    # Brute-force KNN matching on L2 distance (the right norm for SIFT).
    bf = cv2.BFMatcher(cv2.NORM_L2)
    matches = bf.knnMatch(des1, des2, k=2)

    # Keypoints whose matches pass the ratio test.
    good_kps1 = []
    good_kps2 = []
    good_match = []           # accepted DMatch objects
    good_match_distance = []  # descriptor distances of accepted matches
    max_x = 0
    max_y = 0
    # Each element maps ((x1, y1), (x2, y2)) -> (tran_x, tran_y).
    points_translations = []

    # Compute the translation of every accepted match.
    for m, n in matches:
        if m.distance < MATCH_THRESHOLD * n.distance:
            good_match_distance.append(m.distance)
            good_match.append(m)
            points1 = kps1[m.queryIdx].pt  # keypoint coordinates in image1
            points2 = kps2[m.trainIdx].pt  # keypoint coordinates in image2
            good_kps1.append(kps1[m.queryIdx])
            good_kps2.append(kps2[m.trainIdx])

            # Translation along each axis (image2 minus image1).
            translation_X = points2[0] - points1[0]
            translation_Y = points2[1] - points1[1]

            # Track the largest translation seen so far.
            # NOTE(review): negative (leftward/upward) translations never
            # update these maxima, so such motion is ignored by the banding
            # below — confirm this is intended.
            max_x = max(translation_X, max_x)
            max_y = max(translation_Y, max_y)

            points_translations.append(
                {(points1, points2): (translation_X, translation_Y)})
    print("Max translation X:{}, Y:{}".format(max_x, max_y))

    # Collect every (dx, dy) once; the original re-appended the whole list on
    # every band iteration, which only set() below papered over.
    translations = []
    for points_translation in points_translations:
        translations.extend(points_translation.values())

    colors = random_color.ncolors(int(max_y // 40) + 2)
    random.shuffle(colors)

    # Color the matches band by band on translation magnitude: 40-px bands,
    # each widened by 40 px on both sides so adjacent bands overlap.
    for translations_area_index in range(1, int(max_y // 40) + 2):
        t_color = colors[translations_area_index]
        t_lower = (translations_area_index - 1) * 40 - 40
        t_upper = translations_area_index * 40 + 40
        # Labels previously printed swapped (t_lower under the "t upper" label).
        print("t upper:{}, t lower:{}".format(t_upper, t_lower))
        for points_translation in points_translations:
            for points, translation in points_translation.items():
                translation_X, translation_Y = translation
                # NOTE(review): both axes are tested against the same Y-derived
                # band; the original author flagged that X and Y should not
                # share one test — confirm before relying on the coloring.
                if (t_lower <= translation_X <= t_upper
                        and t_lower <= translation_Y <= t_upper):
                    image1_translation = cv2.circle(
                        image1,
                        center=(int(points[0][0]), int(points[0][1])),
                        radius=4,
                        color=t_color,
                        thickness=2,
                    )
                    image2_translation = cv2.circle(
                        image2,
                        center=(int(points[1][0]), int(points[1][1])),
                        radius=4,
                        color=t_color,
                        thickness=2,
                    )
    print("Kinds of translation:{}".format(len(set(translations))))
    try:
        # image*_translation are only bound if at least one point fell in a
        # band; the except keeps the no-match case a warning, not a crash.
        cv2.imwrite("translation.png",
                    np.hstack([image1_translation, image2_translation]))
    except Exception as err:
        print("Error:{}".format(err))

    # Guard against division by zero when no match passed the ratio test.
    if good_match_distance:
        print("Avg good match distance:{}".format(
            sum(good_match_distance) / len(good_match_distance)))

    # Draw the keypoints that matched successfully.
    image5 = cv2.drawKeypoints(image1, good_kps1, None, color=[0, 0, 255])
    image6 = cv2.drawKeypoints(image2, good_kps2, None, color=[255, 0, 0])
    cv2.imwrite("good_Keypoints.png", np.hstack([image5, image6]))

    # Draw the match lines between the two images.
    res = cv2.drawMatches(image1, kps1,
                          image2, kps2,
                          good_match,
                          None,
                          singlePointColor=[255, 0, 0],
                          matchColor=[0, 0, 255]
    )
    cv2.imwrite("matches.png", res)
