import functools
from datetime import datetime

import cv2
import numpy as np


def timeit(method):
    """Decorator: measure and print a function's wall-clock execution time.

    Prints "<name> took: <ms> ms" after each call and returns the wrapped
    function's result unchanged.
    """

    @functools.wraps(method)  # preserve __name__/__doc__ so stacked decorators and introspection work
    def timed(*args, **kw):
        start_time = datetime.now()
        result = method(*args, **kw)
        end_time = datetime.now()
        # convert seconds to milliseconds
        elapsed_time_ms = (end_time - start_time).total_seconds() * 1000
        print(f"{method.__name__} 耗时: {elapsed_time_ms:.0f} 毫秒")
        return result

    return timed


@timeit
def orbinit():
    """Create and return an ORB feature detector with tuned parameters."""
    orb_params = dict(
        nfeatures=500,
        scaleFactor=1.2,
        nlevels=8,
        edgeThreshold=31,
        firstLevel=0,
        WTA_K=2,
        scoreType=cv2.ORB_HARRIS_SCORE,
        patchSize=31,
        fastThreshold=20,
    )
    return cv2.ORB_create(**orb_params)


@timeit
def siftinit():
    """Create and return a SIFT feature detector with tuned parameters."""
    sift_params = dict(
        nfeatures=500,
        nOctaveLayers=4,
        contrastThreshold=0.03,
        edgeThreshold=15,
        sigma=1.7,
    )
    return cv2.SIFT_create(**sift_params)


@timeit
def detect(image1, image2, detection):
    """Find keypoints and descriptors for a pair of images.

    Runs full detection on image1, then computes descriptors on image2 at
    the SAME keypoint locations found in image1 (via ``compute``), rather
    than detecting fresh keypoints on image2.

    NOTE(review): reusing image1's keypoints on image2 assumes the two
    images are roughly aligned — confirm this is intentional (a full
    detectAndCompute on image2 was considered and disabled here).

    Returns a tuple: (keypoints1, descriptors1, keypoints2, descriptors2).
    """
    kp1, des1 = detection.detectAndCompute(image1, None)
    # Only compute descriptors for these keypoints on the second image.
    kp2, des2 = detection.compute(image2, kp1)
    return kp1, des1, kp2, des2


@timeit
def match(keypoints1, descriptors1, keypoints2, descriptors2, detection):
    """Match two descriptor sets with a FLANN-based matcher and filter
    the matches using Lowe's ratio test.

    Parameters:
        keypoints1, keypoints2: keypoint lists (unused here, kept for the
            caller's existing signature).
        descriptors1, descriptors2: descriptor arrays to match.
        detection: the detector instance; its class name (ORB or SIFT)
            selects the appropriate FLANN index parameters.

    Returns:
        List of cv2.DMatch objects that pass the ratio test.

    Raises:
        ValueError: if the detector type is neither ORB nor SIFT.
    """
    detector_name = type(detection).__name__
    if detector_name == "ORB":
        # Binary descriptors -> LSH index.
        FLANN_INDEX_LSH = 6
        index_params = dict(algorithm=FLANN_INDEX_LSH,
                            table_number=6,  # 12
                            key_size=12,  # 20
                            multi_probe_level=1)  # 2
        search_params = dict(checks=50)  # or pass empty dictionary
    elif detector_name == "SIFT":
        # Float descriptors -> KD-tree index.
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
    else:
        # Previously this fell through and crashed later with an
        # unbound-variable NameError; fail fast with a clear message.
        raise ValueError(f"Unsupported detector type: {detector_name}")

    flann = cv2.FlannBasedMatcher(index_params, search_params)
    # k-nearest-neighbour matching (k=2) for the ratio test.
    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Keep only good matches (Lowe's ratio test).
    good_matches = []
    for pair in matches:
        # FLANN-LSH may return fewer than k neighbours for some queries;
        # skip incomplete pairs instead of crashing on unpacking.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.75 * n.distance:
            good_matches.append(m)
    return good_matches


@timeit
def process(path1='data/result/yolo/1.png', path2='data/result/yolo/3.png'):
    """Load two images, match their features with SIFT, and display the
    top matches in a window (blocks until a key is pressed).

    Parameters:
        path1, path2: image file paths (defaults preserve the original
            hard-coded behavior).

    Raises:
        FileNotFoundError: if either image cannot be read.
    """
    image1 = cv2.imread(path1)
    if image1 is None:
        # cv2.imread returns None instead of raising; fail with a clear
        # message rather than a cryptic cv2.resize error later.
        raise FileNotFoundError(f"Could not read image: {path1}")
    image2 = cv2.imread(path2)
    if image2 is None:
        raise FileNotFoundError(f"Could not read image: {path2}")
    image1 = cv2.resize(image1, (800, 800))
    image2 = cv2.resize(image2, (800, 800))

    # detection = orbinit()
    detection = siftinit()
    keypoints1, descriptors1, keypoints2, descriptors2 = detect(image1, image2, detection)
    matches = match(keypoints1, descriptors1, keypoints2, descriptors2, detection)

    # Draw the top-N matches.
    N = 30  # number of best matches to display
    matched_image = cv2.drawMatches(image1, keypoints1, image2, keypoints2, matches[:N], None, flags=2)

    matched_image = cv2.resize(matched_image, (1600, 800))

    # Show the match result.
    cv2.imshow('Matches', matched_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == "__main__":
    # Removed dead commented-out experimentation code.
    process()
