import cv2
import numpy as np
from matplotlib import pyplot as plt

def extract_sift_feature(img):
    """Detect SIFT keypoints and compute their descriptors.

    Args:
        img: input image (grayscale or BGR ndarray).

    Returns:
        Tuple ``(keypoints, descriptors)`` as produced by
        ``cv2.SIFT.detectAndCompute`` (descriptors is ``None`` when no
        keypoints are found).
    """
    detector = cv2.SIFT.create()
    # Detect and describe in a single pass over the full image (no mask).
    kps, desc = detector.detectAndCompute(img, None)
    return kps, desc

def show_img_sift_dot(img):
    """Detect SIFT keypoints in an image and display them with matplotlib.

    Args:
        img: BGR image (as returned by ``cv2.imread``).
    """
    # Convert BGR -> RGB so matplotlib renders the colors correctly.
    rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Extract the SIFT keypoints (descriptors are not needed here).
    keypoints, _ = extract_sift_feature(img)
    # Draw keypoint positions on the RGB image. Passing None as the output
    # image lets OpenCV allocate a fresh array instead of clobbering the
    # caller's input (the original passed `img` as the output buffer).
    annotated = cv2.drawKeypoints(rgb_img, keypoints, None)
    plt.imshow(annotated)
    plt.show()

def draw_match_image(img1, img2):
    """Match SIFT keypoints between two images and display the matches.

    Args:
        img1: first BGR image.
        img2: second BGR image.
    """
    rgb_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
    rgb_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)

    # Extract SIFT keypoints and descriptors from both images.
    keypoints1, descriptors1 = extract_sift_feature(img1)
    keypoints2, descriptors2 = extract_sift_feature(img2)

    # Brute-force matcher with L1 distance; crossCheck keeps only pairs
    # that are each other's mutual best match.
    bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
    matches = bf.match(descriptors1, descriptors2)
    # Sort by descriptor distance so the best matches come first.
    matches = sorted(matches, key=lambda m: m.distance)

    # Draw only the 50 best matches. Passing None as the output image lets
    # OpenCV allocate it; the original passed rgb_img2, drawing over one of
    # the inputs.
    img3 = cv2.drawMatches(rgb_img1, keypoints1, rgb_img2, keypoints2,
                           matches[:50], None, flags=2)
    plt.imshow(img3)
    plt.show()

def cal_SIFT_sim(img1, img2):
    """Compute a SIFT-based similarity score between two images.

    Ratio-test matches are computed in both directions (img1->img2 and
    img2->img1) and only mutually consistent matches are kept. The score
    is the number of consistent matches divided by the smaller keypoint
    count.

    Args:
        img1: first BGR image.
        img2: second BGR image.

    Returns:
        Similarity in [0, 1]; 0.0 when either image yields no keypoints.
    """
    # Work on grayscale images.
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Extract SIFT keypoints/descriptors for both images.
    keypoints1, descriptors1 = extract_sift_feature(img1)
    keypoints2, descriptors2 = extract_sift_feature(img2)

    # Guard: with no keypoints there is nothing to match — this also avoids
    # a ZeroDivisionError below and a knnMatch call on None descriptors.
    if not keypoints1 or not keypoints2:
        return 0.0

    bf = cv2.BFMatcher()

    def _ratio_matches(des_a, des_b):
        # Lowe's ratio test: keep a match only when it is clearly better
        # (distance < 0.7x) than the second-best candidate. knnMatch can
        # return fewer than 2 neighbors, so check the pair length.
        good = []
        for pair in bf.knnMatch(des_a, des_b, k=2):
            if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
                good.append(pair[0])
        return good

    # Good matches in both directions.
    top_results1 = _ratio_matches(descriptors1, descriptors2)
    top_results2 = _ratio_matches(descriptors2, descriptors1)

    # Keep only matches consistent in both directions. A set of reversed
    # (train, query) index pairs makes this O(n + m) instead of the
    # original quadratic nested scan.
    reverse_pairs = {(m.trainIdx, m.queryIdx) for m in top_results2}
    top_results = [m for m in top_results1
                   if (m.queryIdx, m.trainIdx) in reverse_pairs]

    # Similarity = fraction of the smaller keypoint set that matched.
    return len(top_results) / min(len(keypoints1), len(keypoints2))

def extract_SIFT_vector(img, vector_size):
    """Build a fixed-length SIFT feature vector for an image.

    The *vector_size* strongest keypoints (by response) are described and
    their 128-d descriptors concatenated; the result is zero-padded to
    exactly ``vector_size * 128`` elements.

    Args:
        img: BGR image.
        vector_size: number of keypoints to keep.

    Returns:
        1-D numpy array of length ``vector_size * 128``.
    """
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT.create()
    keypoints = sift.detect(gray_img, None)
    # Strongest keypoints first (higher response = more stable feature).
    keypoints = sorted(keypoints, key=lambda kp: -kp.response)
    img_kps = keypoints[:vector_size]
    # Compute descriptors for the selected keypoints.
    kps, des = sift.compute(gray_img, img_kps)

    # Each SIFT descriptor is 128-dimensional.
    vector_len = vector_size * 128

    # With no keypoints at all, sift.compute yields des=None; fall back to
    # an all-zero vector of the expected length instead of crashing.
    if des is None:
        return np.zeros(vector_len, dtype=np.float32)

    # Flatten the (k, 128) descriptor matrix into one vector.
    vector = des.flatten()
    # Zero-pad when fewer than vector_size keypoints were found.
    # BUG FIX: np.concatenate takes a *sequence* of arrays; the original
    # passed the pad array as the (integer) axis argument, a TypeError.
    if vector.size < vector_len:
        pad = np.zeros(vector_len - vector.size, dtype=vector.dtype)
        vector = np.concatenate([vector, pad])
    return vector


 
import scipy.spatial as T

# Number of SIFT keypoints used to build each feature vector.
vector_size = 20
img_path1 = r"D:\Users\user\Pictures\new_dir\1.jpg"
img_path2 = r"D:\Users\user\Pictures\new_dir\3.jpg"
img1 = cv2.imread(img_path1)
img2 = cv2.imread(img_path2)
# Extract fixed-length SIFT feature vectors, shaped (1, 128*vector_size)
# so cdist treats each image as a single observation.
img1_vector = extract_SIFT_vector(img1, vector_size).reshape(-1, 128 * vector_size)
img2_vector = extract_SIFT_vector(img2, vector_size).reshape(-1, 128 * vector_size)
# BUG FIX: cdist with 'cosine' returns the cosine *distance* (in [0, 2]);
# convert to cosine similarity so the printed value matches its label.
sim = 1 - T.distance.cdist(img1_vector, img2_vector, 'cosine')
print("current vector=", vector_size, ",sim=", sim)
