import cv2
import numpy as np
from PIL import Image
class FeatureMatcher:
    '''
    Feature detection, description, matching, and transform-matrix
    estimation between a fixed "base" image and query images, using ORB
    features matched by Hamming-distance brute force + Lowe's ratio test.
    '''

    def __init__(self, match_feature_num=2000, scaleFactor=1.2, nlevels=8, image_resolution=224):
        '''
        Initialize the ORB detector and base-image state.

        Args:
            match_feature_num (int): maximum number of ORB features to detect (default 2000).
            scaleFactor (float): pyramid decimation ratio between levels (default 1.2).
            nlevels (int): number of pyramid levels (default 8).
            image_resolution (int): side length (pixels) images are resized to (default 224).
        '''
        # Create the ORB feature detector with the configured parameters.
        self.orb = cv2.ORB_create(
            nfeatures=match_feature_num,
            scaleFactor=scaleFactor,
            nlevels=nlevels
        )
        # Base-image state; populated lazily by _set_base_image().
        self.base_image = None
        self.base_keypoints = None
        self.base_descriptors = None
        self.image_resolution = image_resolution

    def _set_base_image(self, base_image_path):
        '''
        Load, resize, and cache the base image and its ORB features.

        Args:
            base_image_path (str): path to the base image file.
        '''
        # Resize to the working resolution before feature extraction.
        # NOTE(review): assumes the file decodes to a 1- or 3-channel
        # image; an RGBA image would make ORB fail — confirm inputs.
        base_image = Image.open(base_image_path).resize((self.image_resolution, self.image_resolution))

        self.base_image = np.array(base_image)
        # Detect and cache the base image's keypoints and descriptors.
        self.base_keypoints, self.base_descriptors = self.detect_features(self.base_image)

    def detect_features(self, image: np.ndarray):
        '''
        Detect keypoints and compute ORB descriptors for an image.

        Args:
            image (np.ndarray): input image.

        Returns:
            keypoints (list[cv2.KeyPoint]): detected keypoints.
            descriptors (np.ndarray | None): descriptor matrix, or None
                when no keypoints are found.
        '''
        keypoints, descriptors = self.orb.detectAndCompute(image, None)
        return keypoints, descriptors

    def match_features(self, descriptors):
        '''
        KNN-match the base image's descriptors against another image's.

        Args:
            descriptors (np.ndarray): descriptors of the current image.

        Returns:
            matches (list[list[cv2.DMatch]]): raw KNN match pairs
                (query = base image, train = current image).
        '''
        # Hamming norm is the correct metric for ORB's binary descriptors;
        # crossCheck must be off for knnMatch with k > 1.
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
        # k=2: keep the two best candidates per query descriptor so that
        # filter_matches() can apply Lowe's ratio test.
        matches = bf.knnMatch(self.base_descriptors, descriptors, k=2)
        return matches

    def visual_keypoints(self, image_path, keypoints, save_path):
        '''
        Draw keypoints on an image and save the visualization.

        Args:
            image_path (str): path to the input image.
            keypoints (list[cv2.KeyPoint]): keypoints to draw.
            save_path (str): where to write the annotated image.
        '''
        image = cv2.imread(image_path)
        if image is None:
            print(f"无法加载图像：{image_path}")
            return

        # Write the keypoint count in white in the top-left corner first,
        # so the keypoint circles are drawn on top of it.
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image, f"Keypoints: {len(keypoints)}", (10, 30), font, 1, (255, 255, 255), 2)
        # Rich flags draw keypoint size and orientation, not just centers.
        image_with_keypoints = cv2.drawKeypoints(
            image, keypoints, None, color=(0, 255, 0),
            flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
        )
        cv2.imwrite(save_path, image_with_keypoints)
        print(f"特征点可视化结果已保存到：{save_path}")

    def visual_matches(self, image_path, keypoints, good_matches, save_path):
        '''
        Visualize matches between the base image and the current image
        and save the result.

        Args:
            image_path (str): path to the current image.
            keypoints (list[cv2.KeyPoint]): keypoints of the current image.
            good_matches (list[cv2.DMatch]): filtered match list.
            save_path (str): where to write the match visualization.
        '''
        image = cv2.imread(image_path)
        if image is None:
            print(f"无法加载图像：{image_path}")
            return
        # Draw only the filtered matches; unmatched keypoints are hidden.
        match_img = cv2.drawMatches(
            self.base_image, self.base_keypoints,
            image, keypoints,
            good_matches, None,
            flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS
        )
        cv2.imwrite(save_path, match_img)
        print(f"匹配结果可视化已保存到：{save_path}")

    def filter_matches(self, matches):
        '''
        Filter raw KNN matches with Lowe's ratio test.

        Args:
            matches (list[list[cv2.DMatch]]): raw KNN match pairs.

        Returns:
            good_matches (list[cv2.DMatch]): matches whose best distance
                is below 0.75x the second-best distance.
        '''
        good_matches = []
        for pair in matches:
            # knnMatch can return fewer than 2 candidates per query when
            # the train descriptor set is small; skip those pairs instead
            # of raising ValueError on unpacking.
            if len(pair) < 2:
                continue
            m, n = pair
            if m.distance < 0.75 * n.distance:
                good_matches.append(m)
        return good_matches

    def calculate_transform_matrix(self, keypoints, matches):
        '''
        Estimate the homography mapping the current image onto the base
        image from filtered matches.

        Args:
            keypoints (list[cv2.KeyPoint]): keypoints of the current image.
            matches (list[cv2.DMatch]): filtered matches (query = base,
                train = current image).

        Returns:
            transform_matrix (np.ndarray | None): 3x3 homography, or None
                when fewer than 4 matches are available (findHomography
                itself may also return None if RANSAC fails).
        '''
        if len(matches) >= 4:
            # Gather matched point coordinates from both images.
            src_pts = np.float32([self.base_keypoints[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([keypoints[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
            # RANSAC (5 px reprojection threshold) rejects outlier matches;
            # argument order (dst, src) makes the homography map the
            # current image into base-image coordinates.
            transform_matrix, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            return transform_matrix
        else:
            print("匹配点不足，无法计算变换矩阵")
            return None
        
if __name__ == '__main__':
    import json
    import os
    import random

    # Survey the distribution of ORB keypoint counts over a random
    # sample of vascular images from the infant dataset.
    matcher = FeatureMatcher()
    dataset_dir = '../Dataset/infantImages'

    # Load the annotation dictionary (image name -> metadata).
    with open(os.path.join(dataset_dir, 'annotations.json'), 'r') as fh:
        annotations = json.load(fh)

    # Randomly pick 300 image names to process.
    sample_size = 300
    sampled_names = random.sample(list(annotations.keys()), sample_size)

    # Detect features on each sampled vascular image and record counts.
    keypoint_counts = []
    side = matcher.image_resolution
    for name in sampled_names:
        vascular_path = os.path.join(dataset_dir, annotations[name]['vascular_path'])
        with open(vascular_path, 'rb') as fh:
            vascular_arr = np.array(Image.open(fh).resize((side, side)))

        detected, _ = matcher.detect_features(vascular_arr)
        keypoint_counts.append(len(detected))

    # Report summary statistics of the keypoint counts.
    print(f"Minimum keypoints: {np.min(keypoint_counts)}")
    print(f"Maximum keypoints: {np.max(keypoint_counts)}")
    print(f"Mean keypoints: {np.mean(keypoint_counts)}")
    print(f"Median keypoints: {np.median(keypoint_counts)}")
    print(f"Standard deviation of keypoints: {np.std(keypoint_counts)}")