import time
import cv2
import numpy as np

class Imgproc:
    def __init__(self):
        self.prev_frame_time = 0
        self.curr_frame_time = 0
        self.fps = 0
        
        # 初始化特征检测器
        self.sift = None
        self.orb = None
        self.surf = None
        self.fast = None
        self.brisk = None
        self.akaze = None
        self.kaze = None
        self.blob = None
        self.harris = None
        self.gftt = None
        self.current_detector = None
        
        # 连续检测相关变量
        self.prev_keypoints = None
        self.prev_descriptors = None
        self.prev_frame = None
        self.feature_velocity = 0.0  # 特征点速度（米/秒）
        self.velocity_history = []   # 速度历史记录
        self.max_history_size = 10   # 最大历史记录数量
        self.frame_rate = 30.0       # 默认帧率（帧/秒）
        self.max_velocity = 0.0      # 最大速度（米/秒）
        self.max_velocity_timestamp = 0  # 最大速度时间戳
        
        # 速度平滑滤波相关变量
        self.velocity_filter_size = 5  # 速度滤波器大小
        self.velocity_filter_buffer = []  # 速度滤波缓冲区
        self.smoothed_velocity = 0.0  # 平滑后的速度
        
        # 光流显示相关变量
        self.prev_matches = None  # 前一帧的匹配信息
        
        # 像素大小设置（2微米 = 2e-6米）
        self.pixel_size = 2e-6  # 米/像素
    
    def set_pixel_size(self, pixel_size_meters):
        """
        设置像素大小
        
        Args:
            pixel_size_meters (float): 像素大小（米）
        """
        self.pixel_size = pixel_size_meters
        print(f"像素大小设置为: {pixel_size_meters} 米")
    
    def get_pixel_size(self):
        """
        获取当前像素大小
        
        Returns:
            float: 像素大小（米）
        """
        return self.pixel_size
    
    def set_frame_rate(self, frame_rate):
        """
        设置帧率
        
        Args:
            frame_rate (float): 帧率（帧/秒）
        """
        self.frame_rate = frame_rate
        print(f"帧率设置为: {frame_rate} 帧/秒")
    
    def get_frame_rate(self):
        """
        获取当前帧率
        
        Returns:
            float: 帧率（帧/秒）
        """
        return self.frame_rate
    
    def get_max_velocity(self):
        """
        获取最大速度
        
        Returns:
            tuple: (max_velocity, timestamp) 最大速度（米/秒）和时间戳
        """
        return self.max_velocity, self.max_velocity_timestamp
    
    def reset_max_velocity(self):
        """
        重置最大速度记录
        """
        self.max_velocity = 0.0
        self.max_velocity_timestamp = 0
        print("最大速度记录已重置")
    
    def smooth_velocity(self, current_velocity):
        """
        对速度进行平滑滤波
        
        Args:
            current_velocity (float): 当前速度值
            
        Returns:
            float: 平滑后的速度值
        """
        # 将当前速度添加到滤波缓冲区
        self.velocity_filter_buffer.append(current_velocity)
        
        # 保持缓冲区大小
        if len(self.velocity_filter_buffer) > self.velocity_filter_size:
            self.velocity_filter_buffer.pop(0)
        
        # 如果缓冲区太小，直接返回当前值
        if len(self.velocity_filter_buffer) < 3:
            return current_velocity
        
        # 使用中位数滤波进行平滑
        velocity_array = np.array(self.velocity_filter_buffer)
        
        # 移除异常值
        q1 = np.percentile(velocity_array, 25)
        q3 = np.percentile(velocity_array, 75)
        iqr = q3 - q1
        lower_bound = q1 - 1.5 * iqr
        upper_bound = q3 + 1.5 * iqr
        
        filtered_velocities = velocity_array[(velocity_array >= lower_bound) & (velocity_array <= upper_bound)]
        
        if len(filtered_velocities) > 0:
            # 使用加权平均，最近的帧权重更大
            weights = np.linspace(0.5, 1.0, len(filtered_velocities))
            weights = weights / np.sum(weights)
            smoothed_velocity = np.average(filtered_velocities, weights=weights)
        else:
            smoothed_velocity = current_velocity
        
        return smoothed_velocity
        
    def calculate_fps(self):
        """
        计算当前帧率
        :return: 当前帧率值
        """
        self.curr_frame_time = time.time()
        if self.prev_frame_time == 0:
            self.prev_frame_time = self.curr_frame_time
            return 0
            
        # 计算帧率
        time_diff = self.curr_frame_time - self.prev_frame_time
        if time_diff > 0:
            self.fps = 1.0 / time_diff
            
        self.prev_frame_time = self.curr_frame_time
        return self.fps
    
    def calculate_feature_velocity(self, current_keypoints, prev_keypoints, calibration_factor=None):
        """
        计算特征点速度（改进版）
        
        Args:
            current_keypoints: 当前帧特征点
            prev_keypoints: 前一帧特征点
            calibration_factor: 标定系数（米/像素），如果为None则使用内部pixel_size
            
        Returns:
            tuple: (avg_velocity, match_count) 平均特征点速度（米/秒）和匹配点数
        """
        if current_keypoints is None or prev_keypoints is None:
            return 0.0, 0
        
        if len(current_keypoints) == 0 or len(prev_keypoints) == 0:
            return 0.0, 0
        
        if self.prev_descriptors is None:
            return 0.0, 0
        
        try:
            # 计算特征点匹配
            if len(current_keypoints) > 0 and len(prev_keypoints) > 0:
                # 使用FLANN匹配器进行特征点匹配
                if (self.current_detector == self.orb or 
                    self.current_detector == self.brisk or 
                    self.current_detector == self.akaze):
                    # 使用汉明距离匹配器（适用于二进制描述符）
                    # 直接使用crossCheck=False，避免兼容性问题
                    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
                elif (self.current_detector == 'HARRIS' or 
                      self.current_detector == 'GFTT' or 
                      self.current_detector == self.fast or 
                      self.current_detector == self.blob):
                    # 这些检测器不产生描述符，无法进行匹配
                    return 0.0, 0
                else:
                    # 使用BFMatcher作为默认选择，更稳定
                    matcher = cv2.BFMatcher(cv2.NORM_L2)
                
                # 注意：current_descriptors应该从detect_features_continuous函数传入
                # 这里我们无法重新计算，因为需要原始图像
                # 所以我们需要修改调用方式
                print("警告：无法在calculate_feature_velocity中重新计算描述符")
                return 0.0, 0
                
                if current_descriptors is not None and self.prev_descriptors is not None:
                    # 检查描述符的有效性
                    if len(current_descriptors) == 0 or len(self.prev_descriptors) == 0:
                        return 0.0, 0
                    
                    # 确保描述符类型匹配
                    if current_descriptors.dtype != self.prev_descriptors.dtype:
                        return 0.0, 0
                    
                    # 检查描述符维度
                    if len(current_descriptors.shape) != 2 or len(self.prev_descriptors.shape) != 2:
                        return 0.0, 0
                    
                    # 确保描述符数量合理
                    if len(current_descriptors) < 1 or len(self.prev_descriptors) < 1:
                        return 0.0, 0
                    
                    # 进行匹配
                    try:
                        # 检查描述符数量，确保k值合理
                        min_descriptors = min(len(current_descriptors), len(self.prev_descriptors))
                        if min_descriptors < 2:
                            # 如果描述符太少，使用k=1
                            matches = matcher.knnMatch(current_descriptors, self.prev_descriptors, k=1)
                            # 将单匹配转换为双匹配格式
                            matches = [[match] for match in matches if match]
                        else:
                            # 正常情况使用k=2
                            matches = matcher.knnMatch(current_descriptors, self.prev_descriptors, k=2)
                    except cv2.error as e:
                        print(f"特征匹配错误: {e}")
                        return 0.0, 0
                    
                    # 应用更严格的比率测试
                    good_matches = []
                    try:
                        for match_pair in matches:
                            if len(match_pair) == 2:
                                m, n = match_pair
                                # 更严格的比率测试，提高匹配质量
                                if m.distance < 0.6 * n.distance:
                                    good_matches.append(m)
                            elif len(match_pair) == 1:
                                # 处理k=1的情况，直接接受匹配
                                m = match_pair[0]
                                good_matches.append(m)
                        
                        # 如果匹配点太少，放宽条件
                        if len(good_matches) < 5:
                            good_matches = []
                            for match_pair in matches:
                                if len(match_pair) == 2:
                                    m, n = match_pair
                                    if m.distance < 0.75 * n.distance:
                                        good_matches.append(m)
                                elif len(match_pair) == 1:
                                    # 处理k=1的情况
                                    m = match_pair[0]
                                    good_matches.append(m)
                    except Exception as e:
                        print(f"比率测试错误: {e}")
                        good_matches = []
                    
                    # 保存匹配信息用于光流显示
                    self.prev_matches = good_matches
                    
                    # 计算匹配点之间的距离和方向
                    velocities = []
                    valid_matches = 0
                    
                    for match in good_matches:
                        if match.queryIdx < len(current_keypoints) and match.trainIdx < len(prev_keypoints):
                            current_pt = current_keypoints[match.queryIdx].pt
                            prev_pt = prev_keypoints[match.trainIdx].pt
                            
                            # 计算欧几里得距离（像素）
                            pixel_distance = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                            
                            # 过滤异常大的位移（可能是错误匹配）
                            max_pixel_displacement = 50  # 最大像素位移
                            if pixel_distance > max_pixel_displacement:
                                continue
                            
                            # 转换为米，优先使用传入的标定系数
                            if calibration_factor is not None:
                                distance = pixel_distance * calibration_factor
                            else:
                                distance = pixel_distance * self.pixel_size
                            
                            # 计算瞬时速度（米/秒）
                            velocity = distance * self.frame_rate
                            velocities.append(velocity)
                            valid_matches += 1
                    
                    if valid_matches >= 3:  # 至少需要3个有效匹配
                        # 使用中位数而不是平均值，减少异常值影响
                        velocities = np.array(velocities)
                        
                        # 移除异常值（使用IQR方法）
                        q1 = np.percentile(velocities, 25)
                        q3 = np.percentile(velocities, 75)
                        iqr = q3 - q1
                        lower_bound = q1 - 1.5 * iqr
                        upper_bound = q3 + 1.5 * iqr
                        
                        filtered_velocities = velocities[(velocities >= lower_bound) & (velocities <= upper_bound)]
                        
                        if len(filtered_velocities) > 0:
                            # 计算平均速度
                            avg_velocity = np.mean(filtered_velocities)
                            
                            # 对速度进行平滑滤波
                            smoothed_avg_velocity = self.smooth_velocity(avg_velocity)
                            
                            # 更新速度历史
                            self.velocity_history.append(smoothed_avg_velocity)
                            if len(self.velocity_history) > self.max_history_size:
                                self.velocity_history.pop(0)
                            
                            return smoothed_avg_velocity, valid_matches
                    
                    # 如果没有足够的有效匹配，返回0
                    return 0.0, valid_matches
            
            return 0.0, 0
            
        except Exception as e:
            print(f"速度计算错误: {e}")
            return 0.0, 0
    
    def calculate_feature_velocity_with_descriptors(self, current_keypoints, current_descriptors, prev_keypoints, prev_descriptors, calibration_factor=None):
        """
        Compute average feature-point velocity from two frames' descriptors.

        Matches current against previous descriptors with a BFMatcher (norm
        chosen per detector type), filters matches by Lowe's ratio test plus a
        maximum pixel-displacement constraint (relaxed once if too few matches
        survive), converts pixel displacements to metres and multiplies by the
        configured frame rate to get per-match velocities, then IQR-filters
        those, averages, and smooths the result.  Side effects: updates
        self.prev_matches, self.velocity_history, self.max_velocity /
        self.max_velocity_timestamp, and prints diagnostics.

        Args:
            current_keypoints: keypoints of the current frame
            current_descriptors: descriptors of the current frame
            prev_keypoints: keypoints of the previous frame
            prev_descriptors: descriptors of the previous frame
            calibration_factor: metres-per-pixel scale; self.pixel_size is
                used when None

        Returns:
            tuple: (avg_velocity, match_count) — average velocity in m/s and
            the number of valid matches used (0.0, 0 on failure).
        """
        if (current_keypoints is None or prev_keypoints is None or 
            current_descriptors is None or prev_descriptors is None):
            return 0.0, 0
        
        if (len(current_keypoints) == 0 or len(prev_keypoints) == 0 or
            len(current_descriptors) == 0 or len(prev_descriptors) == 0):
            return 0.0, 0
        
        try:
            # Select a matcher suited to the active detector's descriptor type
            if (self.current_detector == self.orb or 
                self.current_detector == self.brisk or 
                self.current_detector == self.akaze):
                # Hamming-distance matcher (binary descriptors)
                matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
            elif (self.current_detector == 'HARRIS' or 
                  self.current_detector == 'GFTT' or 
                  self.current_detector == self.fast or 
                  self.current_detector == self.blob):
                # These detectors produce no descriptors, so matching is impossible
                return 0.0, 0
            else:
                # Default: brute-force L2 matcher (float descriptors), most stable
                matcher = cv2.BFMatcher(cv2.NORM_L2)
            
            # Validate the descriptor arrays before matching
            if len(current_descriptors) == 0 or len(prev_descriptors) == 0:
                return 0.0, 0
            
            # Descriptor dtypes must agree for the matcher
            if current_descriptors.dtype != prev_descriptors.dtype:
                return 0.0, 0
            
            # Both descriptor arrays must be 2-D (n_features x descriptor_len)
            if len(current_descriptors.shape) != 2 or len(prev_descriptors.shape) != 2:
                return 0.0, 0
            
            # Need at least one descriptor on each side
            if len(current_descriptors) < 1 or len(prev_descriptors) < 1:
                return 0.0, 0
            
            # Run knn matching
            try:
                # Pick k based on how many descriptors are available
                min_descriptors = min(len(current_descriptors), len(prev_descriptors))
                if min_descriptors < 2:
                    # Too few descriptors for a ratio test: fall back to k=1
                    matches = matcher.knnMatch(current_descriptors, prev_descriptors, k=1)
                    # Normalise single matches into the list-of-lists format
                    matches = [[match] for match in matches if match]
                else:
                    # Normal case: k=2 enables Lowe's ratio test
                    matches = matcher.knnMatch(current_descriptors, prev_descriptors, k=2)
            except cv2.error as e:
                print(f"特征匹配错误: {e}")
                return 0.0, 0
            
            # Strict filtering: descriptor-distance ratio plus spatial-distance constraint
            good_matches = []
            try:
                for match_pair in matches:
                    if len(match_pair) == 2:
                        m, n = match_pair
                        # Strict Lowe ratio test
                        if m.distance < 0.6 * n.distance:
                            # Add a spatial displacement constraint
                            if m.queryIdx < len(current_keypoints) and m.trainIdx < len(prev_keypoints):
                                current_pt = current_keypoints[m.queryIdx].pt
                                prev_pt = prev_keypoints[m.trainIdx].pt
                                
                                # Euclidean displacement in pixels
                                spatial_distance = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                                
                                # Spatial constraint: maximum allowed displacement
                                max_spatial_displacement = 50  # at most 50 px of motion
                                if spatial_distance <= max_spatial_displacement:
                                    good_matches.append(m)
                                else:
                                    print(f"空间距离过大: {spatial_distance:.1f}像素，跳过匹配")
                    elif len(match_pair) == 1:
                        # k=1 case: apply a stricter absolute-distance test instead
                        m = match_pair[0]
                        if m.distance < 40:  # stricter descriptor-distance threshold
                            # Add a spatial displacement constraint
                            if m.queryIdx < len(current_keypoints) and m.trainIdx < len(prev_keypoints):
                                current_pt = current_keypoints[m.queryIdx].pt
                                prev_pt = prev_keypoints[m.trainIdx].pt
                                
                                spatial_distance = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                                
                                # Single matches get a tighter spatial bound
                                max_spatial_displacement = 30  # at most 30 px of motion
                                if spatial_distance <= max_spatial_displacement:
                                    good_matches.append(m)
                                else:
                                    print(f"单匹配空间距离过大: {spatial_distance:.1f}像素，跳过匹配")
                
                # Too few matches: relax thresholds moderately but keep spatial bounds
                if len(good_matches) < 3:
                    print("匹配点太少，适度放宽条件...")
                    good_matches = []
                    for match_pair in matches:
                        if len(match_pair) == 2:
                            m, n = match_pair
                            if m.distance < 0.7 * n.distance:
                                # Relaxed spatial-distance constraint
                                if m.queryIdx < len(current_keypoints) and m.trainIdx < len(prev_keypoints):
                                    current_pt = current_keypoints[m.queryIdx].pt
                                    prev_pt = prev_keypoints[m.trainIdx].pt
                                    spatial_distance = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                                    
                                    # Relaxed spatial bound
                                    max_spatial_displacement = 80  # relaxed to 80 px
                                    if spatial_distance <= max_spatial_displacement:
                                        good_matches.append(m)
                        elif len(match_pair) == 1:
                            m = match_pair[0]
                            if m.distance < 60:  # relaxed descriptor-distance threshold
                                if m.queryIdx < len(current_keypoints) and m.trainIdx < len(prev_keypoints):
                                    current_pt = current_keypoints[m.queryIdx].pt
                                    prev_pt = prev_keypoints[m.trainIdx].pt
                                    spatial_distance = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                                    
                                    max_spatial_displacement = 50  # relaxed to 50 px
                                    if spatial_distance <= max_spatial_displacement:
                                        good_matches.append(m)
            except Exception as e:
                print(f"匹配条件测试错误: {e}")
                good_matches = []
            
            # Keep matches for optical-flow visualisation
            self.prev_matches = good_matches
            
            # Turn each surviving match into a per-match velocity
            velocities = []
            valid_matches = 0
            
            for match in good_matches:
                if match.queryIdx < len(current_keypoints) and match.trainIdx < len(prev_keypoints):
                    current_pt = current_keypoints[match.queryIdx].pt
                    prev_pt = prev_keypoints[match.trainIdx].pt
                    
                    # Euclidean displacement in pixels
                    pixel_distance = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                    
                    # Reject implausibly large displacements (likely bad matches)
                    max_pixel_displacement = 100  # larger cap than the match-stage bound
                    if pixel_distance > max_pixel_displacement:
                        continue
                    
                    # Reject sub-pixel displacements (likely noise)
                    min_pixel_displacement = 1  # minimum displacement in pixels
                    if pixel_distance < min_pixel_displacement:
                        continue
                    
                    # Convert pixels to metres; an explicit calibration factor wins
                    if calibration_factor is not None:
                        distance = pixel_distance * calibration_factor
                    else:
                        distance = pixel_distance * self.pixel_size
                    
                    # Instantaneous velocity in m/s (displacement per frame x frames/s)
                    velocity = distance * self.frame_rate
                    velocities.append(velocity)
                    valid_matches += 1
            
            if valid_matches >= 2:  # relaxed minimum-match requirement
                # Robust aggregation to suppress outlier velocities
                velocities = np.array(velocities)
                
                # IQR-based outlier removal
                q1 = np.percentile(velocities, 25)
                q3 = np.percentile(velocities, 75)
                iqr = q3 - q1
                lower_bound = q1 - 1.5 * iqr
                upper_bound = q3 + 1.5 * iqr
                
                filtered_velocities = velocities[(velocities >= lower_bound) & (velocities <= upper_bound)]
                
                if len(filtered_velocities) > 0:
                    # Mean of the inlier velocities
                    avg_velocity = np.mean(filtered_velocities)
                    
                    # Temporal smoothing over recent frames
                    smoothed_avg_velocity = self.smooth_velocity(avg_velocity)
                    
                    # Record into the bounded velocity history
                    self.velocity_history.append(smoothed_avg_velocity)
                    if len(self.velocity_history) > self.max_history_size:
                        self.velocity_history.pop(0)
                    
                    # Track the running maximum velocity
                    if smoothed_avg_velocity > self.max_velocity:
                        self.max_velocity = smoothed_avg_velocity
                        self.max_velocity_timestamp = time.time()
                    
                    print(f"速度计算成功: {smoothed_avg_velocity:.6f} m/s, 匹配数: {valid_matches}")
                    return smoothed_avg_velocity, valid_matches
            
            # Not enough valid matches: report zero velocity
            print(f"匹配数不足: {valid_matches}")
            return 0.0, valid_matches
            
        except Exception as e:
            print(f"速度计算错误: {e}")
            return 0.0, 0
    
    def init_feature_detector(self, detector_type='SIFT'):
        """
        初始化特征检测器
        
        Args:
            detector_type (str): 检测器类型 ('SIFT', 'ORB', 'SURF', 'FAST', 'BRISK', 'AKAZE', 'KAZE', 'BLOB', 'HARRIS', 'GFTT')
        """
        try:
            if detector_type == 'SIFT':
                self.sift = cv2.SIFT_create()
                self.current_detector = self.sift
            elif detector_type == 'ORB':
                self.orb = cv2.ORB_create()
                self.current_detector = self.orb
            elif detector_type == 'SURF':
                # 尝试使用新版本的SURF实现
                try:
                    # OpenCV 4.5.4+ 中的SURF实现
                    self.surf = cv2.SURF_create(400)
                    self.current_detector = self.surf
                except AttributeError:
                    try:
                        # 尝试使用xfeatures2d模块（旧版本）
                        self.surf = cv2.xfeatures2d.SURF_create(400)
                        self.current_detector = self.surf
                    except AttributeError:
                        # 如果SURF不可用，使用SIFT作为替代
                        self.sift = cv2.SIFT_create()
                        self.current_detector = self.sift
                        return True
            elif detector_type == 'FAST':
                self.fast = cv2.FastFeatureDetector_create()
                self.current_detector = self.fast
            elif detector_type == 'BRISK':
                self.brisk = cv2.BRISK_create()
                self.current_detector = self.brisk
            elif detector_type == 'AKAZE':
                self.akaze = cv2.AKAZE_create()
                self.current_detector = self.akaze
            elif detector_type == 'KAZE':
                self.kaze = cv2.KAZE_create()
                self.current_detector = self.kaze
            elif detector_type == 'BLOB':
                # 使用SimpleBlobDetector
                params = cv2.SimpleBlobDetector_Params()
                params.minThreshold = 10
                params.maxThreshold = 200
                params.filterByArea = True
                params.minArea = 100
                params.filterByCircularity = True
                params.minCircularity = 0.1
                params.filterByConvexity = True
                params.minConvexity = 0.87
                params.filterByInertia = True
                params.minInertiaRatio = 0.01
                self.blob = cv2.SimpleBlobDetector_create(params)
                self.current_detector = self.blob
            elif detector_type == 'HARRIS':
                # Harris角点检测器（自定义实现）
                self.harris = None  # 将在检测时实现
                self.current_detector = 'HARRIS'
            elif detector_type == 'GFTT':
                # Good Features To Track检测器
                self.gftt = None  # 将在检测时实现
                self.current_detector = 'GFTT'
            else:
                return False
            return True
        except Exception as e:
            return False
    
    def detect_features(self, image, detector_type='SIFT', max_features=None):
        """
        检测图像特征点
        
        Args:
            image: 输入图像
            detector_type (str): 检测器类型 ('SIFT', 'ORB', 'SURF', 'FAST', 'BRISK', 'AKAZE', 'KAZE', 'BLOB', 'HARRIS', 'GFTT')
            max_features (int): 最大特征点数量，None表示不限制
            
        Returns:
            tuple: (keypoints, descriptors, detection_time) 特征点、描述符和检测时间
        """
        if image is None:
            return None, None, 0
        
        try:
            # 初始化检测器
            if not self.init_feature_detector(detector_type):
                return None, None, 0
            
            # 转换为灰度图像
            if len(image.shape) == 3:
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            else:
                gray = image
            
            # 记录检测开始时间
            start_time = time.time()
            
            # 检测特征点
            if self.current_detector == 'HARRIS':
                # Harris角点检测
                harris_response = cv2.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)
                harris_response = cv2.dilate(harris_response, None)
                
                # 找到角点
                threshold = 0.01 * harris_response.max()
                corner_points = np.where(harris_response > threshold)
                
                # 转换为KeyPoint格式
                keypoints = []
                for y, x in zip(corner_points[0], corner_points[1]):
                    keypoints.append(cv2.KeyPoint(x, y, 1))
                
                descriptors = None
            elif self.current_detector == 'GFTT':
                # Good Features To Track检测
                corners = cv2.goodFeaturesToTrack(gray, maxCorners=1000, qualityLevel=0.01, minDistance=10)
                
                # 转换为KeyPoint格式
                keypoints = []
                if corners is not None:
                    for corner in corners:
                        x, y = corner.ravel()
                        keypoints.append(cv2.KeyPoint(x, y, 1))
                
                descriptors = None
            else:
                # 其他检测器使用标准的detectAndCompute方法
                keypoints, descriptors = self.current_detector.detectAndCompute(gray, None)
            
            # 计算检测时间
            detection_time = (time.time() - start_time) * 1000  # 转换为毫秒
            
            # 限制特征点数量（如果指定了上限）
            if max_features is not None and len(keypoints) > max_features:
                keypoints = keypoints[:max_features]
                if descriptors is not None:
                    descriptors = descriptors[:max_features]
            
            return keypoints, descriptors, detection_time
            
        except Exception as e:
            return None, None, 0
    
    def detect_features_continuous(self, frame, detector_type='SIFT', max_features=None, calibration_factor=None):
        """
        Detect features on consecutive frames and estimate feature velocity.

        Runs the selected detector on *frame*; when a previous frame's
        keypoints and descriptors are cached, estimates the average
        feature-point velocity against them via
        calculate_feature_velocity_with_descriptors().  Finally caches this
        frame's keypoints, descriptors, and a copy of the frame for the next
        call (side effects on self.prev_keypoints / prev_descriptors /
        prev_frame).

        Args:
            frame: current frame (BGR or single-channel ndarray)
            detector_type (str): detector name
            max_features (int): cap on the number of keypoints (None = unlimited)
            calibration_factor (float): metres-per-pixel scale; self.pixel_size
                is used when None

        Returns:
            tuple: (keypoints, descriptors, detection_time_ms, avg_velocity,
            match_count); (None, None, 0, 0.0, 0) on failure.
        """
        if frame is None:
            return None, None, 0, 0.0, 0
        
        try:
            # (Re)initialise the requested detector
            if not self.init_feature_detector(detector_type):
                return None, None, 0, 0.0, 0
            
            # Detectors operate on single-channel images
            if len(frame.shape) == 3:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            else:
                gray = frame
            
            # Start of detection timing
            start_time = time.time()
            
            # Detect keypoints
            if self.current_detector == 'HARRIS':
                # Harris corner response; dilation emphasises corner regions
                harris_response = cv2.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)
                harris_response = cv2.dilate(harris_response, None)
                
                # Threshold relative to the strongest response
                threshold = 0.01 * harris_response.max()
                corner_points = np.where(harris_response > threshold)
                
                # Convert (row, col) coordinates to cv2.KeyPoint objects
                keypoints = []
                for y, x in zip(corner_points[0], corner_points[1]):
                    keypoints.append(cv2.KeyPoint(x, y, 1))
                
                descriptors = None
            elif self.current_detector == 'GFTT':
                # Shi-Tomasi "good features to track"
                corners = cv2.goodFeaturesToTrack(gray, maxCorners=1000, qualityLevel=0.01, minDistance=10)
                
                # Convert the corner array to cv2.KeyPoint objects
                keypoints = []
                if corners is not None:
                    for corner in corners:
                        x, y = corner.ravel()
                        keypoints.append(cv2.KeyPoint(x, y, 1))
                
                descriptors = None
            else:
                # Standard detectors supply detectAndCompute
                keypoints, descriptors = self.current_detector.detectAndCompute(gray, None)
            
            # Detection time in milliseconds
            detection_time = (time.time() - start_time) * 1000  # convert to ms
            
            # Truncate to at most max_features keypoints.
            # NOTE(review): keypoints are not sorted by response first, so this
            # keeps an arbitrary subset rather than the strongest features.
            if max_features is not None and len(keypoints) > max_features:
                keypoints = keypoints[:max_features]
                if descriptors is not None:
                    descriptors = descriptors[:max_features]
            
            # Estimate velocity against the cached previous frame, if any
            avg_velocity = 0.0
            match_count = 0
            if self.prev_keypoints is not None and self.prev_frame is not None and self.prev_descriptors is not None:
                avg_velocity, match_count = self.calculate_feature_velocity_with_descriptors(
                    keypoints, descriptors, self.prev_keypoints, self.prev_descriptors, calibration_factor
                )
            
            # Cache this frame's results for the next call
            self.prev_keypoints = keypoints
            self.prev_descriptors = descriptors
            self.prev_frame = frame.copy()
            
            return keypoints, descriptors, detection_time, avg_velocity, match_count
            
        except Exception as e:
            return None, None, 0, 0.0, 0
    
    def draw_features(self, image, keypoints, detection_time=0, color=(0, 255, 0), radius=3):
        """
        Render detected keypoints onto a copy of the input image.

        Overlays rich-keypoint markers (size and orientation shown) plus two
        HUD text lines: the feature count and the detection time.

        Args:
            image: Input image (BGR numpy array).
            keypoints: Sequence of cv2.KeyPoint objects.
            detection_time: Detection time in milliseconds, shown as overlay text.
            color: Marker colour as a (B, G, R) tuple.
            radius: Unused by the current implementation; kept for API
                compatibility (drawKeypoints derives marker size itself).

        Returns:
            numpy.ndarray: Annotated copy of the image, or the original input
            unchanged when it cannot be processed.
        """
        if image is None or keypoints is None:
            return image

        try:
            annotated = image.copy()

            cv2.drawKeypoints(
                image,
                keypoints,
                annotated,
                color=color,
                flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
            )

            # Stacked HUD lines, 30 px apart starting at y=30.
            hud_lines = (
                (f"Features: {len(keypoints)}", (10, 30)),
                (f"Time: {detection_time:.2f}ms", (10, 60)),
            )
            for text, origin in hud_lines:
                cv2.putText(annotated, text, origin,
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

            return annotated

        except Exception:
            # Best-effort rendering: fall back to the unmodified frame.
            return image
    
    def draw_features_with_velocity(self, image, keypoints, detection_time=0, avg_velocity=0.0, match_count=0, prev_keypoints=None, matches=None, color=(0, 255, 0), radius=3, show_optical_flow=False):
        """
        Render keypoints plus velocity statistics onto a copy of the image.

        Overlays rich-keypoint markers and a HUD with feature count, detection
        time, instantaneous average velocity and match count. When a peak
        velocity has been recorded, it is shown in yellow together with how
        long ago it occurred.

        Args:
            image: Input image (BGR numpy array).
            keypoints: Sequence of cv2.KeyPoint objects.
            detection_time: Detection time in milliseconds.
            avg_velocity: Average feature velocity in metres per second.
            match_count: Number of successfully matched feature points.
            prev_keypoints: Previous-frame keypoints (currently not used here).
            matches: Feature match list (currently not used here).
            color: Marker colour as a (B, G, R) tuple.
            radius: Unused; kept for API compatibility.
            show_optical_flow: Unused in the current implementation; kept for
                API compatibility.

        Returns:
            numpy.ndarray: Annotated copy of the image, or the original input
            unchanged when it cannot be processed.
        """
        if image is None or keypoints is None:
            return image

        try:
            annotated = image.copy()

            cv2.drawKeypoints(image, keypoints, annotated,
                              color=color,
                              flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

            # Stacked HUD lines: count, timing, velocity, matches (30 px apart).
            hud = (
                f"Features: {len(keypoints)}",
                f"Time: {detection_time:.2f}ms",
                f"Velocity: {avg_velocity:.6f} m/s",
                f"Matches: {match_count}",
            )
            for row, text in enumerate(hud):
                cv2.putText(annotated, text, (10, 30 + 30 * row),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

            # Peak-velocity readout in yellow, with an age stamp when known.
            max_vel, max_vel_time = self.get_max_velocity()
            if max_vel > 0:
                cv2.putText(annotated, f"Max Velocity: {max_vel:.6f} m/s", (10, 150),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)

                if max_vel_time > 0:
                    age = time.time() - max_vel_time
                    if age < 60:
                        age_text = f"Max at: {age:.1f}s ago"
                    else:
                        age_text = f"Max at: {age/60:.1f}min ago"
                    cv2.putText(annotated, age_text, (10, 180),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 1)

            return annotated

        except Exception:
            # Best-effort rendering: fall back to the unmodified frame.
            return image
    
    def draw_optical_flow(self, image, prev_keypoints, current_keypoints, matches, color=(0, 255, 255), thickness=2):
        """
        Draw optical-flow style trails linking matched keypoints of two frames.

        Draws directly on `image` (no copy is made). For each retained match a
        line is drawn from the previous-frame point to the current-frame point,
        colour/thickness-coded by displacement magnitude, with a small white
        arrow at the midpoint indicating direction, a red dot at the previous
        position and a green dot at the current position.

        NOTE(review): the `color` and `thickness` parameters are never used by
        the body — line style is derived from each match's displacement.

        Args:
            image: Image to draw on (modified in place and also returned).
            prev_keypoints: Keypoint list of the previous frame.
            current_keypoints: Keypoint list of the current frame.
            matches: DMatch list; the indexing below assumes queryIdx refers to
                current_keypoints and trainIdx to prev_keypoints (i.e. the
                current frame was the query side of the matcher) — confirm
                against the caller.
            color: (B, G, R) line colour — unused, see note above.
            thickness: Line thickness — unused, see note above.

        Returns:
            numpy.ndarray: The input image with the flow overlay drawn.
        """
        try:
            if len(matches) == 0:
                return image
            
            # Cap the number of drawn trails; keep only the best matches
            # (lowest descriptor distance) to avoid visual clutter.
            max_display_matches = 15
            if len(matches) > max_display_matches:
                matches = sorted(matches, key=lambda x: x.distance)[:max_display_matches]
            
            # Keep only matches whose indices are valid for both keypoint lists.
            valid_matches = []
            for match in matches:
                if (match.queryIdx < len(current_keypoints) and 
                    match.trainIdx < len(prev_keypoints)):
                    valid_matches.append(match)
            
            if len(valid_matches) == 0:
                return image
            
            # Per-match Euclidean displacement (pixels), used for outlier rejection.
            displacements = []
            for match in valid_matches:
                current_pt = current_keypoints[match.queryIdx].pt
                prev_pt = prev_keypoints[match.trainIdx].pt
                displacement = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                displacements.append(displacement)
            
            if len(displacements) == 0:
                return image
            
            # Statistical outlier rejection: keep matches within one standard
            # deviation of the mean displacement, capped at 60 px absolute.
            displacements = np.array(displacements)
            mean_displacement = np.mean(displacements)
            std_displacement = np.std(displacements)
            
            filtered_matches = []
            for i, match in enumerate(valid_matches):
                displacement = displacements[i]
                # Strict filter: 1-sigma statistical bound AND absolute cap.
                if (abs(displacement - mean_displacement) <= 1.0 * std_displacement and  # statistical filter
                    displacement <= 60):  # absolute maximum displacement
                    filtered_matches.append(match)
            
            # Fallback when the strict filter leaves fewer than 2 matches:
            # accept up to 6 small-displacement (<= 40 px) matches instead;
            # if even that fails, draw nothing.
            if len(filtered_matches) < 2:
                small_displacement_matches = []
                for i, match in enumerate(valid_matches):
                    displacement = displacements[i]
                    if displacement <= 40:  # keep only small displacements
                        small_displacement_matches.append(match)
                
                if len(small_displacement_matches) >= 2:
                    filtered_matches = small_displacement_matches[:min(6, len(small_displacement_matches))]
                else:
                    filtered_matches = []
            
            # Draw one trail per retained match.
            for match in filtered_matches:
                # Matched point coordinates in both frames.
                current_pt = current_keypoints[match.queryIdx].pt
                prev_pt = prev_keypoints[match.trainIdx].pt
                
                # Integer pixel coordinates for the OpenCV drawing calls.
                current_pt = (int(current_pt[0]), int(current_pt[1]))
                prev_pt = (int(prev_pt[0]), int(prev_pt[1]))
                
                # Displacement recomputed from the rounded coordinates.
                displacement = np.sqrt((current_pt[0] - prev_pt[0])**2 + (current_pt[1] - prev_pt[1])**2)
                
                # Colour-code the trail by displacement magnitude.
                if displacement < 5:
                    # Tiny motion: thin blue line.
                    line_color = (255, 0, 0)  # blue
                    line_thickness = 1
                elif displacement < 15:
                    # Small motion: green line.
                    line_color = (0, 255, 0)  # green
                    line_thickness = 2
                elif displacement < 30:
                    # Medium motion: yellow line.
                    line_color = (0, 255, 255)  # yellow
                    line_thickness = 2
                else:
                    # Large motion (rare after filtering): thick red line.
                    line_color = (0, 0, 255)  # red
                    line_thickness = 3
                
                # Trail from the previous to the current position.
                cv2.line(image, prev_pt, current_pt, line_color, line_thickness)
                
                # Midpoint of the trail, used as the direction-arrow anchor.
                mid_pt = ((prev_pt[0] + current_pt[0]) // 2, (prev_pt[1] + current_pt[1]) // 2)
                
                # Motion direction vector (current - previous).
                dx = current_pt[0] - prev_pt[0]
                dy = current_pt[1] - prev_pt[1]
                
                if abs(dx) > 0 or abs(dy) > 0:
                    # White direction arrow, length scaled with displacement (3..8 px).
                    arrow_length = min(8, max(3, int(displacement / 3)))
                    if arrow_length > 2:
                        end_pt = (int(mid_pt[0] + dx * arrow_length / displacement), 
                                int(mid_pt[1] + dy * arrow_length / displacement))
                        cv2.arrowedLine(image, mid_pt, end_pt, (255, 255, 255), 1, tipLength=0.4)
                
                # Previous-frame position: small red dot.
                cv2.circle(image, prev_pt, 2, (0, 0, 255), -1)
                
                # Current-frame position: larger green dot.
                cv2.circle(image, current_pt, 3, (0, 255, 0), -1)
            
            # Overlay the number of drawn trails.
            info_text = f"Optical Flow: {len(filtered_matches)} paths"
            cv2.putText(image, info_text, (10, 150), 
                       cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
            
            return image
            
        except Exception as e:
            print(f"绘制光流效果时出错: {e}")
            return image
    
    def match_features(self, descriptors1, descriptors2, detector_type='SIFT', ratio=0.75):
        """
        Match feature descriptors between two images using Lowe's ratio test.

        Selects the matcher by descriptor type: FLANN (KD-tree) for float
        descriptors (SIFT/SURF/KAZE), brute-force Hamming for binary
        descriptors (ORB/BRISK/AKAZE). Detectors that produce no descriptors
        (FAST/HARRIS/GFTT/BLOB) cannot be matched and yield an empty result.

        Args:
            descriptors1: Descriptors of the first image.
            descriptors2: Descriptors of the second image.
            detector_type (str): Detector type used to produce the descriptors.
            ratio (float): Lowe ratio-test threshold (best/second-best distance).

        Returns:
            list: Good matches (cv2.DMatch); empty on failure or when matching
            is not applicable.
        """
        if descriptors1 is None or descriptors2 is None:
            print("特征描述符为空")
            return []
        
        try:
            if detector_type in ('SIFT', 'SURF', 'KAZE'):
                # FLANN KD-tree matcher for floating-point descriptors.
                FLANN_INDEX_KDTREE = 1
                index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
                search_params = dict(checks=50)
                matcher = cv2.FlannBasedMatcher(index_params, search_params)
            elif detector_type in ('ORB', 'BRISK', 'AKAZE'):
                # Brute-force Hamming matcher for binary descriptors.
                # BUGFIX: crossCheck must be False when combining knnMatch with
                # the ratio test. With crossCheck=True, knnMatch returns at most
                # one neighbour per query, so the len == 2 guard below discarded
                # nearly every match. crossCheck and the ratio test are
                # alternative filtering strategies, not to be combined.
                matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
            else:
                # FAST / HARRIS / GFTT / BLOB produce no descriptors.
                print(f"{detector_type}检测器不产生描述符，无法进行特征匹配")
                return []
            
            # Two nearest neighbours per query descriptor.
            matches = matcher.knnMatch(descriptors1, descriptors2, k=2)
            
            # Lowe's ratio test: keep a match only when the best neighbour is
            # clearly better than the second best.
            good_matches = []
            for match_pair in matches:
                if len(match_pair) == 2:
                    m, n = match_pair
                    if m.distance < ratio * n.distance:
                        good_matches.append(m)
            
            print(f"找到 {len(good_matches)} 个良好匹配")
            return good_matches
            
        except Exception as e:
            # Log instead of silently swallowing, consistent with the prints above.
            print(f"特征匹配出错: {e}")
            return []
    
    def draw_matches(self, image1, image2, keypoints1, keypoints2, matches):
        """
        Produce a side-by-side visualisation of matched keypoints.

        Args:
            image1: First image.
            image2: Second image.
            keypoints1: Keypoints of the first image.
            keypoints2: Keypoints of the second image.
            matches: Matched keypoint pairs (cv2.DMatch list).

        Returns:
            numpy.ndarray: Composite match image with a match-count overlay,
            or None when any input is missing or drawing fails.
        """
        # All five inputs are required; bail out if any is absent.
        if any(arg is None for arg in (image1, image2, keypoints1, keypoints2, matches)):
            return None

        try:
            composite = cv2.drawMatches(
                image1, keypoints1, image2, keypoints2, matches, None,
                flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

            cv2.putText(composite, f"Matches: {len(matches)}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)

            return composite

        except Exception:
            return None
    
    def detect_and_draw_features(self, image, detector_type='SIFT', max_features=None):
        """
        Convenience pipeline: detect features and render them onto the image.

        Args:
            image: Input image.
            detector_type (str): Feature-detector type name.
            max_features (int): Maximum number of features, None for unlimited.

        Returns:
            numpy.ndarray: Image annotated with detected features, or the
            original image when detection fails.
        """
        keypoints, descriptors, detection_time = self.detect_features(
            image, detector_type, max_features)

        # Detection failure: hand back the untouched input frame.
        if keypoints is None:
            return image

        return self.draw_features(image, keypoints, detection_time)
    
    def detect_and_display_features(self, frame, algorithm, camera_manager, result_graphics_view, status_callback=None):
        """
        检测并显示特征点的完整流程
        
        Args:
            frame: 输入图像帧
            algorithm: 特征检测算法名称
            camera_manager: 相机管理器对象
            result_graphics_view: 结果显示的图形视图
            status_callback: 状态更新回调函数
            
        Returns:
            bool: 是否成功
        """
        try:
            # 更新状态栏
            if status_callback:
                status_callback(f"正在使用{algorithm}检测特征点...", 0)
            
            # 检测特征点
            result_image = self.detect_and_draw_features(frame, algorithm, max_features=None)
            
            if result_image is not None:
                # 将结果图像转换为QPixmap并显示
                pixmap = camera_manager.camera.convert_frame_to_qpixmap(result_image)
                if pixmap:
                    # 创建结果显示场景
                    from PyQt5.QtWidgets import QGraphicsScene
                    from PyQt5.QtCore import QRectF, Qt
                    
                    result_scene = QGraphicsScene()
                    result_graphics_view.setScene(result_scene)
                    
                    # 添加结果图像到场景
                    result_scene.clear()
                    result_scene.addPixmap(pixmap)
                    
                    # 调整视图以适应图像
                    result_graphics_view.setSceneRect(QRectF(pixmap.rect()))
                    result_graphics_view.fitInView(result_scene.sceneRect(), Qt.KeepAspectRatio)
                    
                    # 更新状态栏显示检测结果
                    if status_callback:
                        status_callback(f"{algorithm}特征点检测完成", 3000)
                    return True
                else:
                    if status_callback:
                        status_callback("图像转换失败", 3000)
                    return False
            else:
                if status_callback:
                    status_callback(f"{algorithm}特征点检测失败", 3000)
                return False
                
        except Exception as e:
            if status_callback:
                status_callback(f"特征点检测出错: {str(e)}", 5000)
            return False
    
    def detect_and_display_features_continuous(self, frame, algorithm, camera_manager, result_graphics_view, status_callback=None, calibration_factor=None):
        """
        连续检测并显示特征点的完整流程
        
        Args:
            frame: 输入图像帧
            algorithm: 特征检测算法名称
            camera_manager: 相机管理器对象
            result_graphics_view: 结果显示的图形视图
            status_callback: 状态更新回调函数
            calibration_factor: 标定系数（米/像素），如果为None则使用内部pixel_size
            
        Returns:
            bool: 是否成功
        """
        try:
            # 检测特征点和速度
            keypoints, descriptors, detection_time, avg_velocity, match_count = self.detect_features_continuous(
                frame, algorithm, max_features=None, calibration_factor=calibration_factor
            )
            
            if keypoints is not None:
                # 绘制特征点和速度信息
                result_image = self.draw_features_with_velocity(
                    frame, keypoints, detection_time, avg_velocity, match_count,
                    self.prev_keypoints, self.prev_matches
                )
                
                # 将结果图像转换为QPixmap并显示
                pixmap = camera_manager.camera.convert_frame_to_qpixmap(result_image)
                if pixmap:
                    # 创建结果显示场景
                    from PyQt5.QtWidgets import QGraphicsScene
                    from PyQt5.QtCore import QRectF, Qt
                    
                    result_scene = QGraphicsScene()
                    result_graphics_view.setScene(result_scene)
                    
                    # 添加结果图像到场景
                    result_scene.clear()
                    result_scene.addPixmap(pixmap)
                    
                    # 调整视图以适应图像
                    result_graphics_view.setSceneRect(QRectF(pixmap.rect()))
                    result_graphics_view.fitInView(result_scene.sceneRect(), Qt.KeepAspectRatio)
                    
                    # 更新状态栏显示检测结果
                    if status_callback:
                        status_callback(f"连续{algorithm}检测: {len(keypoints)}个特征点, 速度: {avg_velocity:.6f}m/s, 匹配: {match_count}个", 1000)
                    
                    return True
                else:
                    if status_callback:
                        status_callback("图像转换失败", 3000)
                    return False
            else:
                if status_callback:
                    status_callback(f"连续{algorithm}特征点检测失败", 3000)
                return False
                
        except Exception as e:
            if status_callback:
                status_callback(f"连续特征点检测出错: {str(e)}", 5000)
            return False
    
    def reset_continuous_detection(self):
        """
        重置连续检测状态
        """
        self.prev_keypoints = None
        self.prev_descriptors = None
        self.prev_frame = None
        self.feature_velocity = 0.0
        self.velocity_history.clear()
        self.velocity_filter_buffer.clear()
        self.smoothed_velocity = 0.0
        self.prev_matches = None
        # 注意：不重置帧率和像素大小，保持用户设置
        # 注意：不重置最大速度，保持历史记录
        print("连续检测状态已重置")
