"""
Enhanced Simple Optical Flow Sensor
实时光流矩阵获取，增加remap和散度计算功能
"""

import cv2
import numpy as np
import time
import platform
import json
import os
from threading import Thread


def load_remap_params(json_path="remap_params.json"):
    """
    Load remapping parameters from a JSON file.

    Args:
        json_path: Path to the JSON file containing remap parameters

    Returns:
        Dictionary of remapping parameters with any missing keys filled in
        from the defaults; the full default set is returned when the file
        is absent, unreadable, or not valid JSON.
    """
    # Default parameters if file not found
    default_params = {
        'SRC_TOP_LEFT_X': 0.2,
        'SRC_TOP_LEFT_Y': 0.2,
        'SRC_TOP_RIGHT_X': 0.8,
        'SRC_TOP_RIGHT_Y': 0.2,
        'SRC_BOTTOM_RIGHT_X': 0.8,
        'SRC_BOTTOM_RIGHT_Y': 0.8,
        'SRC_BOTTOM_LEFT_X': 0.2,
        'SRC_BOTTOM_LEFT_Y': 0.8,
        'DST_RIGHT_MARGIN': 0,
        'VERTICAL_STRETCH': 1.0,
        'HORIZONTAL_COMPRESSION': 1.0
    }

    # Check if file exists (no exception handling needed for this path)
    if not os.path.exists(json_path):
        print(f"Remap parameters file {json_path} not found. Using default parameters.")
        return default_params

    try:
        # Load parameters from JSON file
        with open(json_path, 'r') as f:
            params = json.load(f)
    except (OSError, ValueError) as e:
        # Narrowed from a bare `except Exception`: only I/O failures and
        # malformed JSON (json.JSONDecodeError is a ValueError subclass)
        # fall back to defaults; programming errors now propagate.
        print(f"Error loading remap parameters: {e}")
        return default_params

    print(f"Remap parameters loaded from {json_path}")

    # Make sure all required parameters are present
    for key in default_params:
        if key not in params:
            print(f"Missing parameter '{key}' in JSON file. Using default value.")
            params[key] = default_params[key]

    return params


def calculate_remap_matrix(height, width, params):
    """Build the perspective transform mapping the configured source
    quadrilateral onto the (optionally right-trimmed) full frame.

    Returns:
        (M, src_points, dst_points) where M is the 3x3 matrix produced by
        cv2.getPerspectiveTransform and both point arrays are float32
        quads ordered top-left, top-right, bottom-right, bottom-left.
    """
    # Source corners are expressed as fractions of the frame size.
    corner_keys = (
        ('SRC_TOP_LEFT_X', 'SRC_TOP_LEFT_Y'),
        ('SRC_TOP_RIGHT_X', 'SRC_TOP_RIGHT_Y'),
        ('SRC_BOTTOM_RIGHT_X', 'SRC_BOTTOM_RIGHT_Y'),
        ('SRC_BOTTOM_LEFT_X', 'SRC_BOTTOM_LEFT_Y'),
    )
    src_points = np.array(
        [[int(width * params[kx]), int(height * params[ky])] for kx, ky in corner_keys],
        dtype=np.float32,
    )

    # Destination is the full frame, shrunk on the right by the margin.
    right_edge = width - params['DST_RIGHT_MARGIN']
    dst_points = np.array(
        [[0, 0], [right_edge, 0], [right_edge, height], [0, height]],
        dtype=np.float32,
    )

    M = cv2.getPerspectiveTransform(src_points, dst_points)
    return M, src_points, dst_points


def calculate_divergence(flow):
    """
    Compute a per-pixel "divergence" magnitude of an optical-flow field.

    NOTE: despite the name, this computes sqrt((du/dx)^2 + (dv/dy)^2) —
    the Euclidean norm of the two diagonal gradient terms — not the true
    divergence du/dx + dv/dy, so the result is always non-negative.
    The formula is kept as-is because downstream scaling/visualization
    depends on it.

    Derivatives use central differences in the interior and one-sided
    differences at the borders, which is exactly numpy.gradient's
    default scheme (this replaces the previous hand-rolled stencils).

    Args:
        flow: optical-flow field of shape (height, width, 2); both
              spatial dimensions must be >= 2.

    Returns:
        Array of shape (height, width), or None for empty/missing input.
    """
    if flow is None or flow.size == 0:
        return None

    # u: horizontal (x) flow component, v: vertical (y) flow component
    u = flow[..., 0]
    v = flow[..., 1]

    # np.gradient reproduces the original finite-difference scheme:
    # (f[i+1] - f[i-1]) / 2 in the interior, one-sided at the edges.
    u_x = np.gradient(u, axis=1)  # du/dx
    v_y = np.gradient(v, axis=0)  # dv/dy

    return np.sqrt(u_x**2 + v_y**2)


def create_divergence_visualization(divergence, mask=None, scale=5.0, 
                                   smooth_kernel=3, adaptive_threshold=True):
    """
    Build a color heat-map image of a divergence field.

    Positive (diverging) regions are drawn in red with intensity
    proportional to magnitude, negative (converging) regions in blue,
    and near-zero regions in dim green.

    NOTE(review): when fed by calculate_divergence() the input is always
    non-negative (it is a gradient magnitude), so the blue/negative
    branch below never fires in that pipeline — confirm before relying
    on the blue channel.

    Args:
        divergence: divergence field, shape (height, width)
        mask: optional mask; values outside (mask > 0) are zeroed
        scale: multiplier applied before thresholding/normalization
        smooth_kernel: Gaussian kernel size (assumed odd, as cv2
            requires — TODO confirm callers never pass an even value)
        adaptive_threshold: derive thresholds from the field's standard
            deviation instead of fixed +/-0.5

    Returns:
        BGR uint8 image of shape (height, width, 3), or None if the
        input is None.
    """
    if divergence is None:
        return None
    
    # Zero out everything outside the mask (if provided)
    if mask is not None:
        divergence = divergence * (mask > 0)
    
    # Light smoothing to suppress pixel-level noise
    if smooth_kernel > 1:
        divergence = cv2.GaussianBlur(divergence, (smooth_kernel, smooth_kernel), 0.5)
    
    # Scale divergence values to enhance visual contrast
    scaled_divergence = divergence * scale
    
    # Adaptive or fixed thresholds for the "significant" bands
    if adaptive_threshold:
        # Derive the threshold from the field's spread (floored at 0.1)
        std_val = np.std(scaled_divergence)
        positive_thresh = max(0.1, std_val * 0.5)
        negative_thresh = -max(0.1, std_val * 0.5)
    else:
        positive_thresh = 0.5
        negative_thresh = -0.5

    
    # Normalize values into [-1, 1]; max_val is floored at 0.1 so the
    # division below never divides by zero
    max_val = max(0.1, np.max(np.abs(scaled_divergence)))
    normalized_divergence = np.clip(scaled_divergence / max_val, -1, 1)
    
    # Output heat map (BGR)
    height, width = divergence.shape
    divergence_vis = np.zeros((height, width, 3), dtype=np.uint8)
    
    # Positive divergence (expansion) -> red gradient; intensity ramps
    # from 0 at the threshold to 1 at the field maximum
    positive_mask = normalized_divergence > positive_thresh / max_val
    positive_intensity = np.clip((normalized_divergence[positive_mask] - positive_thresh / max_val) * 
                                max_val / (1 - positive_thresh / max_val), 0, 1)
    divergence_vis[positive_mask, 0] = 0  # B
    divergence_vis[positive_mask, 1] = 0  # G
    divergence_vis[positive_mask, 2] = (positive_intensity * 255).astype(np.uint8)  # R
    
    # Negative divergence (contraction) -> blue gradient (mirror of the
    # positive branch; see the dead-branch note in the docstring)
    negative_mask = normalized_divergence < negative_thresh / max_val
    negative_intensity = np.clip((-normalized_divergence[negative_mask] - (-negative_thresh / max_val)) * 
                                max_val / (1 - (-negative_thresh / max_val)), 0, 1)
    divergence_vis[negative_mask, 0] = (negative_intensity * 255).astype(np.uint8)  # B
    divergence_vis[negative_mask, 1] = 0  # G
    divergence_vis[negative_mask, 2] = 0  # R
    
    # Near-zero divergence -> dim green background
    zero_mask = (normalized_divergence >= negative_thresh / max_val) & \
                (normalized_divergence <= positive_thresh / max_val)
    divergence_vis[zero_mask, 0] = 0  # B
    divergence_vis[zero_mask, 1] = 50  # G
    divergence_vis[zero_mask, 2] = 0  # R
    
    return divergence_vis


class SimpleOpticalFlow:
    """
    Enhanced real-time optical-flow sensor with perspective remap and
    divergence computation.

    A daemon thread continuously grabs camera frames, optionally applies
    the configured perspective remap, and updates ``self.flow`` (dense
    DIS optical flow) and ``self.divergence``.

    NOTE(review): ``flow``/``divergence`` are swapped in by the worker
    thread and read by callers without a lock; in CPython the reference
    assignment is atomic, but readers may observe a one-frame-stale
    array.
    """
    
    def __init__(self, video_path=0, width=640, height=480, fps=60, 
                 finest_scale=0, patch_size=4, patch_stride=2,
                 remap_params_file="remap_params.json", enable_remap=True,
                 show_divergence=True, divergence_scale=5.0):
        """
        Initialize the sensor, open the camera and start processing.
        
        Args:
            video_path: camera index or video file path (default: 0)
            width: requested camera width (default: 640)
            height: requested camera height (default: 480)
            fps: requested frame rate (default: 60)
            finest_scale: DIS finest scale level, 0 = densest (default: 0)
            patch_size: DIS patch size; smaller = denser flow (default: 4)
            patch_stride: DIS patch stride; smaller = denser flow (default: 2)
            remap_params_file: remap parameter file path (default: "remap_params.json")
            enable_remap: apply the perspective remap step (default: True)
            show_divergence: compute divergence every frame (default: True)
            divergence_scale: scale factor for divergence visualization (default: 5.0)
        """
        self.video_path = video_path
        self.width = width
        self.height = height
        self.fps = fps
        self.enable_remap = enable_remap
        self.show_divergence = show_divergence
        self.divergence_scale = divergence_scale
        
        # DIS optical-flow tuning parameters
        self.finest_scale = finest_scale
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        
        # Flow outputs (written by the worker thread, read by callers)
        self.flow = None
        self.divergence = None
        self.running = True
        
        # Load remap parameters (also builds the perspective matrix)
        if self.enable_remap:
            self.load_remap_parameters(remap_params_file)
        
        # Open and configure the camera
        self._init_camera()
        
        # Start the processing thread
        self.start()
    
    def load_remap_parameters(self, remap_params_file):
        """Load remap parameters and initialize the transform matrix."""
        self.remap_params = load_remap_params(remap_params_file)
        
        # Extract the stretch/compression factors
        self.vertical_stretch = self.remap_params.get('VERTICAL_STRETCH', 1.0)
        self.horizontal_compression = self.remap_params.get('HORIZONTAL_COMPRESSION', 1.0)
        
        # Compute the perspective transform matrix
        self.remap_matrix, self.src_points, self.dst_points = calculate_remap_matrix(
            self.height, self.width, self.remap_params
        )
        
        print(f"Remap参数已加载 (垂直拉伸: {self.vertical_stretch:.1f}x, 水平压缩: {self.horizontal_compression:.1f}x)")
    
    def remap_image(self, image):
        """Apply the perspective transform plus vertical stretch and
        horizontal compression; returns the input unchanged when remap
        is disabled."""
        if not self.enable_remap:
            return image
            
        # Perspective warp first
        remapped = cv2.warpPerspective(image, self.remap_matrix, (self.width, self.height))
        
        # Then the vertical stretch
        if self.vertical_stretch != 1.0:
            remapped = self.stretch_image_vertically(remapped, self.vertical_stretch)
        
        # Then the horizontal compression
        if self.horizontal_compression != 1.0:
            remapped = self.compress_image_horizontally(remapped, self.horizontal_compression)
            
        return remapped
    
    def stretch_image_vertically(self, image, factor):
        """Resize vertically by `factor`, then center-crop (factor > 1)
        or black-pad (factor < 1) back to the original height."""
        h, w = image.shape[:2]
        new_h = int(h * factor)
        stretched = cv2.resize(image, (w, new_h))
        
        if new_h > h:
            # Taller than the frame: keep the central band
            start_y = (new_h - h) // 2
            stretched = stretched[start_y:start_y+h, :]
        elif new_h < h:
            # Shorter than the frame: pad top/bottom with black
            pad_top = (h - new_h) // 2
            pad_bottom = h - new_h - pad_top
            stretched = cv2.copyMakeBorder(stretched, pad_top, pad_bottom, 0, 0, 
                                          cv2.BORDER_CONSTANT, value=(0, 0, 0))
        
        return stretched
    
    def compress_image_horizontally(self, image, factor):
        """Resize horizontally by `factor` and center the result on a
        black canvas of the original width.

        NOTE(review): the canvas is allocated with 3 channels, so a BGR
        color input is assumed; a factor > 1 would overflow the canvas —
        confirm callers only pass factors <= 1.
        """
        h, w = image.shape[:2]
        new_w = int(w * factor)
        compressed = cv2.resize(image, (new_w, h))
        
        result = np.zeros((h, w, 3), dtype=np.uint8)
        pad_left = (w - new_w) // 2
        result[:, pad_left:pad_left+new_w] = compressed
        
        return result
    
    def visualize_remap_points(self, image):
        """Draw the remap source quadrilateral and the stretch /
        compression settings onto a copy of `image`."""
        if not self.enable_remap:
            return image
            
        img_with_points = image.copy()
        
        # Corner markers
        point_color = (0, 0, 255)  # red (BGR)
        point_size = 5
        line_color = (0, 255, 0)   # green (BGR)
        line_thickness = 2

        for i, point in enumerate(self.src_points):
            x, y = int(point[0]), int(point[1])
            cv2.circle(img_with_points, (x, y), point_size, point_color, -1)
            cv2.putText(img_with_points, str(i), (x+10, y+10), 
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

        # Connect the corners into a quadrilateral
        for i in range(4):
            cv2.line(img_with_points, 
                    (int(self.src_points[i][0]), int(self.src_points[i][1])),
                    (int(self.src_points[(i+1)%4][0]), int(self.src_points[(i+1)%4][1])),
                    line_color, line_thickness)
        
        # Overlay the stretch/compression parameters
        y_pos = self.height - 100
        cv2.putText(img_with_points, f"Vertical Stretch: {self.vertical_stretch:.1f}x",
                   (10, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
        y_pos += 25
        cv2.putText(img_with_points, f"Horizontal Compression: {self.horizontal_compression:.1f}x",
                   (10, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)
                    
        return img_with_points
    
    def _init_camera(self):
        """Open the capture device with a platform-appropriate backend
        and request the configured format."""
        system = platform.system()
        if system == "Windows":
            self.cap = cv2.VideoCapture(self.video_path, cv2.CAP_DSHOW)
        elif system == "Linux":
            self.cap = cv2.VideoCapture(self.video_path, cv2.CAP_V4L2)
        else:
            self.cap = cv2.VideoCapture(self.video_path)

        # Request MJPG + the configured size/rate (drivers may ignore these)
        self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        self.cap.set(cv2.CAP_PROP_FPS, self.fps)
        
        # Read back the size the camera actually granted
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    
    def start(self):
        """Grab the reference frame and launch the processing thread.

        NOTE(review): if the first frame grab fails this returns without
        creating ``self.prev_gray``/``self.thread``, so later use of the
        sensor would raise AttributeError — confirm callers handle this.
        """
        print("初始化光流传感器...")
        time.sleep(1)  # let the camera settle
        
        # Capture the initial (reference) frame
        ret, frame = self.cap.read()
        if not ret:
            print("无法捕获初始帧")
            return
        
        # Apply the remap so the reference matches later frames
        if self.enable_remap:
            frame = self.remap_image(frame)
            
        self.prev_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        print("光流传感器初始化完成!")
        
        # Launch the worker thread
        self.thread = Thread(target=self._process_frames)
        self.thread.daemon = True
        self.thread.start()
    
    def _process_frames(self):
        """Worker loop: grab, remap, compute DIS flow and divergence."""
        
        # Create the DIS optical-flow engine
        dis_flow = cv2.DISOpticalFlow_create()
        
        # Tune DIS for a denser flow field
        dis_flow.setFinestScale(self.finest_scale)
        dis_flow.setPatchSize(self.patch_size)
        dis_flow.setPatchStride(self.patch_stride)
        dis_flow.setGradientDescentIterations(25)
        dis_flow.setVariationalRefinementIterations(5)
        dis_flow.setVariationalRefinementAlpha(20.0)
        dis_flow.setVariationalRefinementDelta(5.0)
        dis_flow.setVariationalRefinementGamma(10.0)
        
        while self.running and self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                break
            
            # Apply the remap transform
            if self.enable_remap:
                frame = self.remap_image(frame)
            
            # Convert to grayscale for flow computation
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            
            # Compute dense optical flow against the reference frame
            flow = dis_flow.calc(self.prev_gray, gray, None)
            
            # Publish the latest flow field
            self.flow = flow
            
            # Derive the divergence field when requested
            if self.show_divergence:
                self.divergence = calculate_divergence(flow)
            
            # NOTE(review): updating the reference frame is deliberately
            # commented out, so the flow measures displacement relative
            # to the FIRST frame rather than frame-to-frame motion —
            # confirm this is the intended behavior.
            # # Update the previous frame
            # self.prev_gray = gray
    
    def get_flow(self):
        """
        Get the latest optical-flow matrix.
        
        Returns:
            numpy.ndarray: flow field of shape (height, width, 2), or
            None before the first frame has been processed.
        """
        return self.flow
    
    def get_divergence(self):
        """
        Get the latest divergence matrix.
        
        Returns:
            numpy.ndarray: divergence field of shape (height, width), or
            None if divergence is disabled / not yet computed.
        """
        return self.divergence
    
    def get_flow_magnitude(self):
        """Return the per-pixel flow magnitude, or None if no flow yet."""
        if self.flow is None:
            return None
        
        return np.sqrt(self.flow[:,:,0]**2 + self.flow[:,:,1]**2)
    
    def get_flow_direction(self):
        """Return the per-pixel flow direction in radians (atan2(v, u)),
        or None if no flow yet."""
        if self.flow is None:
            return None
        
        return np.arctan2(self.flow[:,:,1], self.flow[:,:,0])
    
    def get_divergence_statistics(self):
        """
        Summary statistics of the current divergence field.
        
        Returns:
            dict: mean/max/min/std of the divergence; all zeros when no
            divergence data is available.
        """
        if self.divergence is None:
            return {
                'mean_divergence': 0.0,
                'max_divergence': 0.0,
                'min_divergence': 0.0,
                'std_divergence': 0.0
            }
        
        return {
            'mean_divergence': np.mean(self.divergence),
            'max_divergence': np.max(self.divergence),
            'min_divergence': np.min(self.divergence),
            'std_divergence': np.std(self.divergence)
        }
    
    def show_divergence_visualization(self, window_name="Divergence Field"):
        """Render and display the divergence heat map with overlaid
        statistics; returns the rendered image (None if no data)."""
        if self.divergence is None:
            print("警告: 没有可用的散度数据")
            return
        
        # Build the heat map
        divergence_vis = create_divergence_visualization(
            self.divergence, 
            scale=self.divergence_scale
        )
        
        if divergence_vis is not None:
            # Title
            cv2.putText(divergence_vis, "Divergence Field", 
                       (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
            
            # Statistics overlay
            stats = self.get_divergence_statistics()
            y_pos = divergence_vis.shape[0] - 60
            cv2.putText(divergence_vis, f"Mean: {stats['mean_divergence']:.4f}", 
                       (10, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            y_pos += 20
            cv2.putText(divergence_vis, f"Max: {stats['max_divergence']:.4f}", 
                       (10, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            y_pos += 20
            cv2.putText(divergence_vis, f"Std: {stats['std_divergence']:.4f}", 
                       (10, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            
            cv2.imshow(window_name, divergence_vis)
            cv2.waitKey(1)
            
            return divergence_vis
    
    def get_flow_rgb_image(self, scale_factor=30, divergence_scale_factor=200):
        """
        Encode the flow and divergence fields as a 3-channel uint8 image.
        
        Args:
            scale_factor: scale applied to the flow values (default: 30)
            divergence_scale_factor: scale applied to the divergence
                values (default: 200)
            
        Returns:
            numpy.ndarray: image of shape (height, width, 3) where
                          channel 0 = divergence * divergence_scale_factor
                          channel 1 = flow_x * scale_factor (offset by 128)
                          channel 2 = flow_y * scale_factor (offset by 128)
            or None when no flow data is available.

        NOTE(review): channel 0 is labeled "R" in the comments below, but
        OpenCV displays arrays as BGR, so on screen it appears blue.
        """
        if self.flow is None:
            return None
        
        height, width = self.flow.shape[:2]
        
        # Flow x/y components, pre-scaled
        flow_x = self.flow[:, :, 0] * scale_factor
        flow_y = self.flow[:, :, 1] * scale_factor
        
        # Divergence component (zeros when divergence is unavailable)
        if self.divergence is not None:
            divergence_scaled = self.divergence * divergence_scale_factor
        else:
            divergence_scaled = np.zeros((height, width))
        
        # Map signed flow into 0-255 with a 128 offset:
        # negative values land in 0-127, positive in 128-255
        flow_x_mapped = np.clip(flow_x + 128, 0, 255).astype(np.uint8)
        flow_y_mapped = np.clip(flow_y + 128, 0, 255).astype(np.uint8)
        
        # Divergence is non-negative here; clip straight into 0-255
        divergence_mapped = np.clip(divergence_scaled, 0, 255).astype(np.uint8)
        
        # Assemble the 3-channel image
        rgb_image = np.zeros((height, width, 3), dtype=np.uint8)
        rgb_image[:, :, 0] = divergence_mapped    # "R" channel = divergence * divergence_scale_factor
        rgb_image[:, :, 1] = flow_x_mapped        # "G" channel = flow_x * scale_factor
        rgb_image[:, :, 2] = flow_y_mapped        # "B" channel = flow_y * scale_factor
        
        return rgb_image
    
    def show_flow_rgb_image(self, scale_factor=50, divergence_scale_factor=100, window_name="Flow RGB Image"):
        """
        Display the flow/divergence channel encoding in a window.
        
        Args:
            scale_factor: scale applied to the flow values (default: 50)
            divergence_scale_factor: scale applied to the divergence values (default: 100)
            window_name: OpenCV window title
        """
        rgb_image = self.get_flow_rgb_image(scale_factor, divergence_scale_factor)
        
        if rgb_image is None:
            print("警告: 没有可用的光流数据")
            return
        
        # Legend describing the channel encoding
        info_text = f"R=Divergence*{divergence_scale_factor}, G=Flow_X*{scale_factor}, B=Flow_Y*{scale_factor}"
        cv2.putText(rgb_image, info_text, (10, 25), 
                   cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
        
        # Divergence statistics overlay
        if self.divergence is not None:
            stats = self.get_divergence_statistics()
            info_text2 = f"Div: Mean={stats['mean_divergence']:.4f}, Max={stats['max_divergence']:.4f}"
            cv2.putText(rgb_image, info_text2, (10, 50), 
                       cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
        
        cv2.imshow(window_name, rgb_image)
        cv2.waitKey(1)
        
        return rgb_image
    
    def calculate_mean_flow(self):
        """Return the mean flow vector [mean_u, mean_v] (zeros if no flow)."""
        if self.flow is None:
            return np.array([0.0, 0.0])
        
        return np.mean(self.flow, axis=(0, 1))
    
    def calculate_flow_statistics(self):
        """Summary statistics of the current flow field (all zeros when
        no flow data is available)."""
        if self.flow is None:
            return {
                'mean_flow': np.array([0.0, 0.0]),
                'mean_magnitude': 0.0,
                'max_magnitude': 0.0,
                'flow_variance': 0.0
            }
        
        magnitude = self.get_flow_magnitude()
        mean_flow = self.calculate_mean_flow()
        
        return {
            'mean_flow': mean_flow,
            'mean_magnitude': np.mean(magnitude),
            'max_magnitude': np.max(magnitude),
            'flow_variance': np.var(magnitude)
        }
    
    def show_flow(self, flow_matrix=None, step=20, scale=10, window_name="Optical Flow"):
        """Display the flow field as white arrows on a black background,
        sampled every `step` pixels and magnified by `scale`."""
        flow = flow_matrix if flow_matrix is not None else self.flow
        
        if flow is None:
            print("警告: 没有可用的光流数据")
            return
        
        height, width = flow.shape[:2]
        flow_image = np.zeros((height, width, 3), dtype=np.uint8)
        
        # Regular sampling grid, offset half a step from the border
        y, x = np.mgrid[step//2:height:step, step//2:width:step].reshape(2, -1).astype(int)
        
        valid_mask = (y < height) & (x < width)
        y = y[valid_mask]
        x = x[valid_mask]
        
        if len(x) == 0:
            print("警告: 没有有效的采样点")
            return
        
        fx = flow[y, x, 0]
        fy = flow[y, x, 1]
        
        for i in range(len(x)):
            start_point = (x[i], y[i])
            end_point = (
                int(x[i] + fx[i] * scale),
                int(y[i] + fy[i] * scale)
            )
            
            # Clamp arrow tips to the image bounds
            end_point = (
                max(0, min(width-1, end_point[0])),
                max(0, min(height-1, end_point[1]))
            )
            
            magnitude = np.sqrt(fx[i]**2 + fy[i]**2)
            
            # Skip near-zero vectors to reduce clutter
            if magnitude > 0.1:
                cv2.arrowedLine(flow_image, start_point, end_point, 
                               (255, 255, 255), 1, tipLength=0.3)
                cv2.circle(flow_image, start_point, 1, (255, 255, 255), -1)
        
        info_text = f"Step: {step}, Scale: {scale}, Vectors: {len(x)}"
        cv2.putText(flow_image, info_text, (10, 25), 
                   cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
        
        cv2.imshow(window_name, flow_image)
        cv2.waitKey(1)
        
        return flow_image
    
    def show_combined_visualization(self, show_original=True, show_remap_points=True, 
                                   show_flow=True, show_divergence=True):
        """Show all enabled views in separate windows.

        NOTE(review): this reads a frame directly from the capture
        device, competing with the worker thread for frames — the flow
        display may therefore lag the frame shown here.
        """
        if not self.cap.isOpened():
            return
            
        ret, frame = self.cap.read()
        if not ret:
            return
            
        if show_original and show_remap_points and self.enable_remap:
            # Original frame with the remap quad overlaid
            original_with_points = self.visualize_remap_points(frame.copy())
            cv2.imshow('Original with Remap Points', original_with_points)
        
        if self.enable_remap:
            # Remapped view
            remapped_frame = self.remap_image(frame)
            cv2.imshow('Remapped Image', remapped_frame)
        
        if show_flow:
            # Flow vector field
            self.show_flow()
        
        if show_divergence and self.show_divergence:
            # Divergence heat map
            self.show_divergence_visualization()
        
        # Flow/divergence channel encoding
        self.show_flow_rgb_image()
    
    def stop(self):
        """Stop the worker thread, release the camera and close windows."""
        self.running = False
        if hasattr(self, 'thread') and self.thread.is_alive():
            self.thread.join(timeout=1.0)
        if hasattr(self, 'cap') and self.cap.isOpened():
            self.cap.release()
        cv2.destroyAllWindows()
        print("光流传感器已停止")
    
    def __del__(self):
        """Release resources on garbage collection (best effort)."""
        self.stop()


# Example usage
if __name__ == "__main__":
    # Example remap parameter set used to seed a config file
    example_remap_params = {
        "SRC_TOP_LEFT_X": 0.2,
        "SRC_TOP_LEFT_Y": 0.2,
        "SRC_TOP_RIGHT_X": 0.8,
        "SRC_TOP_RIGHT_Y": 0.2,
        "SRC_BOTTOM_RIGHT_X": 0.8,
        "SRC_BOTTOM_RIGHT_Y": 0.8,
        "SRC_BOTTOM_LEFT_X": 0.2,
        "SRC_BOTTOM_LEFT_Y": 0.8,
        "DST_RIGHT_MARGIN": 0,
        "VERTICAL_STRETCH": 1.2,
        "HORIZONTAL_COMPRESSION": 0.8
    }
    
    # Create an example remap_params.json if one does not exist yet
    if not os.path.exists("remap_params.json"):
        with open("remap_params.json", "w") as f:
            json.dump(example_remap_params, f, indent=4)
        print("已创建示例 remap_params.json 文件")
    
    # Build the enhanced optical-flow sensor
    sensor = SimpleOpticalFlow(
        video_path=0,           # default camera
        width=640,
        height=480,
        fps=60,
        finest_scale=2,         # NOTE: 0 would be densest; 2 trades density for speed
        patch_size=4,           # smaller patch -> denser flow
        patch_stride=2,         # smaller stride -> denser flow
        remap_params_file="remap_params.json",
        enable_remap=True,      # enable the remap stage
        show_divergence=True,   # enable divergence computation/display
        divergence_scale=5.0    # divergence visualization scale factor
    )
    
    print("光流传感器已启动。按 'q' 键退出。")
    print("按 'r' 键显示/隐藏组合可视化")
    print("按 's' 键保存当前光流数据")
    
    show_visualization = True
    frame_count = 0
    
    try:
        while True:
            # Snapshot the latest flow/divergence from the worker thread
            flow = sensor.get_flow()
            divergence = sensor.get_divergence()
            
            if flow is not None:
                # Compute summary statistics
                flow_stats = sensor.calculate_flow_statistics()
                divergence_stats = sensor.get_divergence_statistics()
                
                frame_count += 1
                if frame_count % 30 == 0:  # print statistics every 30 frames
                    print(f"Frame {frame_count}:")
                    print(f"  平均光流: [{flow_stats['mean_flow'][0]:.4f}, {flow_stats['mean_flow'][1]:.4f}]")
                    print(f"  平均幅值: {flow_stats['mean_magnitude']:.4f}")
                    print(f"  最大幅值: {flow_stats['max_magnitude']:.4f}")
                    print(f"  平均散度: {divergence_stats['mean_divergence']:.4f}")
                    print(f"  最大散度: {divergence_stats['max_divergence']:.4f}")
                    print("-" * 50)
            
            # Refresh the visualization windows
            if show_visualization:
                sensor.show_combined_visualization()
            
            # NOTE(review): cv2.waitKey only receives key events while at
            # least one HighGUI window exists; with visualization off the
            # hotkeys may stop responding — confirm on the target platform.
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
            elif key == ord('r'):
                show_visualization = not show_visualization
                if not show_visualization:
                    cv2.destroyAllWindows()
                print(f"可视化: {'开启' if show_visualization else '关闭'}")
            elif key == ord('s'):
                # Save a snapshot of the current flow (and divergence)
                if flow is not None:
                    timestamp = int(time.time())
                    np.save(f"flow_data_{timestamp}.npy", flow)
                    if divergence is not None:
                        np.save(f"divergence_data_{timestamp}.npy", divergence)
                    print(f"数据已保存: flow_data_{timestamp}.npy")
                    if divergence is not None:
                        print(f"散度数据已保存: divergence_data_{timestamp}.npy")
            
            # Fix: time.sleep(0) only yields the GIL and provides no delay;
            # sleep 1 ms so the intended CPU throttling actually happens.
            time.sleep(0.001)
            
    except KeyboardInterrupt:
        print("\n接收到中断信号，正在退出...")
    
    finally:
        # Clean up resources (stop() also closes windows; the extra
        # destroyAllWindows is a harmless belt-and-braces call)
        sensor.stop()
        cv2.destroyAllWindows()
        print("程序已安全退出")