import cv2
import numpy as np
from ultralytics import YOLO
from sklearn.linear_model import LinearRegression
from lane_detection.CameraCombine import CameraCombine
from lane_detection.time_counter import TimeCounter
from lane_detection.read_param import LaneDetectionConfig
from typing import List, Tuple, Dict, Optional

class LaneDetector(CameraCombine):
    """Lane-line detector.

    Inherits from CameraCombine and implements the core lane-detection
    pipeline: per-camera YOLO segmentation, line fitting, cross-camera
    merging in BEV space, and center-line computation.

    Attributes:
        vis_config: Visualization configuration parameters.
        line_config: Lane-line configuration parameters.
        det_config: Detection configuration parameters.
        is_deploy: Deployment-mode flag.
        model: YOLO detection model instance.
        camera_lidar_x: Camera-to-lidar distance along the X axis (mm).
        camera_lidar_y: Camera-to-lidar distance along the Y axis (mm).
        bev_scale: BEV image scale factor.
        show_mode: Display mode ('left', 'right', 'bev', 'none').
        mark_mode: Mark mode ('line', 'global', 'all', 'none').
        output_frame: Output frame path or display flag.
        bev_pixels_per_mm: Pixels per millimetre in the BEV image.
        timer: Timer instance for stage profiling.
        cap_L: Left-camera video-capture object (non-deploy mode only).
        cap_R: Right-camera video-capture object (non-deploy mode only).
        bev_camera_center: BEV camera-center position.
        mid_x: X coordinate of the BEV image midpoint
            (not assigned in the visible __init__ — TODO confirm where it is set).
    """

    def __init__(self, lanedetector: LaneDetectionConfig):
        """Initialize the lane detector.

        Wires the shared calibration into both camera transform configs,
        constructs the CameraCombine base, loads the YOLO model, and — in
        non-deploy mode — opens the two video captures.

        Args:
            lanedetector (LaneDetectionConfig): Lane-detection configuration.

        Raises:
            ValueError: If either camera stream cannot be opened
                (non-deploy mode only).
        """
        # Both transform configs share the same intrinsic calibration.
        lanedetector.left_transform_config.cal_inter_param = lanedetector.cal_param
        lanedetector.right_transform_config.cal_inter_param = lanedetector.cal_param
        super().__init__(left_param=lanedetector.left_transform_config, 
                         right_param=lanedetector.right_transform_config,
                         cal_outer_param=lanedetector.cal_param,
                         ang_bias=lanedetector.ang_bias)
        # Configuration parameters
        self.vis_config = lanedetector.vis_config                                # visualization config
        self.line_config = lanedetector.line_config                              # lane-line config
        self.det_config = lanedetector.detection                                 # detection config
        self.is_deploy = lanedetector.is_deploy                                  # deployment-mode flag
        self.model = YOLO(lanedetector.model_path)                              # load the model
        self.camera_lidar_x, self.camera_lidar_y = lanedetector.camera_lidar_pos # distance from the two-camera centre to the lidar (mm)

        # BEV image configuration
        self.bev_scale = self.vis_config.bev_scale                               # BEV image scale factor
        self.show_mode = self.vis_config.show_mode                               # display mode ('left', 'right', 'bev', 'none')
        self.mark_mode = self.vis_config.mark_mode                               # mark mode ('line', 'global', 'all', 'none')
        self.output_frame = self.vis_config.output_frame                         # output frame path or display flag
        self.bev_pixels_per_mm = lanedetector.left_transform_config.bev_pixels_per_mm  # pixels per millimetre in the BEV image

        # Midpoint between the two cameras' BEV positions.
        self.bev_camera_center = [
            (self.left_camera.bev_pos[0] + self.right_camera.bev_pos[0]) * 0.5, 
            (self.left_camera.bev_pos[1] + self.right_camera.bev_pos[1]) * 0.5
        ]

        # Timer for per-stage profiling.
        self.timer = TimeCounter(lanedetector.is_log)

        # Camera initialization (only when reading from recorded streams).
        if not self.is_deploy: 
            self.cap_L = cv2.VideoCapture(lanedetector.camera.left_video)        # left camera
            self.cap_R = cv2.VideoCapture(lanedetector.camera.right_video)       # right camera          
            if not self.cap_L.isOpened() or not self.cap_R.isOpened():
                raise ValueError("无法打开摄像头，请检查摄像头连接和索引。")

    def read_frames(self) -> Optional[List[np.ndarray]]:
        """读取一帧图像并进行去畸变。
    
        Returns:
            List[np.ndarray] or None: 去畸变后的左右相机图像列表。如果读取失败，返回None。
        """
        # 读取左右相机图像
        ret_L, frame_L = self.cap_L.read()
        ret_R, frame_R = self.cap_R.read()
    
        # 如果读取失败，返回None
        if not ret_L or not ret_R:
            return None
    
        # 去畸变
        self.timer.start()
        frame_L = self.left_camera.high_precision_undistort(frame_L)
        frame_R = self.right_camera.high_precision_undistort(frame_R)
        self.timer.get_time_from_start_and_reset("Undistort")
        
        return [frame_L, frame_R]

    def fit_line_to_mask_fast(self, mask: np.ndarray, bbox: List[int],
                             sample_step: int = 10, max_slope: float = 0.2
                             ) -> Optional[List[Tuple[int, int]]]:
        """Fit a single line segment to a segmentation mask (fast path).

        Mask points are optionally subsampled, a least-squares line is
        fitted with cv2.fitLine, and near-horizontal fits are rejected by
        a slope threshold.

        Args:
            mask: Segmentation mask (H, W), local to the bounding box.
            bbox: Bounding box [x1, y1, x2, y2] used to shift the mask
                coordinates into full-image coordinates.
            sample_step: Point-sampling stride (larger is faster).
            max_slope: Minimum absolute slope; flatter fits return None.

        Returns:
            Segment endpoints [(x1, y1), (x2, y2)], or None when the mask
            has fewer than two points or the fitted line is too flat.
        """
        ys, xs = np.where(mask > 0)

        # Need at least two points for a line.
        if len(xs) < 2:
            return None

        # Shift local mask coordinates into global image coordinates.
        xs_global = xs + bbox[0]
        ys_global = ys + bbox[1]

        # Subsample for speed when there are enough points to spare.
        if sample_step > 1 and len(xs) > sample_step * 2:
            keep = np.arange(0, len(xs), sample_step)
            xs_global = xs_global[keep]
            ys_global = ys_global[keep]

        pts = np.column_stack((xs_global, ys_global)).astype(np.float32)

        # Least-squares line fit via OpenCV (much faster than PCA here).
        vx, vy, cx, cy = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
        unit = np.array([vx[0], vy[0]])
        anchor = np.array([cx[0], cy[0]])

        # Project all points onto the line to locate the extreme endpoints.
        offsets = np.dot(pts - anchor, unit)
        seg_start = anchor + offsets.min() * unit
        seg_end = anchor + offsets.max() * unit

        # Slope filter: drop near-horizontal segments, keep vertical ones.
        run = seg_end[0] - seg_start[0]
        rise = seg_end[1] - seg_start[1]
        slope = abs(rise / run) if abs(run) > 1e-5 else float('inf')
        if slope < max_slope:
            return None

        return [tuple(seg_start.astype(int)), tuple(seg_end.astype(int))]
    
    def visualize_detections(self, frame: np.ndarray, results: List
                            ) -> np.ndarray:
        """Overlay fitted lane-line segments on top of YOLO detections.

        Args:
            frame (np.ndarray): Image to draw on (modified in place).
            results (List): YOLO result objects carrying masks and boxes.

        Returns:
            np.ndarray: The annotated image.
        """
        for det in results:
            # Nothing to draw without segmentation masks.
            if det.masks is None:
                continue
            for idx, seg_mask in enumerate(det.masks):
                # Bounding box of this instance as integer pixel coords.
                x1, y1, x2, y2 = map(int, det.boxes[idx].xyxy[0].cpu().numpy())

                # Move the mask to the CPU and crop it to the box region.
                full_mask = seg_mask.data[0].cpu().numpy().astype(np.uint8)
                roi_mask = full_mask[y1:y2, x1:x2]

                # Fit a single line segment to the cropped mask.
                segment = self.fit_line_to_mask_fast(roi_mask, [x1, y1, x2, y2])
                if segment is None:
                    continue

                # Draw the fitted segment and its endpoints.
                cv2.line(frame, segment[0], segment[1], (0, 255, 0), 2)
                cv2.circle(frame, segment[0], 5, (0, 0, 255), -1)
                cv2.circle(frame, segment[1], 5, (0, 0, 255), -1)
        return frame

    def calculate_center_line(self, 
                             left_lines_bev: List[Tuple[float, float]],
                             right_lines_bev: List[Tuple[float, float]],
                             point_spacing: int = 10
                             ) -> Tuple[List[Tuple[float, float, float]],
                                       List[Tuple[float, float, float]],
                                       List[Tuple[float, float, float]],
                                       int]:
        """Compute the lane center line and sample equidistant points on it.

        Each lane boundary is fitted as x = k*y + b over the Y range shared
        by both boundaries; the center line is the average of the two fits.
        (Note: the inputs are flat point lists [(x, y), ...] — the previous
        segment-of-segments type hints did not match how the code indexes
        the arrays.)

        Args:
            left_lines_bev: Left-boundary points in BEV space [(x, y), ...].
            right_lines_bev: Right-boundary points in BEV space [(x, y), ...].
            point_spacing: Spacing between sampled points along the center
                line (pixels).

        Returns:
            Tuple of:
            - Center-line points [(x, y, yaw), ...]
            - Left-boundary points [(x, y, yaw), ...]
            - Right-boundary points [(x, y, yaw), ...]
            - Number of points (0 on failure)
        """
        # Need at least two points per side to fit a line.
        if len(left_lines_bev) < 2 or len(right_lines_bev) < 2:
            print("左右车道线有效点数不足，无法计算中线")
            return [], [], [], 0

        left_points = np.array(left_lines_bev)
        right_points = np.array(right_lines_bev)

        # Y range shared by both boundaries.
        min_y = max(left_points[:, 1].min(), right_points[:, 1].min())
        max_y = min(left_points[:, 1].max(), right_points[:, 1].max())

        if max_y - min_y < 20:  # minimum usable height in pixels
            print("Y范围太小，无法计算中线")
            return [], [], [], 0

        # Ordinary least-squares fit x = k*y + b per boundary (equivalent
        # to the previous per-call sklearn LinearRegression, without the
        # estimator object).
        k_left, b_left = np.polyfit(left_points[:, 1], left_points[:, 0], 1)
        k_right, b_right = np.polyfit(right_points[:, 1], right_points[:, 0], 1)

        start_y, end_y = min_y, max_y

        # Boundary and center x at both ends of the shared Y range.
        start_x_left = k_left * start_y + b_left
        start_x_right = k_right * start_y + b_right
        start_x = (start_x_left + start_x_right) / 2.0

        end_x_left = k_left * end_y + b_left
        end_x_right = k_right * end_y + b_right
        end_x = (end_x_left + end_x_right) / 2.0

        center_length = np.sqrt((end_x - start_x)**2 + (end_y - start_y)**2)

        # Degenerate case: too short to sample — return only the endpoints.
        if center_length < point_spacing * 2:
            yaw_left = np.arctan2(start_x_left - end_x_left, end_y - start_y)
            yaw_right = np.arctan2(start_x_right - end_x_right, end_y - start_y)
            yaw = np.arctan2(start_x - end_x, end_y - start_y)
            # Bug fix: the left/right lists now carry their own yaw angles.
            # Previously the center yaw was returned for all three lists
            # even though yaw_left/yaw_right were computed, which was
            # inconsistent with the sampled path below.
            return ([(start_x, start_y, yaw), (end_x, end_y, yaw)],
                    [(start_x_left, start_y, yaw_left), (end_x_left, end_y, yaw_left)],
                    [(start_x_right, start_y, yaw_right), (end_x_right, end_y, yaw_right)],
                    2)

        # Unit direction of the center line.
        direction = np.array([end_x - start_x, end_y - start_y])
        direction = direction / np.linalg.norm(direction)

        # Per-boundary directions, plus scale factors so that equal
        # arc-length steps on the center line map to matching steps on
        # each boundary.
        dy_span = end_y - start_y

        dx_left = end_x_left - start_x_left
        scale_left = np.linalg.norm([dx_left, dy_span]) / center_length
        direction_left = np.array([dx_left, dy_span])
        direction_left = direction_left / np.linalg.norm(direction_left)

        dx_right = end_x_right - start_x_right
        scale_right = np.linalg.norm([dx_right, dy_span]) / center_length
        direction_right = np.array([dx_right, dy_span])
        direction_right = direction_right / np.linalg.norm(direction_right)

        # Sample equidistant points along the center line, clamped at the end.
        center_samples = []
        left_samples = []
        right_samples = []

        num_points = int(center_length / point_spacing) + 1
        for i in range(num_points):
            distance = min(i * point_spacing, center_length)
            distance_left = distance * scale_left
            distance_right = distance * scale_right

            center_samples.append((start_x + distance * direction[0],
                                   start_y + distance * direction[1]))
            left_samples.append((start_x_left + distance_left * direction_left[0],
                                 start_y + distance_left * direction_left[1]))
            right_samples.append((start_x_right + distance_right * direction_right[0],
                                  start_y + distance_right * direction_right[1]))

        # Overall heading (yaw) of each polyline, first point to last.
        yaw = np.arctan2(center_samples[0][0] - center_samples[-1][0],
                         center_samples[-1][1] - center_samples[0][1])
        yaw_left = np.arctan2(left_samples[0][0] - left_samples[-1][0],
                              left_samples[-1][1] - left_samples[0][1])
        yaw_right = np.arctan2(right_samples[0][0] - right_samples[-1][0],
                               right_samples[-1][1] - right_samples[0][1])

        # Attach the polyline yaw to every point.
        center_points_with_yaw = [(x, y, yaw) for x, y in center_samples]
        left_points_with_yaw = [(x, y, yaw_left) for x, y in left_samples]
        right_points_with_yaw = [(x, y, yaw_right) for x, y in right_samples]

        return center_points_with_yaw, left_points_with_yaw, right_points_with_yaw, len(center_points_with_yaw)
    
    def find_line_intersection_infinite(self, 
                                       line_point: Tuple[float, float], 
                                       line_direction: Tuple[float, float], 
                                       target_line: Tuple[Tuple[float, float], Tuple[float, float]]
                                       ) -> Optional[Tuple[float, float]]:
        """Intersect an infinite line with a finite segment.

        Solves line_point + t * line_direction == seg_start + u * seg_vec
        for (t, u); a valid hit requires u in [0, 1].

        Args:
            line_point: A point (x, y) on the infinite line.
            line_direction: Direction vector (dx, dy) of the infinite line.
            target_line: Segment [(start_x, start_y), (end_x, end_y)].

        Returns:
            Intersection point (x, y), or None when the lines are parallel
            or the intersection falls outside the segment.
        """
        seg_start, seg_end = target_line

        # Direction vector of the finite segment.
        seg_dx = seg_end[0] - seg_start[0]
        seg_dy = seg_end[1] - seg_start[1]

        # 2x2 linear system in the parameters (t, u).
        coeffs = np.array([
            [line_direction[0], -seg_dx],
            [line_direction[1], -seg_dy]
        ])
        rhs = np.array([
            seg_start[0] - line_point[0],
            seg_start[1] - line_point[1]
        ])

        try:
            t, u = np.linalg.solve(coeffs, rhs)
        except np.linalg.LinAlgError:
            # Singular matrix: the lines are parallel (or coincident).
            return None

        # The infinite line must cross within the segment's extent.
        if not (0 <= u <= 1):
            return None

        return (line_point[0] + t * line_direction[0],
                line_point[1] + t * line_direction[1])

    def draw_perpendicular_grid_simple(self, 
                                      center_points: List[Tuple[float, float, float]], 
                                      left_lane: Tuple[Tuple[float, float], Tuple[float, float]], 
                                      right_lane: Tuple[Tuple[float, float], Tuple[float, float]], 
                                      grid_spacing: int = 50, 
                                      grid_length: int = 200, 
                                      bev_image: Optional[np.ndarray] = None
                                      ) -> Tuple[List[Dict], np.ndarray]:
        """Draw cross-lane grid lines and measure lane distances (simplified).

        For every 15th center-line point, casts a line along the center
        line's yaw direction, intersects it with the left and right lane
        segments, draws the intersections and labels, and records the
        centre-to-lane distances (converted from pixels to millimetres).

        Args:
            center_points: Center-line points [(x, y, yaw), ...].
            left_lane: Left lane segment [(start_x, start_y), (end_x, end_y)].
            right_lane: Right lane segment [(start_x, start_y), (end_x, end_y)].
            grid_spacing: Grid spacing along the center line (pixels).
                NOTE(review): currently unused — sampling uses a hard-coded
                step of 15 points; confirm intent.
            grid_length: Grid-line length perpendicular to the center line
                (pixels). NOTE(review): currently unused.
            bev_image: Optional BEV image to draw on; a black canvas is
                created when omitted.

        Returns:
            Tuple of:
            - List of per-grid-point info dicts (index, centre point,
              intersections, distances).
            - The annotated BEV image.
        """
        if not center_points or len(center_points) < 2:
            print("中心点不足，无法绘制网格")
            return [], bev_image
        
        # Create a blank BEV canvas when no image was supplied.
        if bev_image is None:
            bev_height = self.left_camera.bev_height
            bev_width = self.left_camera.bev_width
            bev_image = np.zeros((bev_height, bev_width, 3), dtype=np.uint8)
        
        # Collected per-point measurements.
        grid_points_info = []
        
        # Yaw of the center line (identical for every point by construction).
        yaw = center_points[0][2]
        
        # NOTE(review): despite the name, no 90-degree rotation is applied —
        # the "perpendicular" direction equals the center-line yaw itself;
        # confirm whether yaw + pi/2 was intended.
        perpendicular_yaw = yaw
        
        # Direction vector of the cast grid line.
        dx_perp = np.cos(perpendicular_yaw)
        dy_perp = np.sin(perpendicular_yaw)
        
        # Sample every `step` center points (hard-coded; see NOTE above).
        num_points = len(center_points)
        step = 15
        
        for i in range(0, num_points, step):
            if i >= num_points:  # defensive; range() already guarantees this
                break
                
            center_point = center_points[i]
            x_center, y_center, _ = center_point
            
            grid_direction = (dx_perp, dy_perp)
            
            # Intersections of the cast line with the two lane segments
            # (infinite line vs finite segment).
            left_intersection = self.find_line_intersection_infinite(
                (x_center, y_center), grid_direction, left_lane
            )
            
            right_intersection = self.find_line_intersection_infinite(
                (x_center, y_center), grid_direction, right_lane
            )
            
            # Draw the left intersection and its distance label.
            if left_intersection:
                cv2.circle(bev_image, 
                          (int(left_intersection[0]), int(left_intersection[1])),
                          8, (255, 0, 0), -1)  # blue - left intersection
                
                # Pixel distance from the centre, converted to millimetres.
                left_distance = np.sqrt((left_intersection[0]-x_center)**2 + 
                                        (left_intersection[1]-y_center)**2)
                left_distance /= self.bev_pixels_per_mm
                # Distance label next to the intersection point.
                cv2.putText(bev_image, f"{left_distance}",
                           (int(left_intersection[0])+10, int(left_intersection[1])),
                           cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
            else:
                left_distance = None
            
            # Draw the right intersection and its distance label.
            if right_intersection:
                cv2.circle(bev_image, 
                          (int(right_intersection[0]), int(right_intersection[1])),
                          8, (0, 0, 255), -1)  # red - right intersection
                
                # Pixel distance from the centre, converted to millimetres.
                right_distance = np.sqrt((right_intersection[0]-x_center)**2 + 
                                        (right_intersection[1]-y_center)**2)
                right_distance /= self.bev_pixels_per_mm
                # Distance label next to the intersection point.
                cv2.putText(bev_image, f"{right_distance}",
                           (int(right_intersection[0])+10, int(right_intersection[1])),
                           cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
            else:
                right_distance = None
            
            # Index label of the sampled centre point.
            cv2.putText(bev_image, str(i),
                       (int(x_center)+5, int(y_center)-5),
                       cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            
            cv2.circle(bev_image, 
                        (int(x_center), int(y_center)),
                        8, (0, 255, 0), -1)  # green - centre point
            # Record this grid point's measurements.
            grid_points_info.append({
                "index": i,
                "center_point": (x_center, y_center),
                "left_intersection": left_intersection,
                "right_intersection": right_intersection,
                "left_distance": left_distance,
                "right_distance": right_distance
            })
            
            # Log the measured distances.
            print(f"点 {i}: 中心({x_center}, {y_center}), "
                  f"左距离={left_distance if left_distance else 'N/A'}, "
                  f"右距离={right_distance if right_distance else 'N/A'}")
        
        return grid_points_info, bev_image

    def calculate_line_fit_error(self, points: List[Tuple[float, float]]) -> float:
        """Measure how well a set of points fits one straight line.

        Fits y = k*x + b by ordinary least squares and returns the mean
        perpendicular distance of the points from that line. Works for any
        number of points >= 2 (the previous docstring claimed exactly 4,
        but callers pass generic endpoint sets).

        Args:
            points: Point coordinates [(x1, y1), (x2, y2), ...].

        Returns:
            Mean perpendicular distance (not vertical residual) to the
            fitted line.
        """
        pts = np.asarray(points, dtype=float)
        xs = pts[:, 0]
        ys = pts[:, 1]

        # Closed-form OLS slope/intercept. Numerically identical to the
        # previous sklearn LinearRegression fit (which centres the data the
        # same way), including the degenerate all-equal-x case where the
        # centred design matrix is zero and the slope collapses to 0 —
        # without allocating an estimator per call.
        x_mean = xs.mean()
        y_mean = ys.mean()
        var_x = np.sum((xs - x_mean) ** 2)
        if var_x > 0:
            k = np.sum((xs - x_mean) * (ys - y_mean)) / var_x
        else:
            k = 0.0  # vertical point set: matches sklearn's centred lstsq
        b = y_mean - k * x_mean

        # Perpendicular point-to-line distance: |k*x - y + b| / sqrt(k^2+1).
        distances = np.abs(k * xs - ys + b) / np.sqrt(k**2 + 1)

        return np.mean(distances)  # mean absolute perpendicular error

    def merge_cross_camera_lines(self, 
                                left_lines_bev: List[Tuple[Tuple[float, float], Tuple[float, float]]],
                                right_lines_bev: List[Tuple[Tuple[float, float], Tuple[float, float]]],
                                max_variance: int = 50
                                ) -> Tuple[List[Tuple[Tuple[float, float], Tuple[float, float]]], np.ndarray]:
        """Merge lane lines from both cameras into one global line set.

        Lines from opposite cameras whose four endpoints fit a single
        straight line (fit error below max_variance) are merged into one
        segment; all unmatched lines are kept as-is. Everything is also
        drawn onto a single-channel BEV mask (255 = merged, 128 = unmatched
        left, 64 = unmatched right).

        Args:
            left_lines_bev: Left-camera lane lines projected into BEV space.
            right_lines_bev: Right-camera lane lines projected into BEV space.
            max_variance: Maximum allowed line-fit error (pixels).

        Returns:
            Tuple of:
            - All lane lines (merged segments plus unmatched originals).
            - The BEV mask image, or None when both inputs are empty
              (NOTE(review): annotated as np.ndarray but may be None —
              confirm callers handle this).
        """
        if not left_lines_bev and not right_lines_bev:
            return [], None
        
        # Single-channel BEV canvas onto which all result lines are drawn.
        bev_height = self.left_camera.bev_height
        bev_width = self.left_camera.bev_width
        merged_mask = np.zeros((bev_height, bev_width), dtype=np.uint8)
        
        # Matched (left_idx, right_idx) pairs plus per-camera index sets.
        matched_pairs = []
        matched_left = set()
        matched_right = set()
        
        # Flat list of all lines tagged with source camera and index.
        all_lines = []
        all_lines.extend([(line, 'left', i) for i, line in enumerate(left_lines_bev)])
        all_lines.extend([(line, 'right', i) for i, line in enumerate(right_lines_bev)])
        
        # Greedy matching over all cross-camera line pairs.
        # NOTE(review): matched_left/matched_right are not consulted inside
        # this loop, so one line can appear in multiple matched pairs —
        # confirm whether duplicate merges are acceptable downstream.
        for i in range(len(all_lines)):
            line_i, camera_i, idx_i = all_lines[i]
            start_i, end_i = line_i
            
            min_error = float('inf')
            best_match = None
            
            for j in range(i + 1, len(all_lines)):
                line_j, camera_j, idx_j = all_lines[j]
                start_j, end_j = line_j
                
                # Never merge two lines from the same camera.
                if camera_i == camera_j:
                    continue
                    
                # Pool the four endpoints of lines i and j.
                points = [start_i, end_i, start_j, end_j]
                
                # How well do the four endpoints fit one straight line?
                error = self.calculate_line_fit_error(points)
                
                # Keep the best pairing below the error threshold.
                if error < min_error and error < max_variance:
                    min_error = error
                    best_match = j
            
            # Record the best pairing for line i, if any was found.
            if best_match is not None:
                line_j, camera_j, idx_j = all_lines[best_match]
                
                # Store as (left_idx, right_idx) regardless of scan order.
                if camera_i == 'left':
                    matched_pairs.append((idx_i, idx_j))
                    matched_left.add(idx_i)
                    matched_right.add(idx_j)
                else:
                    matched_pairs.append((idx_j, idx_i))
                    matched_left.add(idx_j)
                    matched_right.add(idx_i)
        
        # Merge each matched pair into a single fitted segment.
        merged_lanes = []
        for left_idx, right_idx in matched_pairs:
            left_line = left_lines_bev[left_idx]
            right_line = right_lines_bev[right_idx]
            
            # All four endpoints of the pair.
            points = [left_line[0], left_line[1], right_line[0], right_line[1]]
            
            # Fit one straight line through the endpoints.
            points_array = np.array(points)
            X = points_array[:, 0].reshape(-1, 1)
            y = points_array[:, 1]
            
            # Ordinary least squares: y = k*x + b.
            reg = LinearRegression().fit(X, y)
            k = reg.coef_[0]
            b = reg.intercept_
            
            # Union of the pair's extent along the y axis.
            min_y = min(points_array[:, 1])
            max_y = max(points_array[:, 1])
            
            # Invert the fit to get x at the extent's ends.
            if k != 0:  # avoid division by zero for horizontal fits
                min_x = (min_y - b) / k
                max_x = (max_y - b) / k
            else:
                min_x = min(points_array[:, 0])
                max_x = max(points_array[:, 0])
            
            # New merged segment spanning the combined extent.
            merged_start = (min_x, min_y)
            merged_end = (max_x, max_y)
            merged_lanes.append((merged_start, merged_end))
            
            # Draw the merged segment on the mask (value 255).
            cv2.line(merged_mask, 
                    (int(merged_start[0]), int(merged_start[1])),
                    (int(merged_end[0]), int(merged_end[1])),
                    255, 5)
        
        # Keep unmatched left-camera lines as-is.
        remaining_left = []
        for i, line in enumerate(left_lines_bev):
            if i not in matched_left:
                remaining_left.append(line)
                # Draw unmatched left lines on the mask.
                cv2.line(merged_mask, 
                        (int(line[0][0]), int(line[0][1])),
                        (int(line[1][0]), int(line[1][1])),
                        128, 3)  # distinct grey level for unmatched-left
        
        remaining_right = []
        for j, line in enumerate(right_lines_bev):
            if j not in matched_right:
                remaining_right.append(line)
                # Draw unmatched right lines on the mask.
                cv2.line(merged_mask, 
                        (int(line[0][0]), int(line[0][1])),
                        (int(line[1][0]), int(line[1][1])),
                        64, 3)  # distinct grey level for unmatched-right
        
        # Merged lines first, then the unmatched leftovers.
        global_lanes = merged_lanes + remaining_left + remaining_right
        
        return global_lanes, merged_mask

    def find_optimal_left_right_lanes(self, 
                                     global_lanes: List[Tuple[Tuple[float, float], Tuple[float, float]]],
                                     bev_camera_center: Tuple[float, float],
                                     distance_weight: float = 0.2, 
                                     length_weight: float = 0.2, 
                                     slope_weight: float = 0.5, 
                                     width_weight: float = 0.25,
                                     min_width: int = 500, 
                                     max_width: int = 2000
                                     ) -> Tuple[Optional[Tuple[Tuple[float, float], Tuple[float, float]]],
                                               Optional[Tuple[Tuple[float, float], Tuple[float, float]]],
                                               float]:
        """Pick the best left/right lane pair from the global lane set.

        Every lane pair within the allowed width range is scored on
        proximity to the camera centre, length, slope agreement, and how
        close the pair's width is to the ideal width; the highest-scoring
        pair wins.

        Args:
            global_lanes: All candidate lane lines.
            bev_camera_center: Camera centre position in BEV (x, y).
            distance_weight: Weight of proximity to the camera centre.
            length_weight: Weight of lane length.
            slope_weight: Weight of slope agreement between the pair.
            width_weight: Weight of the lane-width score.
            min_width: Minimum acceptable lane width (pixels).
            max_width: Maximum acceptable lane width (pixels).

        Returns:
            Tuple of (best left lane, best right lane, score). Lanes are
            None (with score 0) when fewer than two candidates exist.
        """
        if not global_lanes or len(global_lanes) < 2:
            return None, None, 0

        # Per-lane features consumed by the pair scoring below.
        features = []
        for candidate in global_lanes:
            p0, p1 = candidate

            # Closest endpoint distance to the camera centre.
            d0 = np.sqrt((p0[0]-bev_camera_center[0])**2 + (p0[1]-bev_camera_center[1])**2)
            d1 = np.sqrt((p1[0]-bev_camera_center[0])**2 + (p1[1]-bev_camera_center[1])**2)

            # Segment length.
            seg_len = np.sqrt((p1[0]-p0[0])**2 + (p1[1]-p0[1])**2)

            # Slope; (near-)vertical segments map to +inf.
            run = p1[0] - p0[0]
            grad = (p1[1]-p0[1]) / run if abs(run) > 1e-5 else float('inf')

            features.append({
                "lane": candidate,
                "min_dist": min(d0, d1),
                "length": seg_len,
                "slope": grad,
                "mid_point": ((p0[0] + p1[0]) / 2, (p0[1] + p1[1]) / 2),
            })

        # Normalise distance (smaller is better, hence 1 - ratio) and
        # length (larger is better); `or 1` guards against all-zero maxima.
        farthest = max(f["min_dist"] for f in features) or 1
        longest = max(f["length"] for f in features) or 1
        for f in features:
            f["norm_dist"] = 1 - (f["min_dist"] / farthest)
            f["norm_length"] = f["length"] / longest

        # Exhaustive pair search for the highest combined score.
        best_score = -float('inf')
        best_left = None
        best_right = None
        ideal_width = (min_width + max_width) / 2

        total = len(features)
        for a in range(total):
            for b in range(a + 1, total):
                lane_a = features[a]
                lane_b = features[b]

                # Lane width = lateral separation of the two midpoints.
                width = abs(lane_a["mid_point"][0] - lane_b["mid_point"][0])
                if width < min_width or width > max_width:
                    continue

                # Gaussian width score, peaking at the ideal width.
                width_score = np.exp(-0.5 * ((width - ideal_width) / (ideal_width/3))**2)

                # Slope agreement, normalised and clipped to [0, 1].
                parallel_score = 1 - min(abs(lane_a["slope"] - lane_b["slope"]) / 10, 1)

                # Weighted combination of all criteria.
                pair_score = (
                    distance_weight * (lane_a["norm_dist"] + lane_b["norm_dist"]) / 2 +
                    length_weight * (lane_a["norm_length"] + lane_b["norm_length"]) / 2 +
                    slope_weight * parallel_score +
                    width_weight * width_score
                )

                if pair_score > best_score:
                    best_score = pair_score
                    # Assign left/right by midpoint x coordinate.
                    if lane_a["mid_point"][0] < lane_b["mid_point"][0]:
                        best_left = lane_a["lane"]
                        best_right = lane_b["lane"]
                    else:
                        best_left = lane_b["lane"]
                        best_right = lane_a["lane"]

        return best_left, best_right, best_score

    def calculate_lane_slope(self, lane: Tuple[Tuple[float, float], Tuple[float, float]]) -> float:
        """Return the slope dy/dx of a lane segment.

        Args:
            lane: Lane segment [(start_x, start_y), (end_x, end_y)].

        Returns:
            The slope, or float('inf') for (near-)vertical segments.
        """
        (x0, y0), (x1, y1) = lane
        run = x1 - x0
        # Guard against division by zero on vertical segments.
        return (y1 - y0) / run if abs(run) >= 1e-5 else float('inf')

    def transform_to_meter_coordinates(self, 
                                       center_points: List[Tuple[float, float, float]], 
                                       bev_center: Tuple[float, float]
                                       ) -> List[float]:
        """将BEV像素坐标转换到以bev_center为原点、单位为米的坐标系。
        
        Args:
            center_points: BEV像素坐标 [[(x1,y1), (x2,y2), ...], ...]。
            bev_center: BEV中心点在棋盘格坐标系中的位置 (x_mm, y_mm)。
            
        Returns:
            转换后的坐标列表 [[(x1_m,y1_m), (x2_m,y2_m), ...], ...]。
        """
        transformed_points = []
        
        for point in center_points:
            # 1. 转换为相对于BEV图像中心的偏移量（像素）
            y_px = bev_center[0] - point[0]
            x_px = bev_center[1] - point[1]
            
            # 2. 转换为毫米（根据像素/毫米比例）
            x_mm = x_px / self.bev_pixels_per_mm + self.camera_lidar_x  # 加上相机到激光雷达的距离
            y_mm = y_px / self.bev_pixels_per_mm + self.camera_lidar_y  # 加上相机到激光雷达的距离
            
            # 3. 转换为相对于bev_center，前左上坐标系
            x_m = (x_mm ) / 1000.0
            y_m = (y_mm ) / 1000.0
                
            transformed_points.append(x_m)   
            transformed_points.append(y_m)
            transformed_points.append(point[2])# 保留yaw角
        
        return transformed_points

    def transform_to_meter_coordinates_center(self, 
                                            center_points: List[Tuple[float, float, float]], 
                                            bev_center: Tuple[float, float]
                                            ) -> List[Tuple[float, float, float]]:
        """将BEV像素坐标转换到以bev_center为原点、单位为米的坐标系。
        
        Args:
            center_points: BEV像素坐标 [[(x1,y1), (x2,y2), ...], ...]。
            bev_center: BEV中心点在棋盘格坐标系中的位置 (x_mm, y_mm)。
            
        Returns:
            转换后的坐标列表 [[(x1_m,y1_m), (x2_m,y2_m), ...], ...]。
        """
        transformed_points = []
        
        for point in center_points:
            # 1. 转换为相对于BEV图像中心的偏移量（像素）
            y_px = bev_center[0] - point[0]
            x_px = bev_center[1] - point[1]
            
            # 2. 转换为毫米（根据像素/毫米比例）
            x_mm = x_px / self.bev_pixels_per_mm + self.camera_lidar_x  # 加上相机到激光雷达的距离
            y_mm = y_px / self.bev_pixels_per_mm + self.camera_lidar_y  # 加上相机到激光雷达的距离
            
            # 3. 转换为相对于bev_center，前左上坐标系
            x_m = (x_mm ) / 1000.0
            y_m = (y_mm ) / 1000.0
                
            transformed_points.append((x_m, y_m, point[2]))   
        
        return transformed_points
    
    def get_distance(self, 
                    bev_left_lane: Tuple[Tuple[float, float], Tuple[float, float]], 
                    bev_right_lane: Tuple[Tuple[float, float], Tuple[float, float]], 
                    bev_camera_center: Tuple[float, float]
                    ) -> None:
        """计算BEV空间中车道线与相机中心的距离。
        
        Args:
            bev_left_lane: BEV空间中的左车道线。
            bev_right_lane: BEV空间中的右车道线。
            bev_camera_center: BEV空间中的相机中心位置。
        """
        print("计算BEV空间中车道线与相机中心的距离")
        # 计算左车道线与相机中心的距离
        left_start, left_end = bev_left_lane
        right_start, right_end = bev_right_lane
        left_start = np.array(left_start)
        right_start = np.array(right_start)
        left_end = np.array(left_end)
        right_end = np.array(right_end)
        bev_camera_center = np.array(bev_camera_center)
        left_distance_start = np.linalg.norm(left_start - bev_camera_center)
        right_distance_start = np.linalg.norm(right_start - bev_camera_center)
        left_distance_end = np.linalg.norm(left_end - bev_camera_center)
        right_distance_end = np.linalg.norm(right_end - bev_camera_center)
        left_distance_start /= self.bev_pixels_per_mm * 1000.0
        right_distance_start /= self.bev_pixels_per_mm * 1000.0
        left_distance_end /= self.bev_pixels_per_mm * 1000.0
        right_distance_end /= self.bev_pixels_per_mm * 1000.0
        print(f"左车道线距离: {left_distance_start}, 右车道线距离: {right_distance_start}")
        print(f"左车道线结束点距离: {left_distance_end}, 右车道线结束点距离: {right_distance_end}")

    def process_frame(self, frame: List[np.ndarray]
                     ) -> Tuple[Optional[np.ndarray], Optional[Dict], Optional[List[Tuple[float, float, float]]]]:
        """Process one stereo frame pair and return the visualised result.

        Runs YOLO segmentation on both camera images, fits lines to the
        masks, projects them into BEV space, merges them into global lanes,
        selects the optimal left/right pair and derives the centre line.

        Args:
            frame: [left_image, right_image] BGR arrays.

        Returns:
            Tuple of:
            - annotated image (None when show_mode renders nothing or on failure)
            - result parameters (process time, point count, lane lines in metres)
            - centre-line points converted to metre coordinates (or None)
        """
        if frame is None or frame[0] is None or frame[1] is None:
            return None, None, None

        # Schema skeleton: the type objects are placeholders documenting the
        # expected value types; every entry is overwritten before returning.
        return_params = {
            "process_time": float,          # node processing time [s]
            "bev_number": int,              # number of marked points in BEV coords
            "bev_left_line": List[float],   # [x1,y1,yaw1,...,xN,yN,yawN]
            "bev_right_line": List[float],  # [x1,y1,yaw1,...,xN,yN,yawN]
            "bev_mid_line": List[float]     # [x1,y1,yaw1,...,xN,yN,yawN]
        }

        # Batched inference over both camera images
        results = self.model(frame, 
                             conf=self.det_config.confidence_threshold, 
                             iou=self.det_config.iou_threshold,
                             batch=2, 
                             verbose=False, 
                             device=self.det_config.device)
        self.timer.get_time_from_start_and_reset("inference")

        # Fitted line segments, grouped per camera
        fitted_left_lines = []
        fitted_right_lines = []

        # Iterate detection results: index 0 is the left camera, 1 the right.
        # NOTE: the inner loop variable is named mask_idx (the original code
        # shadowed the outer index `i`).
        for cam_idx, result in enumerate(results):
            camera = 'left' if cam_idx == 0 else 'right'

            # Fit a line to every segmentation mask
            if result.masks is not None:
                for mask_idx, mask in enumerate(result.masks):
                    # Bounding box paired with this mask
                    box = result.boxes[mask_idx]
                    x1, y1, x2, y2 = map(int, box.xyxy[0].cpu().numpy())

                    # Move mask data from GPU to CPU as a uint8 NumPy array
                    mask_data = mask.data[0].cpu().numpy().astype(np.uint8)

                    # Crop the mask to its bounding-box region
                    mask_cropped = mask_data[y1:y2, x1:x2]

                    # Fit a straight line to the cropped mask
                    line = self.fit_line_to_mask_fast(mask_cropped, 
                                                      [x1, y1, x2, y2], 
                                                      self.line_config.sample_step,
                                                      self.line_config.max_slope)
                    if line is not None:
                        if camera == 'left':
                            fitted_left_lines.append(line)
                        else:
                            fitted_right_lines.append(line)

        self.timer.get_time_from_start_and_reset("Fit lines")

        # Project the fitted lines into BEV space
        bev_camera_left_points = self.left_camera.project_points_with_kdtree(fitted_left_lines)
        bev_camera_right_points = self.right_camera.project_points_with_kdtree(fitted_right_lines)
        self.timer.get_time_from_start_and_reset("proj to bev")

        # Merge lines from both cameras in BEV space and fit global lanes
        global_lane, merged_mask = self.merge_cross_camera_lines(bev_camera_left_points, 
                                                                 bev_camera_right_points, 
                                                                 max_variance=self.line_config.max_variance)

        # Select the optimal left/right lane pair
        left_lane, right_lane, _ = self.find_optimal_left_right_lanes(
            global_lane, 
            self.bev_camera_center,
            distance_weight=self.line_config.distance_weight,
            length_weight=self.line_config.length_weight,
            slope_weight=self.line_config.slope_weight,
            width_weight=self.line_config.width_weight,
            min_width=self.line_config.min_width,
            max_width=self.line_config.max_width
        )

        self.timer.get_time_from_start_and_reset("merge lines")

        if left_lane is None or right_lane is None:
            print("未找到合适的左右车道线，无法计算中线")
            return None, None, None

        center_points, left_points, right_points, point_num = self.calculate_center_line(
            left_lane, right_lane, point_spacing=self.line_config.point_spacing 
        )

        self.timer.get_time_from_start_and_reset("proc center line")
        process_time = self.timer.global_stop("Total frame processing")

        # Visualisation
        if self.show_mode == 'bev':
            # Build the stitched BEV image and draw reference markers
            bev_img_left = self.left_camera.fast_bev_transform(frame[0])
            bev_img_right = self.right_camera.fast_bev_transform(frame[1])
            annotated_frame = self.create_cropped_bev(bev_img_left, bev_img_right)
            annotated_frame = cv2.circle(annotated_frame, (int(self.bev_camera_center[0]), int(self.bev_camera_center[1])), 20, (0, 255, 0), -1)
            annotated_frame = cv2.circle(annotated_frame, (int( self.left_camera.bev_pos[0]), int(self.left_camera.bev_pos[1])), 20, (255, 0, 0), -1)
            annotated_frame = cv2.circle(annotated_frame, (int( self.right_camera.bev_pos[0]), int(self.right_camera.bev_pos[1])), 20, (0, 0, 255), -1)
            annotated_frame = cv2.line(annotated_frame, (int(self.mid_x), 0), (int(self.mid_x), self.left_camera.bev_height), (255, 255, 255), 2)
            if self.mark_mode in ['global', 'all']:
                # Overlay the merged mask in red
                if merged_mask is not None:
                    mask_color = np.zeros_like(annotated_frame)
                    mask_color[merged_mask > 0] = (0, 0, 255)  # red
                    annotated_frame = cv2.addWeighted(annotated_frame, 0.7, mask_color, 0.3, 0)

                # Draw the global lane lines
                if global_lane:
                    annotated_frame = self.visualize_lines_in_bev(
                        annotated_frame, global_lane, color=(0, 255, 255), line_thickness = self.vis_config.line_thickness
                    )

            if self.mark_mode in ['line', 'all']:
                # Draw centre / left / right keypoints
                if center_points is not None:
                    annotated_frame = self.visualize_keypoints_in_bev(
                        annotated_frame, center_points, color=self.vis_config.center_color
                    )
                if left_points is not None:
                    annotated_frame = self.visualize_keypoints_in_bev(
                        annotated_frame, left_points, color=self.vis_config.left_color
                    )
                if right_points is not None:
                    annotated_frame = self.visualize_keypoints_in_bev(
                        annotated_frame, right_points, color=self.vis_config.right_color
                    )
            if self.mark_mode in ['grid', 'all']:
                # Draw a grid perpendicular to the centre line
                if center_points and left_lane and right_lane:
                    grid_info, annotated_frame = self.draw_perpendicular_grid_simple(
                        center_points, 
                        left_lane, 
                        right_lane,
                        grid_spacing=50,   # grid spacing (pixels)
                        grid_length=200,   # grid-line length (pixels)
                        bev_image=annotated_frame  # draw onto the existing BEV image
                    )

            # Resize for display
            annotated_frame = cv2.resize(annotated_frame, 
                                        (int(self.left_camera.bev_width*self.bev_scale), 
                                         int(self.left_camera.bev_height*self.bev_scale)))
        elif self.show_mode == 'right':
            annotated_frame = frame[1]
            if fitted_right_lines is not None:
                annotated_frame = self.visualize_lines_in_bev(
                    annotated_frame, fitted_right_lines, color=self.vis_config.right_color
                )
        elif self.show_mode == 'left':
            annotated_frame = frame[0]
            # BUGFIX: the original guarded on fitted_right_lines here.
            if fitted_left_lines is not None:
                annotated_frame = self.visualize_lines_in_bev(
                    annotated_frame, fitted_left_lines, color=self.vis_config.left_color
                )
        else:
            annotated_frame = None

        # Convert lane lines to metre coordinates for the return payload
        return_params["bev_mid_line"] = self.transform_to_meter_coordinates(center_points, self.bev_camera_center) if center_points else None
        return_params["bev_left_line"] = self.transform_to_meter_coordinates(left_points, self.bev_camera_center) if left_points else None
        return_params["bev_right_line"] = self.transform_to_meter_coordinates(right_points, self.bev_camera_center) if right_points else None
        return_params["bev_number"] = point_num
        return_params["process_time"] = process_time
        center_points = self.transform_to_meter_coordinates_center(center_points, self.bev_camera_center) if center_points else None
        if self.show_mode != "none":
            if self.output_frame == 'cvshow':
                # BUGFIX: cv2.show does not exist; the correct call is cv2.imshow.
                cv2.imshow("Lane Detection", annotated_frame)
                cv2.waitKey(1)
            else:
                if self.output_frame:
                    cv2.imwrite(self.output_frame, annotated_frame)

        return annotated_frame, return_params, center_points

    def visualize_global_lane(self, 
                             bev_image: np.ndarray, 
                             global_lane: Tuple[Tuple[float, float], Tuple[float, float]], 
                             color: Tuple[int, int, int] = (0, 255, 255), 
                             thickness: int = 5
                             ) -> np.ndarray:
        """Draw a global lane line onto a BEV image.

        Args:
            bev_image: BEV image to draw on (modified in place).
            global_lane: Lane segment [(start_x, start_y), (end_x, end_y)].
            color: Line colour (B, G, R).
            thickness: Line thickness.

        Returns:
            The annotated image.
        """
        # Nothing to draw without a valid two-point segment
        if global_lane is None or len(global_lane) < 2:
            return bev_image

        p0, p1 = global_lane
        start_px = (int(p0[0]), int(p0[1]))
        end_px = (int(p1[0]), int(p1[1]))

        # Lane segment plus filled red endpoint markers
        cv2.line(bev_image, start_px, end_px, color, thickness, cv2.LINE_AA)
        cv2.circle(bev_image, start_px, 15, (0, 0, 255), -1)
        cv2.circle(bev_image, end_px, 15, (0, 0, 255), -1)

        return bev_image

    def release_cameras(self) -> None:
        """Release both video captures and close OpenCV windows.

        No-op in deploy mode, where no local captures were opened.
        """
        if not self.is_deploy:
            self.cap_L.release()
            self.cap_R.release()
            cv2.destroyAllWindows()

    def visualize_lines_in_bev(self, 
                              bev_image: np.ndarray, 
                              line_points: List[Tuple[Tuple[float, float], Tuple[float, float]]], 
                              color: Tuple[int, int, int] = (255, 0, 0), 
                              thickness: int = 10
                              ) -> np.ndarray:
        """Visualize endpoints and fitted line segments on a BEV image, clipping segments to the image bounds.

        Args:
            bev_image: BEV image to draw on (modified in place).
            line_points: List of segment endpoints in BEV space.
            color: Line colour (B, G, R).
            thickness: Line thickness.

        Returns:
            The annotated BEV image.
        """
        # Image bounds used for visibility tests and clipping
        img_height, img_width = bev_image.shape[:2]
        
        # Pass 1: draw every endpoint that falls inside the image (red dots)
        for line in line_points:
            for point in line:
                x, y = point
                if 0 <= x < img_width and 0 <= y < img_height:
                    cv2.circle(bev_image, (int(x), int(y)), 15, (0, 0, 255), -1)
        
        # Pass 2: draw the segments, clipped against the image rectangle
        for line in line_points:
            if line and len(line) == 2:
                pt1, pt2 = line
                x1, y1 = pt1
                x2, y2 = pt2
                
                # Skip segments that lie entirely on one side of the image
                # (both endpoints beyond the same border)
                if (x1 < 0 and x2 < 0) or (x1 >= img_width and x2 >= img_width) or \
                   (y1 < 0 and y2 < 0) or (y1 >= img_height and y2 >= img_height):
                    continue  # fully outside the image
                
                # Collect intersections of the segment with the image borders
                intersections = []
                
                # Left border (x = 0): segment crosses when endpoint x-signs differ
                if (x1 < 0 and x2 >= 0) or (x1 >= 0 and x2 < 0):
                    t = -x1 / (x2 - x1) if x2 != x1 else 0
                    y = y1 + t * (y2 - y1)
                    if 0 <= y < img_height:
                        intersections.append((0, y))
                
                # Right border (x = img_width - 1)
                if (x1 < img_width and x2 >= img_width) or (x1 >= img_width and x2 < img_width):
                    t = (img_width - 1 - x1) / (x2 - x1) if x2 != x1 else 0
                    y = y1 + t * (y2 - y1)
                    if 0 <= y < img_height:
                        intersections.append((img_width - 1, y))
                
                # Top border (y = 0)
                if (y1 < 0 and y2 >= 0) or (y1 >= 0 and y2 < 0):
                    t = -y1 / (y2 - y1) if y2 != y1 else 0
                    x = x1 + t * (x2 - x1)
                    if 0 <= x < img_width:
                        intersections.append((x, 0))
                
                # Bottom border (y = img_height - 1)
                if (y1 < img_height and y2 >= img_height) or (y1 >= img_height and y2 < img_height):
                    t = (img_height - 1 - y1) / (y2 - y1) if y2 != y1 else 0
                    x = x1 + t * (x2 - x1)
                    if 0 <= x < img_width:
                        intersections.append((x, img_height - 1))
                
                # Collect the original endpoints that are already visible
                visible_points = []
                for point in [pt1, pt2]:
                    x, y = point
                    if 0 <= x < img_width and 0 <= y < img_height:
                        visible_points.append(point)
                
                # Merge visible endpoints and border intersections
                all_points = visible_points + intersections
                
                # Need at least two points to draw a segment
                if len(all_points) >= 2:
                    if len(all_points) == 2:
                        start, end = all_points[0], all_points[1]
                    else:
                        # More than two candidates (e.g. corner crossings):
                        # pick the two farthest-apart points to span the
                        # visible portion of the segment
                        max_dist = 0
                        start, end = all_points[0], all_points[1]
                        for i in range(len(all_points)):
                            for j in range(i+1, len(all_points)):
                                dist = np.sqrt((all_points[i][0]-all_points[j][0])**2 + 
                                              (all_points[i][1]-all_points[j][1])**2)
                                if dist > max_dist:
                                    max_dist = dist
                                    start, end = all_points[i], all_points[j]
                    
                    # Draw the (clipped) segment
                    cv2.line(bev_image, 
                             (int(start[0]), int(start[1])), 
                             (int(end[0]), int(end[1])), 
                             color, thickness, cv2.LINE_AA)
        
        return bev_image

    def visualize_keypoints_in_bev(self, 
                                 bev_image: np.ndarray, 
                                 keypoints: List[Tuple[float, float]], 
                                 color: Tuple[int, int, int] = (0, 255, 0)
                                 ) -> np.ndarray:
        """Draw each keypoint as a filled circle on a BEV image.

        Args:
            bev_image: BEV image to draw on (modified in place).
            keypoints: Keypoint coordinates in BEV space.
            color: Marker colour (B, G, R).

        Returns:
            The annotated BEV image.
        """
        # Only the first two components of each keypoint are used;
        # any extra fields (e.g. yaw) are ignored.
        for kp in keypoints:
            cv2.circle(bev_image, (int(kp[0]), int(kp[1])), 10, color, -1)
        return bev_image

    def test_video(self, 
                  output_video_path: Optional[str] = None, 
                  show_video: bool = False, 
                  save_video: bool = True, 
                  fps: int = 30
                  ) -> None:
        """Run the detector over the video streams and optionally show/save output.

        Args:
            output_video_path: Output video path (None disables saving).
            show_video: Whether to display the live video.
            save_video: Whether to write an output video.
            fps: Output video frame rate.
        """
        # Lazily created once the first annotated frame defines the size
        video_writer = None
        
        # Main processing loop over both capture streams
        while self.cap_L.isOpened() and self.cap_R.isOpened():
            self.timer.global_start()

            # Read one frame pair
            batch_frames = self.read_frames()
            if batch_frames is None:
                break
            
            # Process and visualise.
            # BUGFIX: process_frame returns a 3-tuple
            # (frame, params, center_line); the original unpacked only two
            # values, raising ValueError on every frame.
            annotated_frame, _, _ = self.process_frame(batch_frames)
            
            # Skip frames that could not be processed
            if annotated_frame is None:
                continue
            
            # Initialise the video writer on the first usable frame
            if save_video and video_writer is None and output_video_path:
                height, width = annotated_frame.shape[:2]
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # or 'XVID'
                video_writer = cv2.VideoWriter(output_video_path, fourcc, fps, (width, height))
            
            # Write the frame
            if save_video and video_writer is not None:
                video_writer.write(annotated_frame)
            
            # Display; 'q' quits, 'p' toggles pause
            if show_video:
                cv2.imshow("Annotated Frame", annotated_frame)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                elif key == ord('p'):  # pause/resume
                    while True:
                        key2 = cv2.waitKey(0)
                        if key2 == ord('p'):
                            break
                        elif key2 == ord('q'):
                            break
                    if key2 == ord('q'):
                        break
        
        # Release all resources
        if self.cap_L.isOpened():
            self.cap_L.release()
        if self.cap_R.isOpened():
            self.cap_R.release()
        if video_writer is not None:
            video_writer.release()
        if show_video:
            cv2.destroyAllWindows()
        
        print("视频处理完成")
    
    def view_video(self) -> None:
        """Stream frames through the detector and display or save the result."""
        # Main processing loop over both capture streams
        while self.cap_L.isOpened() and self.cap_R.isOpened():
            self.timer.global_start()

            # Read one frame pair; stop when the stream is exhausted
            # (robustness fix: the original looped forever on failed reads).
            batch_frames = self.read_frames()
            if batch_frames is None:
                break

            # Process and visualise.
            # BUGFIX: process_frame returns a 3-tuple
            # (frame, params, center_line); the original unpacked only two
            # values, raising ValueError on every frame.
            annotated_frame, return_param, _ = self.process_frame(batch_frames)
            # print(return_param)

            # Show on screen when output_frame is False, otherwise save to file
            if annotated_frame is not None:
                if self.output_frame is False:
                    cv2.imshow("Annotated Frame", annotated_frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    cv2.imwrite(self.output_frame, annotated_frame)
      
    def process_img(self, 
                   left_frame: np.ndarray, 
                   right_frame: np.ndarray
                   ) -> Tuple[Optional[np.ndarray], Optional[Dict], Optional[List[Tuple[float, float, float]]]]:
        """Undistort one stereo image pair and run the full detection pipeline.

        Args:
            left_frame: Left-camera image.
            right_frame: Right-camera image.

        Returns:
            Tuple of:
            - annotated image
            - processing result parameters
            - centre-line points in metre coordinates
            All three are None when either input image is missing.
        """
        # Start the frame timers
        self.timer.global_start()
        self.timer.start()

        # Guard clause: bail out early when either image is missing
        if left_frame is None or right_frame is None:
            print("摄像头未打开，无法处理视频。")
            return None, None, None

        # Undistort both images before any further processing
        undistorted = [
            self.left_camera.high_precision_undistort(left_frame),
            self.right_camera.high_precision_undistort(right_frame),
        ]

        # Run the detection pipeline on the batch and return its result
        return self.process_frame(undistorted)