import numpy as np
import cv2
from typing import List, Dict, Tuple, Optional, Set
from loguru import logger
import os
from collections import deque
import heapq
import math
from scipy.ndimage import distance_transform_edt
import random

class CtlPoint:
    """A point on a vessel centerline; node of the centerline tree structure."""

    def __init__(self, point: Tuple[int, int], index: Optional[int] = None):
        """
        Initialize a centerline point.

        Args:
            point: (x, y) coordinate of the point.
            index: position of the point in the global index, if known.
                (Fix: was annotated `int` although `None` is the default.)
        """
        self.point = point                  # (x, y) coordinate
        self.index = index                  # global index, or None
        self.children: List['CtlPoint'] = []   # child nodes in the tree
        self.parent: Optional['CtlPoint'] = None  # parent node, None for the root
        self.is_extreme = False             # endpoint or bifurcation
        self.is_endpoint = False            # exactly one skeleton neighbour
        self.is_bifurcation = False         # three or more skeleton neighbours
        self.neighbor_count = 0             # number of 8-connected neighbours
        self.distance_from_start = 0        # distance from the start point
        self.vessel_radius = 0.0            # local vessel radius

    def add_child(self, child_point: 'CtlPoint') -> None:
        """Attach *child_point* as a child and set its parent to self."""
        self.children.append(child_point)
        child_point.parent = self

    def get_path_to_root(self) -> List['CtlPoint']:
        """Return the chain of points from the tree root down to this point."""
        path = [self]
        current = self.parent
        while current is not None:
            path.append(current)
            current = current.parent
        return path[::-1]  # reversed so the root comes first

    def __repr__(self):
        return f"CtlPoint({self.point}, children={len(self.children)}, extreme={self.is_extreme})"


class CenterlineManager:
    """血管中心线管理器，用于提取和管理血管分割结果的中心线"""
    
    def __init__(self):
        """初始化中心线管理器"""
        self.centerlines = []  # 存储所有中心线段
        self.extreme_points = []  # 极端点列表
        self.start_point = None  # 起始点
        self.point_tree = None  # 点树结构
        self.bifurcation_merge_distance = 3.0  # 分叉点融合距离阈值
        self.skeleton = None  # 骨架图，用于路径查找
        self.extreme = []  # 端点列表
        self.bifurcations = []  # 分叉点列表（包括融合后的）
        
        # 血管流动规则过滤参数
        self.vessel_filter_config = {
            'min_score_threshold': 0.3,  # 最小评分阈值
            'weights': {
                'smoothness': 0.3,      # 平滑度权重
                'direction': 0.2,      # 方向一致性权重
                'curvature': 0.2,      # 曲率权重
                'bifurcation': 0.3      # 分叉角度权重
            },
            'optimal_curvature_ratio': 1.2,  # 最优曲率比
            'max_angle_deviation': np.pi/3,   # 最大角度偏差（60度）
            'enable_vessel_filtering': True,   # 是否启用血管规则过滤
            'sampling_config': {
                'enable_sampling': True,      # 是否启用路径采样
                'sampling_interval': 10,       # 采样间隔（像素）
                'min_segment_length': 2,      # 最小段长度
                'sampling_method': 'adaptive'  # 采样方法：'uniform', 'adaptive', 'keypoints'
            }
        }
    
    def set_bifurcation_merge_distance(self, distance: float):
        """
        设置分叉点融合距离阈值
        
        Args:
            distance: 距离阈值，相距小于此值的分叉点将被融合
        """
        self.bifurcation_merge_distance = distance
        logger.info(f"分叉点融合距离阈值设置为: {distance}")
        
    def find_extreme_point(self, skeleton: np.ndarray) -> Tuple[List[CtlPoint], List[CtlPoint]]:
        """
        找到骨架图中的极端点（端点和分叉点）
        
        Args:
            skeleton: 二值化骨架图
            
        Returns:
            (极端点列表, 分叉点列表)
        """
        logger.info("开始查找极端点")
        extreme_points = []
        
        # 定义8连通的邻域
        neighbors = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
        
        # 找到所有前景点
        foreground_points = np.where(skeleton == 1)
        
        for i in range(len(foreground_points[0])):
            y, x = foreground_points[0][i], foreground_points[1][i]
            
            # 计算邻居数量
            neighbor_count = 0
            for dy, dx in neighbors:
                ny, nx = y + dy, x + dx
                if (0 <= ny < skeleton.shape[0] and 
                    0 <= nx < skeleton.shape[1] and 
                    skeleton[ny, nx] == 1):
                    neighbor_count += 1
            
            # 端点（邻居数为1）或分叉点（邻居数≥3）
            if neighbor_count == 1 or neighbor_count >= 3:
                point = CtlPoint((int(x), int(y)))
                point.neighbor_count = neighbor_count
                point.is_extreme = True
                point.is_endpoint = (neighbor_count == 1)
                point.is_bifurcation = (neighbor_count >= 3)
                extreme_points.append(point)
        
        # 融合相近的分叉点
        extreme, bifurcations = self._merge_nearby_bifurcations(extreme_points, self.bifurcation_merge_distance)
        
        self.extreme = extreme
        self.bifurcations = bifurcations
        logger.info(f"找到 {len(extreme_points)} 个极端点（融合后）")
        return extreme, bifurcations
    
    def find_start_point(self, extreme: List[CtlPoint], skeleton: np.ndarray, main_seg: List[np.ndarray]=None) -> Tuple[int, int]:
        """
        从极端点中选择起始点，优先选择位于左上角的点
        
        Args:
            extreme_dict: 极端点字典，key为坐标，value为出现次数
            
        Returns:
            起始点坐标
        """
        logger.info("开始选择起始点")
        
        # 优先选择只出现一次的点（端点）

        if main_seg is not None:
            start_seg = main_seg[0] * main_seg[1] * skeleton
            point_list = np.where(start_seg == 1)
            start_index = np.argmin(point_list[0]+point_list[1], axis=0)
            start_point = (point_list[1][start_index], point_list[0][start_index])

        else:
            endpoints = [point for point in extreme if point.is_endpoint]
            start_point = min(endpoints, key=lambda p: p.point[0] + p.point[1])
            start_point = start_point.point

        self.start_point = start_point
        logger.info(f"选择起始点: {start_point}")
        return start_point
    
    def _cluster_bifurcations(self, bifurcations: List[CtlPoint], distance_threshold: float) -> List[CtlPoint]:
        """
        使用连通组件聚类算法对分叉点进行聚类融合
        
        Args:
            bifurcations: 分叉点列表
            distance_threshold: 距离阈值
            
        Returns:
            融合后的分叉点列表
        """
        if not bifurcations:
            return []
        
        n = len(bifurcations)
        if n == 1:
            return bifurcations
        
        # 构建邻接矩阵
        adjacency = np.zeros((n, n), dtype=bool)
        for i in range(n):
            for j in range(i + 1, n):
                dist = np.sqrt((bifurcations[i].point[0] - bifurcations[j].point[0])**2 + 
                              (bifurcations[i].point[1] - bifurcations[j].point[1])**2)
                if dist <= distance_threshold:
                    adjacency[i, j] = True
                    adjacency[j, i] = True
        
        # 使用DFS查找连通组件
        visited = [False] * n
        merged_bifurcations = []
        
        def dfs(node, component):
            visited[node] = True
            component.append(node)
            for neighbor in range(n):
                if adjacency[node, neighbor] and not visited[neighbor]:
                    dfs(neighbor, component)
        
        for i in range(n):
            if not visited[i]:
                component = []
                dfs(i, component)
                
                # 对每个连通组件进行融合
                if len(component) > 1:
                    # 计算加权质心（根据邻居数加权）
                    total_weight = sum(bifurcations[idx].neighbor_count for idx in component)
                    if total_weight > 0:
                        weighted_x = sum(bifurcations[idx].point[0] * bifurcations[idx].neighbor_count 
                                       for idx in component) / total_weight
                        weighted_y = sum(bifurcations[idx].point[1] * bifurcations[idx].neighbor_count 
                                       for idx in component) / total_weight
                    else:
                        # 如果权重为0，使用简单平均
                        weighted_x = sum(bifurcations[idx].point[0] for idx in component) / len(component)
                        weighted_y = sum(bifurcations[idx].point[1] for idx in component) / len(component)
                    
                    center_x = int(round(weighted_x))
                    center_y = int(round(weighted_y))
                    
                    # 创建融合后的分叉点
                    merged_point = CtlPoint((center_x, center_y))
                    merged_point.is_extreme = True
                    merged_point.is_bifurcation = True
                    merged_point.is_endpoint = False
                    merged_point.neighbor_count = max(bifurcations[idx].neighbor_count for idx in component)
                    
                    merged_bifurcations.append(merged_point)
                    logger.debug(f"融合了 {len(component)} 个分叉点到位置 {(center_x, center_y)}")
                else:
                    # 单独的分叉点，直接保留
                    merged_bifurcations.append(bifurcations[component[0]])
        
        return merged_bifurcations
    
    def _merge_nearby_bifurcations(self, extreme_points: List[CtlPoint], distance_threshold: float = 3.0) -> List[CtlPoint]:
        """
        融合相近的分叉点
        
        Args:
            extreme_points: 原始极端点列表
            distance_threshold: 距离阈值，小于此距离的分叉点将被融合
            
        Returns:
            融合后的极端点列表
        """
        logger.debug(f"开始融合相近的分叉点，距离阈值: {distance_threshold}")
        
        # 分离端点和分叉点
        endpoints = [p for p in extreme_points if p.is_endpoint]
        bifurcations = [p for p in extreme_points if p.is_bifurcation]
        
        if not bifurcations:
            logger.warning("没有分叉点需要融合")
            return extreme_points, bifurcations
        
        logger.debug(f"发现 {len(bifurcations)} 个分叉点需要融合")
        
        # 使用改进的聚类算法对分叉点进行聚类
        merged_bifurcations = self._cluster_bifurcations(bifurcations, distance_threshold)
        
        # 合并端点和融合后的分叉点
        logger.info(f"融合完成: {len(bifurcations)} -> {len(merged_bifurcations)} 个分叉点")
        return endpoints, merged_bifurcations
 
    def visulize_centerline(self, save_path, index: int):
        """
        可视化中心线
        """
        os.makedirs(save_path, exist_ok=True)
        
        fused_img = np.zeros((512, 512, 3)).astype(np.uint8)
        
        # 在融合后的图像上绘制中心线
        for centerline in self.centerlines:
            for point in centerline:
                cv2.circle(fused_img, point.point, 2, (0, 0, 255), -1)
        
        # 在融合后的图像上绘制极端点
        for point in self.extreme:
            cv2.circle(fused_img, point.point, 2, (0, 0, 255), -1)
        
        # 在融合后的图像上绘制分叉点
        for point in self.bifurcations:
            cv2.circle(fused_img, point.point, 2, (0, 255, 0), -1)
        
        # 在融合后的图像上绘制起始点
        cv2.circle(fused_img, self.start_point, 2, (255, 255, 255), -1)
        
        return fused_img


    def build_all_paths_to_targets(self, skeleton: np.ndarray, 
                                  start_point: CtlPoint, 
                                  target_points: List[CtlPoint]) -> Dict[Tuple[int, int], List[List[Tuple[int, int]]]]:
        """
        构建从起始点到所有目标点的所有可能路径
        
        Args:
            skeleton: 二值化骨架图
            start_point: 起始点
            target_points: 目标点列表（包括端点和分叉点）
            
        Returns:
            字典，key为目标点坐标，value为从起始点到该目标点的所有路径列表
        """
        logger.info(f"开始构建从起始点 {start_point} 到 {len(target_points)} 个目标点的所有路径")
        
        self.skeleton = skeleton
        all_paths = {}
        # 
        
        # 为每个目标点找到所有可能的路径
        for target_point in target_points:
            
            paths_to_target = self._find_all_skeleton_paths(
                skeleton, 
                start_point, 
                target_point.point
            )
            
            if paths_to_target:
                all_paths[target_point.point] = paths_to_target
                logger.debug(f"成功找到到点 {target_point.point} 的 {len(paths_to_target)} 条路径")
            else:
                logger.debug(f"无法找到到点 {target_point.point} 的路径")
                all_paths[target_point.point] = []
        
        total_paths = sum(len(paths) for paths in all_paths.values())
        logger.info(f"路径构建完成，总共找到 {total_paths} 条路径")
        # 筛选3条路程最短的路径
        for target_point, paths in all_paths.items():
            if len(paths) > 3:
                all_paths[target_point] = paths[:]
            else:
                all_paths[target_point] = paths
        self.all_paths = all_paths
        return all_paths
    
    def decompose_paths_to_segments(self, skeleton: np.ndarray = None) -> Dict[str, List[List[Tuple[int, int]]]]:
        """
        Decompose the complete paths into segments between pairs of nodes.

        Requires build_all_paths_to_targets to have been called first so that
        self.all_paths is populated.

        Args:
            skeleton: binary skeleton image (optional; falls back to the
                skeleton stored by build_all_paths_to_targets)

        Returns:
            Dict with a single key 'all_segments' holding the deduplicated
            list of path segments, each segment a list of (x, y) points.
            An empty dict is returned when paths or the skeleton are missing.
        """
        if not hasattr(self, 'all_paths') or not self.all_paths:
            logger.warning("还未构建路径，请先调用 build_all_paths_to_targets")
            return {}
        
        if skeleton is None and hasattr(self, 'skeleton'):
            skeleton = self.skeleton
        elif skeleton is None:
            logger.error("需要提供骨架图进行路径分解")
            return {}
        
        logger.info("开始分解路径为节点间的路径段")
        
        # collect every node coordinate (endpoints and bifurcations)
        all_nodes = set()
        if hasattr(self, 'extreme') and self.extreme:
            all_nodes.update([node.point for node in self.extreme])
        if hasattr(self, 'bifurcations') and self.bifurcations:
            all_nodes.update([node.point for node in self.bifurcations])
        
        decomposed_segments = {
            'all_segments': []
        }
        
        # decompose every complete path
        for target_point, complete_paths in self.all_paths.items():
            for complete_path in complete_paths:
                if len(complete_path) < 2:
                    continue
                
                # locate the intermediate nodes along this path
                intermediate_nodes = self._find_intermediate_nodes_in_path(complete_path, all_nodes)
                
                # split the path at those nodes
                path_segments = self._split_path_by_nodes(complete_path, intermediate_nodes)
                decomposed_segments['all_segments'].extend(path_segments)
        
        # deduplicate the collected segments
        decomposed_segments['all_segments'] = self._remove_duplicate_segments(decomposed_segments['all_segments'])
        # summary statistics
        total_segments = sum(len(segments) for segments in decomposed_segments.values())
        logger.info(f"路径分解完成，共生成 {total_segments} 个路径段：")
        
        self.decomposed_segments = decomposed_segments
        return decomposed_segments
    
    def _is_in_8_connected_neighborhood(self, point1: Tuple[int, int], point2: Tuple[int, int]) -> bool:
        """
        检查两个点是否在八连通邻域内
        
        Args:
            point1: 第一个点坐标 (x, y)
            point2: 第二个点坐标 (x, y)
            
        Returns:
            如果两点在八连通邻域内则返回True，否则返回False
        """
        x1, y1 = point1
        x2, y2 = point2
        
        # 计算坐标差值
        dx = abs(x1 - x2)
        dy = abs(y1 - y2)
        
        # 八连通邻域：两点的x和y坐标差值都不超过1
        return dx <= 1 and dy <= 1 and (dx != 0 or dy != 0)  # 排除自身
    
    def _find_closest_path_point_to_node(self, path: List[Tuple[int, int]], 
                                       node: Tuple[int, int]) -> Tuple[int, int]:
        """
        找到路径上距离给定节点最近的点
        
        Args:
            path: 路径点列表
            node: 目标节点坐标
            
        Returns:
            距离节点最近的路径点索引和坐标
        """
        min_distance = float('inf')
        closest_idx = -1
        
        for i, point in enumerate(path):
            # 计算欧几里得距离
            distance = math.sqrt((point[0] - node[0])**2 + (point[1] - node[1])**2)
            if distance < min_distance:
                min_distance = distance
                closest_idx = i
                
        return closest_idx, path[closest_idx] if closest_idx >= 0 else None

    def _find_intermediate_nodes_in_path(self, path: List[Tuple[int, int]], 
                                       all_nodes: Set[Tuple[int, int]]) -> List[Tuple[int, int]]:
        """
        在路径中查找中间节点（分叉点和端点），处理八连通中路径绕过分叉点的情况
        
        Args:
            path: 路径点列表
            all_nodes: 所有节点坐标集合
            
        Returns:
            路径中的中间节点列表，按在路径中的顺序排列
        """
        intermediate_nodes = []
        path_nodes_with_indices = []  # 存储(index, node_coord)元组
        
        # 首先检查路径中直接包含的节点
        for i, point in enumerate(path):
            if point in all_nodes and i > 0:  # 跳过起始点
                path_nodes_with_indices.append((i, point))
        
        # 检查路径是否经过分叉点的八连通邻域但没有直接经过分叉点
        processed_nodes = set([node for _, node in path_nodes_with_indices])
        
        for node in all_nodes:
            if node in processed_nodes:
                continue  # 已经直接经过了，跳过
                
            # 检查路径是否经过该节点的八连通邻域
            path_passes_near_node = False
            closest_path_idx = -1
            min_distance = float('inf')
            
            for i, path_point in enumerate(path):
                if i == 0:  # 跳过起始点
                    continue
                    
                if self._is_in_8_connected_neighborhood(path_point, node):
                    # 计算距离，选择最近的路径点
                    distance = math.sqrt((path_point[0] - node[0])**2 + (path_point[1] - node[1])**2)
                    if distance < min_distance:
                        min_distance = distance
                        closest_path_idx = i
                        path_passes_near_node = True
            
            # 如果路径经过该节点的邻域，将节点添加到中间节点列表
            if path_passes_near_node and closest_path_idx > 0:
                path_nodes_with_indices.append((closest_path_idx, node))
                logger.debug(f"检测到路径在索引{closest_path_idx}处经过分叉点{node}的八连通邻域，距离:{min_distance:.2f}")
        
        # 按路径索引排序，确保节点按在路径中的出现顺序排列
        path_nodes_with_indices.sort(key=lambda x: x[0])
        
        # 提取节点坐标
        intermediate_nodes = [node for _, node in path_nodes_with_indices]
        
        logger.debug(f"路径中找到{len(intermediate_nodes)}个中间节点: {intermediate_nodes}")
        return intermediate_nodes
    
    def _split_path_by_nodes(self, path: List[Tuple[int, int]], 
                           nodes: List[Tuple[int, int]]) -> List[List[Tuple[int, int]]]:
        """
        Split a path into segments at the given intermediate nodes, handling
        bifurcations that are only adjacent to (not exactly on) the path.

        Args:
            path: complete path as a list of (x, y) points
            nodes: split points (may include bifurcations not directly on the path)

        Returns:
            list of path segments; [path] itself when there are no nodes
        """
        if not nodes:
            return [path]
        
        segments = []
        current_start = 0
        
        # map each node to the path index where the split should happen
        node_split_info = []  # stores (split_index, node, is_direct)
        
        for node in nodes:
            if node in path:
                # node lies directly on the path
                try:
                    node_index = path.index(node)
                    if node_index > current_start:
                        node_split_info.append((node_index, node, True))
                        logger.debug(f"分叉点{node}直接在路径索引{node_index}处")
                except ValueError:
                    continue
            else:
                # node is off-path: use the closest adjacent path point as the split
                closest_idx = -1
                min_distance = float('inf')
                
                for i, path_point in enumerate(path):
                    if i <= current_start:  # skip the already-processed prefix
                        continue
                        
                    if self._is_in_8_connected_neighborhood(path_point, node):
                        distance = math.sqrt((path_point[0] - node[0])**2 + (path_point[1] - node[1])**2)
                        if distance < min_distance:
                            min_distance = distance
                            closest_idx = i
                
                if closest_idx > current_start:
                    node_split_info.append((closest_idx, node, False))
                    logger.debug(f"分叉点{node}在路径索引{closest_idx}处的邻域内，距离:{min_distance:.2f}")
        
        # sort by split index so the segments come out in path order
        node_split_info.sort(key=lambda x: x[0])
        
        # build the segments from the split information
        for split_idx, node, is_direct in node_split_info:
            if split_idx > current_start:
                if is_direct:
                    # node is on the path: include it in the segment
                    segment = path[current_start:split_idx + 1]
                else:
                    # node is off-path: attach it to the end of the segment
                    path_segment = path[current_start:split_idx + 1]
                    # append the bifurcation unless it is already last
                    segment = path_segment + [node] if path_segment[-1] != node else path_segment
                
                if len(segment) >= 2:
                    segments.append(segment)
                    logger.debug(f"创建路径段: 长度{len(segment)}, 起点{segment[0]}, 终点{segment[-1]}")
                
                # advance the start index for the next segment
                if is_direct:
                    current_start = split_idx
                else:
                    # NOTE(review): the original comment said the next segment
                    # should start at the bifurcation, but both branches advance
                    # to split_idx on the path — confirm this is intended
                    current_start = split_idx
        
        # append the trailing segment, if any remains
        if current_start < len(path) - 1:
            final_segment = path[current_start:]
            if len(final_segment) >= 2:
                segments.append(final_segment)
                logger.debug(f"添加最后路径段: 长度{len(final_segment)}")
        
        logger.debug(f"路径分割完成，共创建{len(segments)}个段")
        return segments
    
    def _is_nearest_node_to_start(self, node_coord: Tuple[int, int], 
                                 all_nodes: Set[Tuple[int, int]]) -> bool:
        """
        判断给定节点是否是距离起始点最近的节点
        
        Args:
            node_coord: 节点坐标
            all_nodes: 所有节点坐标集合
            
        Returns:
            是否为最近节点
        """
        if not hasattr(self, 'start_point') or self.start_point is None:
            return False
        
        # 计算到起始点的距离
        node_distance = np.linalg.norm(np.array(node_coord) - np.array(self.start_point))
        
        # 检查是否是最短距离
        min_distance = float('inf')
        for other_node in all_nodes:
            if other_node != self.start_point:  # 排除起始点本身
                distance = np.linalg.norm(np.array(other_node) - np.array(self.start_point))
                min_distance = min(min_distance, distance)
        
        # 允许小的误差（像素级别）
        return abs(node_distance - min_distance) < 2.0
    
    def _remove_duplicate_segments(self, segments: List[List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
        """
        去除重复的路径段
        
        Args:
            segments: 路径段列表
            
        Returns:
            去重后的路径段列表
        """
        unique_segments = []
        seen_segments = set()
        
        for segment in segments:
            if len(segment) < 2:
                continue
                
            # 创建段的标识符（起始点和结束点，以及路径长度）
            segment_id = (segment[0], segment[-1], len(segment))
            
            # 检查是否已存在相似的段
            is_duplicate = False
            for existing_id in seen_segments:
                if (segment_id[0] == existing_id[0] and 
                    segment_id[1] == existing_id[1] and
                    abs(segment_id[2] - existing_id[2]) <= 2):  # 允许长度差异
                    is_duplicate = True
                    break
            
            if not is_duplicate:
                unique_segments.append(segment)
                seen_segments.add(segment_id)
        
        return unique_segments
    
    def get_nearest_node_paths(self) -> List[List[Tuple[int, int]]]:
        """
        获取从起始点到最近节点的路径
        
        Returns:
            从起始点到最近节点的路径列表
        """
        if not hasattr(self, 'decomposed_segments'):
            logger.warning("还未分解路径，请先调用 decompose_paths_to_segments")
            return []
        
        return self.decomposed_segments.get('start_to_nearest', [])
    
    def get_node_to_node_paths(self) -> List[List[Tuple[int, int]]]:
        """
        获取节点到节点的路径段
        
        Returns:
            节点到节点的路径段列表
        """
        if not hasattr(self, 'decomposed_segments'):
            logger.warning("还未分解路径，请先调用 decompose_paths_to_segments")
            return []
        
        return self.decomposed_segments.get('node_to_node', [])
    
    def get_node_to_endpoint_paths(self) -> List[List[Tuple[int, int]]]:
        """
        获取节点到端点的路径段
        
        Returns:
            节点到端点的路径段列表
        """
        if not hasattr(self, 'decomposed_segments'):
            logger.warning("还未分解路径，请先调用 decompose_paths_to_segments")
            return []
        
        return self.decomposed_segments.get('node_to_endpoint', [])
    
    def build_adjacency_graph(self) -> Dict[Tuple[int, int], List[Tuple[int, int]]]:
        """
        基于分解的路径段构建邻接图，表示节点间的连接关系
        
        Returns:
            邻接图字典，key为节点坐标，value为相邻节点坐标列表
        """
        if not hasattr(self, 'decomposed_segments'):
            logger.warning("还未分解路径，请先调用 decompose_paths_to_segments")
            return {}
        
        adjacency_graph = {}
        
        # 从所有路径段中构建邻接关系
        all_segments = []
        all_segments.extend(self.decomposed_segments.get('start_to_nearest', []))
        all_segments.extend(self.decomposed_segments.get('node_to_node', []))
        all_segments.extend(self.decomposed_segments.get('node_to_endpoint', []))
        
        for segment in all_segments:
            if len(segment) < 2:
                continue
            
            start_node = segment[0]
            end_node = segment[-1]
            
            # 添加双向连接
            if start_node not in adjacency_graph:
                adjacency_graph[start_node] = []
            if end_node not in adjacency_graph[start_node]:
                adjacency_graph[start_node].append(end_node)
            
            if end_node not in adjacency_graph:
                adjacency_graph[end_node] = []
            if start_node not in adjacency_graph[end_node]:
                adjacency_graph[end_node].append(start_node)
        
        logger.info(f"构建邻接图完成，包含 {len(adjacency_graph)} 个节点")
        return adjacency_graph
    
    def print_path_decomposition_summary(self):
        """
        Print a human-readable summary of the path decomposition and the
        node adjacency graph.

        NOTE(review): the keys 'start_to_nearest', 'node_to_node' and
        'node_to_endpoint' are not populated by decompose_paths_to_segments
        as seen in this file (it only fills 'all_segments'), so the counts
        below may all print as 0 — confirm against the rest of the project.
        """
        if not hasattr(self, 'decomposed_segments'):
            print("还未分解路径，请先调用 decompose_paths_to_segments")
            return
        
        print("\n=== 路径分解摘要 ===")
        
        segments = self.decomposed_segments
        
        # per-category segment lists
        start_to_nearest = segments.get('start_to_nearest', [])
        node_to_node = segments.get('node_to_node', [])
        node_to_endpoint = segments.get('node_to_endpoint', [])
        
        total_segments = len(start_to_nearest) + len(node_to_node) + len(node_to_endpoint)
        
        print(f"总路径段数: {total_segments}")
        print(f"起始点到最近节点路径段: {len(start_to_nearest)} 条")
        print(f"节点到节点路径段: {len(node_to_node)} 条")
        print(f"节点到端点路径段: {len(node_to_endpoint)} 条")
        
        # detailed listing (truncated to the first 5 entries per category)
        if start_to_nearest:
            print(f"\n起始点到最近节点的路径:")
            for i, segment in enumerate(start_to_nearest):
                print(f"  路径 {i+1}: {segment[0]} -> {segment[-1]} (长度: {len(segment)} 点)")
        
        if node_to_node:
            print(f"\n节点间的路径段 (前5条):")
            for i, segment in enumerate(node_to_node[:5]):
                print(f"  路径 {i+1}: {segment[0]} -> {segment[-1]} (长度: {len(segment)} 点)")
            if len(node_to_node) > 5:
                print(f"  ... 还有 {len(node_to_node) - 5} 条路径段")
        
        if node_to_endpoint:
            print(f"\n节点到端点的路径段 (前5条):")
            for i, segment in enumerate(node_to_endpoint[:5]):
                print(f"  路径 {i+1}: {segment[0]} -> {segment[-1]} (长度: {len(segment)} 点)")
            if len(node_to_endpoint) > 5:
                print(f"  ... 还有 {len(node_to_endpoint) - 5} 条路径段")
        
        # adjacency-graph overview
        adjacency = self.build_adjacency_graph()
        if adjacency:
            print(f"\n=== 节点连接关系 ===")
            print(f"图中节点总数: {len(adjacency)}")
            
            # node degrees, highest first
            connection_counts = [(node, len(connections)) for node, connections in adjacency.items()]
            connection_counts.sort(key=lambda x: x[1], reverse=True)
            
            print("节点连接度排序 (前10个):")
            for node, count in connection_counts[:10]:
                print(f"  节点 {node}: {count} 个连接")
    
    def example_usage_path_decomposition(self, skeleton: np.ndarray, 
                                       start_point: CtlPoint, 
                                       target_points: List[CtlPoint]) -> None:
        """
        路径分解功能的使用示例
        
        Args:
            skeleton: 二值化骨架图
            start_point: 起始点
            target_points: 目标点列表
        """
        logger.info("\n=== 路径分解功能使用示例 ===")
        
        # 步骤1: 首先构建完整路径
        logger.info("1. 构建从起始点到所有目标点的完整路径...")
        all_paths = self.build_all_paths_to_targets(skeleton, start_point, target_points)
        logger.info(f"   构建完成，共找到 {len(all_paths)} 个目标点的路径")
        
        # 步骤2: 分解路径为段
        logger.info("\n2. 分解路径为节点间的路径段...")
        decomposed_segments = self.decompose_paths_to_segments(skeleton)
        
        # 步骤3: 去除重复的路径段
        logger.info("\n3. 去除重复的路径段...")
        decomposed_segments = self.remove_duplicate_segments_last(decomposed_segments)
        
        ctl_segments = []
        ctl_segments.extend(decomposed_segments['all_segments'])
        
        return ctl_segments

    def remove_duplicate_segments_last(self, decomposed_segments: Dict[str, List[List[Tuple[int, int]]]]) -> Dict[str, List[List[Tuple[int, int]]]]:
        """
        Remove duplicate path segments based on point-set overlap.

        A segment counts as a duplicate when its point-set similarity to an
        already-kept segment exceeds 0.7 in either the forward or reversed
        direction. (Fix: the old docstring claimed 0.8, while the code uses
        0.7.) The first segment encountered is kept, and kept segments are
        oriented so the end closer to self.start_point comes first.

        Args:
            decomposed_segments: dict whose 'all_segments' key holds the
                segment list

        Returns:
            the same dict with 'all_segments' deduplicated in place
        """
        if 'all_segments' not in decomposed_segments or not decomposed_segments['all_segments']:
            logger.warning("输入的路径段为空，跳过去重处理")
            return decomposed_segments
            
        # accepted segments, stored as point sets for fast overlap checks
        processed_segments = []  # forward point-sets of kept segments
        unique_segments = []     # kept segments in their original list form
        
        similarity_threshold = 0.7  # overlap ratio above which two segments are duplicates
        original_count = len(decomposed_segments['all_segments'])
        
        logger.info(f"开始去除重复路径段，原始路径段数量: {original_count}")
        
        for i, segment in enumerate(decomposed_segments['all_segments']):
            # skip empty segments
            if not segment:
                logger.warning(f"跳过空路径段: 索引 {i}")
                continue
                
            # NOTE(review): as sets these two are identical; kept verbatim
            current_segment_set = set(segment)
            current_reversed_set = set(segment[::-1])
            is_duplicate = False
            
            # compare against every segment kept so far
            for existing_segment_set in processed_segments:
                # forward similarity: current segment vs stored segment
                forward_similarity = len(current_segment_set & existing_segment_set) / max(len(current_segment_set), len(existing_segment_set))
                
                # reverse similarity: reversed current segment vs stored segment
                reverse_similarity = len(current_reversed_set & existing_segment_set) / max(len(current_reversed_set), len(existing_segment_set))
                
                # a match in either direction marks the segment as duplicate
                if forward_similarity > similarity_threshold or reverse_similarity > similarity_threshold:
                    is_duplicate = True
                    logger.debug(f"发现重复路径段: 索引 {i}, 正向相似度: {forward_similarity:.3f}, 反向相似度: {reverse_similarity:.3f}")
                    break
            
            # keep the segment, oriented so its start lies nearer self.start_point
            if not is_duplicate:
                processed_segments.append(current_segment_set)  # store the forward set only
                dis_start = np.linalg.norm(np.array(segment[0]) - np.array(self.start_point))
                dis_end = np.linalg.norm(np.array(segment[-1]) - np.array(self.start_point))
                if dis_start <= dis_end:
                    unique_segments.append(segment)
                else:
                    unique_segments.append(segment[::-1])
                logger.debug(f"保留路径段: 索引 {i}, 长度: {len(segment)}")
        
        # write the deduplicated list back
        decomposed_segments['all_segments'] = unique_segments
        removed_count = original_count - len(unique_segments)
        
        logger.info(f"去除重复路径段完成！原始数量: {original_count}, 去重后数量: {len(unique_segments)}, 移除数量: {removed_count}")
        
        # warn when more than half of the segments were dropped
        if removed_count > original_count * 0.5:
            logger.warning(f"移除了超过50%的路径段 ({removed_count}/{original_count})，请检查相似度阈值设置是否合适")
        
        return decomposed_segments
    
    def find_path_between_nodes(self, start_node: Tuple[int, int], 
                               end_node: Tuple[int, int]) -> List[Tuple[int, int]]:
        """Find the stored path segment connecting two specific nodes.

        Args:
            start_node: coordinate (x, y) of the first node
            end_node: coordinate (x, y) of the second node

        Returns:
            The connecting path oriented from start_node to end_node, or an
            empty list if no stored segment links the two nodes.
        """
        # Decomposition must have been run first.
        if not hasattr(self, 'decomposed_segments'):
            logger.warning("还未分解路径，请先调用 decompose_paths_to_segments")
            return []

        # Pool every stored segment category into one candidate list.
        candidate_segments = [
            seg
            for key in ('start_to_nearest', 'node_to_node', 'node_to_endpoint')
            for seg in self.decomposed_segments.get(key, [])
        ]

        # Direct match: a segment whose two endpoints are exactly the
        # requested nodes (in either orientation).
        for seg in candidate_segments:
            if len(seg) < 2:
                continue
            forward = seg[0] == start_node and seg[-1] == end_node
            backward = seg[0] == end_node and seg[-1] == start_node
            if forward or backward:
                # Flip the segment when it is stored in the opposite direction.
                return seg[::-1] if seg[0] == end_node else seg

        # No direct hit: consult the adjacency graph for a diagnostic hint.
        adjacency = self.build_adjacency_graph()
        if start_node in adjacency and end_node in adjacency[start_node]:
            logger.warning(f"节点 {start_node} 和 {end_node} 在邻接图中相邻，但未找到直接路径段")

        return []
    
    def _find_all_skeleton_paths(self, skeleton: np.ndarray, 
                                start: Tuple[int, int], 
                                target: Tuple[int, int],
                                max_paths: int = 10) -> List[List[Tuple[int, int]]]:
        """
        Enumerate paths between two points on a skeleton image with an
        iterative (stack-based) DFS, then de-duplicate and filter them with
        the vessel-flow rules.

        Fixes over the previous version: the neighbour sort key was a
        constant (the "heuristic" was a no-op contradicting its comment) —
        it now uses the real Manhattan distance to the target; the unused
        ``shortest_found`` bookkeeping was removed.

        Args:
            skeleton: binary skeleton image (1 = skeleton pixel, 0 = background)
            start: start coordinate (x, y); snapped to the nearest skeleton
                pixel when it lies off the skeleton
            target: target coordinate (x, y); snapped likewise
            max_paths: upper bound on collected paths, keeps the search tractable

        Returns:
            List of paths (each a list of (x, y) points) surviving the
            vessel-rule filter, or an empty list when none exist.
        """
        start_y, start_x = start[1], start[0]
        target_y, target_x = target[1], target[0]

        # Snap endpoints onto the skeleton if they are off it.
        if skeleton[start_y, start_x] == 0:
            start = self._find_nearest_skeleton_point(skeleton, start)
            if start is None:
                logger.warning(f"起点 {(start_x, start_y)} 附近没有找到骨架点")
                return []

        if skeleton[target_y, target_x] == 0:
            target = self._find_nearest_skeleton_point(skeleton, target)
            if target is None:
                logger.warning(f"终点 {(target_x, target_y)} 附近没有找到骨架点")
                return []

        # Degenerate case: both endpoints coincide.
        if start == target:
            return [[start]]

        # 8-connected neighbourhood, axis moves before diagonal moves.
        neighbor_offsets = [(0, -1), (0, 1), (-1, 0), (1, 0),
                            (-1, -1), (-1, 1), (1, -1), (1, 1)]

        all_paths = []

        logger.debug(f"开始DFS搜索：从 {start} 到 {target}")
        # Stack entries: (current position, path so far, per-path visited set).
        stack = [(start, [start], {start})]

        while stack and len(all_paths) < max_paths:
            current_pos, current_path, visited = stack.pop()

            # Reached the target: record this path and backtrack.
            if current_pos == target:
                all_paths.append(current_path.copy())
                continue

            # Collect valid, unvisited skeleton neighbours together with
            # their Manhattan distance to the target (the DFS heuristic).
            neighbors_to_explore = []
            for dx, dy in neighbor_offsets:
                next_x = current_pos[0] + dx
                next_y = current_pos[1] + dy
                next_pos = (next_x, next_y)

                # Bounds check.
                if (next_y < 0 or next_y >= skeleton.shape[0] or
                        next_x < 0 or next_x >= skeleton.shape[1]):
                    continue
                # Must stay on the skeleton.
                if skeleton[next_y, next_x] == 0:
                    continue
                # No revisiting within the same path.
                if next_pos in visited:
                    continue

                heuristic = abs(next_x - target[0]) + abs(next_y - target[1])
                neighbors_to_explore.append((next_pos, heuristic))

            # Push farthest neighbours first so the nearest is popped first:
            # the DFS advances greedily toward the target while still
            # eventually exploring every alternative branch.
            neighbors_to_explore.sort(key=lambda item: item[1], reverse=True)

            for next_pos, _ in neighbors_to_explore:
                new_visited = visited.copy()
                new_visited.add(next_pos)
                stack.append((next_pos, current_path + [next_pos], new_visited))

        if len(all_paths) >= max_paths:
            logger.warning(f"从 {start} 到 {target} 的路径搜索达到最大限制 {max_paths}")

        # Drop trivial paths and near-identical duplicates (same endpoints,
        # same length, >98% point overlap).
        unique_paths = []
        for path in all_paths:
            if len(path) < 2:
                continue
            is_duplicate = False
            for existing_path in unique_paths:
                if (len(path) == len(existing_path) and
                        path[0] == existing_path[0] and
                        path[-1] == existing_path[-1]):
                    overlap = len(set(path) & set(existing_path))
                    if overlap / len(path) > 0.98:
                        is_duplicate = True
                        break
            if not is_duplicate:
                unique_paths.append(path)

        # Score and filter the survivors with the vessel-flow rules.
        if unique_paths:
            scored_paths = self._filter_paths_by_vessel_rules(unique_paths, start)
            filtered_paths = [scored[0] for scored in scored_paths]

            logger.debug(f"从 {start} 到 {target} 的路径过滤结果: "
                       f"原始={len(all_paths)}, 去重={len(unique_paths)}, 血管规则过滤={len(filtered_paths)}")

            return filtered_paths
        else:
            logger.debug(f"从 {start} 到 {target} 没有找到有效路径")
            return []
   
    def _find_nearest_skeleton_point(self, skeleton: np.ndarray, target_point: Tuple[int, int], 
                                   max_search_radius: int = 10) -> Optional[Tuple[int, int]]:
        """
        使用BFS算法找到距离目标点最近的骨架点
        
        Args:
            skeleton: 二值化骨架图，1表示骨架点，0表示背景
            target_point: 目标点坐标 (x, y)
            max_search_radius: 最大搜索半径，超过此范围则停止搜索
            
        Returns:
            最近的骨架点坐标 (x, y)，如果未找到则返回None
        """
        target_x, target_y = target_point
        
        # 如果目标点本身就在骨架上，直接返回
        if (0 <= target_y < skeleton.shape[0] and 
            0 <= target_x < skeleton.shape[1] and 
            skeleton[target_y, target_x] == 1):
            return target_point
        
        # 使用BFS进行搜索
        # 队列中存储的是 (x, y, distance) 元组
        queue = deque([(target_x, target_y, 0)])
        visited = set()
        visited.add((target_x, target_y))
        
        # 定义8连通邻域
        directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
        
        while queue:
            current_x, current_y, distance = queue.popleft()
            
            # 如果搜索距离超过最大半径，停止搜索
            if distance > max_search_radius:
                break
            
            # 检查当前点是否为骨架点
            if (0 <= current_y < skeleton.shape[0] and 
                0 <= current_x < skeleton.shape[1] and 
                skeleton[current_y, current_x] == 1):
                logger.debug(f"找到最近骨架点: {(current_x, current_y)}, 距离: {distance}")
                return (current_x, current_y)
            
            # 探索邻居点
            for dx, dy in directions:
                next_x = current_x + dx
                next_y = current_y + dy
                next_point = (next_x, next_y)
                
                # 检查边界条件和是否已访问
                if (next_point not in visited and
                    0 <= next_y < skeleton.shape[0] and 
                    0 <= next_x < skeleton.shape[1]):
                    
                    visited.add(next_point)
                    queue.append((next_x, next_y, distance + 1))
        
        logger.warning(f"在半径 {max_search_radius} 内未找到骨架点，目标点: {target_point}")
        return None

    def _sample_path_points(self, path: List[Tuple[int, int]], 
                           sampling_method: str = 'uniform',
                           sampling_interval: int = 3,
                           min_segment_length: int = 2) -> List[Tuple[int, int]]:
        """
        对路径进行采样以提高角度计算的稳定性
        
        Args:
            path: 原始路径点列表
            sampling_method: 采样方法
                - 'uniform': 等间隔采样
                - 'adaptive': 自适应采样（在曲率变化大的地方密集采样）
                - 'keypoints': 关键点采样（端点、转折点等）
            sampling_interval: 采样间隔（像素距离）
            min_segment_length: 最小段长度
            
        Returns:
            采样后的路径点列表
        """
        if len(path) <= 2:
            return path
        
        if sampling_method == 'uniform':
            return self._uniform_sampling(path, sampling_interval)
        elif sampling_method == 'adaptive':
            return self._adaptive_sampling(path, sampling_interval, min_segment_length)
        elif sampling_method == 'keypoints':
            return self._keypoint_sampling(path, sampling_interval)
        else:
            logger.warning(f"未知的采样方法: {sampling_method}，使用等间隔采样")
            return self._uniform_sampling(path, sampling_interval)
    
    def _uniform_sampling(self, path: List[Tuple[int, int]], interval: int) -> List[Tuple[int, int]]:
        """等间隔采样"""
        if len(path) <= 2:
            return path
        
        sampled_path = [path[0]]  # 始终包含起始点
        current_distance = 0
        last_sampled_idx = 0
        
        for i in range(1, len(path)):
            # 计算从上一个采样点到当前点的累积距离
            segment_distance = np.linalg.norm(np.array(path[i]) - np.array(path[i-1]))
            current_distance += segment_distance
            
            # 如果距离达到采样间隔，添加当前点
            if current_distance >= interval:
                sampled_path.append(path[i])
                current_distance = 0
                last_sampled_idx = i
        
        # 确保包含终点（如果终点不是最后采样的点）
        if last_sampled_idx < len(path) - 1:
            sampled_path.append(path[-1])
        
        return sampled_path
    
    def _adaptive_sampling(self, path: List[Tuple[int, int]], 
                          base_interval: int, min_segment_length: int) -> List[Tuple[int, int]]:
        """自适应采样：在曲率变化大的地方进行更密集的采样"""
        if len(path) <= 3:
            return path
        
        # 首先计算每个点的曲率
        curvatures = []
        for i in range(1, len(path) - 1):
            # 计算前后两个向量的角度差
            v1 = np.array(path[i]) - np.array(path[i-1])
            v2 = np.array(path[i+1]) - np.array(path[i])
            
            dot_product = np.dot(v1, v2)
            norms = np.linalg.norm(v1) * np.linalg.norm(v2)
            
            if norms > 0:
                cos_angle = np.clip(dot_product / norms, -1.0, 1.0)
                angle = np.arccos(cos_angle)
                curvatures.append(angle)
            else:
                curvatures.append(0.0)
        
        # 根据曲率调整采样间隔
        sampled_path = [path[0]]
        current_distance = 0
        
        for i in range(1, len(path)):
            segment_distance = np.linalg.norm(np.array(path[i]) - np.array(path[i-1]))
            current_distance += segment_distance
            
            # 计算自适应间隔
            if i-1 < len(curvatures):
                # 曲率越大，采样间隔越小
                curvature_factor = max(0.3, 1.0 - curvatures[i-1] / np.pi)
                adaptive_interval = base_interval * curvature_factor
            else:
                adaptive_interval = base_interval
            
            adaptive_interval = max(min_segment_length, adaptive_interval)
            
            if current_distance >= adaptive_interval:
                sampled_path.append(path[i])
                current_distance = 0
        
        # 确保包含终点
        if sampled_path[-1] != path[-1]:
            sampled_path.append(path[-1])
        
        return sampled_path
    
    def _keypoint_sampling(self, path: List[Tuple[int, int]], base_interval: int) -> List[Tuple[int, int]]:
        """关键点采样：保留端点、转折点和等间隔点"""
        if len(path) <= 3:
            return path
        
        # 首先进行等间隔采样
        uniform_sampled = self._uniform_sampling(path, base_interval)
        
        # 识别转折点（角度变化超过阈值的点）
        keypoints = set([path[0], path[-1]])  # 端点
        
        angle_threshold = np.pi / 6  # 30度阈值
        
        for i in range(1, len(path) - 1):
            # 计算角度变化
            v1 = np.array(path[i]) - np.array(path[i-1])
            v2 = np.array(path[i+1]) - np.array(path[i])
            
            dot_product = np.dot(v1, v2)
            norms = np.linalg.norm(v1) * np.linalg.norm(v2)
            
            if norms > 0:
                cos_angle = np.clip(dot_product / norms, -1.0, 1.0)
                angle = np.arccos(cos_angle)
                
                # 如果角度变化超过阈值，标记为关键点
                if angle > angle_threshold:
                    keypoints.add(path[i])
        
        # 合并等间隔采样点和关键点
        all_points = set(uniform_sampled) | keypoints
        
        # 按照在原路径中的顺序排序
        path_indices = {point: path.index(point) for point in all_points if point in path}
        sampled_path = sorted(all_points, key=lambda p: path_indices.get(p, float('inf')))
        
        # 过滤掉不在原路径中的点
        sampled_path = [p for p in sampled_path if p in path]
        
        return sampled_path

    def _calculate_path_angle_changes(self, path: List[Tuple[int, int]], use_sampling: bool = True) -> List[float]:
        """
        计算路径中每个点的角度变化
        
        Args:
            path: 路径点列表
            use_sampling: 是否使用路径采样提高稳定性
            
        Returns:
            角度变化列表（弧度）
        """
        if len(path) < 3:
            return []
        
        # 根据配置决定是否进行路径采样
        analysis_path = path
        if use_sampling and self.vessel_filter_config['sampling_config']['enable_sampling']:
            sampling_config = self.vessel_filter_config['sampling_config']
            analysis_path = self._sample_path_points(
                path,
                sampling_method=sampling_config['sampling_method'],
                sampling_interval=sampling_config['sampling_interval'],
                min_segment_length=sampling_config['min_segment_length']
            )
            logger.debug(f"路径采样: {len(path)} -> {len(analysis_path)} 点，"
                        f"方法: {sampling_config['sampling_method']}, "
                        f"间隔: {sampling_config['sampling_interval']}")
        
        if len(analysis_path) < 3:
            return []
        
        angle_changes = []
        for i in range(1, len(analysis_path) - 1):
            # 计算前后两个向量
            v1 = np.array(analysis_path[i]) - np.array(analysis_path[i-1])
            v2 = np.array(analysis_path[i+1]) - np.array(analysis_path[i])
            
            # 计算向量夹角
            dot_product = np.dot(v1, v2)
            norms = np.linalg.norm(v1) * np.linalg.norm(v2)
            
            if norms > 0:
                cos_angle = np.clip(dot_product / norms, -1.0, 1.0)
                angle = np.arccos(cos_angle)
                angle_changes.append(angle)
            else:
                angle_changes.append(0.0)
        
        return angle_changes
    
    def _calculate_path_smoothness(self, path: List[Tuple[int, int]]) -> float:
        """
        计算路径平滑度评分（0-1，1为最平滑）
        
        Args:
            path: 路径点列表
            
        Returns:
            平滑度评分
        """
        if len(path) < 3:
            return 1.0
        
        # 使用采样后的路径计算角度变化
        angle_changes = self._calculate_path_angle_changes(path, use_sampling=True)
        if not angle_changes:
            return 1.0
        
        # 计算平均角度变化和最大角度变化
        mean_angle = np.mean(angle_changes)
        max_angle = np.max(angle_changes)
        angle_variance = np.var(angle_changes)
        
        # 血管应该相对平滑，大的角度变化会降低评分
        # 使用指数衰减函数，大角度变化会被重度惩罚
        smoothness_score = np.exp(-mean_angle * 2) * np.exp(-max_angle * 1.5) * np.exp(-angle_variance * 3)
        
        return min(1.0, max(0.0, smoothness_score))
    
    def _calculate_direction_consistency(self, path: List[Tuple[int, int]], 
                                       start_point: Tuple[int, int]) -> float:
        """
        计算路径方向一致性评分（0-1，1为最一致）
        基于血流从中心向外周流动的原理
        
        Args:
            path: 路径点列表
            start_point: 起始点（通常是血管根部）
            
        Returns:
            方向一致性评分
        """
        if len(path) < 2:
            return 1.0
        
        # 计算总体方向向量（从起点到终点）
        overall_direction = np.array(path[-1]) - np.array(path[0])
        if np.linalg.norm(overall_direction) == 0:
            return 0.0
        
        overall_direction = overall_direction / np.linalg.norm(overall_direction)
        
        # 计算每个段的方向一致性
        direction_scores = []
        for i in range(len(path) - 1):
            segment_direction = np.array(path[i+1]) - np.array(path[i])
            if np.linalg.norm(segment_direction) > 0:
                segment_direction = segment_direction / np.linalg.norm(segment_direction)
                # 计算与总体方向的一致性
                consistency = np.dot(segment_direction, overall_direction)
                direction_scores.append(max(0, consistency))  # 只考虑正向一致性
        
        if not direction_scores:
            return 0.0
        
        # 返回平均方向一致性
        return np.mean(direction_scores)
    
    def _calculate_path_curvature(self, path: List[Tuple[int, int]]) -> float:
        """
        计算路径曲率评分（0-1，1为曲率最适中）
        
        Args:
            path: 路径点列表
            
        Returns:
            曲率评分
        """
        if len(path) < 3:
            return 1.0
        
        # 计算路径的实际长度与直线距离的比值
        actual_length = 0
        for i in range(len(path) - 1):
            actual_length += np.linalg.norm(np.array(path[i+1]) - np.array(path[i]))
        
        straight_distance = np.linalg.norm(np.array(path[-1]) - np.array(path[0]))
        
        if straight_distance == 0:
            return 0.0
        
        curvature_ratio = actual_length / straight_distance
        
        # 使用配置的最优曲率比
        optimal_ratio = self.vessel_filter_config['optimal_curvature_ratio']
        deviation = abs(curvature_ratio - optimal_ratio)
        
        # 使用高斯函数，在最优比值附近评分最高
        curvature_score = np.exp(-(deviation ** 2) / (2 * 0.3 ** 2))
        
        return min(1.0, max(0.0, curvature_score))
    
    def _check_bifurcation_angles(self, path: List[Tuple[int, int]], 
                                 bifurcation_points: List[CtlPoint]) -> float:
        """Score whether the path crosses bifurcations at plausible angles (0..1).

        For every interior path point coinciding with a known bifurcation,
        the sharpest local turning angle is checked against the 30°-120°
        band considered physiologically reasonable, with 60° scoring best.
        """
        # No known bifurcations: nothing to penalise.
        if not bifurcation_points:
            return 1.0

        bifurcation_coords = {bp.point for bp in bifurcation_points}
        local_scores = []

        for idx, pt in enumerate(path):
            if pt not in bifurcation_coords:
                continue
            # Only interior points have a measurable before/after angle.
            if not (0 < idx < len(path) - 1):
                continue

            # Turning angles in a 5-point window around the bifurcation.
            window = path[max(0, idx - 2):min(len(path), idx + 3)]
            window_angles = self._calculate_path_angle_changes(window)
            if not window_angles:
                continue

            sharpest = np.max(window_angles)
            if np.pi / 6 <= sharpest <= 2 * np.pi / 3:
                # Within the acceptable 30°-120° band.
                local_scores.append(1.0)
            else:
                # Penalise deviation from the ideal 60° turn.
                local_scores.append(np.exp(-abs(sharpest - np.pi / 3) * 2))

        return np.mean(local_scores) if local_scores else 1.0
    
    def _filter_paths_by_vessel_rules(self, paths: List[List[Tuple[int, int]]], 
                                     start_point: Tuple[int, int]) -> List[Tuple[List[Tuple[int, int]], float]]:
        """Score candidate paths with the vessel-flow rules and drop weak ones.

        Args:
            paths: candidate paths
            start_point: vessel root coordinate

        Returns:
            (path, total score) pairs sorted best-first; only paths at or
            above the configured score threshold survive.
        """
        # Filtering can be switched off entirely via configuration.
        if not self.vessel_filter_config['enable_vessel_filtering']:
            logger.warning("血管规则过滤已禁用，返回原始路径")
            return [(path, 1.0) for path in paths]

        logger.debug(f"开始根据血管流动规则过滤 {len(paths)} 条路径")

        threshold = self.vessel_filter_config['min_score_threshold']
        weights = self.vessel_filter_config['weights']

        surviving = []

        for candidate in paths:
            # Single-point paths cannot be scored.
            if len(candidate) < 2:
                continue

            # Individual rule scores.
            smoothness = self._calculate_path_smoothness(candidate)
            direction = self._calculate_direction_consistency(candidate, start_point)
            curvature = self._calculate_path_curvature(candidate)
            bifurcation = self._check_bifurcation_angles(candidate, self.bifurcations)

            # Weighted aggregate of the four rule scores.
            total = (weights['smoothness'] * smoothness +
                     weights['direction'] * direction +
                     weights['curvature'] * curvature +
                     weights['bifurcation'] * bifurcation)

            if total >= threshold:
                surviving.append((candidate, total))
                logger.debug(f"路径评分: 平滑度={smoothness:.3f}, 方向性={direction:.3f}, "
                           f"曲率={curvature:.3f}, 分叉={bifurcation:.3f}, 总分={total:.3f}")
            else:
                logger.debug(f"路径因评分过低被过滤: {total:.3f} < {threshold}")

        # Best-scoring paths first.
        surviving.sort(key=lambda entry: entry[1], reverse=True)

        logger.debug(f"血管规则过滤完成: {len(paths)} -> {len(surviving)} 条路径")
        return surviving

    def configure_vessel_filter(self, 
                               min_score_threshold: float = None,
                               smoothness_weight: float = None,
                               direction_weight: float = None,
                               curvature_weight: float = None,
                               bifurcation_weight: float = None,
                               optimal_curvature_ratio: float = None,
                               enable_filtering: bool = None,
                               # 采样配置参数
                               enable_sampling: bool = None,
                               sampling_interval: int = None,
                               sampling_method: str = None,
                               min_segment_length: int = None):
        """Update the vessel-flow filtering configuration.

        Only parameters that are not None are applied. Numeric values are
        clamped to their valid ranges, and the four rule weights are
        re-normalised to sum to 1 after every update.

        Args:
            min_score_threshold: minimum path score (0-1); lower-scoring paths are dropped
            smoothness_weight: smoothness weight (0-1)
            direction_weight: direction-consistency weight (0-1)
            curvature_weight: curvature weight (0-1)
            bifurcation_weight: bifurcation-angle weight (0-1)
            optimal_curvature_ratio: ideal arc/chord ratio (>= 1.0)
            enable_filtering: toggle for the vessel-rule filter
            enable_sampling: toggle for path sampling during angle analysis
            sampling_interval: sampling interval in pixels (>= 1)
            sampling_method: 'uniform', 'adaptive' or 'keypoints'
            min_segment_length: minimum segment length (>= 1)
        """
        config = self.vessel_filter_config

        if min_score_threshold is not None:
            # Threshold lives in [0, 1].
            config['min_score_threshold'] = max(0.0, min(1.0, min_score_threshold))

        # Each supplied weight is clamped into [0, 1]; normalisation follows.
        weight_updates = {
            'smoothness': smoothness_weight,
            'direction': direction_weight,
            'curvature': curvature_weight,
            'bifurcation': bifurcation_weight,
        }
        for weight_name, weight_value in weight_updates.items():
            if weight_value is not None:
                config['weights'][weight_name] = max(0.0, min(1.0, weight_value))

        if optimal_curvature_ratio is not None:
            # An arc/chord ratio below 1.0 is geometrically impossible.
            config['optimal_curvature_ratio'] = max(1.0, optimal_curvature_ratio)

        if enable_filtering is not None:
            config['enable_vessel_filtering'] = enable_filtering

        # --- sampling sub-configuration ---
        sampling_config = config['sampling_config']

        if enable_sampling is not None:
            sampling_config['enable_sampling'] = enable_sampling

        if sampling_interval is not None:
            sampling_config['sampling_interval'] = max(1, sampling_interval)

        if sampling_method is not None:
            valid_methods = ['uniform', 'adaptive', 'keypoints']
            if sampling_method in valid_methods:
                sampling_config['sampling_method'] = sampling_method
            else:
                logger.warning(f"无效的采样方法: {sampling_method}，有效选项: {valid_methods}")

        if min_segment_length is not None:
            sampling_config['min_segment_length'] = max(1, min_segment_length)

        # Re-normalise the rule weights so they always sum to 1.
        total_weight = sum(config['weights'].values())
        if total_weight > 0:
            for weight_name in config['weights']:
                config['weights'][weight_name] /= total_weight

        logger.info(f"血管过滤参数已更新: {self.vessel_filter_config}")

    def get_path_scores(self, paths: List[List[Tuple[int, int]]], 
                       start_point: Tuple[int, int]) -> List[Dict]:
        """Produce a detailed score report for every candidate path.

        Args:
            paths: candidate paths
            start_point: vessel root coordinate

        Returns:
            One report dict per scoreable path (length >= 2), sorted by
            total score in descending order.
        """
        reports = []

        for index, candidate in enumerate(paths):
            # Paths with fewer than two points cannot be scored.
            if len(candidate) < 2:
                continue

            # Individual rule scores.
            smoothness = self._calculate_path_smoothness(candidate)
            direction = self._calculate_direction_consistency(candidate, start_point)
            curvature = self._calculate_path_curvature(candidate)
            bifurcation = self._check_bifurcation_angles(candidate, self.bifurcations)

            # Weighted aggregate using the configured weights.
            weights = self.vessel_filter_config['weights']
            total = (weights['smoothness'] * smoothness +
                     weights['direction'] * direction +
                     weights['curvature'] * curvature +
                     weights['bifurcation'] * bifurcation)

            # Geometry statistics for the report.
            arc_length = sum(np.linalg.norm(np.array(b) - np.array(a))
                             for a, b in zip(candidate, candidate[1:]))
            chord = np.linalg.norm(np.array(candidate[-1]) - np.array(candidate[0]))
            tortuosity = arc_length / chord if chord > 0 else float('inf')

            reports.append({
                'path_index': index,
                'path_length': len(candidate),
                'actual_distance': arc_length,
                'straight_distance': chord,
                'curvature_ratio': tortuosity,
                'scores': {
                    'smoothness': smoothness,
                    'direction': direction,
                    'curvature': curvature,
                    'bifurcation': bifurcation,
                    'total': total
                },
                'weights_used': weights.copy(),
                'passes_threshold': total >= self.vessel_filter_config['min_score_threshold']
            })

        # Best-scoring paths first.
        reports.sort(key=lambda report: report['scores']['total'], reverse=True)

        return reports
    
    def print_path_analysis(self, paths: List[List[Tuple[int, int]]], 
                           start_point: Tuple[int, int]):
        """Print a human-readable scoring report for the given paths.

        Args:
            paths: candidate paths
            start_point: vessel root coordinate
        """
        ranked = self.get_path_scores(paths, start_point)

        # Report header with the current filter configuration.
        print(f"\n=== 路径分析报告 ===")
        print(f"总路径数: {len(paths)}")
        print(f"评分阈值: {self.vessel_filter_config['min_score_threshold']:.3f}")
        print(f"权重配置: {self.vessel_filter_config['weights']}")
        print("-" * 80)

        # One section per path, in ranking order.
        for rank, report in enumerate(ranked, start=1):
            print(f"路径 #{report['path_index']} (排名: {rank})")
            print(f"  长度: {report['path_length']} 像素")
            print(f"  实际距离: {report['actual_distance']:.2f}")
            print(f"  直线距离: {report['straight_distance']:.2f}")
            print(f"  曲率比: {report['curvature_ratio']:.3f}")
            print(f"  评分详情:")
            for score_name, score_value in report['scores'].items():
                print(f"    {score_name}: {score_value:.3f}")
            print(f"  是否通过: {'✓' if report['passes_threshold'] else '✗'}")
            print("-" * 40)

    def set_vessel_type_preset(self, vessel_type: str):
        """Apply a predefined vessel-filter configuration for a vessel type.

        Looks up a preset (threshold, scoring weights, curvature target and
        sampling settings) and forwards it to ``configure_vessel_filter``.
        Unknown types are logged as an error and leave the configuration
        unchanged.

        Args:
            vessel_type: One of:
                - 'major': major vessels (aorta, large veins, ...)
                - 'branch': branch vessels (medium arteries/veins)
                - 'capillary': capillary networks
                - 'coronary': coronary arteries (high precision)
                - 'retinal': retinal vessels (fine structures)
        """
        presets = {
            'major': {
                'min_score_threshold': 0.5,
                'weights': {'smoothness': 0.4, 'direction': 0.35, 'curvature': 0.15, 'bifurcation': 0.1},
                'optimal_curvature_ratio': 1.1,
                'sampling_config': {'sampling_interval': 4, 'sampling_method': 'uniform'},
                'description': '主要血管：高平滑度和方向一致性要求，较大采样间隔'
            },
            'branch': {
                'min_score_threshold': 0.35,
                'weights': {'smoothness': 0.3, 'direction': 0.25, 'curvature': 0.25, 'bifurcation': 0.2},
                'optimal_curvature_ratio': 1.2,
                'sampling_config': {'sampling_interval': 3, 'sampling_method': 'adaptive'},
                'description': '分支血管：平衡的评分要求，自适应采样'
            },
            'capillary': {
                'min_score_threshold': 0.25,
                'weights': {'smoothness': 0.2, 'direction': 0.15, 'curvature': 0.35, 'bifurcation': 0.3},
                'optimal_curvature_ratio': 1.4,
                'sampling_config': {'sampling_interval': 2, 'sampling_method': 'keypoints'},
                'description': '毛细血管：允许更多弯曲和分叉，关键点采样'
            },
            'coronary': {
                'min_score_threshold': 0.45,
                'weights': {'smoothness': 0.35, 'direction': 0.3, 'curvature': 0.25, 'bifurcation': 0.1},
                'optimal_curvature_ratio': 1.15,
                'sampling_config': {'sampling_interval': 2, 'sampling_method': 'adaptive'},
                'description': '冠状动脉：高精度要求，精细自适应采样'
            },
            'retinal': {
                'min_score_threshold': 0.4,
                'weights': {'smoothness': 0.35, 'direction': 0.25, 'curvature': 0.2, 'bifurcation': 0.2},
                'optimal_curvature_ratio': 1.25,
                'sampling_config': {'sampling_interval': 3, 'sampling_method': 'adaptive'},
                'description': '视网膜血管：精细结构分析，自适应采样'
            }
        }

        # Guard clause: unknown type is an error, not an exception.
        preset = presets.get(vessel_type)
        if preset is None:
            available_types = list(presets.keys())
            logger.error(f"未知的血管类型: {vessel_type}，可用类型: {available_types}")
            return

        weights = preset['weights']
        sampling = preset['sampling_config']

        # Forward the preset to the generic configuration entry point.
        self.configure_vessel_filter(
            min_score_threshold=preset['min_score_threshold'],
            smoothness_weight=weights['smoothness'],
            direction_weight=weights['direction'],
            curvature_weight=weights['curvature'],
            bifurcation_weight=weights['bifurcation'],
            optimal_curvature_ratio=preset['optimal_curvature_ratio'],
            sampling_interval=sampling['sampling_interval'],
            sampling_method=sampling['sampling_method']
        )

        logger.info(f"已应用 '{vessel_type}' 血管类型预设: {preset['description']}")
        logger.info(f"配置详情: 阈值={preset['min_score_threshold']}, "
                   f"权重={weights}, 曲率比={preset['optimal_curvature_ratio']}")

    def configure_path_sampling(self, 
                               enable_sampling: bool = True,
                               sampling_interval: int = 3,
                               sampling_method: str = 'uniform',
                               min_segment_length: int = 2):
        """Update only the path-sampling part of the vessel filter config.

        Interval and minimum segment length are clamped to at least 1 before
        being stored.

        Args:
            enable_sampling: Whether path sampling is enabled at all.
            sampling_interval: Sampling interval in pixel distance.
            sampling_method: One of:
                - 'uniform': equidistant sampling, suited to major vessels
                - 'adaptive': denser at bends, suited to complex vessels
                - 'keypoints': keeps turning points, suited to fine analysis
            min_segment_length: Minimum segment length.
        """
        new_settings = {
            'enable_sampling': enable_sampling,
            'sampling_interval': max(1, sampling_interval),  # clamp to >= 1
            'sampling_method': sampling_method,
            'min_segment_length': max(1, min_segment_length)  # clamp to >= 1
        }
        self.vessel_filter_config['sampling_config'].update(new_settings)

        logger.info(f"路径采样配置已更新: {self.vessel_filter_config['sampling_config']}")

        # When sampling is on, remind the user what each method means.
        if enable_sampling:
            logger.info(f"采样方法说明:")
            logger.info(f"  - uniform: 等间隔采样，减少噪声，适合平滑血管")
            logger.info(f"  - adaptive: 自适应采样，在弯曲处保持精度")
            logger.info(f"  - keypoints: 关键点采样，保留重要转折信息")

    def compare_sampling_methods(self, path: List[Tuple[int, int]]) -> Dict[str, Dict]:
        """
        Compare the effect of the different sampling methods on one path.

        For each method ('uniform', 'adaptive', 'keypoints') the path is
        re-sampled and its angle-change statistics are compared against the
        unsampled baseline. The active ``sampling_config`` is mutated
        temporarily during the comparison; a ``try/finally`` guarantees it is
        restored even if sampling or angle computation raises mid-loop (the
        original code would have left the manager with a corrupted config).

        Args:
            path: The path to analyze, as a list of (x, y) points.

        Returns:
            Dict with a 'baseline_no_sampling' entry, one entry per method
            (point counts, reduction ratio, angle statistics, stability
            improvement vs. baseline) and a 'recommendation' entry; or
            {'error': ...} when the path has fewer than 5 points.
        """
        if len(path) < 5:
            return {'error': '路径太短，无法进行有效比较'}

        methods = ['uniform', 'adaptive', 'keypoints']
        # Shallow copy is sufficient: only top-level keys are overwritten below.
        original_config = self.vessel_filter_config['sampling_config'].copy()
        comparison_results = {}

        # Baseline: angle statistics of the raw path, no sampling applied.
        no_sampling_angles = self._calculate_path_angle_changes(path, use_sampling=False)
        baseline_stats = {
            'point_count': len(path),
            'angle_count': len(no_sampling_angles),
            'mean_angle': np.mean(no_sampling_angles) if no_sampling_angles else 0,
            'std_angle': np.std(no_sampling_angles) if no_sampling_angles else 0,
            'max_angle': np.max(no_sampling_angles) if no_sampling_angles else 0
        }
        comparison_results['baseline_no_sampling'] = baseline_stats

        try:
            for method in methods:
                # Temporarily activate this sampling method.
                self.vessel_filter_config['sampling_config']['sampling_method'] = method
                self.vessel_filter_config['sampling_config']['enable_sampling'] = True

                # Re-sample the path with the method under test.
                sampled_path = self._sample_path_points(
                    path,
                    sampling_method=method,
                    sampling_interval=original_config['sampling_interval'],
                    min_segment_length=original_config['min_segment_length']
                )

                # Angle changes of the sampled path (no further sampling).
                sampled_angles = self._calculate_path_angle_changes(sampled_path, use_sampling=False)

                comparison_results[method] = {
                    'sampled_point_count': len(sampled_path),
                    'reduction_ratio': len(sampled_path) / len(path),
                    'angle_count': len(sampled_angles),
                    'mean_angle': np.mean(sampled_angles) if sampled_angles else 0,
                    'std_angle': np.std(sampled_angles) if sampled_angles else 0,
                    'max_angle': np.max(sampled_angles) if sampled_angles else 0,
                    'stability_improvement': {
                        # Relative reduction vs. baseline; 0 when the baseline
                        # statistic is 0 or no angles were produced.
                        'std_reduction': (baseline_stats['std_angle'] - np.std(sampled_angles)) / baseline_stats['std_angle'] 
                                       if baseline_stats['std_angle'] > 0 and sampled_angles else 0,
                        'max_angle_reduction': (baseline_stats['max_angle'] - np.max(sampled_angles)) / baseline_stats['max_angle']
                                             if baseline_stats['max_angle'] > 0 and sampled_angles else 0
                    }
                }
        finally:
            # Always restore the caller's configuration, even on error.
            self.vessel_filter_config['sampling_config'] = original_config

        # Attach a recommendation based on the gathered statistics.
        comparison_results['recommendation'] = self._recommend_sampling_method(comparison_results)

        return comparison_results
    
    def _recommend_sampling_method(self, comparison_results: Dict) -> Dict[str, str]:
        """基于比较结果推荐最佳采样方法"""
        methods = ['uniform', 'adaptive', 'keypoints']
        
        # 计算综合评分
        scores = {}
        for method in methods:
            if method not in comparison_results:
                continue
                
            stats = comparison_results[method]
            stability = stats['stability_improvement']
            
            # 综合评分：稳定性改进 + 合理的采样比例
            stability_score = (stability['std_reduction'] + stability['max_angle_reduction']) / 2
            reduction_score = 1.0 - abs(stats['reduction_ratio'] - 0.4)  # 理想采样比例40%
            
            scores[method] = stability_score * 0.7 + reduction_score * 0.3
        
        if not scores:
            return {'method': 'uniform', 'reason': '默认推荐'}
        
        best_method = max(scores, key=scores.get)
        
        reasons = {
            'uniform': '提供稳定的等间隔采样，适合平滑血管',
            'adaptive': '在弯曲处保持精度，适合复杂形状血管', 
            'keypoints': '保留关键转折点，适合需要精细分析的场景'
        }
        
        return {
            'method': best_method,
            'score': scores[best_method],
            'reason': reasons.get(best_method, ''),
            'scores': scores
        }

    def print_sampling_comparison(self, path: List[Tuple[int, int]]):
        """Print a formatted comparison of sampling methods for one path.

        Runs ``compare_sampling_methods`` and renders the baseline statistics,
        the per-method results, and the final recommendation.

        Args:
            path: The path to analyze.
        """
        results = self.compare_sampling_methods(path)

        # Guard clause: too-short paths come back as an error entry.
        if 'error' in results:
            print(f"错误: {results['error']}")
            return

        baseline = results['baseline_no_sampling']
        print("\n=== 路径采样方法比较 ===")
        print(f"原始路径点数: {baseline['point_count']}")
        print(f"原始角度数量: {baseline['angle_count']}")
        print(f"原始角度统计: 均值={baseline['mean_angle']:.3f}, "
              f"标准差={baseline['std_angle']:.3f}")
        print("-" * 60)

        # One section per sampling method that was actually evaluated.
        for method in ('uniform', 'adaptive', 'keypoints'):
            if method not in results:
                continue

            stats = results[method]
            improvement = stats['stability_improvement']
            print(f"\n{method.upper()} 采样:")
            print(f"  采样后点数: {stats['sampled_point_count']} "
                  f"(压缩比: {stats['reduction_ratio']:.1%})")
            print(f"  角度统计: 均值={stats['mean_angle']:.3f}, "
                  f"标准差={stats['std_angle']:.3f}, 最大={stats['max_angle']:.3f}")
            print(f"  稳定性改进: 标准差减少{improvement['std_reduction']:.1%}, "
                  f"最大角度减少{improvement['max_angle_reduction']:.1%}")

        print("\n" + "=" * 60)
        rec = results['recommendation']
        print(f"推荐方法: {rec['method'].upper()}")
        print(f"推荐理由: {rec['reason']}")
        print(f"综合评分: {rec['score']:.3f}")