#!/usr/bin/env python3
"""
医学图像处理自动化流程脚本
完整的端到端处理流程，包括：
1. 创建目录结构
2. 移动预测结果到相应子目录
3. 运行normalize.py
4. 运行segment.py和move_the_seg_mask.py
5. 运行do_metrics.py
"""

import os
import sys
import subprocess
import argparse
import logging
from pathlib import Path
from typing import List, Dict, Optional
import glob
import shutil

# 添加当前目录到Python路径，以便导入其他脚本
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from create_subject_dirs import SubjectDirectoryCreator
from do_metrics import MedicalImageProcessor

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class AutoPipeline:
    """医学图像处理自动化流程"""

    def __init__(self, base_dir: str = ".."):
        """Set up the pipeline: helper components and sibling script paths.

        Args:
            base_dir: Project root directory (default: parent of this
                script's directory).
        """
        self.base_dir = Path(base_dir)
        self.logger = logging.getLogger(__name__)

        # Delegate directory creation and metric computation to helpers.
        self.dir_creator = SubjectDirectoryCreator(base_dir)
        self.metrics_processor = MedicalImageProcessor(base_dir)

        # Resolve the sibling helper scripts relative to this file.
        scripts_dir = Path(__file__).parent
        self.scripts_dir = scripts_dir
        for attr, script_name in (
            ("normalize_script", "normalize.py"),
            ("segment_script", "segment.py"),
            ("move_seg_script", "move_the_seg_mask.py"),
            ("do_metrics_script", "do_metrics.py"),
        ):
            setattr(self, attr, scripts_dir / script_name)

    def step1_create_directories(self, subject_ids: List[str], dry_run: bool = False) -> bool:
        """
        步骤1: 创建目录结构
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤1: 创建目录结构")
        self.logger.info("=" * 60)

        return self.dir_creator.create_subject_directories(subject_ids, dry_run)

    def step2_move_predictions(self, subject_ids: List[str], prediction_configs: Dict, dry_run: bool = False) -> bool:
        """
        Step 2: copy prediction results into their per-subject sub-directories.

        Args:
            subject_ids: list of subject IDs to process
            prediction_configs: per-subject prediction file configuration, e.g.:
            {
                "subject_id": {
                    "proposed": [{"src": "path/to/src", "subdir": "method_name"}],
                    "source": [{"src": "path/to/src"}],
                    "contrast_methods": [{"src": "path/to/src"}]
                }
            }
            dry_run: when True, only log what would be moved

        Returns:
            True if every configured file was handled successfully.
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤2: 移动预测结果到相应子目录")
        self.logger.info("=" * 60)

        success = True

        for subject_id in subject_ids:
            if subject_id not in prediction_configs:
                self.logger.warning(f"未找到受试者 {subject_id} 的预测文件配置")
                continue

            self.logger.info(f"--- 处理受试者: {subject_id} ---")
            config = prediction_configs[subject_id]

            # Handle each category of prediction files.
            for pred_type in ["proposed", "source", "contrast_methods"]:
                if pred_type not in config:
                    continue

                self.logger.info(f"  移动 {pred_type} 文件...")

                for file_config in config[pred_type]:
                    src_pattern = file_config["src"]
                    subdir = file_config.get("subdir", "")

                    # Expand the glob pattern to concrete source files.
                    files = glob.glob(src_pattern, recursive=True)
                    if not files:
                        self.logger.warning(f"    未找到匹配文件: {src_pattern}")
                        continue

                    # Pick the destination; only "proposed" supports an extra
                    # per-method sub-directory level.
                    if pred_type == "proposed" and subdir:
                        dst_dir = self.base_dir / "prediction" / subject_id / pred_type / subdir
                    else:
                        dst_dir = self.base_dir / "prediction" / subject_id / pred_type

                    # Copy the matched files into place.
                    for src_file in files:
                        # For contrast_methods, derive a standardized target
                        # file name from the source file name.
                        target_name = ""
                        if pred_type == "contrast_methods":
                            src_name = Path(src_file).name

                            # Derive an orientation tag from the subject ID.
                            # NOTE(review): this mapping is inverted relative to
                            # _resolve_dynamic_path ("_sag" -> "axial" here, but
                            # "_sag" -> sagittal there), and the original inline
                            # comment said the default is axial/transverse while
                            # the code assigns "sagittal" — confirm the intent.
                            orientation = "sagittal"
                            if "_sag" in subject_id:
                                orientation = "axial"
                            elif "_tra" in subject_id:
                                orientation = "sagittal"

                            # Map known method names to the standard file name.
                            if "mcinr" in src_name.lower():
                                target_name = f"infer_source_{orientation}_LR_mcinr.nii.gz"
                            elif "smore" in src_name.lower():
                                target_name = f"infer_source_{orientation}_LR_smore.nii.gz"
                            # Already in standard form: keep the name unchanged.
                            else:
                                target_name = src_name

                        if not self._move_file(src_file, dst_dir, dry_run, target_name=target_name):
                            success = False

        return success

    def step3_normalize(self, subject_ids: List[str], dry_run: bool = False) -> bool:
        """
        Step 3: run normalize.py's resampling to standardize images.

        Processes every category under each subject's prediction directory
        (proposed/, source/, contrast_methods/) and writes the normalized
        output into the subject's processed/ directory.

        Args:
            subject_ids: subjects to normalize
            dry_run: when True, only log what would be processed

        Returns:
            True if normalization succeeded for every subject.
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤3: 运行图像标准化")
        self.logger.info("=" * 60)

        success = True

        for subject_id in subject_ids:
            self.logger.info(f"--- 标准化受试者: {subject_id} ---")

            # Skip subjects that have no prediction directory at all.
            prediction_dir = self.base_dir / "prediction" / subject_id
            if not prediction_dir.exists():
                self.logger.warning(f"未找到prediction目录: {prediction_dir}")
                continue

            # Destination directory for the normalized output.
            processed_dir = self.base_dir / "prediction" / subject_id / "processed"

            # Categories that may contain files to normalize.
            subdirs_to_process = ["proposed", "source", "contrast_methods"]
            all_source_dirs = []

            # Collect every directory that actually holds files to normalize.
            for subdir_type in subdirs_to_process:
                subdir_path = prediction_dir / subdir_type
                if not subdir_path.exists():
                    self.logger.info(f"  跳过不存在的目录: {subdir_path}")
                    continue

                if subdir_type == "proposed":
                    # proposed/ may contain one sub-directory per method.
                    method_dirs = [d for d in subdir_path.iterdir() if d.is_dir()]
                    if method_dirs:
                        all_source_dirs.extend(method_dirs)
                        self.logger.info(f"  找到 {len(method_dirs)} 个proposed方法目录")
                    else:
                        # Fall back to files placed directly inside proposed/.
                        files = list(subdir_path.glob("*.nii.gz"))
                        if files:
                            all_source_dirs.append(subdir_path)
                            self.logger.info(f"  找到proposed目录中的 {len(files)} 个文件")
                else:
                    # source/ and contrast_methods/ hold files directly.
                    files = list(subdir_path.glob("*.nii.gz"))
                    if files:
                        all_source_dirs.append(subdir_path)
                        self.logger.info(f"  找到 {subdir_type} 目录中的 {len(files)} 个文件")

            if not all_source_dirs:
                self.logger.warning(f"未找到需要标准化的文件: {subject_id}")
                continue

            if dry_run:
                self.logger.info(f"  [模拟] 将标准化 {len(all_source_dirs)} 个目录到 {processed_dir}")
                for source_dir in all_source_dirs:
                    files = list(source_dir.glob("*.nii.gz"))
                    self.logger.info(f"    [模拟] 处理: {source_dir} ({len(files)} 个文件)")
            else:
                # Delegate the actual resampling to the sibling normalize.py
                # module (imported lazily so dry runs never need it).
                try:
                    from normalize import resample_unite2
                    dir_list = [str(d) for d in all_source_dirs]
                    self.logger.info(f"  开始标准化 {len(dir_list)} 个目录...")
                    resample_unite2(dir_list, str(processed_dir))
                    self.logger.info(f"  ✓ 标准化完成: {subject_id}")
                except Exception as e:
                    # Any failure (import or processing) marks this subject as
                    # failed but does not abort the remaining subjects.
                    self.logger.error(f"  ✗ 标准化失败 {subject_id}: {e}")
                    success = False

        return success

    def step4_segment(self, subject_ids: List[str], device: str = "gpu:0", dry_run: bool = False) -> bool:
        """
        Step 4: run TotalSegmentator on every normalized image.

        Args:
            subject_ids: subjects to segment
            device: device string passed to TotalSegmentator (e.g. "gpu:0", "cpu")
            dry_run: when True, only log what would be segmented

        Returns:
            True if every file of every subject was segmented successfully.
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤4: 运行图像分割")
        self.logger.info("=" * 60)

        # Warn early about unusable devices (skipped in dry-run mode).
        if not dry_run:
            self._check_device_availability(device)

        success = True

        for subject_id in subject_ids:
            self.logger.info(f"--- 分割受试者: {subject_id} ---")

            # Segmentation runs on the normalized (processed) images only.
            processed_dir = self.base_dir / "prediction" / subject_id / "processed"
            if not processed_dir.exists():
                self.logger.warning(f"未找到processed目录: {processed_dir}")
                continue

            # Gather the volumes to segment.
            files = list(processed_dir.glob("*.nii.gz"))
            if not files:
                self.logger.warning(f"processed目录中未找到.nii.gz文件: {processed_dir}")
                continue

            # Per-subject output directory for segmentation results.
            seg_dir = self.base_dir / "segmentation" / subject_id

            if dry_run:
                self.logger.info(f"  [模拟] 将分割 {len(files)} 个文件 (设备: {device})")
                for file in files:
                    self.logger.info(f"    [模拟] 分割: {file}")
            else:
                # Make sure the output directory exists.
                seg_dir.mkdir(parents=True, exist_ok=True)

                self.logger.info(f"  开始分割 {len(files)} 个文件...")

                # Segment each file independently.
                for i, file in enumerate(files, 1):
                    # "x.nii.gz".stem is "x.nii"; strip ".nii" to get the bare name.
                    classname = file.stem.replace('.nii', '')
                    outpath = seg_dir / f"seg_{classname}"

                    self.logger.info(f"  [{i}/{len(files)}] 处理文件: {file.name}")

                    if not self._run_segmentation(str(file), str(outpath), device):
                        self.logger.error(f"  文件分割失败: {file.name}")
                        success = False
                        # Keep going: one failed file must not stop the rest.
                        continue

                    # Free cached GPU memory between files to avoid OOM.
                    if device.startswith("gpu") or device.startswith("cuda"):
                        self._cleanup_gpu_memory()

        return success

    def _check_device_availability(self, device: str):
        """检查设备可用性"""
        if device.startswith("gpu") or device.startswith("cuda"):
            try:
                import torch
                if not torch.cuda.is_available():
                    self.logger.warning("CUDA不可用，建议使用 --device cpu")
                    return

                gpu_id = int(device.split(":")[-1]) if ":" in device else 0
                if gpu_id >= torch.cuda.device_count():
                    self.logger.warning(f"GPU {gpu_id} 不存在，可用GPU数量: {torch.cuda.device_count()}")
                    return

                gpu_name = torch.cuda.get_device_name(gpu_id)
                self.logger.info(f"使用设备: {gpu_name} (GPU {gpu_id})")

            except ImportError:
                self.logger.warning("PyTorch未安装，无法检查GPU状态")
        else:
            self.logger.info(f"使用设备: {device}")

    def _cleanup_gpu_memory(self):
        """Release cached CUDA memory when torch with CUDA support is present.

        Silently does nothing if PyTorch is not installed.
        """
        try:
            import torch
        except ImportError:
            return
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def step5_move_seg_masks(self, subject_ids: List[str], dry_run: bool = False) -> bool:
        """
        步骤5: 移动分割掩膜到正确位置
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤5: 移动分割掩膜")
        self.logger.info("=" * 60)

        success = True

        for subject_id in subject_ids:
            self.logger.info(f"--- 移动掩膜: {subject_id} ---")

            seg_dir = self.base_dir / "segmentation" / subject_id
            if not seg_dir.exists():
                self.logger.warning(f"分割目录不存在: {seg_dir}")
                continue

            # 查找prostate.nii.gz文件
            prostate_files = list(seg_dir.glob("**/prostate.nii.gz"))
            if not prostate_files:
                self.logger.warning(f"未找到prostate.nii.gz文件: {seg_dir}")
                continue

            self.logger.info(f"  找到 {len(prostate_files)} 个掩膜文件")

            for file in prostate_files:
                if not self._move_seg_mask(str(file), str(seg_dir), dry_run):
                    success = False

        return success

    def step6_move_reference_files(self, subject_ids: List[str], reference_configs: Dict, dry_run: bool = False) -> bool:
        """
        步骤6: 移动参考图像和掩膜到reference目录
        支持动态路径生成，不依赖配置文件中的硬编码路径
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤6: 移动参考图像和掩膜")
        self.logger.info("=" * 60)

        success = True

        for subject_id in subject_ids:
            self.logger.info(f"--- 移动参考文件: {subject_id} ---")

            # 目标目录
            ref_dir = self.base_dir / "reference" / subject_id

            # 如果有配置文件，使用配置文件
            if subject_id in reference_configs:
                success &= self._move_reference_from_config(subject_id, reference_configs[subject_id], ref_dir, dry_run)
            else:
                # 没有配置文件时，使用智能路径推断
                success &= self._move_reference_auto(subject_id, ref_dir, dry_run)

        return success

    def _move_reference_from_config(self, subject_id: str, config: Dict, ref_dir: Path, dry_run: bool) -> bool:
        """从配置文件移动参考文件"""
        success = True

        # 处理参考图像
        if "images" in config:
            self.logger.info("  移动参考图像...")
            for img_config in config["images"]:
                src_pattern = self._resolve_dynamic_path(img_config["src"], subject_id)
                target_name = img_config.get("name", "")

                files = glob.glob(src_pattern, recursive=True)
                if not files:
                    self.logger.warning(f"    未找到参考图像: {src_pattern}")
                    continue

                for src_file in files:
                    if not self._move_file(src_file, ref_dir, dry_run, target_name=target_name):
                        success = False

        # 处理参考掩膜
        if "masks" in config:
            self.logger.info("  移动参考掩膜...")
            for mask_config in config["masks"]:
                src_pattern = self._resolve_dynamic_path(mask_config["src"], subject_id)
                target_name = mask_config.get("name", "")

                files = glob.glob(src_pattern, recursive=True)
                if not files:
                    self.logger.warning(f"    未找到参考掩膜: {src_pattern}")
                    continue

                for src_file in files:
                    if not self._move_file(src_file, ref_dir, dry_run, target_name=target_name):
                        success = False

        return success

    def _move_reference_auto(self, subject_id: str, ref_dir: Path, dry_run: bool) -> bool:
        """自动推断并移动参考文件"""
        self.logger.info("  使用自动路径推断...")
        success = True

        # 从processed目录查找参考图像
        processed_dir = self.base_dir / "prediction" / subject_id / "processed"
        if processed_dir.exists():
            # 查找source文件作为参考图像
            source_files = list(processed_dir.glob("source_*_registered.nii.gz"))
            if source_files:
                self.logger.info(f"  找到 {len(source_files)} 个参考图像")
                for src_file in source_files:
                    if not self._move_file(str(src_file), ref_dir, dry_run, target_name=src_file.name):
                        success = False
            else:
                self.logger.warning(f"  未在 {processed_dir} 中找到参考图像")

        # 从segmentation目录查找参考掩膜
        seg_dir = self.base_dir / "segmentation" / subject_id
        if seg_dir.exists():
            # 查找prostate文件作为参考掩膜
            mask_files = list(seg_dir.glob("prostate_source_*_registered.nii.gz"))
            if mask_files:
                self.logger.info(f"  找到 {len(mask_files)} 个参考掩膜")
                for src_file in mask_files:
                    # 保持原始的掩膜名称，不添加额外的"seg"
                    target_name = src_file.name

                    if not self._move_file(str(src_file), ref_dir, dry_run, target_name=target_name):
                        success = False
            else:
                self.logger.warning(f"  未在 {seg_dir} 中找到参考掩膜")

        return success

    def _resolve_dynamic_path(self, path_template: str, subject_id: str) -> str:
        """Resolve a path template: substitute {subject_id}, fix orientation.

        Subject IDs containing "_sag" force sagittal naming in the path,
        while "_tra" forces axial naming (e.g. CHEN-REN-GENG_sag should
        pick up sagittal files).
        """
        resolved = path_template.replace("{subject_id}", subject_id)

        # Rewrite orientation tokens to match the subject's scan orientation.
        if "_sag" in subject_id:
            substitutions = (("_axial_", "_sagittal_"), ("axial", "sagittal"))
        elif "_tra" in subject_id:
            substitutions = (("_sagittal_", "_axial_"), ("sagittal", "axial"))
        else:
            substitutions = ()

        for old, new in substitutions:
            resolved = resolved.replace(old, new)

        return resolved

    def step7_compute_metrics(self, subject_ids: List[str], dry_run: bool = False) -> bool:
        """
        步骤7: 计算评估指标
        """
        self.logger.info("=" * 60)
        self.logger.info("步骤7: 计算评估指标")
        self.logger.info("=" * 60)

        success = True

        for subject_id in subject_ids:
            self.logger.info(f"--- 计算指标: {subject_id} ---")

            if dry_run:
                self.logger.info(f"  [模拟] 将计算 {subject_id} 的SSIM和Dice指标")
            else:
                if not self.metrics_processor.process_subject(
                    subject_id, 
                    reslice=True, 
                    calc_ssim=True, 
                    calc_dice=True, 
                    method_types=['processed'],
                    dry_run=False
                ):
                    success = False

        return success

    def run_full_pipeline(self, subject_ids: List[str], prediction_configs: Dict, 
                         reference_configs: Dict = None, device: str = "gpu:0", dry_run: bool = False) -> bool:
        """
        运行完整的处理流程
        """
        self.logger.info("🚀 开始运行完整的医学图像处理流程")
        self.logger.info(f"受试者列表: {', '.join(subject_ids)}")
        self.logger.info(f"模拟运行: {'是' if dry_run else '否'}")

        if reference_configs is None:
            reference_configs = {}

        steps = [
            ("创建目录结构", lambda: self.step1_create_directories(subject_ids, dry_run)),
            ("移动预测结果", lambda: self.step2_move_predictions(subject_ids, prediction_configs, dry_run)),
            ("图像标准化", lambda: self.step3_normalize(subject_ids, dry_run)),
            ("图像分割", lambda: self.step4_segment(subject_ids, device, dry_run)),
            ("移动分割掩膜", lambda: self.step5_move_seg_masks(subject_ids, dry_run)),
            ("移动参考文件", lambda: self.step6_move_reference_files(subject_ids, reference_configs, dry_run)),
            ("计算评估指标", lambda: self.step7_compute_metrics(subject_ids, dry_run))
        ]

        success_count = 0
        for i, (step_name, step_func) in enumerate(steps, 1):
            self.logger.info(f"\n🔄 执行步骤 {i}/7: {step_name}")

            try:
                if step_func():
                    success_count += 1
                    self.logger.info(f"✅ 步骤 {i} 完成: {step_name}")
                else:
                    self.logger.error(f"❌ 步骤 {i} 失败: {step_name}")
            except Exception as e:
                self.logger.error(f"❌ 步骤 {i} 异常: {step_name} - {e}")

        self.logger.info("=" * 60)
        if success_count == len(steps):
            self.logger.info("🎉 完整流程执行成功!")
        else:
            self.logger.warning(f"⚠️ 流程部分完成: {success_count}/{len(steps)} 个步骤成功")
        self.logger.info("=" * 60)

        return success_count == len(steps)

    def _move_file(self, src_path: str, dst_dir: Path, dry_run: bool = False, target_name: str = "") -> bool:
        """移动单个文件"""
        try:
            src_path = Path(src_path)
            if not src_path.exists():
                self.logger.error(f"    源文件不存在: {src_path}")
                return False

            # 确定目标文件名
            if target_name:
                dst_file = dst_dir / target_name
            else:
                dst_file = dst_dir / src_path.name

            if dry_run:
                self.logger.info(f"    [模拟] 移动: {src_path} -> {dst_file}")
            else:
                dst_dir.mkdir(parents=True, exist_ok=True)
                shutil.copy2(src_path, dst_file)
                self.logger.info(f"    ✓ 移动: {src_path.name} -> {dst_file.name}")

            return True
        except Exception as e:
            self.logger.error(f"    ✗ 移动文件失败: {e}")
            return False

    def _run_segmentation(self, input_path: str, output_path: str, device: str) -> bool:
        """Run TotalSegmentator (total_mr task) on one input volume.

        Args:
            input_path: path to the input .nii.gz image.
            output_path: directory where TotalSegmentator writes its masks.
            device: device string forwarded to TotalSegmentator.

        Returns:
            True on success; False on timeout, non-zero exit, missing
            executable, or any unexpected error.
        """
        try:
            # Report GPU memory state first and free the cache when low,
            # since TotalSegmentator frequently fails with CUDA OOM.
            if device.startswith("gpu") or device.startswith("cuda"):
                try:
                    import torch
                    if torch.cuda.is_available():
                        gpu_id = int(device.split(":")[-1]) if ":" in device else 0
                        if gpu_id < torch.cuda.device_count():
                            memory_allocated = torch.cuda.memory_allocated(gpu_id) / 1024**3  # GB
                            memory_reserved = torch.cuda.memory_reserved(gpu_id) / 1024**3   # GB
                            memory_total = torch.cuda.get_device_properties(gpu_id).total_memory / 1024**3  # GB
                            memory_free = memory_total - memory_reserved

                            self.logger.info(f"    GPU {gpu_id} 内存状态: {memory_free:.2f}GB 可用 / {memory_total:.2f}GB 总计")

                            if memory_free < 2.0:  # less than 2 GB free
                                self.logger.warning(f"    GPU内存不足 ({memory_free:.2f}GB)，尝试清理内存...")
                                torch.cuda.empty_cache()
                                memory_free = memory_total - torch.cuda.memory_reserved(gpu_id) / 1024**3
                                self.logger.info(f"    清理后可用内存: {memory_free:.2f}GB")

                                if memory_free < 1.0:  # still under 1 GB after cleanup
                                    self.logger.warning(f"    GPU内存仍然不足，建议使用CPU或等待其他进程释放内存")
                except ImportError:
                    pass

            command = [
                "TotalSegmentator",
                "-i", input_path,
                "-o", output_path,
                "-ta", "total_mr",
                "--device", device,
            ]

            self.logger.info(f"    执行分割: {Path(input_path).name} (设备: {device})")

            # Ask PyTorch's allocator (in the child process) for expandable
            # segments to reduce fragmentation-induced OOMs.
            env = os.environ.copy()
            env["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

            result = subprocess.run(
                command, 
                check=True, 
                capture_output=True, 
                text=True, 
                env=env,
                timeout=1800  # 30-minute timeout
            )

            self.logger.info(f"    ✓ 分割完成: {Path(input_path).name}")
            return True

        except subprocess.TimeoutExpired:
            self.logger.error(f"    ✗ 分割超时: {Path(input_path).name}")
            return False
        except subprocess.CalledProcessError as e:
            error_msg = e.stderr.strip() if e.stderr else "未知错误"

            # Give a targeted hint for CUDA out-of-memory failures.
            if "CUDA out of memory" in error_msg or "OutOfMemoryError" in error_msg:
                self.logger.error(f"    ✗ GPU内存不足: {Path(input_path).name}")
                self.logger.error(f"    建议: 1) 使用CPU (--device cpu) 2) 释放GPU内存 3) 使用更小的batch size")
            else:
                self.logger.error(f"    ✗ 分割失败: {Path(input_path).name}")
                # Log only the first few stderr lines to keep the log short.
                error_lines = error_msg.split('\n')[:5]
                for line in error_lines:
                    if line.strip():
                        self.logger.error(f"      {line.strip()}")
            return False
        except FileNotFoundError:
            self.logger.error("    ✗ TotalSegmentator未找到，请确保已安装")
            self.logger.error("    安装命令: pip install TotalSegmentator")
            return False
        except Exception as e:
            self.logger.error(f"    ✗ 分割异常: {e}")
            return False

    def _move_seg_mask(self, src_path: str, dst_dir: str, dry_run: bool = False) -> bool:
        """移动分割掩膜"""
        try:
            src_path = Path(src_path)
            parent_dir = src_path.parent.name

            # 生成新文件名
            if parent_dir.startswith("seg_"):
                new_name = f"prostate_{parent_dir[4:]}.nii.gz"
            else:
                new_name = f"prostate_{parent_dir}.nii.gz"

            dst_path = Path(dst_dir) / new_name

            if dry_run:
                self.logger.info(f"    [模拟] 移动掩膜: {src_path} -> {dst_path}")
            else:
                shutil.move(str(src_path), str(dst_path))
                self.logger.info(f"    ✓ 移动掩膜: {new_name}")

            return True
        except Exception as e:
            self.logger.error(f"    ✗ 移动掩膜失败: {e}")
            return False


def parse_args():
    """Parse command-line arguments for the pipeline CLI.

    Returns:
        argparse.Namespace with base_dir, subjects, config, reference_config,
        steps, device, dry_run and verbose attributes.
    """
    parser = argparse.ArgumentParser(
        description="医学图像处理自动化流程",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
使用示例:
  # 运行完整流程（需要提供配置文件）
  python auto_pipeline.py --subjects CHEN-REN-GENG --config config.json
  
  # 模拟运行
  python auto_pipeline.py --subjects CHEN-REN-GENG --config config.json --dry-run
  
  # 只运行特定步骤
  python auto_pipeline.py --subjects CHEN-REN-GENG --steps create_dirs normalize
  
  # 指定GPU设备
  python auto_pipeline.py --subjects CHEN-REN-GENG --config config.json --device gpu:1
        """
    )

    # Project root; defaults to the parent of the scripts directory.
    parser.add_argument(
        "--base-dir", 
        type=str, 
        default="..", 
        help="项目根目录路径"
    )
    parser.add_argument(
        "--subjects",
        type=str,
        nargs="+",
        required=True,
        help="要处理的受试者ID列表"
    )
    # Optional JSON configs for steps 2 and 6.
    parser.add_argument(
        "--config",
        type=str,
        help="预测文件配置JSON文件路径"
    )
    parser.add_argument(
        "--reference-config",
        type=str,
        help="参考文件配置JSON文件路径"
    )
    # Subset of pipeline steps to run; omitted means the full pipeline.
    parser.add_argument(
        "--steps",
        type=str,
        nargs="*",
        choices=["create_dirs", "move_predictions", "normalize", "segment", "move_reference", "move_masks", "metrics"],
        help="要执行的步骤列表，不指定则执行所有步骤"
    )
    parser.add_argument(
        "--device",
        type=str,
        default="gpu:0",
        help="分割使用的设备 (gpu:0, gpu:1, cpu等，默认: gpu:0)"
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="模拟运行，不实际执行操作"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="详细输出"
    )
    
    return parser.parse_args()


def load_config(config_path: str) -> Dict:
    """Load a JSON configuration file.

    Args:
        config_path: path to a UTF-8 encoded JSON file.

    Returns:
        The parsed configuration, or {} when the file cannot be read or
        contains invalid JSON (the error is logged).
    """
    import json

    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError) as e:
        # OSError: missing/unreadable file; ValueError also covers
        # json.JSONDecodeError and UnicodeDecodeError. Narrower than the
        # previous blanket Exception so programming errors are not
        # silently swallowed.
        logging.error(f"加载配置文件失败: {e}")
        return {}


def main():
    """CLI entry point: parse arguments and run the requested pipeline steps."""
    args = parse_args()

    # --verbose raises the root logger to DEBUG.
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Build the pipeline rooted at the requested project directory.
    pipeline = AutoPipeline(args.base_dir)

    # Optional JSON config describing where prediction files come from (step 2).
    prediction_configs = {}
    if args.config:
        prediction_configs = load_config(args.config)

    # Optional JSON config describing where reference files come from (step 6).
    reference_configs = {}
    if args.reference_config:
        reference_configs = load_config(args.reference_config)

    # Decide which steps to run.
    if args.steps:
        # Run only the explicitly requested steps, in the order given.
        step_mapping = {
            "create_dirs": lambda: pipeline.step1_create_directories(args.subjects, args.dry_run),
            "move_predictions": lambda: pipeline.step2_move_predictions(args.subjects, prediction_configs, args.dry_run),
            "normalize": lambda: pipeline.step3_normalize(args.subjects, args.dry_run),
            "segment": lambda: pipeline.step4_segment(args.subjects, args.device, args.dry_run),
            "move_masks": lambda: pipeline.step5_move_seg_masks(args.subjects, args.dry_run),
            "move_reference": lambda: pipeline.step6_move_reference_files(args.subjects, reference_configs, args.dry_run),
            "metrics": lambda: pipeline.step7_compute_metrics(args.subjects, args.dry_run)
        }
        
        success_count = 0
        for step in args.steps:
            logging.info(f"执行步骤: {step}")
            if step_mapping[step]():
                success_count += 1
            else:
                logging.error(f"步骤失败: {step}")
        
        logging.info(f"完成 {success_count}/{len(args.steps)} 个步骤")
    else:
        # No --steps given: run the full seven-step pipeline.
        pipeline.run_full_pipeline(args.subjects, prediction_configs, reference_configs, args.device, args.dry_run)


# Entry-point guard: run the CLI only when executed directly as a script.
if __name__ == "__main__":
    main()
