#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import difflib
import hashlib
import json
import os
import shutil
import sys
import time
from pathlib import Path
from typing import Dict, Any, Tuple, List, Optional

# 统一的常量
CHECKPOINTS_DIR = ".checkpoints"
FILES_JSON = ".cpk_files"
NODES_JSON = ".cpk_nodes"
CKPIGNORE = ".ckp_ignore"

# 内置忽略（名称匹配）
BUILTIN_IGNORES = {
    CHECKPOINTS_DIR,  # 忽略内部管理目录
    ".git",           # 常见的git元数据
}

# 运行期忽略规则（由 .checkpoints/ckpignore 加载）
ignore_names: set[str] = set()
ignore_dirs: set[str] = set()         # 目录名（或相对路径片段）忽略
ignore_globs: List[str] = []          # 通配模式，使用 fnmatch

import fnmatch

def create_default_ckpignore(workspace: Path):
    """Write a default ckpignore file with example rules, unless one exists."""
    target = workspace / CHECKPOINTS_DIR / CKPIGNORE
    if target.exists():
        return
    # NOTE: this text is written to disk verbatim — keep it stable.
    default_rules = """# ckpignore - ignore rules for checkpointor
# Lines starting with # are comments.
# Directory examples:
.git
.vscode
.checkpoints
venv/
__pycache__/

# File patterns:
*.log
*.tmp
*.bak
*.swp

# OS junk:
.DS_Store
Thumbs.db
"""
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(target, "w", encoding="utf-8") as fh:
        fh.write(default_rules)

def load_ckpignore(workspace: Path):
    """
    Load ignore rules from workspace/.checkpoints/ckpignore into the
    module-level rule sets.

    Simplified syntax:
    - lines starting with '#' are comments
    - blank lines are skipped
    - a trailing '/' marks a directory name or path fragment (e.g. dist/)
    - '*' or '?' makes the line a glob pattern (e.g. *.log, **/*.bak)
    - anything else is an exact file/directory name
    """
    global ignore_names, ignore_dirs, ignore_globs
    # Reset state, seeding the name set with the built-in ignores.
    ignore_names = set(BUILTIN_IGNORES)
    ignore_dirs = set()
    ignore_globs = []

    rules_file = workspace / CHECKPOINTS_DIR / CKPIGNORE
    if not rules_file.exists():
        return

    try:
        with open(rules_file, "r", encoding="utf-8") as fh:
            for raw_line in fh:
                rule = raw_line.strip()
                if not rule or rule.startswith("#"):
                    continue
                if rule.endswith("/"):
                    ignore_dirs.add(rule.rstrip("/"))
                elif "*" in rule or "?" in rule:
                    ignore_globs.append(rule)
                else:
                    ignore_names.add(rule)
    except Exception:
        # Best effort: a broken rules file must not break the tool.
        pass

def should_ignore(workspace: Path, path: Path, is_dir: bool) -> bool:
    """Decide whether *path* is ignored by name, directory or glob rules."""
    name = path.name
    rel = path.relative_to(workspace).as_posix()

    # Exact name match applies to files and directories alike.
    if name in ignore_names:
        return True

    if is_dir:
        # Bare directory-name hit.
        if name in ignore_dirs:
            return True
        # Also honour relative path fragments such as "sub/build".
        padded = "/" + rel + "/"
        for frag in ignore_dirs:
            if rel == frag or rel.startswith(frag + "/") or ("/" + frag + "/") in padded:
                return True

    # Glob patterns are tried against both the relative path and the bare name.
    return any(
        fnmatch.fnmatch(rel, patt) or fnmatch.fnmatch(name, patt)
        for patt in ignore_globs
    )

# 工具函数

def file_md5(path: Path) -> str:
    """Return the hex MD5 digest of the file at *path*, read in 1 MiB chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as fh:
        while chunk := fh.read(1024 * 1024):
            digest.update(chunk)
    return digest.hexdigest()

def now_ts() -> float:
    """Current Unix timestamp in seconds (float)."""
    return time.time()

def load_json(path: Path, default):
    """Parse JSON from *path*; return *default* when missing or unreadable."""
    if not path.exists():
        return default
    try:
        with open(path, "r", encoding="utf-8") as fh:
            parsed = json.load(fh)
    except Exception:
        # Malformed or unreadable content falls back to the default.
        return default
    return parsed

def dump_json(path: Path, data):
    """Serialize *data* as pretty-printed UTF-8 JSON, creating parent dirs."""
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w", encoding="utf-8") as fh:
        json.dump(data, fh, ensure_ascii=False, indent=2)

def workspace_paths(workspace: Path) -> Tuple[Path, Path, Path]:
    """Return (.checkpoints dir, files-index path, nodes-index path)."""
    checkpoints = workspace / CHECKPOINTS_DIR
    return checkpoints, checkpoints / FILES_JSON, checkpoints / NODES_JSON

def normalize_relpath(workspace: Path, p: Path) -> str:
    """Relative path of *p* under *workspace*, using '/' separators."""
    return p.relative_to(workspace).as_posix()

def get_file_info_entry(workspace: Path, rel_path: str) -> Dict[str, Any]:
    """Build the metadata record (mtime, path, md5, status) for one file."""
    abs_path = workspace / rel_path
    return {
        "updatetime": abs_path.stat().st_mtime,
        "path": rel_path,
        "hash": file_md5(abs_path),
        "status": "unchanged",
    }

def scan_workspace_file_infos(workspace: Path) -> Dict[str, Dict[str, Any]]:
    """
    Walk *workspace* and build the file-info map, honouring ckpignore plus
    the built-in ignores.

    Keys are '/'-separated relative paths; values are the records produced
    by get_file_info_entry ({updatetime, path, hash, status}).
    """
    infos: Dict[str, Dict[str, Any]] = {}
    for root, dirs, files in os.walk(workspace):
        base = Path(root)

        # Pruning dirs in place makes os.walk skip ignored subtrees entirely.
        dirs[:] = [d for d in dirs if not should_ignore(workspace, base / d, is_dir=True)]

        for filename in files:
            full = base / filename
            if should_ignore(workspace, full, is_dir=False):
                continue
            rel = normalize_relpath(workspace, full)
            try:
                infos[rel] = get_file_info_entry(workspace, rel)
            except (FileNotFoundError, PermissionError):
                # File vanished or is unreadable mid-scan; skip it.
                continue
    return infos

def compare_file_infos(old_infos: Dict[str, Any], new_infos: Dict[str, Any]) -> Dict[str, List[str]]:
    """Classify paths into added/deleted/modified/unchanged by key set and hash."""
    old_keys = set(old_infos)
    new_keys = set(new_infos)

    modified: List[str] = []
    unchanged: List[str] = []
    for key in sorted(old_keys & new_keys):
        # A differing hash means the file content changed between the two maps.
        bucket = modified if old_infos[key].get("hash") != new_infos[key].get("hash") else unchanged
        bucket.append(key)

    return {
        "added": sorted(new_keys - old_keys),
        "deleted": sorted(old_keys - new_keys),
        "modified": modified,
        "unchanged": unchanged,
    }

def ensure_initialized(workspace: Path):
    """
    Initialize the .checkpoints directory if it does not exist yet, then
    create the initial "base" node as the baseline.

    When .checkpoints already exists this is a no-op; callers are expected
    to (re)load ignore rules themselves (main() does so right after).
    """
    cp, files_json_path, nodes_json_path = workspace_paths(workspace)
    if cp.exists():
        # Already initialized — nothing to do here.
        return
    else:
        cp.mkdir(parents=True, exist_ok=True)
        create_default_ckpignore(workspace)
        # Load ignore rules first (the user may have pre-seeded a ckpignore).
        load_ckpignore(workspace)

        # Create the "base" node as the initial baseline.
        add_node(workspace, "base", "baseline")

def load_nodes(nodes_json_path: Path) -> List[Dict[str, Any]]:
    """Read the node index; empty list when the file is absent or broken."""
    return load_json(nodes_json_path, [])

def save_nodes(nodes_json_path: Path, nodes: List[Dict[str, Any]]):
    """Persist the node index as JSON."""
    dump_json(nodes_json_path, nodes)

def get_node_dir(workspace: Path, node_name: str) -> Path:
    """Directory under .checkpoints where a node's files are stored."""
    return workspace / CHECKPOINTS_DIR / node_name

def get_workspace_file_infos(workspace: Path) -> Dict[str, Any]:
    """The workspace-level baseline file-info map ({} when absent)."""
    files_json_path = workspace_paths(workspace)[1]
    return load_json(files_json_path, {})

def save_file_infos(files_json_path: Path, infos: Dict[str, Any]):
    """Persist a file-info map as JSON."""
    dump_json(files_json_path, infos)


def update_status_by_diff(old_infos: Dict[str, Any], new_infos: Dict[str, Any]) -> Tuple[Dict[str, Any], Dict[str, List[str]]]:
    """
    Compare the previous baseline (*old_infos*) with the current scan
    (*new_infos*) and return:
    - a map of current entries annotated with status added/modified/unchanged
    - the raw diff sets (which additionally include "deleted")

    Deleted paths never appear in the returned map since they no longer
    exist in *new_infos*; unchanged entries keep their old-baseline record.
    """
    diff = compare_file_infos(old_infos, new_infos)
    annotated: Dict[str, Any] = {}

    for status, source in (("added", new_infos),
                           ("modified", new_infos),
                           ("unchanged", old_infos)):
        for key in diff[status]:
            entry = dict(source[key])
            entry["status"] = status
            annotated[key] = entry

    return annotated, diff


def get_node_file_infos(workspace: Path, node_name: str) -> Dict[str, Any]:
    """
    Load the .cpk_files index stored inside a node's directory.

    Keys are workspace-relative paths. NOTE: entries are returned exactly
    as stored — the "path" field of files copied into the node points
    inside the node directory (node_name/rel) and is NOT normalized back
    to a workspace-relative path here, despite what callers might expect.
    Returns {} when the node or its index is missing.
    """
    node_dir = get_node_dir(workspace, node_name)
    node_infos_path = node_dir / FILES_JSON
    file_infos = load_json(node_infos_path, {})
    return file_infos

def add_node(workspace: Path, node_name: str, desc: str = ""):
    """
    Create a new checkpoint node.

    Scans the workspace, classifies files against the current baseline,
    copies added+modified files into the node directory, then refreshes
    both the node index and the workspace baseline and appends the node
    to nodes.json. Prints a summary of the changes.
    """
    # Reload ignore rules before every operation so live edits to
    # ckpignore take effect immediately.
    load_ckpignore(workspace)

    _, _, nodes_json_path = workspace_paths(workspace)
    nodes = load_nodes(nodes_json_path)
    if any(n.get("name") == node_name for n in nodes):
        print(f"节点已存在: {node_name}")
        return

    # Scan the current workspace state.
    scanned = scan_workspace_file_infos(workspace)

    # Previous baseline from .checkpoints/.cpk_files.
    ws_infos = get_workspace_file_infos(workspace)

    # Compute statuses (files first seen by this node are marked "added").
    node_infos_with_status, diff = update_status_by_diff(ws_infos, scanned)

    # Copy added and modified files into the new node directory.
    checkpoints_dir = workspace / CHECKPOINTS_DIR
    node_dir = get_node_dir(workspace, node_name)
    node_dir.mkdir(parents=True, exist_ok=True)
    for rel in diff["added"] + diff["modified"]:
        src = workspace / rel
        dst = node_dir / rel
        dst.parent.mkdir(parents=True, exist_ok=True)
        if src.exists() and src.is_file():
            shutil.copy2(src, dst)
            # Rewrite the entry's path to be relative to .checkpoints
            # (i.e. "node_name/rel") so the copy can be located later.
            dst_rel = normalize_relpath(checkpoints_dir, dst)
            node_infos_with_status[rel]['path'] = dst_rel

    # Persist indices: the node directory keeps its own index; the
    # workspace baseline is an exact copy of it.
    workspace_file_info_path = workspace / CHECKPOINTS_DIR / FILES_JSON
    node_file_info_path = node_dir / FILES_JSON

    # 1) Save the index into the node directory.
    save_file_infos(node_file_info_path, node_infos_with_status)
    # 2) Mirror it as the workspace baseline.
    shutil.copy2(node_file_info_path, workspace_file_info_path)

    # Append the new node to nodes.json.
    nodes.append({
        "name": node_name,
        "desc": desc,
        "updatetime": now_ts()
    })
    save_nodes(nodes_json_path, nodes)

    # Report the outcome.
    print(f"创建节点: {node_name}")
    if diff["added"]:
        print("  新增:", *diff["added"])
    if diff["modified"]:
        print("  修改:", *diff["modified"])
    if diff["deleted"]:
        print("  删除:", *diff["deleted"])
    if not (diff["added"] or diff["modified"] or diff["deleted"]):
        print("  无变更")

def list_nodes(workspace: Path):
    """Print every known node as 'name<TAB>desc<TAB>timestamp'."""
    nodes_json_path = workspace_paths(workspace)[2]
    nodes = load_nodes(nodes_json_path)
    if not nodes:
        print("无节点")
        return
    for node in nodes:
        print("{}\t{}\t{}".format(
            node.get("name", ""),
            node.get("desc", ""),
            node.get("updatetime", 0),
        ))

def remove_node(workspace: Path, node_name: str):
    """Delete a node's index entry and stored files; "base" is protected."""
    if node_name == "base":
        print("不允许删除 base 节点")
        return
    nodes_json_path = workspace_paths(workspace)[2]
    nodes = load_nodes(nodes_json_path)
    remaining = [entry for entry in nodes if entry.get("name") != node_name]
    if len(remaining) == len(nodes):
        # Nothing was filtered out, so no node carried that name.
        print(f"节点不存在: {node_name}")
        return

    node_dir = get_node_dir(workspace, node_name)
    if node_dir.exists():
        shutil.rmtree(node_dir)

    save_nodes(nodes_json_path, remaining)
    print(f"已删除节点: {node_name}")

def get_node_file_content(workspace: Path, node_name: str, rel_path: str) -> Optional[str]:
    """
    Read the full text of *rel_path* as captured by node *node_name*.

    The node directory is preferred; when the file was never copied there
    (unchanged files), fall back to the live workspace copy. Returns None
    when the content cannot be read.
    """
    node_file = get_node_dir(workspace, node_name) / rel_path

    if node_file.exists():
        try:
            with open(node_file, "r", encoding="utf-8", errors="replace") as fh:
                return fh.read()
        except Exception:
            # Node copy exists but is unreadable — give up (no fallback).
            return None

    # Fall back to the workspace copy.
    ws_file = workspace / rel_path
    if ws_file.exists():
        try:
            with open(ws_file, "r", encoding="utf-8", errors="replace") as fh:
                return fh.read()
        except Exception:
            pass

    return None

def get_workspace_file_content(workspace: Path, rel_path: str) -> Optional[str]:
    """Read a workspace file as text (bad bytes replaced); None on failure."""
    target = workspace / rel_path
    if not (target.exists() and target.is_file()):
        return None
    try:
        with open(target, "r", encoding="utf-8", errors="replace") as fh:
            return fh.read()
    except Exception:
        return None

def generate_unified_diff(workspace: Path, node_a: str, node_b: str, diff_info: Dict[str, List[str]], is_snapshot=False) -> str:
    """
    Produce a git-like unified diff text for the given diff sets.

    The output mimics the git diff layout so it is easy for tooling (or an
    LLM) to analyse.

    Args:
        workspace: workspace root path
        node_a: first node name (diff source)
        node_b: second node name, or a snapshot label such as "current"
        diff_info: diff sets (added, deleted, modified)
        is_snapshot: when True, node_b refers to the live workspace rather
            than a stored node
    """
    lines = []
    lines.append(f"diff --ckp {node_a} {node_b}")
    lines.append(f"--- {node_a}")
    lines.append(f"+++ {node_b}")

    # Added files: emit the whole new content as one "+" hunk.
    for rel_path in diff_info["added"]:
        if is_snapshot:
            content_b = get_workspace_file_content(workspace, rel_path)
        else:
            content_b = get_node_file_content(workspace, node_b, rel_path)

        if content_b is None:
            continue

        lines.append("")
        lines.append(f"--- /dev/null")
        lines.append(f"+++ b/{rel_path}")

        content_lines = content_b.splitlines()
        line_count = len(content_lines)
        if line_count > 0:
            lines.append(f"@@ -0,0 +1,{line_count} @@")
            for line in content_lines:
                lines.append(f"+{line}")

    # Deleted files: emit the old content as one "-" hunk.
    for rel_path in diff_info["deleted"]:
        content_a = get_node_file_content(workspace, node_a, rel_path)
        if content_a is None:
            continue

        lines.append("")
        lines.append(f"--- a/{rel_path}")
        lines.append(f"+++ /dev/null")

        content_lines = content_a.splitlines()
        line_count = len(content_lines)
        if line_count > 0:
            lines.append(f"@@ -1,{line_count} +0,0 @@")
            for line in content_lines:
                lines.append(f"-{line}")

    # Modified files: delegate hunk computation to difflib.
    for rel_path in diff_info["modified"]:
        content_a = get_node_file_content(workspace, node_a, rel_path)

        if is_snapshot:
            content_b = get_workspace_file_content(workspace, rel_path)
        else:
            content_b = get_node_file_content(workspace, node_b, rel_path)

        if content_a is None or content_b is None:
            # Either side is unreadable — skip this file.
            continue

        lines_a = content_a.splitlines(keepends=False)
        lines_b = content_b.splitlines(keepends=False)

        # Let difflib build the unified diff body.
        unified = difflib.unified_diff(
            lines_a,
            lines_b,
            fromfile=f"a/{rel_path}",
            tofile=f"b/{rel_path}",
            lineterm=""
        )

        # Drop difflib's leading "---"/"+++" header pair (the top-level
        # header already identifies both sides of the comparison).
        unified_list = list(unified)
        if len(unified_list) > 2:
            lines.append("")  # blank separator between files
            # Append everything from index 2 onwards (the @@ hunks).
            for line in unified_list[2:]:
                lines.append(line)

    return "\n".join(lines)

def compare_nodes(workspace: Path, node_a: str, node_b: str, output_diff: Optional[Path] = None):
    """Print the diff between two nodes; optionally write a unified diff file."""
    infos_a = get_node_file_infos(workspace, node_a)
    infos_b = get_node_file_infos(workspace, node_b)
    if not infos_a:
        print(f"节点不存在或无文件信息: {node_a}")
        return
    if not infos_b:
        print(f"节点不存在或无文件信息: {node_b}")
        return

    diff = compare_file_infos(infos_a, infos_b)
    print(f"比较 {node_a} -> {node_b}")
    for label, key in (("  新增:", "added"), ("  删除:", "deleted"), ("  修改:", "modified")):
        print(label, *diff[key])

    if diff["added"] or diff["deleted"] or diff["modified"]:
        # With differences and an output target, also write a diff file.
        if output_diff:
            diff_text = generate_unified_diff(workspace, node_a, node_b, diff, is_snapshot=False)
            output_diff.parent.mkdir(parents=True, exist_ok=True)
            with open(output_diff, "w", encoding="utf-8") as fh:
                fh.write(diff_text)
            print(f"差异已保存到: {output_diff}")
    else:
        print("  无差异")

def export_node(workspace: Path, node_name: str, output_dir: Path):
    """
    Copy every file recorded in node *node_name* into output_dir/node_name.

    Files captured in the node directory are preferred; unchanged files
    fall back to the live workspace copy; files found in neither place are
    silently skipped.
    """
    node_dir = get_node_dir(workspace, node_name)
    infos = get_node_file_infos(workspace, node_name)
    if not infos:
        print(f"节点不存在或无文件信息: {node_name}")
        return
    output_dir = output_dir / node_name
    output_dir.mkdir(parents=True, exist_ok=True)

    for rel in infos:
        dst = output_dir / rel
        dst.parent.mkdir(parents=True, exist_ok=True)
        node_copy = node_dir / rel
        if node_copy.exists():
            shutil.copy2(node_copy, dst)
            continue
        ws_copy = workspace / rel
        if ws_copy.exists():
            shutil.copy2(ws_copy, dst)
    print(f"已导出节点 {node_name} 到 {output_dir}")

# 新增常量
CPK_FILES = ".cpk_files"

def save_current_snapshot(workspace: Path) -> Dict[str, Any]:
    """
    Scan the workspace, annotate statuses against the current baseline,
    and persist the snapshot to .checkpoints/.cpk_files.

    Returns the snapshot map (with per-file status).
    """
    cp = workspace_paths(workspace)[0]
    scanned = scan_workspace_file_infos(workspace)
    # Statuses are computed relative to the current workspace baseline.
    baseline = get_workspace_file_infos(workspace)
    snapshot, _ = update_status_by_diff(baseline, scanned)
    dump_json(cp / CPK_FILES, snapshot)
    return snapshot

def load_snapshot(workspace: Path) -> Dict[str, Any]:
    """Read the last saved workspace snapshot ({} when absent)."""
    cp = workspace_paths(workspace)[0]
    return load_json(cp / CPK_FILES, {})

def compare_with_snapshot(workspace: Path, node_name: str, output_diff: Optional[Path] = None):
    """
    Compare node *node_name* against the current environment snapshot
    (.checkpoints/.cpk_files) and print the result.

    Callers normally refresh the snapshot via save_current_snapshot first.
    """
    node_infos = get_node_file_infos(workspace, node_name)
    if not node_infos:
        print(f"节点不存在或无文件信息: {node_name}")
        return
    snap = load_snapshot(workspace)
    if not snap:
        print("当前环境快照不存在，请先创建快照。")
        return

    diff = compare_file_infos(node_infos, snap)
    print(f"比较 节点[{node_name}] -> 当前环境")
    for label, key in (("  新增:", "added"), ("  删除:", "deleted"), ("  修改:", "modified")):
        print(label, *diff[key])

    if diff["added"] or diff["deleted"] or diff["modified"]:
        # With differences and an output target, also write a diff file.
        if output_diff:
            diff_text = generate_unified_diff(workspace, node_name, "current", diff, is_snapshot=True)
            output_diff.parent.mkdir(parents=True, exist_ok=True)
            with open(output_diff, "w", encoding="utf-8") as fh:
                fh.write(diff_text)
            print(f"差异已保存到: {output_diff}")
    else:
        print("  无差异")

def safe_is_subpath(parent: Path, child: Path) -> bool:
    """
    Return True when *child* equals *parent* or lies underneath it.

    Fix: the return annotation was the literal ``True`` (a value, not a
    type); it is now ``bool``. The broad except is kept deliberately so
    any comparison failure is treated as "not a subpath" rather than a
    crash (Path.relative_to raises ValueError for non-subpaths).
    """
    try:
        child.relative_to(parent)
        return True
    except Exception:
        return False

def iter_workspace_files(workspace: Path) -> List[Path]:
    """All real files in the workspace, excluding .checkpoints and ignored entries."""
    collected: List[Path] = []
    for root, dirs, fnames in os.walk(workspace):
        base = Path(root)
        # Never descend into the internal .checkpoints directory...
        dirs[:] = [d for d in dirs if (base / d).name != CHECKPOINTS_DIR]
        # ...then prune anything matched by the ckpignore rules.
        dirs[:] = [d for d in dirs if not should_ignore(workspace, base / d, is_dir=True)]
        for fname in fnames:
            fpath = base / fname
            if not should_ignore(workspace, fpath, is_dir=False):
                collected.append(fpath)
    return collected

def remove_extra_workspace_files(workspace: Path, keep_rel_paths: set[str]):
    """Unlink workspace files whose relative path is not in *keep_rel_paths*
    (never touches .checkpoints or ignored entries)."""
    for fpath in iter_workspace_files(workspace):
        if fpath.relative_to(workspace).as_posix() in keep_rel_paths:
            continue
        try:
            fpath.unlink()
        except IsADirectoryError:
            # Only files are handled here; directories are left alone.
            pass
        except Exception:
            # Best effort: a file we cannot delete is simply skipped.
            pass

def ensure_parent(dst: Path):
    """Create dst's parent directory chain if it does not exist."""
    dst.parent.mkdir(parents=True, exist_ok=True)

def copy_file(src: Path, dst: Path):
    """Copy src to dst (metadata preserved), creating parent dirs first."""
    ensure_parent(dst)
    shutil.copy2(src, dst)

def checkout_node(workspace: Path, node_name: str):
    """
    Overwrite the current workspace with the snapshot of node *node_name*:
    - workspace files not recorded by the node are deleted
    - files recorded by the node are written back (preferring the copy
      stored in the node directory; unchanged files keep the workspace
      copy already in place)
    Running --stash first is strongly recommended.
    """
    # Load the node's file index (keys are workspace-relative paths).
    infos = get_node_file_infos(workspace, node_name)
    if not infos:
        print(f"节点不存在或无文件信息: {node_name}")
        return
    node_dir = get_node_dir(workspace, node_name)

    # 1) Delete extra files (the node index keys form the keep-set).
    keep_set = set(infos.keys())
    remove_extra_workspace_files(workspace, keep_set)

    # 2) Write back every file recorded by the node.
    for rel in keep_set:
        src_from_node = node_dir / rel
        dst_in_ws = workspace / rel
        if src_from_node.exists():
            copy_file(src_from_node, dst_in_ws)
        else:
            # Unchanged files were never copied into the node directory, so
            # the current workspace copy is kept as-is (intentional no-op).
            ws_src = workspace / rel
            if not ws_src.exists():
                # Neither the node nor the workspace has this file — the
                # entry cannot be restored; skip it.
                pass
    print(f"已将节点 {node_name} 状态 checkout 到工作区。")

def stash_dir(workspace: Path) -> Path:
    """Location of the stash area inside .checkpoints."""
    return workspace / CHECKPOINTS_DIR / "stash"

def clear_dir(path: Path):
    """Recreate *path* as an empty directory, discarding any prior contents."""
    if path.exists():
        shutil.rmtree(path)
    path.mkdir(parents=True, exist_ok=True)

def stash_save(workspace: Path):
    """
    Reset .checkpoints/stash and copy every workspace file (excluding
    .checkpoints and ignored entries) into it.
    """
    load_ckpignore(workspace)  # make sure the ignore rules are current
    target = stash_dir(workspace)
    clear_dir(target)
    for src in iter_workspace_files(workspace):
        copy_file(src, target / src.relative_to(workspace))
    print(f"已保存当前工作区到 {target}")

def stash_pop(workspace: Path):
    """
    Overwrite the workspace with the stash contents (one-shot pop):
    - without a stash directory this is a no-op (workspace untouched)
    - otherwise clear the workspace (minus .checkpoints and ignored files),
      copy everything back, then delete the whole stash directory
    """
    sdir = stash_dir(workspace)
    if not sdir.exists():
        print("无可恢复的 stash。")
        return

    load_ckpignore(workspace)

    # Step 1: wipe current workspace files (never .checkpoints / ignored).
    for stale in iter_workspace_files(workspace):
        try:
            stale.unlink()
        except Exception:
            pass

    # Step 2: restore every stashed file to its original location.
    for root, _dirs, files in os.walk(sdir):
        base = Path(root)
        for fname in files:
            src = base / fname
            copy_file(src, workspace / src.relative_to(sdir))

    # Step 3: the pop is one-shot — drop the stash directory.
    try:
        shutil.rmtree(sdir)
    except Exception:
        # Non-fatal; a leftover stash is overwritten by the next save.
        pass

    print("已从 stash 恢复到工作区并删除 stash。")

def print_diff(title: str, diff: Dict[str, List[str]]):
    """Print a diff summary under *title*; return True when changes exist."""
    print(title)
    has_changes = False
    for key, label in (("added", "  新增:"), ("deleted", "  删除:"), ("modified", "  修改:")):
        if diff[key]:
            has_changes = True
            print(label, *diff[key])
    if not has_changes:
        print("  无差异")
    return has_changes

def status_workspace(workspace: Path) -> int:
    """
    Print the diff between the live workspace and the latest baseline
    (.checkpoints/.cpk_files).

    Returns 1 when differences exist, 0 otherwise (usable as exit code).
    """
    # Ignore rules may have been edited since the last run.
    load_ckpignore(workspace)

    baseline = get_workspace_file_infos(workspace)   # last saved baseline
    scanned = scan_workspace_file_infos(workspace)   # actual current state

    changed = print_diff("当前工作区 vs 最近基线", compare_file_infos(baseline, scanned))
    return int(changed)

def parse_args():
    """Build the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(description="A simple checkpoint-like version management tool for workspace.")
    parser.add_argument("workspace", nargs="?", help="Workspace path (optional, defaults to current directory if .checkpoints exists)")

    # -a/--add: variadic (1 or 2 values). Example: -a newnode "desc text"
    parser.add_argument("-a", "--add", nargs="+", help="Create node: name [desc]")

    # -r/--remove: node name string. Example: -r oldnode
    parser.add_argument("-r", "--remove", help="Remove node by name")

    # -c/--compare: variadic (1 or 2 values).
    # One value: compare against the current environment; two: node vs node.
    # Example: -c base  or  -c base new
    parser.add_argument("-c", "--compare", nargs="+", help="Compare: node [other_node] or node vs current snapshot")

    # -n/--node: only meaningful together with -o/--output
    parser.add_argument("-n", "--node", help="Node name (used with -o/--output)")

    # -o/--output: export destination directory
    parser.add_argument("-o", "--output", help="Export node to output directory")

    # -l/--list: list nodes
    parser.add_argument("-l", "--list", action="store_true", help="List nodes")

    # --checkout: overwrite the workspace with a node's snapshot
    parser.add_argument("--checkout", help="Checkout node to overwrite current workspace")

    # --stash: save or restore (pop).
    # No value is equivalent to "save"; pass "pop" explicitly to restore.
    parser.add_argument("--stash", nargs="?", const="save", choices=["save", "pop"],
                        help="Stash current workspace to .checkpoints/stash (default), or pop to restore")

    # --status: diff the workspace against the latest baseline
    parser.add_argument("--status", action="store_true",
                        help="Show diff between current workspace and latest baseline")

    # --diff-output: save the comparison result to a file (with -c/--compare)
    parser.add_argument("--diff-output", help="Save comparison diff to file (used with -c/--compare)")

    return parser.parse_args()

def main():
    """CLI entry point: resolve the workspace, then dispatch on the flags."""
    args = parse_args()

    # Without an explicit workspace argument, fall back to the current
    # directory — but only when it already contains a .checkpoints dir.
    if args.workspace is None:
        current_dir = Path.cwd()
        checkpoints_dir = current_dir / CHECKPOINTS_DIR
        if checkpoints_dir.exists() and checkpoints_dir.is_dir():
            workspace = current_dir.resolve()
        else:
            print(f"错误：未指定工作目录，且当前目录下不存在 {CHECKPOINTS_DIR} 目录")
            print("请指定工作目录，例如：ckp.py /path/to/workspace [其他参数]")
            sys.exit(1)
    else:
        workspace = Path(args.workspace).resolve()
        if not workspace.exists():
            print(f"工作目录不存在: {workspace}")
            sys.exit(1)

    ensure_initialized(workspace)
    # Reload ignore rules on every run so live edits take effect.
    load_ckpignore(workspace)

    # List nodes.
    if args.list:
        list_nodes(workspace)
        return

    # Create a node: -a name [desc]
    if args.add is not None:
        if len(args.add) < 1 or len(args.add) > 2:
            print("用法错误：-a/--add 需要 1 或 2 个参数：name [desc]")
            sys.exit(2)
        name = args.add[0]
        desc = args.add[1] if len(args.add) == 2 else ""
        add_node(workspace, name, desc)
        return

    # Remove a node: -r name
    if args.remove is not None:
        node_to_remove = args.remove
        if not node_to_remove:
            print("删除节点需要提供名称，如：-r nodeName")
            sys.exit(2)
        remove_node(workspace, node_to_remove)
        return

    # Compare:
    # -c node        -> node vs current environment (snapshot is refreshed
    #                   into .cpk_files first, then compared)
    # -c node other  -> node vs other node
    if args.compare:
        diff_output = Path(args.diff_output).resolve() if args.diff_output else None
        if len(args.compare) == 1:
            left = args.compare[0]
            save_current_snapshot(workspace)  # refresh the snapshot
            compare_with_snapshot(workspace, left, output_diff=diff_output)
            return
        elif len(args.compare) == 2:
            left, right = args.compare
            compare_nodes(workspace, left, right, output_diff=diff_output)
            return
        else:
            print("用法错误：-c/--compare 仅支持 1 或 2 个参数：node [other_node]")
            sys.exit(2)

    # Export: requires -n (node name) and -o (output directory).
    if args.output:
        if not args.node:
            print("导出需要提供 --node 名称，例如：-n nodeName -o /path/to/output")
            sys.exit(2)
        export_node(workspace, args.node, Path(args.output).resolve())
        return

    # Checkout a node -> overwrite the current workspace.
    if args.checkout:
        # Consider running --stash first to save the current state.
        checkout_node(workspace, args.checkout)
        return

    # Stash save/pop.
    if args.stash:
        if args.stash == "save":
            stash_save(workspace)
        elif args.stash == "pop":
            stash_pop(workspace)
        return

    # Status: workspace vs latest baseline.
    if args.status:
        status_workspace(workspace)
        return

    print("已确保初始化完成。如需操作请使用 -a/-r/-c/-o/-l/--checkout/--stash 等参数。")

if __name__ == "__main__":
    main()
