# -*- coding: utf-8 -*-
"""
gather_analysis_results.py — 递归聚合 analysis_result.txt → CSV

用法：
  python gather_analysis_results.py --root . --output all_analysis.csv

参数：
  --root    根目录（默认：当前目录）
  --name    结果文件名（默认：analysis_result.txt）
  --output  聚合后的CSV路径（默认：analysis_aggregate.csv）
"""
import argparse
import csv
import os
import re
from datetime import datetime
from typing import Any, Dict, Optional, Tuple

# --- 工具 ---
def _try_read_text(path: str) -> str:
    """Read *path* as text, trying a list of common encodings in order.

    Each candidate is attempted with strict error handling; if none of
    them succeeds, the file is re-read as UTF-8 with undecodable bytes
    silently dropped as a last resort.
    """
    for candidate in ("utf-8", "utf-8-sig", "gbk", "gb2312", "latin-1"):
        try:
            with open(path, "r", encoding=candidate, errors="strict") as fh:
                return fh.read()
        except Exception:
            pass
    # Last resort: salvage whatever decodes, ignoring bad bytes.
    with open(path, "r", encoding="utf-8", errors="ignore") as fh:
        return fh.read()

def _parse_kv_pairs(s: str) -> Dict[str, str]:
    """Parse 'key=value' pairs separated by ASCII or fullwidth commas.

    Tolerates newlines and surplus whitespace, e.g.
    'stable_sec=0.5, rel_tol=0.05, abs_tol=1.0'.
    """
    pairs: Dict[str, str] = {}
    # Split on runs of either comma variant so both separators behave alike.
    for piece in re.split(r"[,\uFF0C]+", s):
        if "=" not in piece:
            continue
        key, _, value = piece.partition("=")
        pairs[key.strip()] = value.strip()
    return pairs

def _to_float(x) -> Optional[float]:
    """Convert *x* to float, returning None when conversion fails.

    Fixes the return annotation (the original claimed ``-> float`` but
    returns None on failure) and narrows the exception clause to the
    errors ``float()`` can actually raise.
    """
    try:
        return float(x)
    except (TypeError, ValueError, OverflowError):
        return None

def _parse_file(txt: str) -> Dict[str, Any]:
    """Robustly parse the common lines of an analysis_result.txt.

    Every field is optional; anything not found stays None.  Lines are
    matched case-insensitively by lowercasing each stripped line first.

    Fixes vs. the original:
    - number pattern now accepts scientific notation (the old
      ``final_error_xyz`` pattern split '1.2e-03' into bogus digits);
    - ``convergence_time`` also matches the 'convergence_time_s:' spelling;
    - start_idx digits are extracted with a single regex call;
    - redundant ``re.I`` flags dropped (input is already lowercased).
    """
    data: Dict[str, Any] = {
        "converged": None,
        "convergence_time_s": None,
        "index_at_converged": None,
        "final_error_norm": None,
        "final_error_x": None,
        "final_error_y": None,
        "final_error_z": None,
        "frame_start": None,
        "frame_end": None,
        "stable_sec": None,
        "rel_tol": None,
        "abs_tol": None,
        "start_mode": None,
        "start_idx": None,
    }
    # Plain or scientific-notation float, optionally signed.
    num_pat = r"[\-+]?\d+(?:\.\d+)?(?:e[\-+]?\d+)?"

    for ln in txt.splitlines():
        raw = ln.strip()
        low = raw.lower()

        # "Convergence judge: stable_sec=0.5, rel_tol=0.05, abs_tol=1.0"
        m = re.search(r"^convergence\s+judge\s*:\s*(.+)$", low)
        if m:
            kv = _parse_kv_pairs(m.group(1))
            if "stable_sec" in kv:
                data["stable_sec"] = _to_float(kv["stable_sec"])
            if "rel_tol" in kv:
                data["rel_tol"] = _to_float(kv["rel_tol"])
            if "abs_tol" in kv:
                data["abs_tol"] = _to_float(kv["abs_tol"])

        # Some files put these on their own line: "start_mode=bend, start_idx=40"
        if "start_mode" in low or "start_idx" in low:
            kv = _parse_kv_pairs(raw)
            if "start_mode" in kv:
                data["start_mode"] = kv["start_mode"]
            if "start_idx" in kv:
                digits = re.findall(r"\d+", kv["start_idx"])
                data["start_idx"] = int(digits[0]) if digits else None

        # "Converged: YES" / "Converged: NO" (normalized to upper case)
        m = re.search(r"^converged\s*:\s*([a-z]+)", low)
        if m:
            data["converged"] = m.group(1).upper()

        # "convergence_time(s): 4.233333" — also accepts "convergence_time_s:"
        m = re.search(r"^convergence_time[_(]?s\)?\s*:\s*(" + num_pat + ")", low)
        if m:
            data["convergence_time_s"] = _to_float(m.group(1))

        # "index_at_converged : 421"
        m = re.search(r"^index_at_converged\s*:\s*(\d+)", low)
        if m:
            data["index_at_converged"] = int(m.group(1))

        # "final_error_norm : 42.994844"
        m = re.search(r"^final_error_norm\s*:\s*(" + num_pat + ")", low)
        if m:
            data["final_error_norm"] = _to_float(m.group(1))

        # "final_error_xyz : 22.008591, -30.506775, 20.821029"
        m = re.search(r"^final_error_xyz\s*:\s*([^\n]+)$", low)
        if m:
            nums = re.findall(num_pat, m.group(1))
            if len(nums) >= 3:
                data["final_error_x"] = _to_float(nums[0])
                data["final_error_y"] = _to_float(nums[1])
                data["final_error_z"] = _to_float(nums[2])

        # "frame-start : 0" / "frame-end : 500"
        m = re.search(r"^frame-start\s*:\s*(" + num_pat + ")", low)
        if m:
            data["frame_start"] = _to_float(m.group(1))
        m = re.search(r"^frame-end\s*:\s*(" + num_pat + ")", low)
        if m:
            data["frame_end"] = _to_float(m.group(1))

    # Fallback: scan the whole text for start_mode/start_idx in case they
    # did not appear on a dedicated line.
    if data["start_mode"] is None:
        m = re.search(r"start_mode\s*=\s*([A-Za-z0-9_\-+]+)", txt, flags=re.I)
        if m:
            data["start_mode"] = m.group(1)
    if data["start_idx"] is None:
        m = re.search(r"start_idx\s*=\s*(\d+)", txt, flags=re.I)
        if m:
            data["start_idx"] = int(m.group(1))

    return data

def walk_and_collect(root: str, filename: str):
    """Yield the full path of every file under *root* whose name equals
    *filename*, compared case-insensitively."""
    target = filename.lower()
    for dirpath, _subdirs, files in os.walk(root):
        matches = (name for name in files if name.lower() == target)
        for name in matches:
            yield os.path.join(dirpath, name)

def main():
    """CLI entry point: walk --root, parse each result file, write one CSV.

    A file that fails to read/parse produces a row whose value columns are
    empty and whose "error" column holds the exception text, instead of
    aborting the whole aggregation.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--root", default=".", help="起始根目录")
    ap.add_argument("--name", default="analysis_result.txt", help="要聚合的文件名")
    ap.add_argument("--output", default="analysis_aggregate.csv", help="输出CSV路径")
    args = ap.parse_args()

    # One definition of the parsed columns, reused for error rows and header.
    parsed_keys = [
        "converged", "convergence_time_s", "index_at_converged",
        "final_error_norm", "final_error_x", "final_error_y", "final_error_z",
        "frame_start", "frame_end", "stable_sec", "rel_tol", "abs_tol",
        "start_mode", "start_idx",
    ]

    rows = []
    for path in walk_and_collect(args.root, args.name):
        try:
            parsed = _parse_file(_try_read_text(path))
        except Exception as e:
            # Best-effort: keep going, but record why this file failed.
            parsed = dict.fromkeys(parsed_keys)
            parsed["error"] = str(e)

        stat = os.stat(path)
        mtime = datetime.fromtimestamp(stat.st_mtime).isoformat(timespec="seconds")
        rows.append({
            "file_path": os.path.abspath(path),
            "dir_name": os.path.abspath(os.path.dirname(path)),
            "file_mtime": mtime,
            **parsed,
        })

    # Column order.  BUG FIX: "error" was missing from the header, so
    # DictWriter raised ValueError on the very rows the except branch
    # was written to salvage.
    fieldnames = ["file_path", "dir_name", "file_mtime", *parsed_keys, "error"]

    with open(args.output, "w", newline="", encoding="utf-8") as f:
        # restval="" leaves the "error" cell blank for rows without one.
        writer = csv.DictWriter(f, fieldnames=fieldnames, restval="")
        writer.writeheader()
        writer.writerows(rows)

    print(f"Collected {len(rows)} file(s). Saved to: {args.output}")

if __name__ == "__main__":
    main()