#!/usr/bin/env python3
"""
GCDBenchmark Trace Parser
Author: Claude Code
Date: 2025-10-28

解析xctrace导出的trace数据，提取性能指标并生成CSV输出。

支持的指标：
- benchmark_case: 测试场景名
- parameters: 场景参数
- sched_delay: 任务提交到执行的时延
- worker_count: GCD线程总数
- wakeup_count: 线程唤醒次数
- wakeup_per_execute: 每个任务的唤醒次数
- first_delay: 第一个任务的延迟
- max_delay: 最大任务延迟
- total_cost: 从第一个cnt_delay到test_end的总时间差(微秒)
- avg_cost: 平均每个任务的耗时(微秒)
"""

import argparse
import locale
import os

# 强制设置C locale以确保跨平台数字解析一致性
try:
    locale.setlocale(locale.LC_ALL, 'C')
except locale.Error:
    try:
        locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
    except locale.Error:
        # 如果都失败，使用系统默认
        pass
import csv
import os
import re
import subprocess
import sys
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any
import concurrent.futures
import threading


class TraceParser:
    """GCDBenchmark Trace数据解析器"""

    def __init__(self, trace_file: str, output_dir: str = ".", instance_id: str = None):
        self.trace_file = Path(trace_file)
        self.output_dir = Path(output_dir)

        # 使用trace文件名作为唯一的临时目录名
        trace_name = self.trace_file.name.replace('.trace', '').replace('/', '_').replace('\\', '_')
        self.temp_dir = self.output_dir / "temp_xml" / f"trace_{trace_name}"

        # 创建临时目录
        self.temp_dir.mkdir(parents=True, exist_ok=True)

        # 存储解析后的数据
        self.signpost_data = []
        self.thread_state_data = []
        self.gcd_data = []

        # test_start时间戳（用于过滤）
        self.test_start_time = None

        # 存储run信息
        self.runs = []

        # 线程锁用于线程安全的打印
        self.print_lock = threading.Lock()

    def _safe_print(self, message: str) -> None:
        """线程安全的打印函数"""
        with self.print_lock:
            print(message)

    def _export_xctrace_with_retry(self, export_name: str, xpath_query: str, output_file: Path,
                                 max_retries: int = 10, expected_min_size: int = 1000, timeout: int = 30) -> bool:
        """通用的xctrace导出重试机制"""
        import time

        for attempt in range(max_retries):
            try:
                cmd = [
                    "xcrun", "xctrace", "export",
                    "--input", str(self.trace_file),
                    "--xpath", xpath_query,
                    "--output", str(output_file)
                ]
                result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout, encoding='utf-8', errors='replace')

                # 验证导出结果
                if self._validate_export_result(output_file, expected_min_size=expected_min_size):
                    size = output_file.stat().st_size
                    print(f"    ✓ {export_name} → {output_file} ({size:,} bytes)")
                    return True  # 成功，退出重试循环

                # 导出失败或数据无效
                if attempt == max_retries - 1:
                    # 最后一次尝试失败
                    if output_file.exists():
                        size = output_file.stat().st_size
                        if size == 0:
                            print(f"    ✗ {export_name} (no data)")
                        else:
                            print(f"    ✗ {export_name} (invalid data: {size} bytes)")
                    else:
                        print(f"    ✗ {export_name} (file not created)")

                    if result.stderr:
                        print(f"      Error: {result.stderr}")
                else:
                    # 非最后一次尝试，准备重试
                    print(f"    ⚠️ {export_name}: Attempt {attempt + 1} failed, retrying...")
                    if output_file.exists():
                        try:
                            output_file.unlink()  # 删除失败的文件
                        except:
                            pass
                    time.sleep(1)  # 等待1秒后重试

            except subprocess.TimeoutExpired:
                if attempt == max_retries - 1:
                    print(f"    ✗ {export_name} (timeout)")
                else:
                    print(f"    ⚠️ {export_name}: Attempt {attempt + 1} timeout, retrying...")
                    time.sleep(timeout // 10)  # 超时后等待更长时间
            except Exception as e:
                if attempt == max_retries - 1:
                    print(f"    ✗ {export_name} (exception: {e})")
                else:
                    print(f"    ⚠️ {export_name}: Attempt {attempt + 1} exception: {e}, retrying...")
                    time.sleep(1)

        return False  # 所有重试都失败

    def _validate_export_result(self, xml_file: Path, expected_min_size: int = 1000) -> bool:
        """验证导出结果是否有效"""
        if not xml_file.exists():
            return False

        size = xml_file.stat().st_size
        if size < expected_min_size:
            return False

        # 检查XML格式和内容
        try:
            tree = ET.parse(xml_file)
            root = tree.getroot()
            # 检查是否有实际的数据行
            return len(root.findall(".//row")) > 0 or len(root.findall(".//event")) > 0
        except ET.ParseError:
            return False

    def _export_signpost_with_retry(self, run_number: int, signpost_file: Path) -> None:
        """Export the os-signpost table for one run, retrying on failure.

        Failures are tolerated deliberately: downstream parsing checks whether
        the file exists and is non-empty before using it, so this never raises.
        """
        query = (
            f"/trace-toc//run[@number='{run_number}']"
            "//table[@schema='os-signpost']"
        )
        # Signpost tables can legitimately be tiny, so accept >= 100 bytes.
        self._export_xctrace_with_retry(
            export_name=f"Run {run_number} os-signpost",
            xpath_query=query,
            output_file=signpost_file,
            max_retries=10,
            expected_min_size=100,
            timeout=30,
        )

    def export_trace(self) -> bool:
        """Export trace data to XML files under self.temp_dir.

        Pipeline: export the table of contents (TOC) to discover the runs,
        then one os-signpost table per run, then a single shared thread-state
        table. Returns False only when the TOC cannot be obtained; signpost
        and thread-state export failures are tolerated because downstream
        parsing checks the files itself.
        """
        print(f"Exporting trace data from: {self.trace_file}")
        print(f"Output directory: {self.temp_dir}")

        # First export the table of contents to learn the run structure.
        toc_file = self.temp_dir / "toc.xml"
        try:
            # `--toc` writes to stdout; we capture it and persist it ourselves.
            cmd = [
                "xcrun", "xctrace", "export",
                "--input", str(self.trace_file),
                "--toc"
            ]

            print(f"Exporting TOC...")

            # Retry with exponential backoff.
            max_retries = 10
            retry_delay = 2  # seconds

            for attempt in range(max_retries):
                result = subprocess.run(cmd, capture_output=True, text=True, encoding='utf-8', errors='replace')

                if result.returncode == 0 and result.stdout:
                    # Persist the captured TOC XML.
                    with open(toc_file, 'w', encoding='utf-8') as f:
                        f.write(result.stdout)
                    print(f"✓ TOC exported successfully ({len(result.stdout)} chars)")
                    break
                else:
                    print(f"✗ TOC export failed (attempt {attempt + 1}/{max_retries}) with return code: {result.returncode}")
                    if result.stderr:
                        print(f"stderr: {result.stderr}")

                    if attempt < max_retries - 1:
                        print(f"Retrying in {retry_delay} seconds...")
                        import time
                        time.sleep(retry_delay)
                        retry_delay *= 2  # exponential backoff
                    else:
                        # Give up on the TOC and fall back to the hard-coded run layout.
                        print("All retries failed, trying to parse with default run structure...")
                        self._create_default_runs()
                        return True

            if toc_file.exists():
                self._parse_run_info(toc_file)
                print(f"  ✓ Found {len(self.runs)} runs in trace file")
            else:
                print(f"  ✗ Failed to export table of contents")
                return False
        except Exception as e:
            print(f"  ✗ Exception exporting TOC: {e}")
            return False

        # Export each run's os-signpost table separately.
        print("Exporting signpost data per run...")
        for run in self.runs:
            run_number = run["number"]
            signpost_file = self.temp_dir / f"os-signpost-run{run_number}.xml"
            run["signpost_file"] = signpost_file

            print(f"  Exporting os-signpost for run {run_number}...")
            self._export_signpost_with_retry(run_number, signpost_file)

        # Export the thread-state table once; it is shared by all runs.
        print("Exporting shared data (thread-state)...")

        # thread-state export target.
        thread_file = self.temp_dir / "thread-state.xml"
        print(f"  Exporting thread-state...")
        xpath_query = f"/trace-toc//table[@schema='thread-state']"

        # thread-state exports are usually large: more retries, longer timeout.
        success = self._export_xctrace_with_retry(
            export_name="thread-state",
            xpath_query=xpath_query,
            output_file=thread_file,
            max_retries=15,  # bumped to 15 retries
            expected_min_size=1000,  # thread-state data is normally large
            timeout=180  # longer timeout
        )

        # Point every run at the shared file regardless of success; consumers
        # check the file for existence/validity themselves.
        for run in self.runs:
            run["thread_state_file"] = thread_file

        return True

    def _parse_run_info(self, toc_file: Path) -> None:
        """解析trace文件中的run信息"""
        try:
            if not toc_file.exists():
                print(f"Error: TOC file does not exist: {toc_file}")
                return

            if toc_file.stat().st_size == 0:
                print(f"Error: TOC file is empty: {toc_file}")
                return

            tree = ET.parse(toc_file)
            root = tree.getroot()

            for run_elem in root.findall(".//run"):
                run_number = run_elem.get("number")

                # 提取进程参数
                process_elem = run_elem.find(".//process")
                arguments = process_elem.get("arguments", "") if process_elem is not None else ""

                # 解析测试用例名和parameters（run名称）
                test_case, run_name = self._parse_test_arguments(arguments)

                # 提取时间信息
                summary_elem = run_elem.find(".//summary")
                start_date_str = summary_elem.find(".//start-date").get("fmt", "") if summary_elem is not None else ""
                end_date_str = summary_elem.find(".//end-date").get("fmt", "") if summary_elem is not None else ""

                # 解析时间戳 (转换为纳秒)
                start_timestamp = self._parse_iso_date(start_date_str)
                end_timestamp = self._parse_iso_date(end_date_str)

                run_info = {
                    "number": run_number,
                    "test_case": test_case,
                    "parameters": run_name,  # 直接使用从参数中读取的run名称
                    "arguments": arguments,
                    "start_timestamp": start_timestamp,
                    "end_timestamp": end_timestamp,
                    "signpost_file": None,
                    "thread_state_file": None,
                    "gcd_file": None,
                    "signpost_data": [],
                    "thread_state_data": [],
                    "gcd_data": [],
                    "test_start_time": None
                }

                self.runs.append(run_info)
                print(f"Run {run_number}: {run_name}")

        except ET.ParseError as e:
            print(f"XML ParseError in TOC file: {e}")
            print(f"TOC file size: {toc_file.stat().st_size if toc_file.exists() else 'file not found'}")
        except Exception as e:
            print(f"Error parsing run info: {e}")

    def _parse_iso_date(self, date_str: str) -> int:
        """解析ISO日期字符串为纳秒时间戳"""
        if not date_str:
            return 0

        try:
            # 格式: "2025-10-28T16:40:09.940+08:00"
            from datetime import datetime
            import pytz

            # 移除时区信息并解析
            if '+' in date_str:
                date_part = date_str.split('+')[0]
                tz_offset = int(date_str.split('+')[1].split(':')[0]) * 3600000000  # 转换为纳秒
            else:
                date_part = date_str
                tz_offset = 0

            # 解析日期时间
            dt = datetime.strptime(date_part, "%Y-%m-%dT%H:%M:%S.%f")

            # 转换为纳秒时间戳 (以某个基准时间开始，这里用相对时间)
            # 简化处理：只返回微秒部分
            return int(dt.timestamp() * 1_000_000_000) + tz_offset * 1_000_000
        except:
            return 0

    def _create_default_runs(self) -> None:
        """Populate self.runs with the hard-coded 8-run benchmark layout.

        Fallback used when TOC export/parsing fails, based on the known
        composition of the benchmark suite.
        """
        print("Creating default run structure...")

        # (test_case, parameters) for the 8 known benchmark scenarios.
        default_runs = [
            ("fork_join", "task_count=512_task_duration=10us_qos-class=2"),
            ("fibonacci", "depth=10_task_duration=10us_qos-class=2"),
            ("airaw", "buffer_count=1_slice_count=512_task_duration=200us_qos-class=2"),
            ("serial_queue", "task_count=512_submit-interval=100us_task_duration=10us_qos-class=2"),
            ("concurrent_queue", "max-concurrency=2_submit-interval=100us_task_count=512_task_duration=1000us_qos-class=2"),
            ("periodic", "task_type=0_task_duration=100us_qos-class=2"),
            ("playback", "playback-file=camera_video_data.txt_qos-class=2"),
            ("ariaw_semaphore", "readers=3_writers=3_task_duration=100us_qos-class=2")
        ]

        for index, (case_name, param_str) in enumerate(default_runs, 1):
            self.runs.append({
                "number": str(index),
                "test_case": case_name,
                "parameters": param_str,
                # Reconstruct an approximate command line from the parameters.
                "arguments": f"--auto-test {case_name} --{param_str.replace('_', '--').replace('=', ' ')}",
                "signpost_file": None,
                "thread_state_file": None,
                "gcd_file": None,
                "signpost_data": [],
                "thread_state_data": [],
                "gcd_data": [],
                "test_start_time": None,
            })

        print(f"Created {len(self.runs)} default runs")

    def _parse_test_arguments(self, arguments: str) -> Tuple[str, str]:
        """Extract (test_case, run_name) from a benchmark process argument string.

        The run name comes from --parameters, or — for playback tests without
        an explicit --parameters — from the --playback-file basename.
        Returns ("unknown", "unknown") when the arguments cannot be parsed.
        """
        if not arguments or "--auto-test" not in arguments:
            return "unknown", "unknown"

        # The TOC may HTML-escape the argument string; decode it first.
        try:
            import html
            arguments = html.unescape(arguments)
        except Exception:
            pass  # fall back to the raw string on decode failure

        # Repair quoting damage sometimes introduced by HTML decoding:
        # --parameters"value"  ->  --parameters "value"
        arguments = re.sub(r'--parameters"([^"]+)"', r'--parameters "\1"', arguments)

        parts = arguments.split()
        try:
            test_case_idx = parts.index("--auto-test") + 1
        except ValueError:
            # "--auto-test" only appeared as a substring of another token;
            # the old unguarded index() raised here.
            return "unknown", "unknown"
        if test_case_idx >= len(parts):
            return "unknown", "unknown"

        test_case = parts[test_case_idx]

        # Prefer an explicit --parameters value as the run name.
        run_name = "default"
        for i, part in enumerate(parts):
            if part == "--parameters" and i + 1 < len(parts):
                # Strip surrounding quotes, if any survived.
                run_name = parts[i + 1].strip('"\'')
                break

        # playback runs without --parameters: name the run after the playback file.
        if test_case == "playback" and run_name == "default":
            for i, part in enumerate(parts):
                if part == "--playback-file" and i + 1 < len(parts):
                    playback_file = parts[i + 1].strip('"\'')
                    # Basename without the .playback extension.
                    import os.path
                    filename = os.path.basename(playback_file)
                    if filename.endswith('.playback'):
                        # ".playback" is 9 chars; the old code sliced [:-10]
                        # and chopped one character too many off the name.
                        run_name = filename[:-len('.playback')]
                    break

        return test_case, run_name

    def parse_signpost_data(self) -> None:
        """Parse each run's exported os-signpost XML and attach events to the run.

        For every run this resolves <thread> and <signpost-name> references,
        groups rows by process id, then records on the run dict:
          - test_start_time: timestamp of the test_start event
          - test_end_time: timestamp of test_end (+2000us buffer), or the last
            event's timestamp as a fallback
          - signpost_data: the list of cnt_delay events
        """
        print("Parsing os-signpost data with process ID separation...")

        # Each run has its own signpost file exported earlier.
        for run in self.runs:
            signpost_file = run.get("signpost_file")
            if not signpost_file or not signpost_file.exists() or signpost_file.stat().st_size == 0:
                print(f"  No signpost data found for run {run['number']}")
                continue

            print(f"  Parsing signpost data for run {run['number']} from: {signpost_file}")
            try:
                tree = ET.parse(signpost_file)
                root = tree.getroot()

                # Build thread mapping first - go through all thread definitions in the document
                thread_mapping = {}
                for thread_elem in root.findall(".//thread"):
                    thread_id = thread_elem.get("id")
                    if thread_id:
                        # Look for tid element within this thread definition
                        tid_elem = thread_elem.find(".//tid")
                        if tid_elem is not None and tid_elem.text:
                            try:
                                thread_mapping[thread_id] = int(tid_elem.text)
                            except ValueError:
                                # Fallback to extracting from fmt attribute
                                thread_fmt = thread_elem.get("fmt", "")
                                tid_match = re.search(r"0x([0-9a-f]+)", thread_fmt)
                                if tid_match:
                                    thread_mapping[thread_id] = int(tid_match.group(1), 16)

                # Build signpost name mapping to handle references
                signpost_mapping = {}
                for signpost_elem in root.findall(".//signpost-name"):
                    signpost_id = signpost_elem.get("id")
                    if signpost_id:
                        fmt = signpost_elem.get("fmt", "")
                        signpost_mapping[signpost_id] = fmt

                process_data = defaultdict(list)

                for row in root.findall(".//row"):
                    # Extract the process id - may require resolving an XML reference.
                    process_elem = row.find(".//process")
                    if process_elem is None:
                        continue

                    # If a ref attribute is present, resolve the referenced process element.
                    process_ref = process_elem.get("ref")
                    if process_ref:
                        # Locate the referenced process definition.
                        ref_process_elem = root.find(f".//process[@id='{process_ref}']")
                        if ref_process_elem is not None:
                            # Read the PID from the referenced process element.
                            pid_elem = ref_process_elem.find(".//pid")
                            if pid_elem is not None:
                                pid = pid_elem.get("fmt", "")
                            else:
                                # Fall back to parsing the process fmt attribute.
                                process_str = ref_process_elem.get("fmt", "")
                                pid_match = re.search(r"pid: (\d+)", process_str)
                                pid = pid_match.group(1) if pid_match else None
                        else:
                            continue
                    else:
                        # Inline process element: read the pid directly.
                        pid_elem = process_elem.find(".//pid")
                        if pid_elem is not None:
                            pid = pid_elem.get("fmt", "")
                        else:
                            # Fall back to parsing the process fmt attribute.
                            process_str = process_elem.get("fmt", "")
                            pid_match = re.search(r"pid: (\d+)", process_str)
                            if pid_match:
                                pid = pid_match.group(1)
                            else:
                                continue

                    if not pid:
                        continue

                    # Extract the event timestamp.
                    time_elem = row.find(".//event-time")
                    if time_elem is None:
                        continue

                    # Prefer the raw numeric text over the formatted string.
                    time_str = time_elem.text
                    if not time_str:
                        # No text content: fall back to the fmt attribute.
                        time_str = time_elem.get("fmt", "")

                    timestamp = self._parse_timestamp(time_str)

                    # Extract thread information.
                    thread_elem = row.find(".//thread")
                    process_elem = row.find(".//process")
                    thread_id = 0

                    if thread_elem is not None:
                        # Check if this is a thread definition with inline data
                        tid_elem = thread_elem.find(".//tid")
                        if tid_elem is not None and tid_elem.text:
                            try:
                                thread_id = int(tid_elem.text)
                            except ValueError:
                                pass
                        else:
                            # This is a thread reference, look it up in our mapping
                            thread_ref = thread_elem.get("ref", "")
                            if thread_ref and thread_ref in thread_mapping:
                                thread_id = thread_mapping[thread_ref]
                            else:
                                # Fallback to extracting from fmt attribute
                                thread_fmt = thread_elem.get("fmt", "")
                                tid_match = re.search(r"0x([0-9a-f]+)", thread_fmt)
                                if tid_match:
                                    thread_id = int(tid_match.group(1), 16)

                    # Extract event info - check both metadata and signpost-name references.
                    metadata_elem = row.find(".//os-log-metadata")
                    event_str = ""
                    is_cnt_delay_by_ref = False

                    # Event description from os-log-metadata, when present.
                    if metadata_elem is not None:
                        event_str = metadata_elem.get("fmt", "")

                    # Does any signpost-name (directly or via ref) resolve to cnt_delay?
                    signpost_names = row.findall(".//signpost-name")
                    for signpost_name in signpost_names:
                        # Direct fmt attribute.
                        fmt = signpost_name.get("fmt", "")
                        if fmt == "cnt_delay":
                            is_cnt_delay_by_ref = True
                            break
                        # Reference into the signpost-name mapping.
                        ref = signpost_name.get("ref", "")
                        if ref and ref in signpost_mapping and signpost_mapping[ref] == "cnt_delay":
                            is_cnt_delay_by_ref = True
                            break

                    if not event_str and not is_cnt_delay_by_ref:
                        continue

                    event_data = {
                        "timestamp": timestamp,
                        "event": "unknown",
                        "raw": event_str,
                        "process_id": pid,  # owning process id
                        "thread_id": thread_id  # emitting thread id
                    }

                    # test_start event.
                    if "test_start" in event_str:
                        event_data["event"] = "test_start"

                    # test_end event.
                    elif "test_end" in event_str:
                        end_match = re.search(r"test_end = ([\d,]+)", event_str)
                        if end_match:
                            end_us = int(end_match.group(1).replace(",", ""))
                            event_data["event"] = "test_end"
                            event_data["end_us"] = end_us
                            # Debug output: show the raw string and the parsed value.
                            print(f"    🔍 test_end parsed: '{event_str}' → {end_us}")

                    # cnt_delay event - recognized via metadata text or signpost reference.
                    elif "cnt_delay =" in event_str or is_cnt_delay_by_ref:
                        # Reference-identified cnt_delay: the delay value lives in the metadata text.
                        if is_cnt_delay_by_ref and "cnt_delay =" not in event_str:
                            # Pull the first (possibly comma-grouped) number out of the metadata.
                            delay_match = re.search(r"(\d{1,3}(?:,\d{3})*(?:\.\d+)?)", event_str)
                            if delay_match:
                                delay_str = delay_match.group(1).replace(",", "")
                                try:
                                    delay_us = int(float(delay_str))
                                    event_data["event"] = "cnt_delay"
                                    event_data["delay_us"] = delay_us
                                except ValueError:
                                    # Unparsable value: keep the event with a zero delay.
                                    event_data["event"] = "cnt_delay"
                                    event_data["delay_us"] = 0
                            else:
                                # No numeric value found: keep the event with a zero delay.
                                event_data["event"] = "cnt_delay"
                                event_data["delay_us"] = 0
                        else:
                            # Regular "cnt_delay = N" text in the metadata.
                            delay_match = re.search(r"cnt_delay = ([\d,]+)", event_str)
                            if delay_match:
                                delay_str = delay_match.group(1)
                                # Strip every possible thousands separator / space
                                # (including the full-width comma) for cross-platform consistency.
                                delay_str = delay_str.replace(",", "").replace(" ", "").replace("，", "")
                                delay_us = int(delay_str)
                                event_data["event"] = "cnt_delay"
                                event_data["delay_us"] = delay_us

                    process_data[pid].append(event_data)

                # Attach the grouped events to this run.
                # NOTE(review): when several PIDs appear, each iteration overwrites
                # run["signpost_data"]; presumably one PID per run - verify upstream.
                for pid, events in process_data.items():
                    # Locate the test_start event.
                    test_start_events = [e for e in events if e["event"] == "test_start"]
                    if test_start_events:
                        run["test_start_time"] = test_start_events[0]["timestamp"]
                        test_start_timestamp = test_start_events[0]["timestamp"]
                    else:
                        test_start_timestamp = 0

                    # Locate the test_end event.
                    test_end_events = [e for e in events if e["event"] == "test_end"]
                    if test_end_events:
                        # Add a 2000us buffer so trailing thread-state transitions are captured.
                        test_end_timestamp = test_end_events[0]["timestamp"] + 2000 * 1000  # 2000us = 2000 * 1000ns
                        run["test_end_time"] = test_end_timestamp
                    else:
                        # Fallback: without a test_end event, use the last signpost event's time.
                        if events:
                            # Sort by timestamp and take the final event.
                            events_sorted = sorted(events, key=lambda x: x["timestamp"])
                            last_event = events_sorted[-1]
                            test_end_timestamp = last_event["timestamp"]
                            run["test_end_time"] = test_end_timestamp
                            print(f"    📋 Using fallback: test_end_time set to last event timestamp: {test_end_timestamp}")
                        else:
                            test_end_timestamp = 0

                    # Collect all cnt_delay events.
                    cnt_delay_events = [e for e in events if e["event"] == "cnt_delay"]
                    run["signpost_data"] = cnt_delay_events

                    # Condensed per-PID summary.
                    print(f"  Run {run['number']}: PID {pid}: {len(test_start_events)} test_start({test_start_timestamp}), {len(test_end_events)} test_end({test_end_timestamp}), {len(cnt_delay_events)} cnt_delay events")

            except Exception as e:
                print(f"    Error parsing os-signpost for run {run['number']}: {e}")
                import traceback
                traceback.print_exc()

    def parse_thread_state_data(self) -> None:
        """解析thread-state数据并按进程ID分配给各个run（单线程版本）"""
        print("🔍 Parsing thread-state data per run (single-threaded)...")

        # 顺序处理每个run
        for run in self.runs:
            try:
                run_number, thread_data = self._process_run_thread_state_worker(run)
                # 找到对应的run并设置数据
                for r in self.runs:
                    if r["number"] == run_number:
                        r["thread_state_data"] = thread_data
                        break
            except Exception as e:
                self._safe_print(f"    ❌ Error processing run {run['number']}: {e}")
                # 设置空数据以避免后续错误
                run["thread_state_data"] = []

    def _parse_run_thread_state_file(self, thread_state_file: str, run: Dict[str, Any]) -> List[Dict[str, Any]]:
        """解析单个run的thread-state文件"""
        try:
            tree = ET.parse(thread_state_file)
            root = tree.getroot()

            # 第一步：构建全局状态映射表
            state_mapping = {}
            for state_elem in root.findall(".//thread-state[@id]"):
                state_id = state_elem.get("id", "")
                state_fmt = state_elem.get("fmt", "")
                if state_id and state_fmt:
                    state_mapping[state_id] = state_fmt

            # 第二步：构建线程映射表（解决thread ref问题）
            thread_mapping = {}
            for thread_elem in root.findall(".//thread[@id]"):
                thread_id = thread_elem.get("id", "")
                thread_fmt = thread_elem.get("fmt", "")
                if thread_id and thread_fmt:
                    thread_mapping[thread_id] = thread_fmt

            print(f"    📋 Built thread mapping: {len(thread_mapping)} threads")
            print(f"    📋 Built state mapping: {len(state_mapping)} states")

            # 从signpost数据中获取产生过cnt_delay事件的线程ID
            cnt_delay_thread_ids = set()
            signpost_data = run.get("signpost_data", [])
            for event in signpost_data:
                if event.get("event") == "cnt_delay" and event.get("thread_id", 0) > 0:
                    cnt_delay_thread_ids.add(event["thread_id"])

            thread_events = []
            run_pid = run.get("pid")

            for row in root.findall(".//row"):
                try:
                    # 提取时间戳
                    start_elem = row.find(".//start-time")
                    if start_elem is not None:
                        start_text = start_elem.text
                        start_us = int(start_text) if start_text else 0
                        start_ns = start_us * 1000  # 转换为纳秒
                    else:
                        continue

                    # 提取线程信息 - 支持线程引用
                    thread_elem = row.find(".//thread")
                    if thread_elem is not None:
                        thread_str = thread_elem.get("fmt", "")
                        # 如果没有fmt属性，尝试通过ref查找
                        if not thread_str:
                            thread_ref = thread_elem.get("ref", "")
                            if thread_ref:
                                thread_str = thread_mapping.get(thread_ref, "")
                    else:
                        continue

                    # 提取状态信�� - 使用状态映射表
                    state_elem = row.find(".//thread-state")
                    state_ref = ""
                    state_str = "Unknown"
                    if state_elem is not None:
                        state_ref = state_elem.get("ref", "")
                        if state_ref:
                            # 使用ref查找实际状态名称
                            state_str = state_mapping.get(state_ref, "Unknown")
                        else:
                            # 如果没有ref，检查是否有直接的fmt属性
                            state_str = state_elem.get("fmt", "Unknown")

                    # 提取narrative信息（用于调试）
                    narrative_elem = row.find(".//narrative")
                    narrative_str = narrative_elem.get("fmt", "") if narrative_elem is not None else ""

                    # 提取持续时间
                    duration_elem = row.find(".//duration")
                    duration_us = 0
                    if duration_elem is not None:
                        duration_str = duration_elem.get("fmt", "")
                        duration_us = self._parse_duration(duration_str)

                    # 提取线程ID
                    tid_match = re.search(r"0x([0-9a-f]+)", thread_str)
                    tid = int(tid_match.group(1), 16) if tid_match else 0

                    # 检查是否是产生过cnt_delay事件的线程
                    is_gcd_thread = tid in cnt_delay_thread_ids

                    # 调试输出：打印线程信息以诊断GCD线程识别问题
                    # if is_gcd_thread:
                    #     print(f"        🔍 Found GCD thread: {thread_str} (tid={tid})")
                    # elif len(thread_events) < 5:  # 只打印前几个非GCD线程作为示例
                    #     print(f"        ℹ️ Non-GCD thread: {thread_str} (tid={tid})")

                    thread_events.append({
                        "start_timestamp": start_ns,
                        "start_timestamp_us": start_us,
                        "duration_us": duration_us,
                        "state": state_str,
                        "state_ref": state_ref,  # 保留ref用于调试
                        "narrative": narrative_str,
                        "thread_id": tid,
                        "thread_desc": thread_str,
                        "is_gcd_thread": is_gcd_thread,
                        "raw": f"{thread_str} -> {state_str} (ref:{state_ref}) for {duration_str}"
                    })

                except Exception as e:
                    continue  # 忽略单个事件的解析错误

            # 过滤出GCD工作线程
            gcd_events = [e for e in thread_events if e['is_gcd_thread']]

            print(f"    Found {len(thread_events)} events, {len(gcd_events)} GCD events")

            return gcd_events

        except Exception as e:
            print(f"    Error parsing thread-state file: {e}")
            return []

    def _process_run_thread_state_worker(self, run: Dict[str, Any]) -> Tuple[int, List[Dict[str, Any]]]:
        """Export and parse the thread-state table for a single run.

        Designed to be submitted to a thread pool; all console output goes
        through the thread-safe ``_safe_print``.

        Args:
            run: Run descriptor dict; must contain the key "number".

        Returns:
            Tuple of (run_number, parsed_thread_events). The event list is
            empty when the export fails or produces no usable data.
        """
        run_number = run["number"]
        self._safe_print(f"  📋 Processing run {run_number}...")

        # Per-run XML export target inside the parser's temp directory.
        thread_state_file = self.temp_dir / f"thread-state-run{run_number}.xml"
        xpath_query = f"//trace-toc[1]/run[{run_number}]//table[@schema='thread-state']"

        # Generous retry budget: xctrace thread-state exports are flaky and
        # can emit truncated files on early attempts.
        success = self._export_xctrace_with_retry(
            export_name=f"thread-state-run{run_number}",
            xpath_query=xpath_query,
            output_file=thread_state_file,
            max_retries=15,          # more retries than the default
            expected_min_size=500,   # lowered minimum-size expectation
            timeout=180,             # longer timeout for large tables
        )

        if not success:
            self._safe_print(f"    ⚠️ Run {run_number}: Failed to export thread-state data after retries")
            return run_number, []

        if not thread_state_file.exists() or thread_state_file.stat().st_size == 0:
            self._safe_print(f"    ⚠️ Run {run_number}: No thread-state data available")
            return run_number, []

        # Warn only for suspiciously small exports; they often indicate a
        # header-only table.
        file_size = thread_state_file.stat().st_size
        if file_size < 1000:
            self._safe_print(f"    ⚠️ Run {run_number}: thread-state file is small: {file_size} bytes")

        thread_data = self._parse_run_thread_state_file(str(thread_state_file), run)

        # Best-effort cleanup of the temporary XML. Catch only OSError so
        # KeyboardInterrupt/SystemExit are not swallowed (the previous bare
        # except hid those too).
        try:
            thread_state_file.unlink()
        except OSError:
            pass

        return run_number, thread_data

    def _parse_timestamp(self, time_str: str) -> int:
        """解析时间戳字符串为纳秒"""
        # 格式: "00:01.172.918" -> 直接使用数字值
        if not time_str:
            return 0

        try:
            # 如果时间字符串包含数字值，直接提取
            # XML格式: <event-time fmt="00:01.270.312">1270312125</event-time>
            # 我们应该直接使用1270312125，而不是解析"00:01.270.312"
            return int(time_str)  # 如果传入的是数字字符串
        except ValueError:
            try:
                # 如果传入的是格式化时间字符串，则解析
                parts = time_str.split(":")
                minutes = int(parts[0])
                seconds_parts = parts[1].split(".")
                seconds = int(seconds_parts[0])
                microseconds = int(seconds_parts[1]) if len(seconds_parts) > 1 else 0

                # 转换为纳秒
                return (minutes * 60 + seconds) * 1_000_000_000 + microseconds * 1_000
            except:
                return 0

    def _parse_duration(self, duration_str: str) -> int:
        """解析持续时间字符串为微秒"""
        # 格式: "562.14 ms" -> 微秒
        if not duration_str:
            return 0

        try:
            duration_str = duration_str.strip()
            if "ms" in duration_str:
                return int(float(duration_str.replace("ms", "").strip()) * 1000)
            elif "µs" in duration_str or "us" in duration_str:
                return int(float(duration_str.replace("µs", "").replace("us", "").strip()))
            elif "s" in duration_str:
                return int(float(duration_str.replace("s", "").strip()) * 1_000_000)
            else:
                return int(float(duration_str))
        except:
            return 0

    def filter_data_by_test_start(self) -> None:
        """根据test_start时间戳过滤数据（每个run独立过滤）"""
        for run in self.runs:
            if run["test_start_time"] is None:
                print(f"Warning: Run {run['number']} has no test_start, using all data")
                continue

            print(f"Filtering run {run['number']} data by test_start timestamp: {run['test_start_time']}")

            # 过滤signpost数据
            original_count = len(run["signpost_data"])
            run["signpost_data"] = [
                item for item in run["signpost_data"]
                if item["timestamp"] >= run["test_start_time"]
            ]
            print(f"  Run {run['number']} cnt_delay events: {original_count} -> {len(run['signpost_data'])}")

            # 过滤thread-state数据
            original_count = len(run["thread_state_data"])
            run["thread_state_data"] = [
                item for item in run["thread_state_data"]
                if item["start_timestamp"] >= run["test_start_time"]
            ]
            print(f"  Run {run['number']} thread-state events: {original_count} -> {len(run['thread_state_data'])}")

    def calculate_metrics(self) -> List[Dict[str, Any]]:
        """计算每个run的性能指标"""
        print("Calculating performance metrics...")

        metrics = []

        for run in self.runs:
            signpost_data = run.get("signpost_data", [])

            # 获取延迟数据 - 如果没有signpost数据，使用默认值
            if signpost_data:
                delays = [item["delay_us"] for item in signpost_data]
                total_tasks = len(delays)
                print(f"  📊 Run {run['number']}: Found {total_tasks} delay events")
            else:
                delays = []
                total_tasks = 0
                print(f"  ⚠️  Run {run['number']}: No signpost data (possibly QOS=-1 scenario)")

            # 计算worker_count - 统计GCD线程数量
            worker_count = self._calculate_gcd_thread_count(run)
            if worker_count == 0:
                worker_count = 1  # 至少有主线程

            # 计算wakeup_count - 只统计GCD相关线程
            wakeup_count = self._calculate_wakeup_count_for_run(run)

            # 将wakeup_count存储到run中用于显示
            run["wakeup_count"] = wakeup_count

            # 计算任务总数
            total_tasks = len(delays) if delays else 0

            # 计算总耗时 (total_cost): 从第一个cnt_delay到test_end的时间差
            first_cnt_delay_time = None
            if signpost_data:
                # 找到第一个cnt_delay事件的时间戳
                first_cnt_delay_time = signpost_data[0]["timestamp"]
            test_end_time = run.get("test_end_time", 0)

            total_cost = 0
            if first_cnt_delay_time is not None and test_end_time > 0:
                total_cost = test_end_time - first_cnt_delay_time  # 纳秒
                total_cost = total_cost / 1000.0  # 转换为微秒

            # 计算平均耗时 (avg_cost): total_cost / 任务数量
            avg_cost = total_cost / total_tasks if total_tasks > 0 else 0.0

            # 计算延迟指标 - 如果没有数据，使用默认值
            if delays:
                avg_delay = self._calculate_avg_delay(delays)
                first_delay = delays[0]  # 第一个任务的延迟（按时间顺序）
                max_delay = max(delays)
            else:
                avg_delay = 0.0
                first_delay = 0
                max_delay = 0

            # 汇总指标
            # 从run的parameters中提取纯参数（去掉test_case前缀）
            parameters = run["parameters"]
            test_case = run["test_case"]  # 获取test_case

            # 简化：直接去掉test_case前缀即可，不需要任何转换
            if parameters.startswith(test_case + "_"):
                parameters = parameters[len(test_case) + 1:]  # 去掉 "test_case_" 前缀
            # 兼容性：如果还有冒号分隔的情况也处理
            elif parameters.startswith(test_case + ":"):
                parameters = parameters[len(test_case) + 1:]  # 去掉 "test_case:" 前缀

            metrics.append({
                "benchmark_case": run["test_case"],
                "instructions": 0,  # 新增instructions列，值固定为0
                "parameters": parameters,
                "sched_delay": avg_delay / 1000.0 if avg_delay > 0 else 0.0,  # Convert ns to μs
                "worker_count": worker_count,
                "wakeup_count": wakeup_count,
                "wakeup_per_execute": wakeup_count / total_tasks if total_tasks > 0 else 0,
                "first_delay": first_delay / 1000.0 if first_delay > 0 else 0.0,  # Convert ns to μs
                "max_delay": max_delay / 1000.0 if max_delay > 0 else 0.0,  # Convert ns to μs
                "total_cost": total_cost,  # 从第一个cnt_delay到test_end的时间差(微秒)
                "avg_cost": avg_cost,  # total_cost / 任务数量
                "total_tasks": total_tasks,
                "run_number": run["number"]
            })

        return metrics

    def generate_process_thread_analysis(self) -> str:
        """生成进程-线程关联分析报告"""
        print("🔍 Generating process-thread association analysis...")

        analysis_file = self.output_dir / "process_thread_analysis.txt"

        with open(analysis_file, 'w', encoding='utf-8') as f:
            f.write("# 进程-线程关联分析\n")
            f.write("# 格式: 进程信息\n")
            f.write("#       线程列表: tid:name (不带进程号)\n\n")

            total_gcd_threads = 0

            for i, run in enumerate(self.runs, 1):
                # 从signpost数据中提取进程ID
                process_ids = set()
                for event in run.get('signpost_data', []):
                    if 'raw' in event and isinstance(event['raw'], str):
                        pid_match = re.search(r'pid:(\d+)', event['raw'])
                        if pid_match:
                            process_ids.add(pid_match.group(1))

                pid_str = list(process_ids)[0] if process_ids else "unknown"
                f.write(f"进程 {pid_str} (运行 {run['number']}) - {run['test_case']}: {run['parameters']}\n")

                # 分析线程数据 - 只保留GCDBenchmark线程
                thread_events = run.get('thread_state_data', [])
                unique_threads = {}

                for event in thread_events:
                    # 只处理包含GCDBenchmark的线程
                    if not event.get('is_gcd_thread', False):
                        continue

                    tid = event['thread_id']
                    if tid not in unique_threads:
                        # 从线程描述中提取进程ID和线程名
                        thread_desc = event['thread_desc']
                        tid_match = re.search(r'pid:\s*(\d+)', thread_desc)
                        thread_pid = tid_match.group(1) if tid_match else "unknown"

                        # 提取线程名 - 格式: "GCDBenchmark 0xa7ea10" -> "GCDBenchmark"
                        # 或 "com.apple.uikit.eventfetch-thread 0xa7ea0d"
                        name_match = re.search(r'([^(]+)\s+0x[0-9a-f]+', thread_desc)
                        thread_name = name_match.group(1).strip() if name_match else thread_desc.split()[0]

                        unique_threads[tid] = {
                            'pid': thread_pid,
                            'tid': tid,
                            'name': thread_name
                        }

                total_gcd_threads += len(unique_threads)

                # 输出线程列表 - 简化格式，去掉进程号
                if unique_threads:
                    f.write("线程列表: ")
                    thread_list = []
                    for thread_info in unique_threads.values():
                        thread_list.append(f"{thread_info['tid']}:{thread_info['name']}")
                    f.write(", ".join(thread_list))
                    f.write(f" ({len(unique_threads)}个GCD线程)\n")
                else:
                    f.write("线程列表: 无GCDBenchmark相关线程\n")

                # 输出signpost统计
                signpost_events = run.get('signpost_data', [])
                if signpost_events:
                    delays = [e['delay_us'] for e in signpost_events]
                    f.write(f"延迟统计: {len(signpost_events)}个事件, 范围{min(delays)}-{max(delays)}µs, 平均{sum(delays)/len(delays):.1f}µs\n")
                else:
                    f.write("延迟统计: 无数据\n")

                f.write("\n")

            f.write(f"总结: {len(self.runs)}个进程, 总计{total_gcd_threads}个GCD线程\n")

        print(f"  ✓ Process-thread analysis saved to: {analysis_file}")
        return str(analysis_file)

    def _calculate_wakeup_count_for_run(self, run: Dict[str, Any]) -> int:
        """计算单个run的wakeup次数 - 统计产生过cnt_delay事件的线程的Idle状态数量"""
        thread_state_data = run["thread_state_data"]

        # 从signpost数据中获取所有产生过cnt_delay事件的线程ID
        signpost_data = run.get("signpost_data", [])
        cnt_delay_thread_ids = set()
        for event in signpost_data:
            if event.get("event") == "cnt_delay" and event.get("thread_id", 0) > 0:
                cnt_delay_thread_ids.add(event["thread_id"])

        # 统计这些线程的wakeup次数（Idle状态）
        wakeup_count = 0
        for item in thread_state_data:
            if item["thread_id"] in cnt_delay_thread_ids and item["state"] == "Idle":
                wakeup_count += 1

        return wakeup_count

    def _calculate_gcd_thread_count(self, run: Dict[str, Any]) -> int:
        """计算单个run的GCD线程数量 - 基于cnt_delay事件的线程ID"""
        # 从signpost数据中获取所有产生过cnt_delay事件的线程ID
        signpost_data = run.get("signpost_data", [])
        cnt_delay_thread_ids = set()

        for event in signpost_data:
            if event.get("event") == "cnt_delay" and event.get("thread_id", 0) > 0:
                cnt_delay_thread_ids.add(event["thread_id"])

        return len(cnt_delay_thread_ids)

    def _calculate_avg_delay(self, delays: List[int]) -> float:
        """计算平均延迟"""
        if not delays:
            return 0.0
        return sum(delays) / len(delays)

    def generate_csv(self, metrics: List[Dict[str, Any]], output_file: str = None) -> str:
        """生成CSV输出"""
        if output_file is None:
            # 创建唯一的CSV文件名，使用trace文件名
            trace_name = self.trace_file.name.replace('.trace', '').replace('/', '_').replace('\\', '_')
            temp_csv_dir = self.output_dir / "temp_csv"
            temp_csv_dir.mkdir(parents=True, exist_ok=True)
            output_file = temp_csv_dir / f"trace_metrics_{trace_name}.csv"

        print(f"Generating CSV: {output_file}")

        # CSV列顺序
        fieldnames = [
            "benchmark_case",
            "parameters",
            "instructions",
            "sched_delay",
            "worker_count",
            "wakeup_per_execute",
            "first_delay",
            "max_delay",
            "total_cost",  # 从第一个cnt_delay到test_end的时间差(微秒)
            "avg_cost"  # total_cost / 任务数量
        ]

        try:
            with open(output_file, 'w', newline='', encoding='utf-8') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames, lineterminator='\n')
                writer.writeheader()

                for metric in metrics:
                    # 从文件名提取轮次信息
                    filename = os.path.basename(self.trace_file)
                    run_number = metric.get('run_number', 1)

                    # 获���原始parameters
                    original_parameters = metric.get('parameters', '')

                    # 直接使用原始parameters，不再添加ROUND信息（因为现在已经包含ROUND了）
                    original_parameters = metric.get('parameters', '')
                    if original_parameters:
                        metric['parameters'] = original_parameters
                    else:
                        metric['parameters'] = f"_ROUND{run_number}"

                    # 只输出需要的列
                    row = {field: metric.get(field, 0) for field in fieldnames}
                    writer.writerow(row)

                    # 输出CSV行信息到控制台，便于批量脚本提取
                    csv_values = [str(row.get(field, '')) for field in fieldnames]
                    csv_line = '"' + '","'.join(csv_values) + '"'
                    print(f"解析结果 - CSV行: {csv_line}")

            print(f"  CSV generated successfully with {len(metrics)} rows")
            return str(output_file)

        except Exception as e:
            print(f"  Error generating CSV: {e}")
            raise

    def _extract_round_from_filename(self, filename: str) -> str:
        """从文件名中提取轮次信息"""
        import re

        # 移除.trace扩展名
        basename = os.path.splitext(filename)[0]

        # 查找轮次信息
        round_match = re.search(r'_ROUND(\d+)', basename)
        if round_match:
            return f"_ROUND{round_match.group(1)}"
        else:
            return ""  # 没有轮次信息时返回空字符串

    def cleanup(self):
        """清理临时文件（可选）"""
        # 注释掉自动清理，保留文件用于分析
        # if self.temp_dir.exists():
        #     import shutil
        #     shutil.rmtree(self.temp_dir)
        #     print(f"Cleaned up temporary directory: {self.temp_dir}")
        print(f"Temporary files preserved in: {self.temp_dir}")

    def parse(self) -> str:
        """Run the complete parsing pipeline.

        Steps: export the trace tables, parse signpost and thread-state
        data, filter events to each run's measurement window, compute
        metrics, write the process-thread analysis report, generate the
        CSV, and print a per-run summary.

        Returns:
            Path of the generated CSV file, or "" when no metrics could
            be calculated.

        Raises:
            Exception: re-raised (after printing a traceback) when any
                pipeline step fails; cleanup reporting always runs via
                the finally block.
        """
        try:
            print(f"🚀 Starting trace parsing for: {self.trace_file}")
            print(f"📁 Output directory: {self.output_dir}")
            print(f"📁 Temporary directory: {self.temp_dir}")
            print()

            # 1. Export trace data
            print("📤 Step 1: Exporting trace data...")
            if not self.export_trace():
                raise Exception("Failed to export trace data")
            print(f"✅ Found {len(self.runs)} runs in trace file")
            print()

            # 2. Parse the individual data tables
            print("📊 Step 2: Parsing trace data...")
            print("  📍 Parsing os-signpost data...")
            self.parse_signpost_data()

            print("  🧵 Parsing thread-state data...")
            self.parse_thread_state_data()

            print()

            # 3. Filter data by each run's test_start timestamp
            print("⏰ Step 3: Filtering data by test_start timestamps...")
            self.filter_data_by_test_start()
            print()

            # 4. Calculate the performance metrics
            print("📈 Step 4: Calculating performance metrics...")
            metrics = self.calculate_metrics()

            if not metrics:
                print("❌ No metrics calculated - check if trace data contains required events")
                return ""

            print(f"✅ Successfully calculated metrics for {len(metrics)} runs")
            print()

            # 4.5. Generate the process-thread association report
            print("🔍 Step 4.5: Generating process-thread association analysis...")
            analysis_file = self.generate_process_thread_analysis()
            print()

            # 5. Generate the CSV output
            print("💾 Step 5: Generating CSV output...")
            csv_file = self.generate_csv(metrics)
            print()

            # 6. Print the results summary
            print("📋 Step 6: Results Summary")
            print("=" * 50)
            print(f"📁 Trace file: {self.trace_file}")
            print(f"📊 Total runs found: {len(self.runs)}")
            print(f"📈 Metrics calculated: {len(metrics)}")
            print(f"💾 CSV output: {csv_file}")
            print()
            print("📊 Run Details:")
            # Map run_number -> worker_count for the per-run display below
            worker_counts = {metric['run_number']: metric['worker_count'] for metric in metrics}

            for run in self.runs:
                run_num = run['number']
                worker_count = worker_counts.get(run_num, 0)

                print(f"  Run {run_num}: {run['test_case']} - {run['parameters']}")
                print(f"    📍 cnt_delay events: {len(run['signpost_data'])}")
                print(f"    🧵 GCD threads: {worker_count}")
                if run.get('test_start_time'):
                    print(f"    ⏰ test_start: {run['test_start_time']}")
                    if run.get('test_end_time'):
                        print(f"    ⏰ test_end: {run['test_end_time']}")
                    if run.get('wakeup_count') is not None:
                        print(f"    🔄 wakeup_count: {run['wakeup_count']}")
                    else:
                        print(f"    🔄 wakeup_count: 0")
                else:
                    print(f"    ⚠️ No test_start found")
            print("=" * 50)

            return csv_file

        except Exception as e:
            print(f"❌ Error during parsing: {e}")
            import traceback
            traceback.print_exc()
            raise
        finally:
            self.cleanup()


def main():
    """Command-line entry point: parse a .trace file and emit CSV metrics."""
    arg_parser = argparse.ArgumentParser(description="Parse GCDBenchmark trace files and generate CSV")
    arg_parser.add_argument("trace_file", help="Path to the .trace file")
    arg_parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    arg_parser.add_argument("--output-dir", help="Output directory for analysis files (default: same as trace file)")
    args = arg_parser.parse_args()

    if not os.path.exists(args.trace_file):
        print(f"Error: Trace file not found: {args.trace_file}")
        sys.exit(1)

    # Prefer an explicit --output-dir; otherwise write next to the trace file.
    output_dir = args.output_dir or os.path.dirname(os.path.abspath(args.trace_file))

    try:
        csv_file = TraceParser(args.trace_file, output_dir).parse()

        print("\n✅ Parsing completed successfully!")
        print(f"📊 Results saved to: {csv_file}")

    except Exception as e:
        print(f"\n❌ Parsing failed: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        sys.exit(1)


# Allow the module to be imported without triggering a parse run.
if __name__ == "__main__":
    main()
