#!/usr/bin/env python3
"""
批量解析目录下所有trace文件并合并到CSV的脚本
"""

import os
import subprocess
import csv
import re
from pathlib import Path
from datetime import datetime
import concurrent.futures
import threading
from queue import Queue
import fcntl  # 用于文件锁

def append_to_csv_with_lock(csv_file, csv_data, headers):
    """Append one data row to the shared CSV file under an exclusive file lock.

    The file is expected to already exist with its header row written;
    ``headers`` is accepted only for interface compatibility and is not
    written here.

    Args:
        csv_file: Path of the CSV file to append to.
        csv_data: One row (list of values) to append.
        headers: Column headers (unused; header is pre-written by the caller).

    Returns:
        True if the row was written, False after all retries failed.
    """
    import time  # hoisted out of the retry loop (was re-imported per retry)

    max_retries = 3
    retry_delay = 0.1  # initial back-off: 100 ms, doubled after each failure

    for attempt in range(max_retries):
        try:
            with open(csv_file, 'a', newline='', encoding='utf-8') as csvfile:
                # Exclusive advisory lock so concurrent writers cannot interleave rows.
                fcntl.flock(csvfile.fileno(), fcntl.LOCK_EX)
                try:
                    csv_writer = csv.writer(csvfile, lineterminator='\n')
                    csv_writer.writerow(csv_data)

                    # Force the row to disk before releasing the lock.
                    csvfile.flush()
                    os.fsync(csvfile.fileno())
                finally:
                    # Release the lock even if the write/fsync raised.
                    fcntl.flock(csvfile.fileno(), fcntl.LOCK_UN)

            return True

        except Exception as e:
            # Broad on purpose: this is a best-effort retry wrapper and the
            # caller handles the False return.
            print(f"❌ CSV追加失败 (尝试 {attempt + 1}/{max_retries}): {e}")
            if attempt < max_retries - 1:
                time.sleep(retry_delay)
                retry_delay *= 2  # exponential back-off
            else:
                return False

    return False

def extract_parameters_from_filename(filename):
    """Split a trace filename into (scenario, round number, parameter string).

    The stem is interpreted as ``<scenario>_<params>_ROUND<n>`` where both the
    parameter part and the round marker are optional.
    """
    # Drop the .trace extension.
    stem = os.path.splitext(filename)[0]

    # Scenario is everything before the first underscore (whole stem if none).
    scenario, _, tail = stem.partition('_')

    # Round number comes from an optional "_ROUNDn" marker; default is "1".
    match = re.search(r'_ROUND(\d+)', stem)
    round_num = match.group(1) if match else "1"

    # Parameter part: the tail with any "_ROUNDn" marker removed.
    tail = re.sub(r'_ROUND\d+', '', tail)

    parameters = f"{scenario}_{tail}" if tail else scenario
    return scenario, round_num, parameters

def find_trace_files(directory):
    """Collect every '.trace' entry under *directory* (files and directories).

    Both plain .trace files and .trace bundle directories (which typically
    contain the actual trace data and are accepted by trace_parser.py) are
    returned as full paths, deduplicated and sorted.
    """
    found = set()
    for root, dirs, files in os.walk(directory):
        # Files and directories are matched by the same suffix rule.
        for entry in files + dirs:
            if entry.endswith('.trace'):
                found.add(os.path.join(root, entry))
    return sorted(found)

def parse_single_trace(trace_path, target_directory, result_queue):
    """Parse one trace file/directory via trace_parser.py and queue its rows.

    Nothing is written to the merged CSV here; every result is pushed onto
    *result_queue* as ``(status, trace_path, payload, error_msg)`` where
    payload is either a CSV data row (list) or a per-file success row
    count (int) that main() uses for its statistics.
    """
    import io  # hoisted: used to feed parser stdout lines to the csv module

    filename = os.path.basename(trace_path)

    # Skip re-parsing if a cached per-trace CSV from a previous run exists.
    trace_name = filename.replace('.trace', '').replace('/', '_').replace('\\', '_')
    temp_csv_dir = os.path.join(target_directory, "temp_csv")
    expected_csv = os.path.join(temp_csv_dir, f"trace_metrics_{trace_name}.csv")

    if os.path.exists(expected_csv):
        print(f"\n[{threading.current_thread().name}] 跳过已处理的文件: {filename}")

        try:
            loaded_rows = 0
            with open(expected_csv, 'r', encoding='utf-8') as csvfile:
                csv_reader = csv.reader(csvfile)
                next(csv_reader)  # skip header row
                for row in csv_reader:
                    if row:  # skip blank lines
                        result_queue.put(('success', trace_path, row, None))
                        loaded_rows += 1

            print(f"[{threading.current_thread().name}] 从缓存加载: {filename} ({loaded_rows}行)")

            # Per-file row count so main() can tally successes.
            result_queue.put(('success', trace_path, loaded_rows, None))
            return
        except Exception as e:
            # Cache unreadable/corrupt: fall through and re-parse from scratch.
            print(f"[{threading.current_thread().name}] 缓存文件读取失败，重新解析: {filename} ({e})")

    # Report the size up front so long-running large parses are visible.
    try:
        file_size = os.path.getsize(trace_path) if os.path.isfile(trace_path) else 0
        if os.path.isdir(trace_path):
            # .trace bundles are directories: sum the contained files.
            total_size = 0
            for root, dirs, files in os.walk(trace_path):
                for file in files:
                    fp = os.path.join(root, file)
                    try:
                        total_size += os.path.getsize(fp)
                    except OSError:
                        pass  # file vanished or unreadable; size is best-effort
            file_size = total_size

        size_mb = file_size / (1024 * 1024)
        if size_mb > 100:  # flag anything over 100 MB as "large"
            print(f"\n[{threading.current_thread().name}] 开始解析大型文件: {filename} ({size_mb:.1f}MB)")
        else:
            print(f"\n[{threading.current_thread().name}] 开始解析: {filename}")
    except OSError:
        print(f"\n[{threading.current_thread().name}] 开始解析: {filename}")

    try:
        # Run trace_parser.py on this trace; it writes per-trace artifacts
        # under target_directory and prints one "CSV行:" line per run.
        # Generous 15-minute timeout because some traces are very large.
        result = subprocess.run(['python3', 'trace_parser.py', trace_path, '--output-dir', target_directory],
                              capture_output=True, text=True, cwd='.', timeout=900)

        if result.returncode != 0:
            error_msg = f"解析失败: {trace_path}, 错误: {result.stderr}"
            print(error_msg)
            result_queue.put(('error', trace_path, None, error_msg))
            return

        output = result.stdout.strip()

        # Collect every result line (one per benchmark run) from stdout.
        result_lines = []
        for line in output.split('\n'):
            if '解析结果' in line and 'CSV行:' in line:
                result_lines.append(line.split('CSV行: ')[1])

        if not result_lines:
            error_msg = f"未找到解析结果: {trace_path}"
            # Distinguish "parsed but empty" from genuinely missing output.
            if 'CSV generated successfully with 0 rows' in output:
                error_msg += " (没有计算出metrics数据)"
            elif 'No signpost data found' in output:
                error_msg += " (没有signpost数据)"
            else:
                error_msg += f" (输出: {output[:200]}...)"
            print(error_msg)
            result_queue.put(('error', trace_path, None, error_msg))
            return

        # Queue every per-run CSV row; the merged CSV is written later.
        success_count = 0
        for result_line in result_lines:
            # Let the csv module handle quoting instead of splitting by hand.
            csv_data = next(csv.reader(io.StringIO(result_line)))

            # Pad to the full 10-column schema (incl. total_cost / avg_cost).
            while len(csv_data) < 10:
                csv_data.append('')

            result_queue.put(('success', trace_path, csv_data, None))
            success_count += 1

        # Per-file row count so main() can tally successes.
        result_queue.put(('success', trace_path, success_count, None))

        _, round_num, parameters = extract_parameters_from_filename(filename)
        print(f"[{threading.current_thread().name}] 完成解析: {parameters} (共{len(result_lines)}轮, 成功{success_count}轮)")

    except subprocess.TimeoutExpired:
        error_msg = f"解析超时: {trace_path}"
        print(error_msg)
        result_queue.put(('error', trace_path, None, error_msg))
    except Exception as e:
        error_msg = f"解析过程中出错: {trace_path}, 错误: {e}"
        print(error_msg)
        result_queue.put(('error', trace_path, None, error_msg))

def main():
    """Entry point: discover all traces under the given directory, parse them
    in parallel, and merge every per-run CSV row into one timestamped CSV.

    Usage: python3 batch_parse_traces.py <trace_directory>
    """
    import sys
    from queue import Empty

    print("=== 批量trace文件解析工具 ===")

    # The target directory is a required positional argument.
    if len(sys.argv) > 1:
        target_directory = sys.argv[1]
        print(f"使用指定目录: {target_directory}")
        if not os.path.exists(target_directory):
            print(f"错误: 目录不存在: {target_directory}")
            return
        target_directories = [target_directory]
    else:
        print("错误: 请指定要解析的目录")
        print("用法: python3 batch_parse_traces.py <trace_directory>")
        print("例如: python3 batch_parse_traces.py /Users/ufogxl/Desktop/GCDBenchmark_result_20251030_192030")
        return

    # Discover every .trace file / bundle directory below the target(s).
    all_trace_files = []
    for directory in target_directories:
        if os.path.exists(directory):
            print(f"搜索目录: {directory}")
            trace_files = find_trace_files(directory)
            all_trace_files.extend(trace_files)
            print(f"  找到 {len(trace_files)} 个trace相关文件/目录")
        else:
            print(f"目录不存在: {directory}")

    if not all_trace_files:
        print("未找到任何trace文件")
        return

    print(f"找到 {len(all_trace_files)} 个trace文件:")
    for i, trace_file in enumerate(all_trace_files, 1):
        print(f"{i}. {trace_file}")

    # Scratch directory for intermediate XML exports.
    temp_dir = os.path.join(target_directory, "temp_xml")
    os.makedirs(temp_dir, exist_ok=True)
    print(f"临时文件目录: {temp_dir}")

    # Merged output CSV, timestamped so reruns do not collide.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_csv = os.path.join(target_directory, f"batch_results_{timestamp}.csv")

    # Start from a clean file in the unlikely case it already exists.
    if os.path.exists(output_csv):
        os.remove(output_csv)
        print(f"删除已存在的CSV文件: {output_csv}")

    # Column schema shared with trace_parser.py's per-run CSV rows.
    headers = [
        'benchmark_case', 'parameters', 'instructions', 'sched_delay', 'worker_count',
        'wakeup_per_execute', 'first_delay', 'max_delay',
        'total_cost', 'avg_cost'
    ]

    # Write the header up front; workers only queue rows, they never write.
    with open(output_csv, 'w', newline='', encoding='utf-8') as csvfile:
        csv_writer = csv.writer(csvfile, lineterminator='\n')
        csv_writer.writerow(headers)

    print(f"输出CSV文件: {output_csv}")
    print(f"各trace文件的分析结果将保存在: {target_directory}")
    print(f"开始多线程解析，最大并发数: 10")

    # Workers push ('success'|'error', trace_path, payload, error_msg) here.
    result_queue = Queue()

    success_count = 0
    error_count = 0
    max_workers = 10

    # Playback traces are huge; throttle concurrency to limit memory use.
    has_large_files = any('playback' in os.path.basename(f) for f in all_trace_files)
    if has_large_files:
        max_workers = 5
        print(f"检测到大型playback文件，将并发数减少到: {max_workers}")

    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(parse_single_trace, trace_file, target_directory, result_queue)
                   for trace_file in all_trace_files]
        concurrent.futures.wait(futures)

    # Drain the queue: separate data rows from per-file counts and errors.
    # (All workers are done, so get_nowait()/Empty is the clean stop signal;
    # the old bare `except: break` could silently swallow unrelated errors.)
    print(f"\n所有解析任务完成，开始写入结果...")

    all_results = []
    total_rows = 0
    while True:
        try:
            status, trace_path, csv_data, error_msg = result_queue.get_nowait()
        except Empty:
            break
        if status == 'success' and isinstance(csv_data, list) and len(csv_data) > 1:
            # An actual data row (full 10-column record).
            all_results.append((trace_path, csv_data))
        elif status == 'success' and isinstance(csv_data, int):
            # Per-file statistics: number of rows produced by that trace.
            total_rows += csv_data
            success_count += 1
        else:
            print(f"❌ {error_msg}")
            error_count += 1

    # Group rows by source trace so output preserves the discovery order.
    result_dict = {}
    for trace_path, csv_data in all_results:
        result_dict.setdefault(trace_path, []).append(csv_data)

    ordered_results = []
    for trace_file in all_trace_files:
        if trace_file in result_dict:
            ordered_results.extend(result_dict[trace_file])

    # Single-threaded write avoids any interleaving in the merged CSV.
    print(f"开始写入CSV文件（共{len(ordered_results)}行数据）...")
    for csv_data in ordered_results:
        if not append_to_csv_with_lock(output_csv, csv_data, headers):
            print(f"❌ 写入失败: {csv_data}")

    print(f"CSV写入完成")

    print(f"\n=== 解析完成 ===")
    print(f"总文件数: {len(all_trace_files)}")
    print(f"成功解析: {success_count}")
    print(f"解析失败: {error_count}")
    print(f"汇总CSV文件: {output_csv}")
    print(f"总数据行数: {total_rows}")
    print(f"所有分析结果保存在: {target_directory}")
    print(f"临时文件保存在: {temp_dir}")

    # Preview the first few rows of the merged CSV.
    if os.path.exists(output_csv):
        print(f"\n=== CSV文件预览 ===")
        with open(output_csv, 'r', encoding='utf-8') as csvfile:
            for i, row in enumerate(csv.reader(csvfile)):
                if i >= 6:  # header + first 5 data rows
                    break
                print(f"行{i+1}: {row}")

    # Summarize any per-trace "analysis" directories the parser produced.
    print(f"\n=== 分析目录汇总 ===")
    analysis_dirs = set()
    for trace_file in all_trace_files:
        analysis_dir = os.path.join(os.path.dirname(trace_file), "analysis")
        if os.path.exists(analysis_dir):
            analysis_dirs.add(analysis_dir)

    if analysis_dirs:
        print(f"生成的分析目录:")
        for analysis_dir in sorted(analysis_dirs):
            print(f"  {analysis_dir}")
            try:
                files = os.listdir(analysis_dir)
                if files:
                    print(f"    包含文件: {', '.join(sorted(files))}")
            except OSError:
                pass  # listing is informational only

# Run the batch parser only when executed as a script (not on import).
if __name__ == "__main__":
    main()
