import csv
import glob
import os
import sys
from collections import defaultdict

# Canonical column order for the timing CSV files; used to repair files that
# were written without a header row.
STANDARD_HEADERS = [
    'batch_size', 'speculative_num_draft_tokens', 'speculative_num_steps', 'top_k',
    'average_generate_tokens_per_step', 'draft_time_ms', 'verify_time_ms',
    'extend_after_decode_time_ms', 'total_forward_batch_spec_time_ms',
    'total_seq_length', 'total_draft_tokens', 'step',
]


def _load_no_spec_times(path):
    """Read the "batch_size: time" mapping file into a dict.

    Each non-empty line of *path* is expected to look like "64: 123" or
    "64: 12.3456". Returns {batch_size (int): no-spec time (int or float)}.
    """
    batch_no_spec = {}
    with open(path, "r") as fh:
        for raw in fh:
            line = raw.strip()
            if not line or ':' not in line:
                continue
            batch_size_str, time_str = line.split(':')
            time_str = time_str.strip()
            if '.' in time_str:
                # NOTE(review): preserved from the original code — this keeps
                # only the digits AFTER the decimal point and divides by 10
                # (e.g. "12.3456" -> 345.6), per the original "take four
                # decimal digits" comment. Looks suspicious; confirm against
                # the actual file format before changing.
                value = float(time_str.split('.')[-1]) / 10
            else:
                value = int(time_str)
            batch_no_spec[int(batch_size_str.strip())] = value
    return batch_no_spec


def _ensure_header(file_path):
    """Prepend STANDARD_HEADERS to *file_path* if its first line lacks one.

    The file is rewritten via a ".temp" sibling and os.replace so the
    original is only swapped out once the new content is fully written.
    """
    with open(file_path, 'r', newline='') as fh:
        first_line = fh.readline().strip()
        if any(header in first_line for header in STANDARD_HEADERS):
            return  # already has a header row
        print(f"为文件添加表头: {os.path.basename(file_path)}")
        fh.seek(0)
        data_lines = fh.readlines()
    temp_file_path = file_path + '.temp'
    with open(temp_file_path, 'w', newline='') as temp_fh:
        temp_fh.write(','.join(STANDARD_HEADERS) + '\n')
        temp_fh.writelines(data_lines)
    os.replace(temp_file_path, file_path)


def process_output_directory(output_dir):
    """Analyze one output directory and write its report to <dir>/result.txt.

    Reads the no-speculation baseline times from
    "l40_final_analysis_results.txt" (current working directory), computes a
    per-row speedup for every timings_all_setting_rlhf_*.csv in *output_dir*,
    and prints several summary tables. All analysis output is redirected into
    <output_dir>/result.txt; the intermediate *_with_speedup.csv files are
    removed at the end.
    """
    print(f"正在处理目录: {output_dir}")

    result_file = os.path.join(output_dir, "result.txt")
    original_stdout = sys.stdout
    csv_files = glob.glob(os.path.join(output_dir, "timings_all_setting_rlhf_*.csv"))

    with open(result_file, 'w', encoding='utf-8') as report:
        sys.stdout = report
        # try/finally guarantees stdout is restored even if the analysis
        # raises; the original code would otherwise leave sys.stdout pointing
        # at a closed file for the rest of the process.
        try:
            # 1. batch_size -> no-spec baseline time.
            batch_no_spec = _load_no_spec_times("l40_final_analysis_results.txt")
            print(batch_no_spec)

            # Per-file, per-batch-size speedup samples.
            file_batch_avg_speedup = defaultdict(lambda: defaultdict(list))
            # Per-file sum of total_forward_batch_spec_time_ms.
            file_total_spec_time = defaultdict(float)

            # 2. Process every timings_all_setting_rlhf_*.csv file.
            for file_path in csv_files:
                _ensure_header(file_path)
                rows = []
                with open(file_path, newline='') as fh:
                    for row in csv.DictReader(fh):
                        batch_size = int(row['batch_size'])
                        no_spec_time = batch_no_spec.get(batch_size)
                        if no_spec_time is None:
                            # No baseline for this batch size: leave blank.
                            speedup = ''
                        else:
                            avg_gen = float(row['average_generate_tokens_per_step'])
                            total_spec = float(row['total_forward_batch_spec_time_ms'])
                            speedup = no_spec_time * avg_gen / total_spec if total_spec != 0 else ''
                            if speedup != '':
                                file_batch_avg_speedup[file_path][batch_size].append(speedup)
                            file_total_spec_time[file_path] += total_spec
                        # Keep only the columns we report on.
                        rows.append({
                            'batch_size': row['batch_size'],
                            'speculative_num_draft_tokens': row['speculative_num_draft_tokens'],
                            'speedup': speedup,
                            'step': row['step'],
                        })
                # Write the reduced rows next to the source file.
                out_path = file_path.replace('.csv', '_with_speedup.csv')
                with open(out_path, 'w', newline='') as fh:
                    writer = csv.DictWriter(
                        fh,
                        fieldnames=['batch_size', 'speculative_num_draft_tokens', 'speedup', 'step'])
                    writer.writeheader()
                    writer.writerows(rows)
                print(f"已处理并生成: {out_path}")

            # 3. Per-file sum of total_forward_batch_spec_time_ms.
            print("\n=== 每个文件的total_forward_batch_spec_time_ms总和 ===")
            for file_path in sorted(file_total_spec_time):
                file_name = os.path.basename(file_path)
                print(f"{file_name}: {file_total_spec_time[file_path]:.2f} ms")

            # 4. Per-file, per-batch-size average speedup.
            print("\n=== 每个文件每个batch size的平均speedup ===")
            for file_path in sorted(file_batch_avg_speedup):
                print(f"\n文件: {os.path.basename(file_path)}")
                print("batch_size | 平均speedup | 数据点数量")
                print("-" * 40)
                for batch_size in sorted(file_batch_avg_speedup[file_path]):
                    speedups = file_batch_avg_speedup[file_path][batch_size]
                    avg_speedup = sum(speedups) / len(speedups)
                    print(f"{batch_size:9d} | {avg_speedup:11.4f} | {len(speedups):10d}")

            # 5. Top five files by average speedup for each batch size.
            print("\n=== 每个batch size下speedup最高的前五个文件 ===")
            print("batch_size | 排名 | 平均speedup | 文件")
            print("-" * 60)
            all_batch_data = defaultdict(list)
            for file_path in file_batch_avg_speedup:
                file_name = os.path.basename(file_path)
                for batch_size, speedups in file_batch_avg_speedup[file_path].items():
                    all_batch_data[batch_size].append((sum(speedups) / len(speedups), file_name))
            for batch_size in sorted(all_batch_data):
                top_5 = sorted(all_batch_data[batch_size], key=lambda x: x[0], reverse=True)[:5]
                print(f"Batch Size {batch_size}:")
                for rank, (speedup, file_name) in enumerate(top_5, 1):
                    print(f"{batch_size:9d} | {rank:4d} | {speedup:11.4f} | {file_name}")
                print()

            # 6. speculative_num_draft_tokens statistics, "our" files only.
            print("\n=== speculative_num_draft_tokens分析 ===")
            for file_path in csv_files:
                if "our" not in file_path:
                    continue
                batch_tokens = defaultdict(list)
                with open(file_path, newline='') as fh:
                    for row in csv.DictReader(fh):
                        batch_tokens[int(row['batch_size'])].append(
                            float(row['speculative_num_draft_tokens']))
                print(f"文件: {os.path.basename(file_path)}")
                print("batch_size | 平均speculative_num_draft_tokens | 数据点数量")
                print("-" * 45)
                for batch_size in sorted(batch_tokens):
                    tokens = batch_tokens[batch_size]
                    print(f"{batch_size:9d} | {sum(tokens) / len(tokens):27.4f} | {len(tokens):10d}")
                print()
        finally:
            # Restore stdout no matter what happened above.
            sys.stdout = original_stdout

    # Clean up the intermediate *_with_speedup.csv files (report is complete).
    for file_path in glob.glob(os.path.join(output_dir, "*_with_speedup.csv")):
        os.remove(file_path)
        print(f"已删除: {file_path}")

    print(f"处理完成: {output_dir}")

def main():
    """Script entry point: analyze the single target directory."""
    # To batch-process many directories instead (the original had a
    # commented-out loop for outputs_0..outputs_55), iterate i over the
    # desired range and call process_output_directory(f"outputs_{i}") for
    # each directory that os.path.exists reports present.
    process_output_directory("outputs_bs_64")
    print("所有目录处理完成！")


if __name__ == "__main__":
    # Guard so importing this module does not trigger the analysis.
    main()
