#!/usr/bin/env python3
"""
处理CSV文件，添加表头，计算统计数据
"""

import os
import pandas as pd
import glob
from collections import defaultdict

def _load_and_normalize(csv_file, header):
    """Read one accept-length CSV, attach/repair the header, coerce both
    columns to numeric, drop invalid rows, and write the cleaned file back.

    Returns the cleaned DataFrame, or None if the file does not have
    exactly two columns (logged and skipped by the caller).
    """
    df = pd.read_csv(csv_file, header=None)

    if len(df.columns) != 2:
        print(f"  - 警告: 文件列数不正确，跳过: {csv_file}")
        return None

    # BUG FIX: pd.to_numeric(..., errors='coerce') returns NaN (never None)
    # for unparseable values, so the previous `is not None` check was always
    # True and the "existing header" branch was unreachable. pd.notna() is
    # the correct test for "first cell is numeric, i.e. file has no header".
    if pd.notna(pd.to_numeric(df.iloc[0, 0], errors='coerce')):
        df.columns = header
        print(f"  - 添加表头: {header}")
    else:
        # First row is a textual header; relabel the columns. The stale
        # header row itself becomes all-NaN after to_numeric and is removed
        # by dropna() below.
        df.columns = header
        print(f"  - 修正表头: {header}")

    # Coerce both columns to numbers; anything unparseable becomes NaN.
    df['req_id'] = pd.to_numeric(df['req_id'], errors='coerce')
    df['accept_length'] = pd.to_numeric(df['accept_length'], errors='coerce')
    df = df.dropna()

    # Persist the normalized file (now with a proper header row).
    df.to_csv(csv_file, index=False)
    print(f"  - 处理完成，共 {len(df)} 行数据")
    return df


def _compute_stats(df):
    """Aggregate accept_length per req_id.

    Returns a list of (req_id, total, count, avg) tuples sorted by req_id;
    `avg` is kept unrounded so threshold checks use the exact value.
    """
    grouped = df.groupby('req_id')['accept_length'].agg(['sum', 'count'])
    stats = []
    for req_id, row in grouped.sort_index().iterrows():
        total = int(row['sum'])
        count = int(row['count'])
        avg = total / count if count > 0 else 0
        stats.append((int(req_id), total, count, avg))
    return stats


def _write_result_file(result_file, base_name, stats):
    """Write the per-request statistics table for one batch file."""
    with open(result_file, 'w', encoding='utf-8') as f:
        f.write(f"SGLang Eagle Speculative Decoding 接受Token数统计结果 - {base_name}\n")
        f.write("=" * 60 + "\n\n")

        f.write("每个请求ID的统计信息:\n")
        f.write("-" * 40 + "\n")
        f.write(f"{'请求ID':<8} {'总接受数':<12} {'请求次数':<10} {'平均接受数':<12}\n")
        f.write("-" * 40 + "\n")

        for req_id, total, count, avg in stats:
            f.write(f"{req_id:<8} {total:<12} {count:<10} {round(avg, 2):<12}\n")


def _report_high_accept(high_accept_records):
    """Sort, persist, and print the (file_number, req_id) pairs whose
    average accept length is >= 4. Mutates the list in place (sort)."""
    print(f"\n" + "=" * 40)
    print("平均接受数大于等于4的记录:")
    print("=" * 40)

    if not high_accept_records:
        print("没有找到平均接受数大于等于4的记录")
        return

    # Sort by numeric file number first, then by request id.
    high_accept_records.sort(key=lambda x: (int(x['file_number']), x['req_id']))

    # Persist only "file_number req_id" pairs, one per line.
    high_accept_file = "sglang/outputs/high_accept_records.txt"
    with open(high_accept_file, 'w', encoding='utf-8') as f:
        for record in high_accept_records:
            f.write(f"{record['file_number']} {record['req_id']}\n")

    print(f"高接受数记录已保存到: {high_accept_file}")
    print(f"共找到 {len(high_accept_records)} 个高接受数记录")

    print("前10个记录示例:")
    for record in high_accept_records[:10]:
        print(f"文件{record['file_number']} 请求{record['req_id']}")


def process_csv_files():
    """Normalize every sglang/outputs/batch_*_accept.csv, compute per-request
    accept-length statistics, write one result_<batch>.txt per input file,
    and collect requests whose average accept length is >= 4.

    Per-file failures are logged and skipped; always returns True.
    """
    csv_files = glob.glob("sglang/outputs/batch_*_accept.csv")
    csv_files.sort()  # deterministic processing order by filename

    print(f"找到 {len(csv_files)} 个CSV文件")

    # Canonical column names for the accept-length CSVs.
    header = ["req_id", "accept_length"]

    # Accumulates records with average accept length >= 4 across all files.
    high_accept_records = []

    for csv_file in csv_files:
        print(f"处理文件: {csv_file}")

        # e.g. "batch_0_accept"; defined before the try so the except/report
        # paths can rely on it.
        base_name = os.path.splitext(os.path.basename(csv_file))[0]

        try:
            df = _load_and_normalize(csv_file, header)
            if df is None:
                continue

            stats = _compute_stats(df)

            # "batch_0_accept" -> "0"
            file_number = base_name.split('_')[1]
            for req_id, total, count, avg in stats:
                # Threshold uses the exact (unrounded) average.
                if avg >= 4:
                    high_accept_records.append({
                        "file_number": file_number,
                        "req_id": req_id,
                        "avg_accept_length": round(avg, 2),
                        "total_accept_length": total,
                        "count": count
                    })

            result_file = f"sglang/outputs/result_{base_name}.txt"
            _write_result_file(result_file, base_name, stats)
            print(f"  - 统计结果已保存到: {result_file}")

        except Exception as e:
            # Best-effort batch processing: log and move on to the next file.
            print(f"  - 错误处理文件 {csv_file}: {e}")
            continue

    _report_high_accept(high_accept_records)

    print(f"\n所有文件处理完成！")
    return True

if __name__ == "__main__":
    # Return value is always True and was never used; don't bind it.
    process_csv_files()