import csv
import glob
import os
import numpy as np
from collections import defaultdict

def _load_times(file_path):
    """Read one timings CSV and collect valid total_forward_batch_spec_time_ms values.

    Returns a tuple ``(times, batch_times)`` where ``times`` is a flat list of
    all parsed timings (ms) and ``batch_times`` maps batch_size -> list of
    timings for that batch size.  Rows with a missing/empty timing column, or
    with values that fail ``float``/``int`` conversion, are skipped silently.
    """
    times = []
    batch_times = defaultdict(list)
    with open(file_path, newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            if row.get('total_forward_batch_spec_time_ms'):
                try:
                    time_ms = float(row['total_forward_batch_spec_time_ms'])
                    batch_size = int(row['batch_size'])
                except (ValueError, KeyError, TypeError):
                    # Malformed row (bad number or missing batch_size) — skip it.
                    continue
                times.append(time_ms)
                batch_times[batch_size].append(time_ms)
    return times, batch_times


def analyze_total_forward_time():
    """Print per-file statistics for the total_forward_batch_spec_time_ms column.

    For every ``timings_all_setting_rlhf_*.csv`` in the current directory,
    prints overall summary stats (mean/median/std/min/max), a fixed-bucket
    latency histogram, and per-batch-size aggregates.
    """

    print("=== 每个文件 total_forward_batch_spec_time_ms 统计信息 ===\n")

    # 获取所有 csv 文件
    csv_files = glob.glob("timings_all_setting_rlhf_*.csv")

    # Histogram bucket edges and their display labels; edges are half-open
    # [lo, hi) except the final bucket which absorbs everything >= 40ms.
    bin_edges = [-np.inf, 20, 25, 30, 35, 40, np.inf]
    bin_labels = ["< 20ms", "20-25ms", "25-30ms", "30-35ms", "35-40ms", ">= 40ms"]

    for file_path in sorted(csv_files):
        file_name = os.path.basename(file_path)
        print(f"文件: {file_name}")
        print("-" * 60)

        times, batch_times = _load_times(file_path)

        if not times:
            print("  无有效数据")
            print()
            continue

        # 计算总体统计信息
        times_array = np.array(times)
        print(f"  总数据点: {len(times)}")
        print(f"  平均值: {np.mean(times_array):.4f} ms")
        print(f"  中位数: {np.median(times_array):.4f} ms")
        print(f"  标准差: {np.std(times_array):.4f} ms")
        print(f"  最小值: {np.min(times_array):.4f} ms")
        print(f"  最大值: {np.max(times_array):.4f} ms")

        # 分布区间统计 — one histogram pass instead of recomputing each
        # boolean mask twice (once for the count, once for the percentage).
        print("  分布区间:")
        counts, _ = np.histogram(times_array, bins=bin_edges)
        total = len(times)
        for label, count in zip(bin_labels, counts):
            print(f"    {label}: {count:6d} ({count / total * 100:5.1f}%)")

        # 按 batch_size 统计
        print("  按 batch_size 统计:")
        for batch_size in sorted(batch_times.keys()):
            batch_data = np.array(batch_times[batch_size])
            print(f"    batch_size {batch_size:2d}: 平均 {np.mean(batch_data):.4f}ms, 中位数 {np.median(batch_data):.4f}ms, 数据点 {len(batch_data):4d}")

        print()

def analyze_total_time_by_batch_size():
    """Aggregate total_forward_batch_spec_time_ms across all CSVs, keyed by batch_size.

    Scans every ``timings_all_setting_rlhf_*.csv`` in the current directory,
    then prints, for each batch size in ascending order, the overall mean /
    median / sample count followed by a per-file mean breakdown.
    """

    print("=== 按 Batch Size 的 total_forward_batch_spec_time_ms 分析 ===\n")

    # 获取所有 csv 文件
    source_files = sorted(glob.glob("timings_all_setting_rlhf_*.csv"))

    # 按 batch_size 收集数据
    # pooled[bs]  -> all timings seen for that batch size (any file)
    # by_file[bs] -> {file name -> timings from that file only}
    pooled = {}
    by_file = {}

    for path in source_files:
        name = os.path.basename(path)
        with open(path, newline='') as handle:
            for record in csv.DictReader(handle):
                raw = record.get('total_forward_batch_spec_time_ms')
                if not raw:
                    continue
                try:
                    elapsed = float(raw)
                    bs = int(record['batch_size'])
                except (ValueError, KeyError):
                    continue
                pooled.setdefault(bs, []).append(elapsed)
                by_file.setdefault(bs, {}).setdefault(name, []).append(elapsed)

    # 按 batch_size 输出统计信息
    for bs in sorted(pooled):
        print(f"Batch Size {bs}:")
        print("-" * 40)

        overall = np.array(pooled[bs])
        print(f"  总体: 平均 {np.mean(overall):.4f}ms, 中位数 {np.median(overall):.4f}ms, 数据点 {len(overall)}")

        # 按文件统计
        per_file = by_file[bs]
        for name in sorted(per_file):
            samples = np.array(per_file[name])
            print(f"    {name}: 平均 {np.mean(samples):.4f}ms, 数据点 {len(samples):4d}")

        print()

# Script entry point: run the per-file report first, then the cross-file
# per-batch-size report, separated by a divider line.
if __name__ == "__main__":
    analyze_total_forward_time()
    print("\n" + "="*80 + "\n")
    analyze_total_time_by_batch_size() 