import csv
import glob
import os
import numpy as np
from collections import defaultdict

def analyze_speedup_distribution():
    """Print per-file statistics and a histogram of the ``speedup`` column.

    Scans the working directory for ``timings_all_setting_rlhf_*_with_speedup.csv``
    files and, for each one, reports overall statistics (mean/median/std/min/max),
    a binned distribution, and per-``batch_size`` aggregates. Rows whose
    ``speedup`` cell is empty are skipped.
    """

    # Files produced by the timing pipeline that already carry a speedup column.
    speedup_files = glob.glob("timings_all_setting_rlhf_*_with_speedup.csv")

    print("=== 每个文件 Speedup 分布情况分析 ===\n")

    # Histogram bins as (low, high, label); low inclusive, high exclusive.
    # Open-ended edges use +/-inf so every bin uses the same mask expression.
    bin_edges = [
        (float("-inf"), 0.5, "< 0.5"),
        (0.5, 0.8, "0.5-0.8"),
        (0.8, 1.0, "0.8-1.0"),
        (1.0, 1.2, "1.0-1.2"),
        (1.2, 1.4, "1.2-1.4"),
        (1.4, 1.6, "1.4-1.6"),
        (1.6, 1.8, "1.6-1.8"),
        (1.8, 2.0, "1.8-2.0"),
        (2.0, float("inf"), ">= 2.0"),
    ]

    for file_path in sorted(speedup_files):
        file_name = os.path.basename(file_path)
        print(f"文件: {file_name}")
        print("-" * 60)

        speedups = []
        batch_speedups = defaultdict(list)

        # Read the file; a truthy check alone is enough to skip empty cells.
        with open(file_path, newline='') as f:
            reader = csv.DictReader(f)
            for row in reader:
                if row['speedup']:
                    speedup = float(row['speedup'])
                    speedups.append(speedup)
                    batch_speedups[int(row['batch_size'])].append(speedup)

        if not speedups:
            print("  无有效 speedup 数据")
            print()
            continue

        # Overall summary statistics.
        speedups_array = np.array(speedups)
        print(f"  总数据点: {len(speedups)}")
        print(f"  平均值: {np.mean(speedups_array):.4f}")
        print(f"  中位数: {np.median(speedups_array):.4f}")
        print(f"  标准差: {np.std(speedups_array):.4f}")
        print(f"  最小值: {np.min(speedups_array):.4f}")
        print(f"  最大值: {np.max(speedups_array):.4f}")

        # Binned distribution; compute each mask once instead of twice per bin.
        print(f"  分布区间:")
        total = len(speedups)
        for low, high, label in bin_edges:
            count = int(np.sum((speedups_array >= low) & (speedups_array < high)))
            print(f"    {label}: {count:6d} ({count / total * 100:5.1f}%)")

        # Per-batch-size aggregates.
        print(f"  按 batch_size 统计:")
        for batch_size in sorted(batch_speedups.keys()):
            batch_data = np.array(batch_speedups[batch_size])
            print(f"    batch_size {batch_size:2d}: 平均 {np.mean(batch_data):.4f}, 中位数 {np.median(batch_data):.4f}, 最高 {np.max(batch_data):.4f}, 数据点 {len(batch_data):4d}")

        print()

def analyze_speedup_by_batch_size():
    """Aggregate speedup values across all result files, grouped by batch size.

    For every batch size found in the ``timings_all_setting_rlhf_*_with_speedup.csv``
    files, prints an overall mean/median/max plus a per-file breakdown. Rows with
    an empty ``speedup`` cell are ignored.
    """

    print("=== 按 Batch Size 的 Speedup 分析 ===\n")

    csv_paths = sorted(glob.glob("timings_all_setting_rlhf_*_with_speedup.csv"))

    # speedups keyed by batch size, and by (batch size, file name).
    per_batch = defaultdict(list)
    per_batch_per_file = defaultdict(lambda: defaultdict(list))

    for path in csv_paths:
        name = os.path.basename(path)
        with open(path, newline='') as handle:
            for record in csv.DictReader(handle):
                raw = record['speedup']
                if not raw or raw == '':
                    continue
                value = float(raw)
                bs = int(record['batch_size'])
                per_batch[bs].append(value)
                per_batch_per_file[bs][name].append(value)

    # Emit one section per batch size, smallest first.
    for bs in sorted(per_batch):
        print(f"Batch Size {bs}:")
        print("-" * 40)

        overall = np.array(per_batch[bs])
        print(f"  总体: 平均 {np.mean(overall):.4f}, 中位数 {np.median(overall):.4f}, 最高 {np.max(overall):.4f}, 数据点 {len(overall)}")

        # Per-file breakdown in sorted filename order.
        for name in sorted(per_batch_per_file[bs]):
            values = np.array(per_batch_per_file[bs][name])
            print(f"    {name}: 平均 {np.mean(values):.4f}, 最高 {np.max(values):.4f}, 数据点 {len(values):4d}")

        print()

def find_highest_speedup_by_batch():
    """Report, for every batch size, which file achieved the highest speedup.

    Scans the ``timings_all_setting_rlhf_*_with_speedup.csv`` files, keeps the
    best speedup seen per (batch size, file), then prints a table of the
    winning file per batch size. Rows with an empty ``speedup`` cell are
    ignored.
    """

    print("=== 每个 Batch Size 下最高 Speedup 的文件 ===\n")

    paths = sorted(glob.glob("timings_all_setting_rlhf_*_with_speedup.csv"))

    # batch_size -> {file_name: best speedup seen in that file}
    best_per_file = defaultdict(dict)

    for path in paths:
        name = os.path.basename(path)
        with open(path, newline='') as handle:
            for record in csv.DictReader(handle):
                cell = record['speedup']
                if not cell or cell == '':
                    continue
                value = float(cell)
                bs = int(record['batch_size'])
                # Keep only the running maximum for this (batch size, file).
                per_file = best_per_file[bs]
                previous = per_file.get(name)
                if previous is None or value > previous:
                    per_file[name] = value

    # One table row per batch size; ties keep the first file encountered.
    print("batch_size | 最高speedup | 文件")
    print("-" * 50)
    for bs in sorted(best_per_file):
        winner, top = "", 0
        for name, value in best_per_file[bs].items():
            if value > top:
                winner, top = name, value
        print(f"{bs:9d} | {top:10.4f} | {winner}")

if __name__ == "__main__":
    # Run the three reports in sequence, separated by a divider line.
    divider = "\n" + "=" * 80 + "\n"
    analyze_speedup_distribution()
    print(divider)
    analyze_speedup_by_batch_size()
    print(divider)
    find_highest_speedup_by_batch()