import os
import csv
import glob

def extract_parameters_from_path(path):
    """Extract (dtype_str, token_num, block_size, head_num, seed) from a profile path.

    Expected path shape: prof/<test_str>/local/A*/op_statistic.csv, where
    <test_str> is five values joined by 'x', e.g. 'fp16x128x16x8x42'.

    Returns:
        A 5-tuple of strings on success, or (None, None, None, None, None)
        when no path component splits into exactly five 'x'-separated fields.

    NOTE(review): parsing breaks if any individual value itself contains an
    'x' (that component then splits into more than five fields) — confirm
    dtype strings used in practice are 'x'-free.
    """
    for part in path.split(os.sep):
        fields = part.split('x')
        # Exactly five 'x'-separated fields identifies the test_str component.
        # (len == 5 implies the part contains 'x', so no separate membership
        # test is needed; the original's post-loop length re-check was
        # unreachable and has been dropped.)
        if len(fields) == 5:
            return tuple(fields)
    return None, None, None, None, None

# Operator names tracked in each op_statistic.csv; also the per-operator
# output columns, in header order. Single source of truth so the output
# header and the accumulator dict cannot drift apart.
_TRACKED_OPS = [
    'matmul_bias_scale', 'pad_head_rope_x', 'rms_norm_quant',
    'matmul_ein_sum_triton', 'rms_norm_rope_rac_cal_kernel',
    'Slice', 'ConcatD', 'ZerosLike', 'Cast',
]


def _read_op_times(file_path, calls_per_run):
    """Read one op_statistic.csv and return {op_name: per-run time in us}.

    Per-run time is (Count / calls_per_run) * Avg Time(us): 'Count' is the
    operator's total number of executions over calls_per_run profiled runs.
    Operators absent from the file stay at 0.0.
    """
    op_times = dict.fromkeys(_TRACKED_OPS, 0.0)
    with open(file_path, 'r', encoding='utf-8') as in_csv:
        for row in csv.DictReader(in_csv):
            op_type = row['OP Type']
            if op_type in op_times:
                count = int(row['Count'])
                avg_time = float(row['Avg Time(us)'])
                op_times[op_type] = (count / calls_per_run) * avg_time
    return op_times


def process_op_statistic_files(output_file, calls_per_run=20):
    """Aggregate every prof/*/local*/A*/op_statistic.csv into one summary CSV.

    Each output row carries the run parameters parsed from the profile path
    plus the per-run time of each tracked operator and their total.

    Args:
        output_file: path of the summary CSV to write.
        calls_per_run: number of profiled invocations each 'Count' column
            covers (previously hard-coded to 20; kept as the default for
            backward compatibility).
    """
    # Path layout: prof/<test_str>/local*/A*/op_statistic.csv.
    # The pattern contains no '**', so recursive globbing is not needed
    # (the original passed recursive=True, which was a no-op here).
    file_pattern = os.path.join('prof', '*', 'local*', 'A*', 'op_statistic.csv')
    files = glob.glob(file_pattern)

    if not files:
        print("未找到任何op_statistic.csv文件")
        return

    # Output columns: run parameters + per-operator times + grand total.
    fieldnames = [
        'dtype_str', 'token_num', 'block_size', 'head_num', 'seed',
        *_TRACKED_OPS, 'total_time_us',
    ]
    with open(output_file, 'w', newline='', encoding='utf-8') as out_csv:
        writer = csv.DictWriter(out_csv, fieldnames=fieldnames)
        writer.writeheader()

        for file_path in files:
            print(f"处理文件: {file_path}")

            params = extract_parameters_from_path(file_path)
            # Any missing/empty field means the path is not a valid test dir.
            if not all(params):
                print(f"警告: 无法从路径 {file_path} 中提取参数，已跳过")
                continue
            dtype_str, token_num, block_size, head_num, seed = params

            op_times = _read_op_times(file_path, calls_per_run)
            total_time = sum(op_times.values())

            row_data = {
                'dtype_str': dtype_str,
                'token_num': token_num,
                'block_size': block_size,
                'head_num': head_num,
                'seed': seed,
                'total_time_us': round(total_time, 3),
            }
            row_data.update({op: round(t, 3) for op, t in op_times.items()})
            writer.writerow(row_data)

    print(f"处理完成，结果已保存到 {output_file}")

if __name__ == "__main__":
    # Destination file for the aggregated per-operator timing results.
    RESULT_CSV = "operation_times.csv"
    process_op_statistic_files(RESULT_CSV)