import os
import pandas as pd
import re

def process_data(base_dir, target_dir):
    """Scan ``base_dir`` for result CSVs, filter their rows, and write a
    combined ``summary.csv`` into ``target_dir``.

    File names are expected to look like
    ``<x>node-<y>proc-cn<...>-<z>interation*.csv`` (note: "interation" is
    the data set's own spelling — do not "fix" it).  The numeric fields
    and the CN token are parsed out of the name and attached to every
    kept row as metadata columns.

    Parameters:
        base_dir (str): root directory to walk for input CSV files.
        target_dir (str): directory for ``summary.csv`` (created if missing).
    """
    # Compile once instead of per file.  The CN field is either a plain
    # number ("cn12") or a bracketed node list ("cn[1-4,7]").
    pattern = re.compile(r'(\d+)node-(\d+)proc-(cn(?:\[[^\]]+\]|\d+))-(\d+)interation')
    results = []

    for root, dirs, files in os.walk(base_dir):
        for file in files:
            # Cheap suffix test first; run the regex only on candidates.
            if not file.endswith('.csv'):
                continue
            match = pattern.match(file)
            if not match:
                continue
            try:
                # Extract the metadata fields from the regex groups.
                xnode = int(match.group(1))
                yproc = int(match.group(2))
                cnxxx = match.group(3)  # keep the raw CN string as-is
                yiteration = int(match.group(4))

                print(f"获取到文件信息：xnode = {xnode}, yproc = {yproc}, cnxxx = {cnxxx}, yiteration = {yiteration}")

                file_path = os.path.join(root, file)
                df = pd.read_csv(file_path)

                # Keep rows where mean_appearance_time == 1, or comm_type
                # is 50/55.  The string variants are matched too, since a
                # mixed-content CSV column loads as object dtype.
                filtered = df[(df['mean_appearance_time'] == 1) | (df['comm_type'] == 50) | (df['comm_type'] == 55) |
                (df['comm_type'] == '50') | (df['comm_type'] == '55')]

                if not filtered.empty:
                    # Attach the metadata parsed from the file name.
                    filtered = filtered.assign(
                        subfolder=os.path.basename(root),
                        node=xnode,
                        proc=yproc,
                        cn=cnxxx,
                        iteration=yiteration
                    )

                    # Project onto the output column set.
                    result_df = filtered[[
                        'subfolder', 'node', 'proc', 'cn', 'iteration',
                        'comm_type', 'total_size', 'avg_comm_time'
                    ]]
                    results.append(result_df)

            except Exception as e:
                print(f"Error processing {file}: {str(e)}")

    # Merge all kept rows; empty frame when nothing matched.
    final_df = pd.concat(results, ignore_index=True) if results else pd.DataFrame()

    os.makedirs(target_dir, exist_ok=True)

    output_path = os.path.join(target_dir, 'summary.csv')
    # utf-8-sig so spreadsheet tools open non-ASCII content correctly.
    final_df.to_csv(output_path, index=False, encoding='utf-8-sig')
    print(f"Output saved to: {output_path}")
    print(f"Total records processed: {len(final_df)}")

def analyze_summary(summary_path, end_dir):
    """Group summary.csv and average ``avg_comm_time`` per group.

    Groups by (subfolder, node, proc, cn, iteration, comm_type,
    total_size) and writes the result to
    ``<end_dir>/program_startup_cost.csv``.

    Parameters:
        summary_path (str): path to the summary.csv produced by
            ``process_data``.
        end_dir (str): output directory (created if missing).

    Returns:
        pandas.DataFrame | None: the grouped result, or None when the
        input is empty or an error occurred.
    """
    try:
        os.makedirs(end_dir, exist_ok=True)

        df = pd.read_csv(summary_path)

        # Nothing to analyze on an empty summary.
        if df.empty:
            print("警告: summary.csv文件为空，无法进行分析。")
            return

        # All metadata columns participate in the grouping key, so each
        # group is one (run configuration, comm_type, total_size) cell.
        group_columns = ['subfolder', 'node', 'proc', 'cn', 'iteration', 'comm_type', 'total_size']

        grouped = df.groupby(group_columns)['avg_comm_time'].mean().reset_index()

        output_path = os.path.join(end_dir, 'program_startup_cost.csv')
        grouped.to_csv(output_path, index=False)
        print(f"分析结果已保存至: {output_path}")
        print(f"分组统计记录数: {len(grouped)}")

        return grouped

    except Exception as e:
        print(f"分析过程中出错: {str(e)}")
        import traceback
        traceback.print_exc()
def analyze_startup_cost(input_path, output_dir):
    """Sum ``avg_comm_time`` per (node, proc, iteration, comm_type).

    Reads program_startup_cost.csv and writes
    ``<output_dir>/node_proc_iteration_comm_type_stats.csv`` with the
    aggregate in a ``sum_avg_comm_time`` column.

    Parameters:
        input_path (str): path to program_startup_cost.csv.
        output_dir (str): output directory (created if missing).

    Returns:
        pandas.DataFrame | None: the aggregated result, or None when the
        input is empty or an error occurred.
    """
    try:
        df = pd.read_csv(input_path)

        if df.empty:
            print("警告：输入文件为空，无法进行分析。")
            return

        # Single sum aggregation; rename just the value column instead of
        # rebuilding the whole column list afterwards.
        grouped = (
            df.groupby(['node', 'proc', 'iteration', 'comm_type'])['avg_comm_time']
              .sum()
              .reset_index()
              .rename(columns={'avg_comm_time': 'sum_avg_comm_time'})
        )

        os.makedirs(output_dir, exist_ok=True)
        output_path = os.path.join(output_dir, 'node_proc_iteration_comm_type_stats.csv')

        grouped.to_csv(output_path, index=False)
        print(f"分析结果已保存至: {output_path}")
        print(f"分组统计记录数: {len(grouped)}")

        return grouped

    except Exception as e:
        print(f"分析过程中出错: {str(e)}")
        import traceback
        traceback.print_exc()

import pandas as pd


# Read the lookup data from the final CSV file.
def read_program_startup_data(csv_path):
    """Build a communication-time lookup table from a CSV file.

    The CSV must contain the columns ``node``, ``proc``, ``iteration``,
    ``comm_type``, ``total_size`` and ``avg_comm_time`` (e.g. the
    ``program_startup_cost.csv`` produced by ``analyze_summary``).

    Parameters:
        csv_path (str): path to the CSV file.

    Returns:
        dict: maps ``(node, proc, iteration, comm_type, total_size)`` to
        ``avg_comm_time`` as a float.  (The original docstring claimed a
        (DataFrame, dict) tuple, but only the dict was ever returned.)
    """
    df = pd.read_csv(csv_path)

    time_map = {}
    # itertuples avoids the per-row Series construction of iterrows and
    # is significantly faster on large tables.
    for row in df.itertuples(index=False):
        # Unique key: (node, proc, iteration, comm_type, total_size).
        key = (
            int(row.node),
            int(row.proc),
            int(row.iteration),
            int(row.comm_type),
            float(row.total_size)  # float so fractional sizes still match
        )
        time_map[key] = float(row.avg_comm_time)

    return time_map


if __name__ == '__main__':
    # Previous input/output locations (kept for reference):
    # base_dir = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_data\analysis_new1-16nodes_data\analysis_for_different_iteration\4node"
    # target_dir = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_data\analysis_new1-16nodes_data\analysis_for_different_iteration\all_predict_precision"

    base_dir = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_for_sendrecv\analysis_1-16nodes_data\16node"
    target_dir = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_for_sendrecv\analysis_1-16nodes_data\16node\get_program_startup_cost"

    # Later-stage results share the summary directory (point elsewhere if needed).
    end_dir = target_dir

    # Stage 1: collect and filter the raw per-run CSVs into summary.csv.
    process_data(base_dir, target_dir)

    # Stage 2: group the summary into program_startup_cost.csv.
    analyze_summary(os.path.join(target_dir, 'summary.csv'), end_dir)

    # Stage 3: aggregate per (node, proc, iteration, comm_type).
    analyze_startup_cost(os.path.join(end_dir, 'program_startup_cost.csv'), end_dir)