import pandas as pd
import os


# 删除指定文件夹下所有 _processed.csv 结尾的文件
def delete_processed_files(folder_path):
    """Delete every '*_processed.csv' file directly inside *folder_path*.

    Used to clear outputs of a previous run before re-processing, so
    stale results never get picked up as inputs.
    """
    matches = [name for name in os.listdir(folder_path)
               if name.endswith('_processed.csv')]
    for fname in matches:
        target = os.path.join(folder_path, fname)
        try:
            os.remove(target)
            print(f"已删除文件: {target}")
        except Exception as err:
            # Best-effort cleanup: report the failure and keep going.
            print(f"删除文件 {target} 时出错: {str(err)}")

# 处理lammps_cu生成的数据文件，首先进行一些基本的处理，生成processed.csv文件
# 处理后的结果： 
# 1.comm_type 只有点对点通信 
# 2.会计算相应的total_size = send_size * send_count 
# 3.统计相同comm_type下total_size情况下的出现次数
def process_log_file(file_path):
    """Pre-process one raw lammps_cu communication-log CSV.

    Processing steps:
    1. Keep only point-to-point communication rows (comm_type 50-57).
    2. Add a ``total_size`` column = ``sendsize * sendcount``.
    3. Add an ``appearance_time`` column counting how often each
       (comm_type, total_size) pair occurs.
    The result is written next to the input as ``<name>_processed.csv``.

    Parameters
    ----------
    file_path : str
        Path to the input CSV; must contain the columns
        'sendsize', 'sendcount' and 'comm_type'.

    Returns
    -------
    str or None
        Path of the generated ``_processed.csv`` file, or None if the
        file was empty, malformed, or any other error occurred.
    """
    try:
        df = pd.read_csv(file_path)

        # Validate required columns up front for a clear error message.
        required_columns = ['sendsize', 'sendcount', 'comm_type']
        if not all(col in df.columns for col in required_columns):
            raise ValueError("CSV文件缺少必要的列")

        # Keep only point-to-point communication types (50-57).
        # .copy() materializes the slice so the column assignments below
        # do not raise pandas' SettingWithCopyWarning (chained assignment
        # on a view of df).
        filtered_df = df[(df['comm_type'] >= 50) & (df['comm_type'] <= 57)].copy()

        # Total bytes transferred per record.
        filtered_df['total_size'] = filtered_df['sendsize'] * filtered_df['sendcount']

        # Occurrence count of each (comm_type, total_size) pair,
        # broadcast back onto every row of the group.
        filtered_df['appearance_time'] = filtered_df.groupby(
            ['comm_type', 'total_size'])['total_size'].transform('count')

        # Write the result alongside the input file.
        output_path = file_path.replace('.csv', '_processed.csv')
        filtered_df.to_csv(output_path, index=False)

        return output_path
    except pd.errors.EmptyDataError:
        print(f"错误：文件 {file_path} 为空")
        return None
    except pd.errors.ParserError:
        print(f"错误：文件 {file_path} 格式不正确")
        return None
    except Exception as e:
        print(f"处理文件 {file_path} 时发生错误: {str(e)}")
        return None


# 遍历文件夹下面的所有文件
# 1. 首先删除之前生成的 _processed.csv 文件 
# 2. 对所有文件进行处理
def process_folder(folder_path):
    """Process every matching CSV file in *folder_path*.

    First removes stale ``*_processed.csv`` outputs, then runs
    :func:`process_log_file` on each candidate input file.
    """
    if not os.path.exists(folder_path):
        print(f"错误：路径 {folder_path} 不存在")
        return

    # Clear outputs from previous runs so they are never re-processed.
    delete_processed_files(folder_path)

    # Currently restricted to files named '*log-0.csv'; widen the first
    # endswith check to '.csv' to handle every CSV in the folder.
    for entry in os.listdir(folder_path):
        if entry.endswith('log-0.csv') and not entry.endswith('_processed.csv'):
            source = os.path.join(folder_path, entry)
            result = process_log_file(source)
            if result:
                print(f"处理完成: {entry}")
                print(f"输出文件: {result}")
# 处理一个集合里面的所有 文件夹
def process_folders(folders):
    """Run :func:`process_folder` over every path in *folders*."""
    for path in folders:
        process_folder(path)

if __name__ == "__main__":
    # 这里可以指定要处理的文件夹路径
    # folder_path = r"f:\PostGraduate\Point-to-Point-DATA\tiny-nodes-by-hzpProfile\3439153-1node-8proc-0-w-50"
    # folder_path = r"f:\PostGraduate\Point-to-Point-DATA\tiny-nodes-by-hzpProfile\3458116-2node-32proc-0-w-100"
    # process_folder(folder_path)

    # 指定根目录
    # root_path = r"f:\PostGraduate\Point-to-Point-DATA\small-nodes"
    # root_path = r"F:/PostGraduate/Point-to-Point-DATA\big-nodes/LAMMPS"
    root_path = r"F:/PostGraduate/Point-to-Point-DATA\small-nodes/3493299-2node-32proc-0-w-10"
    
    
    # 遍历根目录下的所有子文件夹
    for root, dirs, files in os.walk(root_path):
        # 如果当前文件夹包含CSV文件，则处理该文件夹
        # if any(file.endswith('.csv') for file in files):

        # 目前先处理log-0.csv
        if any(file.endswith('log-0.csv') for file in files):
            print(f"\n正在处理文件夹: {root}")
            process_folder(root)