import os
import pandas as pd

def aggregate_comm_data(base_dir, target_dir):
    """
    Aggregate communication data for comm_type 51 from CSV files
    in a directory and its subdirectories.

    Walks ``base_dir`` recursively, collects the rows whose ``comm_type``
    equals 51 from every CSV that contains the required columns, and
    writes the combined rows to ``lammps_51_real_comm_time.csv`` inside
    ``target_dir`` (created if missing). If no matching rows are found,
    nothing is written.

    Args:
        base_dir (str): The base directory to search for CSV files.
        target_dir (str): The directory to save the aggregated data.
    """
    all_comm_data = []
    processed_files = 0
    files_with_comm_51 = 0

    print(f"开始搜索目录: {base_dir}")

    # Recursively walk through the directory
    for root, _, files in os.walk(base_dir):
        print(f"正在处理目录: {root}")
        for file in files:
            if file.endswith('.csv'):
                file_path = os.path.join(root, file)
                processed_files += 1
                print(f"处理文件 {processed_files}: {file}")

                try:
                    # Read the CSV file
                    df = pd.read_csv(file_path)
                    print(f"  - 文件行数: {len(df)}")
                    print(f"  - 列名: {list(df.columns)}")

                    # Check if required columns exist
                    required_cols = ['comm_type', 'total_size', 'avg_comm_time']
                    if not all(col in df.columns for col in required_cols):
                        print(f"  - 跳过文件: 缺少必需的列")
                        continue

                    # Check unique comm_type values
                    unique_comm_types = df['comm_type'].unique()
                    print(f"  - comm_type 唯一值: {unique_comm_types}")

                    # Filter for comm_type == 51.
                    # BUG FIX: pandas infers the dtype of comm_type per file,
                    # so the previous string compare (== '51') silently matched
                    # nothing whenever the column was parsed as int/float.
                    # Coerce to numeric (non-numeric values become NaN and
                    # never equal 51) and compare against the number 51 —
                    # this works for int, float, and string columns alike.
                    comm_type_num = pd.to_numeric(df['comm_type'], errors='coerce')
                    comm_51_df = df[comm_type_num == 51].copy()
                    print(f"  - comm_type=51 的行数: {len(comm_51_df)}")

                    # Append the selected columns to our list
                    if not comm_51_df.empty:
                        files_with_comm_51 += 1
                        all_comm_data.append(comm_51_df[required_cols])
                        print(f"  - 已添加 {len(comm_51_df)} 行数据")

                except pd.errors.EmptyDataError:
                    # Zero-byte / header-less CSVs are expected; skip quietly.
                    print(f"  - 跳过空文件: {file}")
                    continue
                except Exception as e:
                    # Best-effort aggregation: log and move on to the next file.
                    print(f"  - 处理文件出错 {file_path}: {e}")

    print(f"总结:")
    print(f"处理的CSV文件总数: {processed_files}")
    print(f"包含comm_type=51数据的文件数: {files_with_comm_51}")
    print(f"收集到的数据块数: {len(all_comm_data)}")

    if not all_comm_data:
        print("No data found for comm_type 51.")
        return

    # Concatenate all dataframes into a single one
    final_df = pd.concat(all_comm_data, ignore_index=True)

    # Ensure the target directory exists
    os.makedirs(target_dir, exist_ok=True)

    # Define the output file path
    output_filename = 'lammps_51_real_comm_time.csv'
    output_path = os.path.join(target_dir, output_filename)

    # Save the final dataframe to a new CSV file
    final_df.to_csv(output_path, index=False)
    print(f"Successfully aggregated data and saved to {output_path}")
    print(f"Total records found: {len(final_df)}")


if __name__ == '__main__':
    # Input directory: per-node weak-scaling results to scan for CSVs.
    source_root = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\code\ML_Predict\results_with_cache\weak_scaling\500config\1-128node"
    # Output directory: where the aggregated CSV will be written.
    output_root = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\code\ML_Predict\utils\get_lammps_51_real_comm_time_result"

    # Run the aggregation over the configured directories.
    aggregate_comm_data(source_root, output_root)
