from pickle import NONE
from webbrowser import get
# 修复相对导入路径
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from process import process_log_file, process_folder, delete_processed_files,process_folders
from get_total_size_count import get_total_size_count
from get_clop_o_and_l_from_excel import read_excel_c_and_l_data

import pandas as pd
import matplotlib.pyplot as plt
import glob
import numpy as np
import re  # 模式匹配

from get_program_startup_cost import analyze_startup_cost, analyze_summary, process_data,read_program_startup_data
from extra_params_form_csv_name import extract_params_from_filename
import shared_vars

# 导入ML预测器
from utils.ml_predictor import predict_ml_comm_time, get_ml_predictor

# Get the C-LOP prediction result with fixed LAMMPS model parameters.
# NOTE(review): this definition is shadowed by the 4-parameter
# get_C_LOP_Prediction_results defined later in this file, so callers
# resolve to that version at runtime.
def get_C_LOP_Prediction_results(comm_size,comm_type):
    """Predict communication time as O + L * comm_size.

    comm_type is accepted for interface compatibility; all communication
    types are modelled identically here.
    """
    # LAMMPS model parameters -- mind the units (L is converted from ms).
    overhead = 1.076268
    latency = 0.328143 * 0.001
    return overhead + latency * comm_size

# ML-based prediction (replaces C-LOP for comm_type == 51).
def get_ML_Prediction_results(comm_size, comm_type):
    """Predict communication latency with the machine-learning model.

    For comm_type == 51 the average prediction over the 4 communication
    types is used (comm_type=None requests the averaged prediction).
    Any exception from the predictor triggers a fallback to the
    fixed-parameter C-LOP formula.
    """
    try:
        # Averaged prediction over the 4 communication types.
        return predict_ml_comm_time(comm_size, comm_type=None)
    except Exception as e:
        print(f"ML预测出错，使用备用C-LOP预测: {e}")
        # Fallback: original C-LOP prediction (LAMMPS parameters).
        overhead = 1.076268
        latency = 0.328143 * 0.001
        return overhead + latency * comm_size

def get_C_LOP_Prediction_results(comm_size,comm_type,O,L):
    """C-LOP prediction with caller-supplied model parameters.

    O is the overhead term; L is the per-byte latency expressed in ms and
    converted to the working unit here. comm_type is accepted for interface
    compatibility -- every communication type is modelled the same way.
    """
    return O + (L * 0.001) * comm_size

def get_non_block_prediction(comm_size,O,L):
    """Linear non-blocking prediction: O + (L / 1000) * comm_size.

    L is given in ms and scaled to the working unit before use.
    """
    scaled_latency = L * 0.001
    return O + scaled_latency * comm_size


def get_ML_Prediciton_results_for_single_time(file,row,comm_size,comm_type,O,L,time_map):
    """Single-occurrence prediction (replaces the original per-call C-LOP).

    comm_type 51 is predicted with the ML model; every other type is looked
    up in the measured time_map keyed by
    (node, proc, iteration, comm_type, comm_size). Returns 0 when the key
    is absent (or maps to a falsy value).
    """
    if comm_type == 51:
        # ML model instead of C-LOP for point-to-point (51) traffic.
        return get_ML_Prediction_results(row['total_size'], row['comm_type'])

    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'),params.get('proc'),params.get('iteration'),int(comm_type),comm_size)
    print(f"key:{key}  result:{time_map.get(key)}")

    # Missing or falsy entries are reported as 0.
    return time_map.get(key) or 0

# Original C-LOP variant kept for compatibility with other communication types.
def get_C_LOP_Prediciton_results_for_single_time(file,row,comm_size,comm_type,O,L,time_map):
    """Single-occurrence prediction via the parameterised C-LOP model for
    comm_type 51; every other type is a time_map lookup (0 when absent).
    """
    if comm_type == 51:
        return get_C_LOP_Prediction_results(row['total_size'],row['comm_type'],O,L)

    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'),params.get('proc'),params.get('iteration'),int(comm_type),comm_size)
    print(f"key:{key}  result:{time_map.get(key)}")

    # Missing or falsy entries are reported as 0.
    return time_map.get(key) or 0
def get_ML_Prediction_results_And_Startup_Cost(file,row,comm_size,comm_type,O,L,time_map):
    """Total-time prediction (replaces the original C-LOP version).

    comm_type 51: ML per-call prediction scaled by the row's call count.
    Other types: measured value from time_map (0 when the key is absent).
    """
    if comm_type == 51:
        # ML model instead of C-LOP, multiplied by the number of calls.
        return get_ML_Prediction_results(row['total_size'], row['comm_type']) * row['count']

    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'),params.get('proc'),params.get('iteration'),int(comm_type),comm_size)
    print(f"key:{key}  result:{time_map.get(key)}")
    # Missing or falsy entries are reported as 0.
    return time_map.get(key) or 0

# Original C-LOP variant kept for compatibility with other communication types.
def get_C_LOP_Prediction_results_And_Startup_Cost(file,row,comm_size,comm_type,O,L,time_map):
    """Total-time C-LOP prediction.

    Note: unlike the ML variant, the comm_type 51 branch additionally
    requires count > 1; everything else is a time_map lookup (0 if absent).
    """
    if comm_type == 51 and row['count'] > 1:
        return get_C_LOP_Prediction_results(row['total_size'],row['comm_type'],O,L) * row['count']

    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'),params.get('proc'),params.get('iteration'),int(comm_type),comm_size)
    print(f"key:{key}  result:{time_map.get(key)}")
    # Missing or falsy entries are reported as 0.
    return time_map.get(key) or 0




def get_prediction_results_for_non_block_for_single_time(file,row,comm_size,comm_type,config_map_for_non_block):
    """Single-occurrence prediction for non-blocking communication.

    Extracts the "<n>node-<m>proc" run configuration from the file name,
    looks up its (o, l) parameters in config_map_for_non_block and evaluates
    the linear model. Returns None when the file name has no configuration
    pattern or the configuration is missing from the map.
    """
    # Match pattern: digits + "node-" + digits + "proc"
    match = re.search(r'(\d+node-\d+proc)', file)
    if not match:
        print(f"{file}中没有找到匹配模式 xnode-yproc")
        return None
    xnode_xproc = match.group(1)
    # Fetch the (o, l) parameters for this run configuration.
    config_info = config_map_for_non_block.get(xnode_xproc)
    if not config_info:
        print(f"配置映射中未找到 {xnode_xproc} 的配置")
        # Bug fix: previously fell through after the warning and crashed
        # on config_info['o'] (TypeError on None).
        return None
    O = config_info['o']
    L = config_info['l']
    print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")
    return get_non_block_prediction(comm_size,O,L)

def get_prediction_results_for_non_block(file,row,comm_size,comm_type,config_map_for_non_block):
    """Total-time prediction for non-blocking communication: the single-call
    linear model multiplied by the row's call count.

    Returns None when the file name lacks the "<n>node-<m>proc" pattern or
    the configuration is missing from config_map_for_non_block.
    """
    # Match pattern: digits + "node-" + digits + "proc"
    match = re.search(r'(\d+node-\d+proc)', file)
    if not match:
        print(f"{file}中没有找到匹配模式 xnode-yproc")
        return None
    xnode_xproc = match.group(1)
    # Fetch the (o, l) parameters for this run configuration.
    config_info = config_map_for_non_block.get(xnode_xproc)
    if not config_info:
        print(f"配置映射中未找到 {xnode_xproc} 的配置")
        # Bug fix: previously fell through after the warning and crashed
        # on config_info['o'] (TypeError on None).
        return None
    O = config_info['o']
    L = config_info['l']
    print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")
    return get_non_block_prediction(comm_size,O,L) * row['count']

def get_prediction_results_for_single_time_from_time_map(file,row,comm_size,comm_type,time_map):
    """Measured single-call time for this row, looked up in time_map by
    (node, proc, iteration, comm_type, comm_size); 0 when absent.
    """
    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'),params.get('proc'),params.get('iteration'),int(comm_type),comm_size)
    print(f"key:{key}  result:{time_map.get(key)}")
    # Missing or falsy entries are reported as 0.
    return time_map.get(key) or 0

def get_prediction_results_from_time_map(file,row,comm_size,comm_type,time_map):
    """Measured single-call time from time_map scaled by the row's call
    count; 0 when the key is absent.
    """
    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'),params.get('proc'),params.get('iteration'),int(comm_type),comm_size)
    print(f"key:{key}  result:{time_map.get(key)}")
    measured = time_map.get(key)
    if not measured:
        return 0
    return measured * row['count']

# Compute the C-LOP prediction from comm_size, count and the fixed C-LOP parameters.
def add_c_lop_prediction_column_to_csv(results_dir):
    """Add a 'c_lop_prediction' column plus a grand-total row to every CSV
    in results_dir, saving each file in place.

    NOTE(review): this definition is shadowed by the later 4-argument
    add_c_lop_prediction_column_to_csv in this file, so it is dead code.
    If it were called, the apply below would also fail: it passes 2
    arguments to get_C_LOP_Prediction_results while the surviving
    definition of that function takes 4.
    """
    try:
        # Collect every csv file under results_dir.
        csv_files = glob.glob(os.path.join(results_dir, "*.csv"))
        
        if not csv_files:
            print(f"在目录 {results_dir} 中没有找到CSV文件")
            return None
            
        for file in csv_files:
            try:
                print(f"正在处理文件: {file}")
                df = pd.read_csv(file)
                
                # Verify the required columns exist before computing.
                required_columns = ['total_size', 'count']
                if not all(col in df.columns for col in required_columns):
                    print(f"文件 {file} 缺少必要的列: {required_columns}")
                    continue
                
                # Compute c_lop_prediction row by row (per-call model * count).
                df['c_lop_prediction'] = df.apply(
                    lambda row: get_C_LOP_Prediction_results(row['total_size'],row['comm_type']) * row['count'], 
                    axis=1
                )
                
                # Sum of the prediction column for the summary row.
                total_prediction = df['c_lop_prediction'].sum()
                
                # Build the row carrying the total.
                new_row = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction'],
                    'count': [''],
                    'c_lop_prediction': [total_prediction]
                })
                
                # Append the total row to the DataFrame.
                df = pd.concat([df, new_row], ignore_index=True)
                
                # Print sample data for verification.
                print("\n数据示例：")
                print(df[['total_size', 'count', 'c_lop_prediction']].head())
                print("\n最后一行（总和）：")
                print(df[['total_size', 'count', 'c_lop_prediction']].iloc[-1])
                
                # Overwrite the original file with the augmented data.
                output_file = file
                df.to_csv(output_file, index=False)
                print(f"已成功添加 c_lop_prediction 列到文件: {file}")
                
            except Exception as e:
                print(f"处理文件 {file} 时出错: {str(e)}")
                continue
                
    except Exception as e:
        print(f"处理目录时出错: {str(e)}")
        return None



# Compute predictions from comm_size, count and the configured C-LOP parameters.
# The following cases are read from the measured time_map instead of a model:
# 1. comm_type = 50
# 2. comm_type = 51 and avg_appearance_time = 1
# 3. comm_type = 55 -> could later be improved with machine learning
def add_c_lop_prediction_column_to_csv_use_time_map(results_dir,config_map,time_map):
    """Add per-call and total prediction columns, per-row error columns and
    per-comm_type summary rows to every CSV in results_dir, in place.

    config_map maps "<n>node-<m>proc" -> {'o': ..., 'l': ...} C-LOP params;
    time_map maps (node, proc, iteration, comm_type, comm_size) -> measured
    time (presumably produced by read_program_startup_data -- confirm).
    """
    try:
        # Collect every csv file under results_dir.
        csv_files = glob.glob(os.path.join(results_dir, "*.csv"))
        if not csv_files:
            print(f"在目录 {results_dir} 中没有找到CSV文件")
            return None
            
        for file in csv_files:
            try:
                print(f"====================================正在处理文件==================================\n {file}\n")

                # Match pattern: digits + "node-" + digits + "proc"
                match = re.search(r'(\d+node-\d+proc)', file)
                if not match:
                    print(f"{file}中没有找到匹配模式 xnode-yproc")
                    # NOTE(review): returning here aborts all remaining files;
                    # a `continue` (as in the config-missing case below) may
                    # be the intended behavior -- confirm.
                    return None
                xnode_xproc = match.group(1)
                # Look up the (o, l) parameters for this run configuration.
                config_info = config_map.get(xnode_xproc)
                if not config_info:
                    print(f"配置映射中未找到 {xnode_xproc} 的配置")
                    continue
                O = config_info['o']
                L = config_info['l']
                print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")


                df = pd.read_csv(file)
                # Verify the required columns exist before computing.
                required_columns = ['total_size', 'count']
                if not all(col in df.columns for col in required_columns):
                    print(f"文件 {file} 缺少必要的列: {required_columns}")
                    continue


                # Per-row single-call prediction: ML model for comm_type 51,
                # time_map lookup for everything else.
                df['c_lop_prediction_for_one_time'] = df.apply(
                    lambda row: get_ML_Prediciton_results_for_single_time(file,row,row['total_size'],row['comm_type'],O,L,time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_for_single_time_from_time_map(file,row,row['total_size'],row['comm_type'],time_map),
                    axis=1
                )
                
                # Per-row total-time prediction: ML * count for comm_type 51,
                # time_map lookup * count for everything else.
                df['c_lop_prediction_for_total_time'] = df.apply(
                    lambda row: get_ML_Prediction_results_And_Startup_Cost(file,row,row['total_size'],row['comm_type'],O,L,time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_from_time_map(file,row,row['total_size'],row['comm_type'],time_map), 
                    axis=1
                )

                # Relative error (%) against the total measured time, per row.
                df['precision_error'] = df.apply(
                    lambda row: abs(row['total_real_comm_time'] - row['c_lop_prediction_for_total_time']) / row['total_real_comm_time'] * 100, 
                    axis=1
                )
                # Relative error (%) against the median measured time, per row.
                df['median_error'] = df.apply(
                    lambda row: abs(row['median_comm_time']-row['c_lop_prediction_for_total_time'])/row['median_comm_time'] * 100,
                    axis=1
                )
                
                # Overall column totals for the summary rows.
                total_prediction = df['c_lop_prediction_for_total_time'].sum()
                total_real_comm_time = df['total_real_comm_time'].sum()
                total_median_comm_time = df['median_comm_time'].sum()

                # Per-comm_type totals (real time and prediction).
                total_real_comm_time_for_comm_type_50 = df[df['comm_type'] == 50]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_50 = df[df['comm_type'] == 50]['c_lop_prediction_for_total_time'].sum()
                total_real_comm_time_for_comm_type_51 = df[df['comm_type'] == 51]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_51 = df[df['comm_type'] == 51]['c_lop_prediction_for_total_time'].sum()
                total_real_comm_time_for_comm_type_55 = df[df['comm_type'] == 55]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_55 = df[df['comm_type'] == 55]['c_lop_prediction_for_total_time'].sum()
                
                # median_comm_time totals per communication type.
                total_median_time_for_comm_type_50 = df[df['comm_type'] == 50]['median_comm_time'].sum()
                total_median_time_for_comm_type_51 = df[df['comm_type'] == 51]['median_comm_time'].sum()
                total_median_time_for_comm_type_55 = df[df['comm_type'] == 55]['median_comm_time'].sum()

                # Totals restricted to comm_type == 51 with count > 1.
                total_median_time_for_comm_type_51_count_gt_1 = df[(df['comm_type'] == 51) & (df['count'] > 1)]['median_comm_time'].sum()
                total_real_comm_time_for_comm_type_51_count_gt_1 = df[(df['comm_type'] == 51) & (df['count'] > 1)]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_51_count_gt_1 = df[(df['comm_type'] == 51) & (df['count'] > 1)]['c_lop_prediction_for_total_time'].sum()
                

                # Summary rows carrying the aggregated figures.
                new_row_comm_type_50 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_50'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_50],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_50],
                    'median_comm_time':[total_median_time_for_comm_type_50],
                    'percentage': [total_real_comm_time_for_comm_type_50 / total_real_comm_time * 100],
                    'precision_error': [abs(total_prediction_for_comm_type_50-total_real_comm_time_for_comm_type_50) / total_real_comm_time_for_comm_type_50 * 100]
                    
                })
                new_row_comm_type_51 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_51'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_51],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_51],
                    'median_comm_time':[total_median_time_for_comm_type_51],
                    'percentage': [total_real_comm_time_for_comm_type_51 / total_real_comm_time * 100],
                    'precision_error': [abs(total_prediction_for_comm_type_51-total_real_comm_time_for_comm_type_51) / total_real_comm_time_for_comm_type_51 * 100],
                    'median_error': [abs(total_median_time_for_comm_type_51-total_prediction_for_comm_type_51)/total_median_time_for_comm_type_51 * 100]
                })

                new_row_comm_type_51_count_gt_1 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_51_count_gt_1'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_51_count_gt_1],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_51_count_gt_1],
                    'median_comm_time':[total_median_time_for_comm_type_51_count_gt_1],
                    'percentage': [total_real_comm_time_for_comm_type_51_count_gt_1 / total_real_comm_time * 100],
                    'precision_error': [abs(total_real_comm_time_for_comm_type_51_count_gt_1-total_prediction_for_comm_type_51_count_gt_1) / total_real_comm_time_for_comm_type_51_count_gt_1 * 100],
                    'median_error': [abs(total_median_time_for_comm_type_51_count_gt_1-total_prediction_for_comm_type_51_count_gt_1)/total_median_time_for_comm_type_51_count_gt_1 * 100]
                })
                new_row_comm_type_55 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_55'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_55],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_55],
                    'median_comm_time':[total_median_time_for_comm_type_55],
                    'percentage': [total_real_comm_time_for_comm_type_55 / total_real_comm_time * 100],
                    'precision_error': [abs(total_prediction_for_comm_type_55-total_real_comm_time_for_comm_type_55) / total_real_comm_time_for_comm_type_55 * 100]
                })

                new_row_total = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction],
                    'total_real_comm_time':[total_real_comm_time],
                    'median_comm_time':[total_median_comm_time],
                    'precision_error': [abs(total_prediction-total_real_comm_time) / total_real_comm_time * 100]
                })
                
                # Append the summary rows to the DataFrame.
                df = pd.concat([df, new_row_comm_type_50], ignore_index=True)
                df = pd.concat([df, new_row_comm_type_51], ignore_index=True)
                df = pd.concat([df, new_row_comm_type_51_count_gt_1], ignore_index=True)
                df = pd.concat([df, new_row_comm_type_55], ignore_index=True)
                df = pd.concat([df, new_row_total], ignore_index=True)
                
                # Print sample data for verification.
                print("\n数据示例：")
                print(df[['total_size', 'count', 'c_lop_prediction_for_total_time']].head())
                # print("\n最后一行（总和）：")
                # print(df[['total_size', 'count', 'c_lop_prediction']].iloc[-1])
                
                # Overwrite the original file with the augmented data.
                output_file = file
                df.to_csv(output_file, index=False)
                print(f"\n已成功添加 c_lop_prediction 列到文件\n")
                
            except Exception as e:
                print(f"处理文件 {file} 时出错: {str(e)}")
                continue
                
    except Exception as e:
        print(f"处理目录时出错: {str(e)}")
        return None



# Compute predictions from comm_size, count and the configured C-LOP parameters.
def add_c_lop_prediction_column_to_csv(results_dir,config_map,config_map_for_non_block,time_map):
    """Add per-call and total prediction columns, a per-row error column and
    per-comm_type summary rows to every CSV in results_dir, saving each file
    in place.

    config_map maps "<n>node-<m>proc" -> {'o': ..., 'l': ...} C-LOP params;
    config_map_for_non_block is accepted for interface compatibility but not
    used in this code path; time_map maps
    (node, proc, iteration, comm_type, comm_size) -> measured time.
    """
    try:
        # Collect every csv file under results_dir.
        csv_files = glob.glob(os.path.join(results_dir, "*.csv"))
        if not csv_files:
            print(f"在目录 {results_dir} 中没有找到CSV文件")
            return None
            
        for file in csv_files:
            try:
                print(f"====================================正在处理文件==================================\n {file}\n")

                # Match pattern: digits + "node-" + digits + "proc"
                match = re.search(r'(\d+node-\d+proc)', file)
                if not match:
                    print(f"{file}中没有找到匹配模式 xnode-yproc")
                    # NOTE(review): aborts the remaining files, mirroring the
                    # sibling use_time_map variant -- confirm this is intended.
                    return None
                xnode_xproc = match.group(1)
                # Look up the (o, l) parameters for this run configuration.
                config_info = config_map.get(xnode_xproc)
                if not config_info:
                    print(f"配置映射中未找到 {xnode_xproc} 的配置")
                    continue
                O = config_info['o']
                L = config_info['l']
                print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")


                df = pd.read_csv(file)
                # Verify the required columns exist before computing.
                required_columns = ['total_size', 'count']
                if not all(col in df.columns for col in required_columns):
                    print(f"文件 {file} 缺少必要的列: {required_columns}")
                    continue


                # Per-row single-call prediction: ML model for comm_type 51,
                # time_map lookup for everything else.
                df['c_lop_prediction_for_one_time'] = df.apply(
                    lambda row: get_ML_Prediciton_results_for_single_time(file,row,row['total_size'],row['comm_type'],O,L,time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_for_single_time_from_time_map(file,row,row['total_size'],row['comm_type'],time_map),
                    axis=1
                )
                
                # Per-row total-time prediction: ML * count for comm_type 51,
                # time_map lookup * count for everything else.
                df['c_lop_prediction_for_total_time'] = df.apply(
                    lambda row: get_ML_Prediction_results_And_Startup_Cost(file,row,row['total_size'],row['comm_type'],O,L,time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_from_time_map(file,row,row['total_size'],row['comm_type'],time_map), 
                    axis=1
                )

                # Relative error (%) against the total measured time, per row.
                df['precision_error'] = df.apply(
                    lambda row: abs(row['total_real_comm_time'] - row['c_lop_prediction_for_total_time']) / row['total_real_comm_time'] * 100, 
                    axis=1
                )

                
                # Overall column totals for the summary rows.
                total_prediction = df['c_lop_prediction_for_total_time'].sum()
                total_real_comm_time = df['total_real_comm_time'].sum()

                # Per-comm_type totals (real time and prediction).
                total_real_comm_time_for_comm_type_50 = df[df['comm_type'] == 50]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_50 = df[df['comm_type'] == 50]['c_lop_prediction_for_total_time'].sum()
                total_real_comm_time_for_comm_type_51 = df[df['comm_type'] == 51]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_51 = df[df['comm_type'] == 51]['c_lop_prediction_for_total_time'].sum()
                total_real_comm_time_for_comm_type_55 = df[df['comm_type'] == 55]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_55 = df[df['comm_type'] == 55]['c_lop_prediction_for_total_time'].sum()

                # median_comm_time totals per communication type.
                # Bug fix: these filters previously selected on the non-existent
                # 'comm_time' column, raising KeyError and skipping every file
                # via the except below; they now filter on 'comm_type' like the
                # rest of this function and its use_time_map sibling.
                total_median_time_for_comm_type_50 = df[df['comm_type'] == 50]['median_comm_time'].sum()
                total_median_time_for_comm_type_51 = df[df['comm_type'] == 51]['median_comm_time'].sum()
                total_median_time_for_comm_type_55 = df[df['comm_type'] == 55]['median_comm_time'].sum()

                # Totals restricted to comm_type == 51 with count > 1.
                total_median_time_for_comm_type_51_count_gt_1 = df[(df['comm_type'] == 51) & (df['count'] > 1)]['median_comm_time'].sum()
                total_real_comm_time_for_comm_type_51_count_gt_1 = df[(df['comm_type'] == 51) & (df['count'] > 1)]['total_real_comm_time'].sum()
                total_prediction_for_comm_type_51_count_gt_1 = df[(df['comm_type'] == 51) & (df['count'] > 1)]['c_lop_prediction_for_total_time'].sum()
                
                # Summary rows carrying the aggregated figures.
                new_row_comm_type_50 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_50'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_50],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_50],
                    'median_comm_time':[total_median_time_for_comm_type_50],
                    'percentage': [total_real_comm_time_for_comm_type_50 / total_real_comm_time * 100],
                    'precision_error': [abs(total_prediction_for_comm_type_50-total_real_comm_time_for_comm_type_50) / total_real_comm_time_for_comm_type_50 * 100]
                })
                new_row_comm_type_51 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_51'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_51],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_51],
                    'median_comm_time':[total_median_time_for_comm_type_51],
                    'percentage': [total_real_comm_time_for_comm_type_51 / total_real_comm_time * 100],
                    'precision_error': [abs(total_prediction_for_comm_type_51-total_real_comm_time_for_comm_type_51) / total_real_comm_time_for_comm_type_51 * 100],
                    'median_error': [abs(total_prediction_for_comm_type_51-total_median_time_for_comm_type_51) / total_median_time_for_comm_type_51 * 100],
                })
                new_row_comm_type_51_count_gt_1 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_51_gt_1'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_51_count_gt_1],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_51_count_gt_1],
                    # Bug fix: previously used the all-51 median total; use the
                    # count>1 subset, as in the use_time_map sibling.
                    'median_comm_time':[total_median_time_for_comm_type_51_count_gt_1],
                    # Bug fix: 'percentage' previously held an error figure; it
                    # now reports the share of total real time, and the error
                    # figures match the use_time_map sibling's definitions.
                    'percentage': [total_real_comm_time_for_comm_type_51_count_gt_1 / total_real_comm_time * 100],
                    'precision_error': [abs(total_real_comm_time_for_comm_type_51_count_gt_1-total_prediction_for_comm_type_51_count_gt_1) / total_real_comm_time_for_comm_type_51_count_gt_1 * 100],
                    'median_error': [abs(total_median_time_for_comm_type_51_count_gt_1-total_prediction_for_comm_type_51_count_gt_1)/total_median_time_for_comm_type_51_count_gt_1 * 100]
                })
                new_row_comm_type_55 = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction_comm_type_55'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction_for_comm_type_55],
                    'total_real_comm_time':[total_real_comm_time_for_comm_type_55],
                    'median_comm_time':[total_median_time_for_comm_type_55],
                    'percentage': [total_real_comm_time_for_comm_type_55 / total_real_comm_time * 100],
                    'precision_error': [abs(total_prediction_for_comm_type_55-total_real_comm_time_for_comm_type_55) / total_real_comm_time_for_comm_type_55 * 100]
                })

                new_row_total = pd.DataFrame({
                    'comm_type': ['Total_C-lop_prediction'],
                    'count': [''],
                    'c_lop_prediction_for_total_time': [total_prediction],
                    'total_real_comm_time':[total_real_comm_time],
                    'precision_error': [abs(total_prediction-total_real_comm_time) / total_real_comm_time * 100]
                })
                
                # Append the summary rows to the DataFrame.
                # Bug fix: new_row_comm_type_51_count_gt_1 was built but never
                # appended; it is now included, matching the use_time_map sibling.
                df = pd.concat([df, new_row_comm_type_50], ignore_index=True)
                df = pd.concat([df, new_row_comm_type_51], ignore_index=True)
                df = pd.concat([df, new_row_comm_type_51_count_gt_1], ignore_index=True)
                df = pd.concat([df, new_row_comm_type_55], ignore_index=True)
                df = pd.concat([df, new_row_total], ignore_index=True)
                
                # Print sample data for verification.
                print("\n数据示例：")
                print(df[['total_size', 'count', 'c_lop_prediction_for_total_time']].head())
                # print("\n最后一行（总和）：")
                # print(df[['total_size', 'count', 'c_lop_prediction']].iloc[-1])
                
                # Overwrite the original file with the augmented data.
                output_file = file
                df.to_csv(output_file, index=False)
                print(f"\n已成功添加 c_lop_prediction 列到文件\n")
                
            except Exception as e:
                print(f"处理文件 {file} 时出错: {str(e)}")
                continue
                
    except Exception as e:
        print(f"处理目录时出错: {str(e)}")
        return None

# Return a map keyed by configuration name whose values are {'o', 'l'} dicts.
#   key:   "1node-8proc"
#   value: {'o': 0.1724, 'l': 0.6829}
def c_lop_prediction_IN_all_kinds_of_o_l(config_path):
    """Read the per-configuration (o, l) C-LOP parameters from the Excel file
    at config_path and return the configuration map."""
    _df, config_map = read_excel_c_and_l_data(config_path)
    return config_map
   


if __name__ == "__main__":

    # Input folders with the raw profiling data for this experiment.
    # folders = shared_vars.cross_cabinet_2node
    folders = shared_vars.weak_scaling_folder_100config_folder_node128

    # Directory that receives the results: the comm_type/count statistics
    # generated from the processed.csv files.  The C-LOP prediction step
    # later reads the CSVs back from this directory.
    # results_dir = "F:/PostGraduate/Point-to-Point-DATA/deal-data-code/C-lop-Prediction/analysis_results_for_all_condition"
    results_dir = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\analysis_weak_scaling\analysis_for_100atom_per_proc\128node"

    # 1. Process the csv files in the given folders, generating new
    #    "processed" files with total_size and appearance_time columns added.

    # # root_path = r"F:/PostGraduate/Point-to-Point-DATA/tiny-nodes-by-hzpProfile"
    # root_path = ""
    # # Walk every sub-folder under the root directory.
    # if root_path is not None and os.path.exists(root_path):
    #     for root, dirs, files in os.walk(root_path):
    #         # Process a folder as soon as it contains CSV files.
    #         if any(file.endswith('.csv') for file in files):
    #             print(f"\n正在处理文件夹: {root}")
    #             process_folder(root)

    print("=====================================1.process_folders===================================")
    process_folders(folders)
    
    # 2. From the processed files, collect total_size and its appearance_time.
    print("====================================2.get_total_size_count================================")
    get_total_size_count(folders, results_dir)

    # 3. Load the (o, l) C-LOP parameters into config_map.
    print("==================================3.get_config_map===================================")
    clop_excel_path = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\static\dataset.xlsx"
    non_block_clop_excel_path = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\static\non_block_dataset.xlsx"
    config_map = c_lop_prediction_IN_all_kinds_of_o_l(clop_excel_path)
    print("\n解析后的配置映射:")
    for config, values in config_map.items():
        print(f"配置 {config}: o={values['o']}, l={values['l']}")

    config_map_for_non_block = c_lop_prediction_IN_all_kinds_of_o_l(non_block_clop_excel_path)
    print("\n解析后的非阻塞配置映射:")
    for config, values in config_map_for_non_block.items():
        print(f"配置 {config}: o={values['o']}, l={values['l']}")


    # 4. Load the program-startup-cost table, which mainly covers:
    #    1. comm_type = 50
    #    2. comm_type = 51 and mean_appearance_time = 1
    #    3. comm_type = 55 -> could later be modelled with machine learning
    print("====================================4.get_time_map================================")
    grogram_csv_path = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\static\program_startup_cost.csv"
    time_map = read_program_startup_data(grogram_csv_path)
    print("\n程序启动开销表:")
    for key, value in time_map.items():
        print(f"{key}: {value}")

    # 5. Add the new c_lop_prediction columns to the generated csv files.
    print("==================================5.add_c_lop_prediction_column_to_csv================================")
    # Blocking communication only:
    # add_c_lop_prediction_column_to_csv(results_dir,config_map,NONE,time_map)
    # Blocking and non-blocking communication:
    # add_c_lop_prediction_column_to_csv(results_dir,config_map,config_map_for_non_block,time_map)

    # Use the time_map to obtain non-blocking and comm_type = 50 times.
    add_c_lop_prediction_column_to_csv_use_time_map(results_dir,config_map,time_map)
