from pickle import NONE
from webbrowser import get
import sys
import os

# Add the parent directory to sys.path so sibling modules can be imported
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Local pipeline helpers (resolved via the sys.path fix above)
from process import process_log_file, process_folder, delete_processed_files, process_folders
from get_total_size_count import get_total_size_count
from get_clop_o_and_l_from_excel import read_excel_c_and_l_data

import pandas as pd
import matplotlib.pyplot as plt
import glob
import numpy as np
import re  # 模式匹配
import joblib
from pathlib import Path

from get_program_startup_cost import analyze_startup_cost, analyze_summary, process_data, read_program_startup_data
from extra_params_form_csv_name import extract_params_from_filename
import shared_vars

# Machine-learning model related imports
try:
    # Probe for scikit-learn; when absent the simple linear fallback is used
    from sklearn.ensemble import RandomForestRegressor
    ML_AVAILABLE = True
except ImportError:
    print("警告: 未找到scikit-learn，将使用备用预测方法")
    ML_AVAILABLE = False

class MLPredictor:
    """Machine-learning predictor for MPI communication latency.

    Loads a joblib-serialized model either from an explicit ``model_path``
    or, failing that, from the shared ``MachineLearning_common/models``
    location. When no model can be loaded (or scikit-learn is missing),
    predictions fall back to a simple linear model.
    """

    def __init__(self, model_path=None):
        # Always define every attribute so callers can inspect them safely
        # (previously feature_names/config were only set on the dict branch).
        self.model = None
        self.feature_names = None
        self.config = None
        self.model_loaded = False

        if model_path and os.path.exists(model_path):
            self._load_model(model_path)
        else:
            # Fall back to the shared MachineLearning_common model location:
            # four directory levels up from this file.
            ml_model_path = os.path.join(
                os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))),
                "MachineLearning_common", "models", "mpi_latency_model.pkl")
            if os.path.exists(ml_model_path):
                self._load_model(ml_model_path)
            else:
                print(f"未找到模型文件: {ml_model_path}")
                self.model_loaded = False

    def _load_model(self, path):
        """Load a joblib model file; accepts both a raw estimator and the
        dict format {'model': ..., 'feature_names': ..., 'config': ...}.

        Sets ``self.model_loaded`` accordingly; never raises.
        """
        try:
            model_data = joblib.load(path)
            if isinstance(model_data, dict) and 'model' in model_data:
                self.model = model_data['model']
                self.feature_names = model_data.get('feature_names', None)
                self.config = model_data.get('config', None)
            else:
                self.model = model_data
            self.model_loaded = True
            print(f"成功加载机器学习模型: {path}")
        except Exception as e:
            print(f"加载模型失败: {e}")
            self.model_loaded = False

    def predict_single(self, comm_type, message_size):
        """Predict the latency of one (comm_type, message_size) pair.

        Uses the loaded ML model when available, otherwise the linear
        fallback. The returned prediction is clamped to be non-negative.
        """
        if not self.model_loaded or not ML_AVAILABLE:
            # No usable ML model: fall back to the simple linear model
            return self._fallback_prediction(message_size)

        try:
            # Feature engineering: must stay consistent with training
            log_message_size = np.log(1 + message_size)

            # Message-size bucket: 0=small(<=1KB), 1=medium(<=16KB),
            # 2=large(<=512KB), 3=xlarge
            if message_size <= 1024:
                size_category = 0
            elif message_size <= 16384:
                size_category = 1
            elif message_size <= 524288:
                size_category = 2
            else:
                size_category = 3

            # Build a one-row DataFrame so feature names survive into
            # model.predict() (avoids sklearn feature-name warnings).
            feature_names = ['type', 'message_size', 'log_message_size', 'size_category_encoded']
            features_df = pd.DataFrame([[comm_type, message_size, log_message_size, size_category]],
                                       columns=feature_names)

            prediction = self.model.predict(features_df)[0]
            return max(0, prediction)  # latency cannot be negative

        except Exception as e:
            print(f"ML预测失败: {e}，使用备用方法")
            return self._fallback_prediction(message_size)

    def _fallback_prediction(self, message_size):
        """Fallback linear model O + L*size with empirically fitted O, L."""
        O = 1.076268
        L = 0.328143 * 0.001  # NOTE(review): 0.001 factor presumably converts units — confirm
        return O + L * message_size

# Global ML predictor instance, loaded once at import time
ml_predictor = MLPredictor()

# Obtain the ML-prediction result
def get_ML_Prediction_results(comm_size, comm_type):
    """Predict communication latency with the ML model.

    Current strategy: average the model's predictions over the four
    communication types. Falls back to the original C-LOP linear model
    when no ML model is available.
    """
    if not (ML_AVAILABLE and ml_predictor.model_loaded):
        # Fallback: original C-LOP linear model
        O = 1.076268
        L = 0.328143 * 0.001
        return O + L * comm_size

    # Average the predictions of the four communication types
    per_type = [ml_predictor.predict_single(type_id, comm_size) for type_id in (1, 2, 3, 4)]
    return np.mean(per_type)

def get_ML_Prediction_results_with_params(comm_size, comm_type, O, L):
    """ML latency prediction, keeping the original C-LOP-style interface.

    O and L are ignored by the ML model; they remain in the signature
    only so existing call sites do not break.
    """
    return get_ML_Prediction_results(comm_size, comm_type)

def get_non_block_prediction(comm_size, O, L):
    """Non-blocking communication prediction (original linear logic kept).

    Evaluates O + (L * 0.001) * comm_size; the 0.001 factor rescales L
    exactly as the original code did.
    """
    scaled_latency = L * 0.001
    return O + scaled_latency * comm_size

def get_ML_Prediction_results_for_single_time(file, row, comm_size, comm_type, O, L, time_map):
    """Single-invocation ML prediction (replaces the old C-LOP single prediction).

    Aggregated non-blocking rows (comm_type 51 with count > 1) go through
    the ML model; every other row is a lookup of the measured time in
    *time_map*, keyed by (node, proc, iteration, comm_type, comm_size)
    extracted from the file name. Returns 0 when no entry is found.
    """
    if comm_type == 51 and row['count'] > 1:
        return get_ML_Prediction_results_with_params(row['total_size'], row['comm_type'], O, L)

    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'), params.get('proc'), params.get('iteration'), int(comm_type), comm_size)
    # Single lookup instead of three repeated dict accesses
    measured = time_map.get(key)
    print(f"key:{key}  result:{measured}")

    if not measured:
        return 0
    return measured

def get_ML_Prediction_results_And_Startup_Cost(file, row, comm_size, comm_type, O, L, time_map):
    """Total-time ML prediction (replaces the old C-LOP prediction).

    For comm_type 51 with count > 1 the per-call ML prediction is scaled
    by the row's count; otherwise the measured time is looked up in
    *time_map*. Returns 0 when no entry is found.
    """
    if comm_type == 51 and row['count'] > 1:
        return get_ML_Prediction_results_with_params(row['total_size'], row['comm_type'], O, L) * row['count']

    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'), params.get('proc'), params.get('iteration'), int(comm_type), comm_size)
    # Single lookup instead of three repeated dict accesses
    measured = time_map.get(key)
    print(f"key:{key}  result:{measured}")
    if not measured:
        return 0
    return measured

def get_prediction_results_for_non_block_for_single_time(file, row, comm_size, comm_type, config_map_for_non_block):
    """Single-invocation prediction for non-blocking communication.

    Extracts the "<n>node-<m>proc" configuration token from the file
    name, looks up its (o, l) parameters and evaluates the linear model.
    Returns None when the file name does not match or the configuration
    is not in the map.
    """
    # Pattern: digits + "node-" + digits + "proc"
    match = re.search(r'(\d+node-\d+proc)', file)
    if not match:
        print(f"{file}中没有找到匹配模式 xnode-yproc")
        return None
    xnode_xproc = match.group(1)
    # Fetch the (o, l) configuration for this run
    config_info = config_map_for_non_block.get(xnode_xproc)
    if not config_info:
        print(f"配置映射中未找到 {xnode_xproc} 的配置")
        # Bug fix: previously fell through and crashed on config_info['o']
        return None
    O = config_info['o']
    L = config_info['l']
    print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")
    return get_non_block_prediction(comm_size, O, L)

def get_prediction_results_for_non_block(file, row, comm_size, comm_type, config_map_for_non_block):
    """Total-time prediction for non-blocking communication.

    Same lookup as the single-time variant, with the per-call prediction
    scaled by the row's count. Returns None when the file name does not
    match or the configuration is not in the map.
    """
    # Pattern: digits + "node-" + digits + "proc"
    match = re.search(r'(\d+node-\d+proc)', file)
    if not match:
        print(f"{file}中没有找到匹配模式 xnode-yproc")
        return None
    xnode_xproc = match.group(1)
    # Fetch the (o, l) configuration for this run
    config_info = config_map_for_non_block.get(xnode_xproc)
    if not config_info:
        print(f"配置映射中未找到 {xnode_xproc} 的配置")
        # Bug fix: previously fell through and crashed on config_info['o']
        return None
    O = config_info['o']
    L = config_info['l']
    print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")
    return get_non_block_prediction(comm_size, O, L) * row['count']

def get_prediction_results_for_single_time_from_time_map(file, row, comm_size, comm_type, time_map):
    """Look up the measured single-invocation time in *time_map*.

    Key: (node, proc, iteration, comm_type, comm_size) extracted from
    the file name. Returns 0 when the key is missing or maps to a falsy
    value.
    """
    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'), params.get('proc'), params.get('iteration'), int(comm_type), comm_size)
    # Single lookup instead of three repeated dict accesses
    measured = time_map.get(key)
    print(f"key:{key}  result:{measured}")
    if not measured:
        return 0
    return measured

def get_prediction_results_from_time_map(file, row, comm_size, comm_type, time_map):
    """Look up the measured time in *time_map* and scale by the row count.

    Key: (node, proc, iteration, comm_type, comm_size) extracted from
    the file name. Returns 0 when the key is missing or maps to a falsy
    value.
    """
    params = extract_params_from_filename(file)
    print(f"params:{params}")
    key = (params.get('node'), params.get('proc'), params.get('iteration'), int(comm_type), comm_size)
    # Single lookup instead of three repeated dict accesses
    measured = time_map.get(key)
    print(f"key:{key}  result:{measured}")
    if not measured:
        return 0
    return measured * row['count']

# Compute the ML prediction column from comm_size, count and the ML model
def add_ml_prediction_column_to_csv(results_dir):
    """Add an ml_prediction column to every CSV in *results_dir* (simplified version).

    Each row's prediction is get_ML_Prediction_results(total_size,
    comm_type) scaled by its count; a Total_ML_prediction summary row is
    appended and the result is written to the ./results directory next
    to this script. Returns None in all cases.
    """
    try:
        # All CSV files directly inside results_dir
        csv_files = glob.glob(os.path.join(results_dir, "*.csv"))

        if not csv_files:
            print(f"在目录 {results_dir} 中没有找到CSV文件")
            return None

        for file in csv_files:
            try:
                print(f"正在处理文件: {file}")
                df = pd.read_csv(file)

                # 'comm_type' added to the check: the per-row lambda reads
                # it, so a file without it previously failed inside apply()
                # instead of being skipped cleanly.
                required_columns = ['total_size', 'count', 'comm_type']
                if not all(col in df.columns for col in required_columns):
                    print(f"文件 {file} 缺少必要的列: {required_columns}")
                    continue

                # Per-row ML prediction, scaled by the occurrence count
                df['ml_prediction'] = df.apply(
                    lambda row: get_ML_Prediction_results(row['total_size'], row['comm_type']) * row['count'],
                    axis=1
                )

                # Grand total of all per-row predictions
                total_prediction = df['ml_prediction'].sum()

                # Summary row; columns absent here are filled with NaN by concat
                new_row = pd.DataFrame({
                    'comm_type': ['Total_ML_prediction'],
                    'count': [''],
                    'ml_prediction': [total_prediction]
                })
                df = pd.concat([df, new_row], ignore_index=True)

                # Print a few sample rows for verification
                print("\n数据示例：")
                print(df[['total_size', 'count', 'ml_prediction']].head())
                print("\n最后一行（总和）：")
                print(df[['total_size', 'count', 'ml_prediction']].iloc[-1])

                # Save to the ML_Predict/results directory
                output_dir = os.path.join(os.path.dirname(__file__), "results")
                os.makedirs(output_dir, exist_ok=True)
                output_file = os.path.join(output_dir, os.path.basename(file))
                df.to_csv(output_file, index=False)
                print(f"已成功添加 ml_prediction 列到文件: {output_file}")

            except Exception as e:
                print(f"处理文件 {file} 时出错: {str(e)}")
                continue

    except Exception as e:
        print(f"处理目录时出错: {str(e)}")
        return None

# Use the ML model together with time_map to obtain prediction results
def add_ml_prediction_column_to_csv_use_time_map(results_dir, config_map, time_map):
    """Add ML prediction columns using the ML model plus the time map
    (replaces the original C-LOP method).

    For every CSV in *results_dir*: compute per-row single-invocation and
    total predictions, a per-row precision error (%), append summary rows
    for comm_types 50/51/55 and an overall total, then write the file to
    the ./results directory next to this script. Returns None.
    """
    try:
        csv_files = glob.glob(os.path.join(results_dir, "*.csv"))
        if not csv_files:
            print(f"在目录 {results_dir} 中没有找到CSV文件")
            return None

        def type_summary_row(df, ct, total_real):
            """One summary row (totals, time share, error %) for comm_type *ct*."""
            real = df[df['comm_type'] == ct]['total_real_comm_time'].sum()
            pred = df[df['comm_type'] == ct]['ml_prediction_for_total_time'].sum()
            return pd.DataFrame({
                'comm_type': [f'Total_ML_prediction_comm_type_{ct}'],
                'count': [''],
                'ml_prediction_for_total_time': [pred],
                'total_real_comm_time': [real],
                'percentage': [real / total_real * 100],
                'precision_error': [abs(pred - real) / real * 100]
            })

        for file in csv_files:
            try:
                print(f"====================================正在处理文件==================================\n {file}\n")

                # Pattern: digits + "node-" + digits + "proc"
                match = re.search(r'(\d+node-\d+proc)', file)
                if not match:
                    print(f"{file}中没有找到匹配模式 xnode-yproc")
                    # Bug fix: was `return None`, which aborted the whole
                    # batch on one bad filename; skip this file instead,
                    # consistent with every other per-file failure.
                    continue
                xnode_xproc = match.group(1)
                # Fetch the (o, l) configuration for this run
                config_info = config_map.get(xnode_xproc)
                if not config_info:
                    print(f"配置映射中未找到 {xnode_xproc} 的配置")
                    continue
                O = config_info['o']
                L = config_info['l']
                print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")

                df = pd.read_csv(file)
                # Verify the required input columns
                required_columns = ['total_size', 'count']
                if not all(col in df.columns for col in required_columns):
                    print(f"文件 {file} 缺少必要的列: {required_columns}")
                    continue

                # Per-row single-invocation prediction
                df['ml_prediction_for_one_time'] = df.apply(
                    lambda row: get_ML_Prediction_results_for_single_time(file, row, row['total_size'], row['comm_type'], O, L, time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_for_single_time_from_time_map(file, row, row['total_size'], row['comm_type'], time_map),
                    axis=1
                )

                # Per-row total (count-scaled) prediction
                df['ml_prediction_for_total_time'] = df.apply(
                    lambda row: get_ML_Prediction_results_And_Startup_Cost(file, row, row['total_size'], row['comm_type'], O, L, time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_from_time_map(file, row, row['total_size'], row['comm_type'], time_map),
                    axis=1
                )

                # Relative error (%) against the measured communication time
                df['precision_error'] = df.apply(
                    lambda row: abs(row['total_real_comm_time'] - row['ml_prediction_for_total_time']) / row['total_real_comm_time'] * 100,
                    axis=1
                )

                # Grand totals over all rows
                total_prediction = df['ml_prediction_for_total_time'].sum()
                total_real_comm_time = df['total_real_comm_time'].sum()

                # Per-comm_type summary rows (50 / 51 / 55), built from the
                # data rows before any summary row is appended
                summary_rows = [type_summary_row(df, ct, total_real_comm_time) for ct in (50, 51, 55)]

                summary_rows.append(pd.DataFrame({
                    'comm_type': ['Total_ML_prediction'],
                    'count': [''],
                    'ml_prediction_for_total_time': [total_prediction],
                    'total_real_comm_time': [total_real_comm_time],
                    'precision_error': [abs(total_prediction - total_real_comm_time) / total_real_comm_time * 100]
                }))

                # Append all summary rows
                df = pd.concat([df] + summary_rows, ignore_index=True)

                # Print a few sample rows for verification
                print("\n数据示例：")
                print(df[['total_size', 'count', 'ml_prediction_for_total_time']].head())

                # Save to the ML_Predict/results directory
                output_dir = os.path.join(os.path.dirname(__file__), "results")
                os.makedirs(output_dir, exist_ok=True)
                output_file = os.path.join(output_dir, os.path.basename(file))
                df.to_csv(output_file, index=False)
                print(f"\n已成功添加 ml_prediction 列到文件: {output_file}\n")

            except Exception as e:
                print(f"处理文件 {file} 时出错: {str(e)}")
                continue

    except Exception as e:
        print(f"处理目录时出错: {str(e)}")
        return None

# Complete ML-based prediction function (replaces the original C-LOP function)
def add_ml_prediction_column_to_csv_complete(results_dir, config_map, config_map_for_non_block, time_map):
    """Complete ML prediction function (replaces the original
    add_c_lop_prediction_column_to_csv).

    Identical processing to add_ml_prediction_column_to_csv_use_time_map;
    *config_map_for_non_block* is accepted for interface compatibility
    but is not used by the current implementation. Returns None.
    """
    try:
        csv_files = glob.glob(os.path.join(results_dir, "*.csv"))
        if not csv_files:
            print(f"在目录 {results_dir} 中没有找到CSV文件")
            return None

        def type_summary_row(df, ct, total_real):
            """One summary row (totals, time share, error %) for comm_type *ct*."""
            real = df[df['comm_type'] == ct]['total_real_comm_time'].sum()
            pred = df[df['comm_type'] == ct]['ml_prediction_for_total_time'].sum()
            return pd.DataFrame({
                'comm_type': [f'Total_ML_prediction_comm_type_{ct}'],
                'count': [''],
                'ml_prediction_for_total_time': [pred],
                'total_real_comm_time': [real],
                'percentage': [real / total_real * 100],
                'precision_error': [abs(pred - real) / real * 100]
            })

        for file in csv_files:
            try:
                print(f"====================================正在处理文件==================================\n {file}\n")

                # Pattern: digits + "node-" + digits + "proc"
                match = re.search(r'(\d+node-\d+proc)', file)
                if not match:
                    print(f"{file}中没有找到匹配模式 xnode-yproc")
                    # Bug fix: was `return None`, which aborted the whole
                    # batch on one bad filename; skip this file instead.
                    continue
                xnode_xproc = match.group(1)
                # Fetch the (o, l) configuration for this run
                config_info = config_map.get(xnode_xproc)
                if not config_info:
                    print(f"配置映射中未找到 {xnode_xproc} 的配置")
                    continue
                O = config_info['o']
                L = config_info['l']
                print(f"运行配置:{xnode_xproc} 对应的O={O}, L={L}")

                df = pd.read_csv(file)
                # Verify the required input columns
                required_columns = ['total_size', 'count']
                if not all(col in df.columns for col in required_columns):
                    print(f"文件 {file} 缺少必要的列: {required_columns}")
                    continue

                # Per-row single-invocation prediction
                df['ml_prediction_for_one_time'] = df.apply(
                    lambda row: get_ML_Prediction_results_for_single_time(file, row, row['total_size'], row['comm_type'], O, L, time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_for_single_time_from_time_map(file, row, row['total_size'], row['comm_type'], time_map),
                    axis=1
                )

                # Per-row total (count-scaled) prediction
                df['ml_prediction_for_total_time'] = df.apply(
                    lambda row: get_ML_Prediction_results_And_Startup_Cost(file, row, row['total_size'], row['comm_type'], O, L, time_map)
                        if row['comm_type'] == 51
                        else get_prediction_results_from_time_map(file, row, row['total_size'], row['comm_type'], time_map),
                    axis=1
                )

                # Relative error (%) against the measured communication time
                df['precision_error'] = df.apply(
                    lambda row: abs(row['total_real_comm_time'] - row['ml_prediction_for_total_time']) / row['total_real_comm_time'] * 100,
                    axis=1
                )

                # Grand totals over all rows
                total_prediction = df['ml_prediction_for_total_time'].sum()
                total_real_comm_time = df['total_real_comm_time'].sum()

                # Per-comm_type summary rows (50 / 51 / 55), built from the
                # data rows before any summary row is appended
                summary_rows = [type_summary_row(df, ct, total_real_comm_time) for ct in (50, 51, 55)]

                summary_rows.append(pd.DataFrame({
                    'comm_type': ['Total_ML_prediction'],
                    'count': [''],
                    'ml_prediction_for_total_time': [total_prediction],
                    'total_real_comm_time': [total_real_comm_time],
                    'precision_error': [abs(total_prediction - total_real_comm_time) / total_real_comm_time * 100]
                }))

                # Append all summary rows
                df = pd.concat([df] + summary_rows, ignore_index=True)

                # Print a few sample rows for verification
                print("\n数据示例：")
                print(df[['total_size', 'count', 'ml_prediction_for_total_time']].head())

                # Save to the ML_Predict/results directory
                output_dir = os.path.join(os.path.dirname(__file__), "results")
                os.makedirs(output_dir, exist_ok=True)
                output_file = os.path.join(output_dir, os.path.basename(file))
                df.to_csv(output_file, index=False)
                print(f"\n已成功添加 ml_prediction 列到文件: {output_file}\n")

            except Exception as e:
                print(f"处理文件 {file} 时出错: {str(e)}")
                continue

    except Exception as e:
        print(f"处理目录时出错: {str(e)}")
        return None

# Return a map from configuration name to its o and l values
def ml_prediction_IN_all_kinds_of_o_l(config_path):
    """Return the {config_name: {'o': ..., 'l': ...}} mapping parsed from
    the Excel file at *config_path* (original logic kept)."""
    _, mapping = read_excel_c_and_l_data(config_path)
    return mapping

if __name__ == "__main__":
    # Input folders to analyze; the cross-cabinet set is an alternative.
    # folders = shared_vars.cross_cabinet_2node
    folders = shared_vars.folder_node1

    # Directory that receives the comm_type/count statistics generated
    # from the processed.csv files; the ML prediction step (5) then reads
    # and rewrites the CSV files found here.
    results_dir = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\code\ML_Predict\results\strong_scaling\1-16node\node1"

    # 1. Process the CSV files in the given folders, producing new
    #    "processed" files with total_size and appearance_time columns.
    print("=====================================1.process_folders===================================")
    process_folders(folders)
    
    # 2. From the processed files, aggregate total_size and the
    #    corresponding appearance_time into results_dir.
    print("====================================2.get_total_size_count================================")
    get_total_size_count(folders, results_dir)

    # 3. Load the per-configuration (o, l) parameters from Excel
    #    (kept from the original C-LOP pipeline for compatibility).
    print("==================================3.get_config_map===================================")
    clop_excel_path = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\static\dataset.xlsx"
    non_block_clop_excel_path = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\static\non_block_dataset.xlsx"
    config_map = ml_prediction_IN_all_kinds_of_o_l(clop_excel_path)
    print("\n解析后的配置映射:")
    for config, values in config_map.items():
        print(f"配置 {config}: o={values['o']}, l={values['l']}")

    config_map_for_non_block = ml_prediction_IN_all_kinds_of_o_l(non_block_clop_excel_path)
    print("\n解析后的非阻塞配置映射:")
    for config, values in config_map_for_non_block.items():
        print(f"配置 {config}: o={values['o']}, l={values['l']}")

    # 4. Load the program startup-cost table (time_map), keyed by
    #    (node, proc, iteration, comm_type, size).
    print("====================================4.get_time_map================================")
    grogram_csv_path = r"F:\PostGraduate\Point-to-Point-Code\App_Prediction\static\program_startup_cost.csv"
    time_map = read_program_startup_data(grogram_csv_path)
    print("\n程序启动开销表:")
    for key, value in time_map.items():
        print(f"{key}: {value}")

    # 5. Add the ml_prediction columns to the generated CSV files, using
    #    the ML model together with time_map.
    print("==================================5.add_ml_prediction_column_to_csv================================")
    
    # Obtain predictions via the ML model + time_map path
    add_ml_prediction_column_to_csv_use_time_map(results_dir, config_map, time_map)
    
    print("ML预测分析完成！结果已保存到ML_Predict/results目录")