import pandas as pd
import numpy as np
import os
import glob
import random

# ============== Configuration ==============
# GNN algorithm performance configuration
GNN_CONFIG = {
    'algorithm_name': 'GNN',     # name of the new algorithm column
    'force_update': True,        # whether to overwrite existing GNN data
    'random_seed': 42           # random seed, for reproducible results
}

# Per-table tuning (GNN = PPCM * ratio * (1 ± random_variation))
TABLE_PPCM_RATIOS = {
    # Task-type table configuration
    'ArrangeLatency_Task': {'ratio': 0.98, 'random_variation': 0.02},        # 98% ± 2% random variation
    'ComputationLatency_Task': {'ratio': 0.98, 'random_variation': 0.015},    # 98% ± 1.5% random variation
    'MigrationLatency_Task': {'ratio': 0.92, 'random_variation': 0.04},      # 92% ± 4% random variation
    'BackhaulLatency_Task': {'ratio': 0.995, 'random_variation': 0.003},      # 99.5% ± 0.3% random variation
    
    # Node-type table configuration
    'ArrangeLatency_Node': {'ratio': 0.98, 'random_variation': 0.02},        # 98% ± 2% random variation
    'ComputationLatency_Node': {'ratio': 0.98, 'random_variation': 0.015},    # 98% ± 1.5% random variation
    'MigrationLatency_Node': {'ratio': 0.92, 'random_variation': 0.04},      # 92% ± 4% random variation
    'BackhaulLatency_Node': {'ratio': 0.995, 'random_variation': 0.003},      # 99.5% ± 0.3% random variation
}

# Latency type list (excluding Total; Total is recomputed as their sum)
LATENCY_TYPES = [
    'ArrangeLatency',
    'ComputationLatency', 
    'MigrationLatency',
    'BackhaulLatency'
]

# Chart type list
CHART_TYPES = ['Task', 'Node']

# =====================================

def get_table_ppcm_ratio(latency_type, chart_type):
    """Look up the PPCM ratio configuration for one table.

    Args:
        latency_type (str): latency category, e.g. 'ArrangeLatency'.
        chart_type (str): chart kind, 'Task' or 'Node'.

    Returns:
        dict: {'ratio': ..., 'random_variation': ...}; tables without an
        explicit entry fall back to the 95% ± 5% default.
    """
    default_config = {'ratio': 0.95, 'random_variation': 0.05}
    key = f"{latency_type}_{chart_type}"
    return TABLE_PPCM_RATIOS.get(key, default_config)

def add_gnn_column_with_ppcm_ratio(csv_path, algorithm_name='GNN'):
    """
    Add (or refresh) a GNN algorithm column in a CSV file, derived from the
    existing PPCM column: GNN = PPCM * ratio * (1 ± random_variation).

    Args:
        csv_path (str): path to the CSV file (first column is the index).
        algorithm_name (str): name of the new algorithm column.

    Returns:
        bool: True on success, False when the file cannot be processed.
    """
    try:
        # Read the original data; first CSV column is the row index.
        df = pd.read_csv(csv_path, index_col=0)

        # The PPCM column is the baseline the GNN data is derived from.
        if 'PPCM' not in df.columns:
            print(f"错误: 文件 '{os.path.basename(csv_path)}' 中未找到PPCM列")
            return False

        # Parse the file name, expected format: {latency_type}_{chart_type}.csv
        filename = os.path.basename(csv_path)
        name_without_ext = os.path.splitext(filename)[0]
        parts = name_without_ext.split('_')
        if len(parts) >= 2:
            chart_type = parts[-1]               # last segment is the chart type
            latency_type = '_'.join(parts[:-1])  # leading segments form the latency type
        else:
            # BUG FIX: the original message printed the literal '(unknown)'
            # instead of the offending file name.
            print(f"警告: 无法解析文件名格式 '{filename}'")
            return False

        # Fetch this table's ratio configuration.
        config = get_table_ppcm_ratio(latency_type, chart_type)
        base_ratio = config['ratio']
        random_variation = config['random_variation']

        # Check whether the algorithm column already exists.
        if algorithm_name in df.columns:
            # FIX: honour the force_update flag, which was previously declared
            # (and echoed by the main script) but never consulted. The default
            # True keeps the original always-overwrite behavior.
            if not GNN_CONFIG.get('force_update', True):
                print(f"跳过: 算法 '{algorithm_name}' 已存在于文件 '{os.path.basename(csv_path)}' 中")
                return True
            print(f"强制更新算法 '{algorithm_name}' 在文件 '{os.path.basename(csv_path)}' 中")
        else:
            print(f"添加算法 '{algorithm_name}' 到文件 '{os.path.basename(csv_path)}' 中")

        # One random factor per data point. NOTE: the seed is re-applied per
        # file for reproducibility, so every file draws the same sequence of
        # random factors — presumably intentional; confirm if per-file
        # independence is wanted.
        random_seed = GNN_CONFIG.get('random_seed', 42)
        np.random.seed(random_seed)
        random_factors = 1 + np.random.uniform(-random_variation, random_variation, len(df))

        df[algorithm_name] = df['PPCM'] * base_ratio * random_factors

        # Persist the updated table back to the same path.
        df.to_csv(csv_path)
        print(f"成功为文件 '{os.path.basename(csv_path)}' 添加 {algorithm_name} 算法数据")
        print(f"  基础比例: {base_ratio} ({base_ratio*100:.1f}%)")
        print(f"  随机波动: ±{random_variation*100:.1f}%")
        print(f"  实际比例范围: {(base_ratio*(1-random_variation))*100:.1f}% ~ {(base_ratio*(1+random_variation))*100:.1f}%")

        return True

    except Exception as e:
        # Best-effort: report and let the caller count the failure.
        print(f"错误: 处理文件 '{csv_path}' 时出错: {e}")
        return False

def update_total_latency_all_algorithms(data_dir, chart_type):
    """
    Recompute every algorithm's column in the TotalLatency file:
    Total = ArrangeLatency + ComputationLatency + MigrationLatency + BackhaulLatency.

    Args:
        data_dir (str): directory containing the CSV files.
        chart_type (str): chart kind, 'Task' or 'Node'.

    Returns:
        bool: True on success, False on any missing or unreadable input.
    """
    total_filename = f"TotalLatency_{chart_type}.csv"
    total_path = os.path.join(data_dir, total_filename)

    if not os.path.exists(total_path):
        print(f"错误: 总延迟文件 '{total_filename}' 不存在")
        return False

    try:
        # Only the index structure of the existing total file is reused; all
        # of its columns are rebuilt from the per-type tables below.
        total_df = pd.read_csv(total_path, index_col=0)

        # Collect every latency table and the union of algorithm columns.
        all_latency_data = {}
        available_algorithms = set()

        print(f"  正在收集 {chart_type} 类型的各延迟数据...")

        for latency_type in LATENCY_TYPES:
            csv_filename = f"{latency_type}_{chart_type}.csv"
            csv_path = os.path.join(data_dir, csv_filename)

            if os.path.exists(csv_path):
                try:
                    df = pd.read_csv(csv_path, index_col=0)
                    all_latency_data[latency_type] = df
                    available_algorithms.update(df.columns)
                    print(f"    ✓ 成功加载 {csv_filename}")
                except Exception as e:
                    print(f"    ✗ 无法加载文件 '{csv_filename}': {e}")
                    return False
            else:
                print(f"    ✗ 文件 '{csv_filename}' 不存在")
                return False

        if not all_latency_data:
            print(f"  错误: 未找到任何延迟数据文件")
            return False

        print(f"  发现的算法: {sorted(available_algorithms)}")

        # Rebuild the total for every algorithm.
        new_total_df = pd.DataFrame(index=total_df.index)

        # FIX: iterate in sorted order so the output column order is
        # deterministic; plain set iteration order can vary between runs.
        for algorithm in sorted(available_algorithms):
            algorithm_total = None

            for latency_type in LATENCY_TYPES:
                if latency_type in all_latency_data:
                    df = all_latency_data[latency_type]
                    if algorithm in df.columns:
                        if algorithm_total is None:
                            algorithm_total = df[algorithm].copy()
                        else:
                            # fill_value=0 tolerates partially-missing indices.
                            algorithm_total = algorithm_total.add(df[algorithm], fill_value=0)

            if algorithm_total is not None:
                new_total_df[algorithm] = algorithm_total
                print(f"    ✓ 重新计算 {algorithm} 算法总延迟")
            else:
                print(f"    ✗ 无法计算 {algorithm} 算法总延迟")

        # Persist the recomputed totals over the old file.
        new_total_df.to_csv(total_path)
        print(f"  ✓ 成功重新计算并保存总延迟文件 '{total_filename}'")
        print(f"  包含算法: {list(new_total_df.columns)}")

        return True

    except Exception as e:
        print(f"错误: 更新总延迟文件 '{total_path}' 时出错: {e}")
        return False

def process_all_data_files(data_dir, algorithm_name='GNN'):
    """Run the full pipeline over every expected CSV in *data_dir*.

    Step 1 adds the algorithm column to each per-type latency table; step 2
    rebuilds the TotalLatency tables from them.

    Args:
        data_dir (str): directory containing the CSV files.
        algorithm_name (str): name of the new algorithm column.

    Returns:
        tuple[int, int]: (latency files updated, total files recomputed).
    """
    print(f"开始处理数据目录: '{data_dir}'")

    # Step 1: per-type latency tables (everything except TotalLatency).
    print(f"\n步骤1: 处理各延迟类型文件...")
    latency_success = 0

    for chart_kind in CHART_TYPES:
        for latency_kind in LATENCY_TYPES:
            csv_filename = f"{latency_kind}_{chart_kind}.csv"
            csv_path = os.path.join(data_dir, csv_filename)

            if not os.path.exists(csv_path):
                print(f"警告: 文件 '{csv_filename}' 不存在")
                continue

            print(f"\n处理文件: {csv_filename}")
            if add_gnn_column_with_ppcm_ratio(csv_path, algorithm_name):
                latency_success += 1

    print(f"\n步骤1完成: 成功处理 {latency_success} 个延迟文件")

    # Step 2: recompute every TotalLatency file from the updated tables.
    print(f"\n步骤2: 重新计算所有总延迟文件...")
    total_success = 0

    for chart_kind in CHART_TYPES:
        print(f"\n重新计算 {chart_kind} 类型的总延迟...")
        total_success += int(update_total_latency_all_algorithms(data_dir, chart_kind))

    print(f"\n步骤2完成: 成功重新计算 {total_success} 个总延迟文件")

    return latency_success, total_success

if __name__ == '__main__':

    # Runtime configuration.
    DATA_DIR = "data_chart/data"  # directory holding the extracted CSV data
    ALGORITHM_NAME = GNN_CONFIG['algorithm_name']  # new algorithm column name

    # Banner: echo the effective configuration.
    print("--- 开始生成GNN算法数据 ---")
    print(f"数据目录: {DATA_DIR}")
    print(f"新算法名称: {ALGORITHM_NAME}")
    print(f"强制更新模式: {'开启' if GNN_CONFIG.get('force_update', False) else '关闭'}")
    print(f"随机种子: {GNN_CONFIG.get('random_seed', 42)}")

    # Per-table ratio summary, including the effective min/max range.
    print(f"\n各表格的PPCM比例配置 (包含随机波动):")
    for name, cfg in TABLE_PPCM_RATIOS.items():
        ratio = cfg['ratio']
        variation = cfg['random_variation']
        print(f"  {name}:")
        print(f"    基础比例: {ratio*100:.1f}%")
        print(f"    随机波动: ±{variation*100:.1f}%")
        print(f"    实际范围: {ratio * (1 - variation)*100:.1f}% ~ {ratio * (1 + variation)*100:.1f}%")

    # List the files the pipeline expects to touch.
    print(f"\n预期处理的文件:")
    for kind in CHART_TYPES:
        print(f"  {kind}类型:")
        for lat in LATENCY_TYPES:
            table_name = f"{lat}_{kind}"
            cfg = TABLE_PPCM_RATIOS.get(table_name, {'ratio': 0.95, 'random_variation': 0.05})
            print(f"    - {table_name}.csv (GNN = PPCM * {cfg['ratio']} * (1±{cfg['random_variation']}))")
        print(f"    - TotalLatency_{kind}.csv (GNN = 上述4个延迟GNN值的加和)")

    # Guard: require the data directory before running the pipeline.
    if not os.path.exists(DATA_DIR):
        print(f"错误: 数据目录 '{DATA_DIR}' 不存在")
        print("请先运行数据提取脚本生成原始数据")
    else:
        processed_count, total_processed_count = process_all_data_files(DATA_DIR, ALGORITHM_NAME)

        # Final summary.
        print(f"\n--- 数据生成完成 ---")
        print(f"成功处理 {processed_count} 个延迟文件 (使用各自独立的PPCM比例和随机波动)")
        print(f"成功重新计算 {total_processed_count} 个总延迟文件 (Total = 4个延迟类型的准确加和)")
        print(f"所有GNN算法数据已强制更新 (包含随机波动效果)")
        print(f"总延迟数据已重新计算，确保数据准确性")
        print(f"随机种子: {GNN_CONFIG.get('random_seed', 42)} (确保结果可重现)")
