import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import numpy as np
import re
from pathlib import Path


def read_excel_config(excel_path):
    """Load the per-configuration O/L parameters from an Excel sheet.

    The sheet is expected to contain the columns ``config``, ``o`` and ``l``
    (one row per configuration such as ``1node-2proc``).

    Returns a mapping like ``{'1node-2proc': {'o': value, 'l': value}, ...}``;
    on any read/parse failure the error is reported and an empty dict is
    returned instead of raising.
    """
    try:
        sheet = pd.read_excel(excel_path)
        return {
            row['config']: {'o': row['o'], 'l': row['l']}
            for _, row in sheet.iterrows()
        }
    except Exception as e:
        print(f"读取Excel配置文件时出错: {e}")
        return {}


def extract_node_proc_from_filename(filename):
    """Pull the node and process counts out of a result-file name.

    Handles names of the form ``xnode-yproc-cnxxxx-xiteration-zzzz`` or
    ``xnode-yproc-cn[xxx-xxx]-xiteration-zzz``.

    Returns ``(nodes, procs, "xnode-yproc")`` on success, or
    ``(None, None, None)`` when the pattern is absent.
    """
    found = re.search(r'(\d+)node-(\d+)proc', filename)
    if found is None:
        return None, None, None

    nodes = int(found.group(1))
    procs = int(found.group(2))
    return nodes, procs, f"{nodes}node-{procs}proc"


def collect_csv_files(base_dir):
    """Scan ``base_dir`` for per-node subfolders and group their CSV files.

    Only immediate subdirectories whose names start with ``<digits>node`` are
    searched. Files are grouped by the ``xnode-yproc`` key parsed from each
    file name; files whose names don't match the pattern are ignored.

    Returns ``{'1node-2proc': [file_paths, ...], ...}``.
    """
    groups = {}

    for entry in os.listdir(base_dir):
        entry_path = os.path.join(base_dir, entry)

        # Skip plain files and folders that don't follow the <N>node scheme.
        if not os.path.isdir(entry_path):
            continue
        if re.match(r'\d+node', entry) is None:
            continue

        print(f"处理子文件夹: {entry}")

        for csv_path in glob.glob(os.path.join(entry_path, "*.csv")):
            _, _, key = extract_node_proc_from_filename(os.path.basename(csv_path))
            if key:
                groups.setdefault(key, []).append(csv_path)

    return groups


def process_csv_group(csv_files, config_key):
    """Load, merge and filter the CSV files belonging to one configuration.

    Each file must provide the columns ``comm_type``, ``total_size`` and
    ``avg_comm_time``; files missing any of them (or failing to parse) are
    skipped with a message. The merged frame is then filtered down to rows
    where ``comm_type`` equals 55 — matched both as int and as str because
    the column dtype depends on the file contents — and ``total_size`` > 0.

    Returns the filtered DataFrame (with an added ``source_file`` column),
    or ``None`` when no file was usable or no row survives the filters.
    """
    required_columns = ['comm_type', 'total_size', 'avg_comm_time']
    all_data = []

    for csv_file in csv_files:
        try:
            df = pd.read_csv(csv_file)
        except Exception as e:
            print(f"  读取文件 {csv_file} 时出错: {e}")
            continue

        if not all(col in df.columns for col in required_columns):
            print(f"  文件 {csv_file} 缺少必要的列，跳过")
            print(f"  实际列: {list(df.columns)}")
            continue

        # Record which file each row came from for later traceability.
        df['source_file'] = os.path.basename(csv_file)
        all_data.append(df)

    if not all_data:
        print(f"  配置 {config_key}: 没有有效的CSV文件")
        return None

    combined_df = pd.concat(all_data, ignore_index=True)
    print(f"  配置 {config_key}: 合并后总数据量 {len(combined_df)} 条")

    # Diagnostics: the required columns are guaranteed present here (files
    # lacking them were skipped above), so only guard against zero rows.
    if len(combined_df) > 0:
        comm_types = combined_df['comm_type'].unique()
        print(f"  可用的comm_type值: {sorted([str(x) for x in comm_types])}")

        # Show how many rows match under either dtype interpretation.
        comm_55_count_int = len(combined_df[combined_df['comm_type'] == 55])
        comm_55_count_str = len(combined_df[combined_df['comm_type'] == '55'])
        print(f"  comm_type == 55 (整数) 的数据量: {comm_55_count_int}")
        print(f"  comm_type == '55' (字符串) 的数据量: {comm_55_count_str}")

        size_stats = combined_df['total_size'].describe()
        print(f"  total_size统计: min={size_stats['min']}, max={size_stats['max']}, mean={size_stats['mean']:.2f}")
        size_gt_20_count = len(combined_df[combined_df['total_size'] > 20])
        print(f"  total_size > 20 的数据量: {size_gt_20_count}")

    # Step 1: keep rows whose comm_type is 55, tolerating int or str dtype.
    filtered_df = combined_df[combined_df['comm_type'].isin([55, '55'])]
    print(f"  步骤1筛选后(comm_type == 55 或 '55'): {len(filtered_df)} 条数据")

    # Step 2: keep only rows with a positive payload size.
    filtered_df = filtered_df[filtered_df['total_size'] > 0]
    print(f"  步骤2筛选后(total_size > 0): {len(filtered_df)} 条数据")

    if filtered_df.empty:
        print(f"  配置 {config_key} 筛选后没有数据")
        return None

    print(f"  配置 {config_key}: 最终筛选后有 {len(filtered_df)} 条数据")
    return filtered_df


def create_scatter_plot(data, config_key, config_map, target_dir):
    """Plot avg_comm_time against total_size for one configuration and save it.

    ``data`` is the filtered DataFrame produced by ``process_csv_group``;
    nothing is drawn when it is ``None`` or empty. ``config_map`` is currently
    unused because the C-LoP model overlay below is disabled. The figure is
    written to ``<target_dir>/<config_key>_scatter_plot.png``.
    """
    if data is None or data.empty:
        return

    plt.figure(figsize=(10, 8))

    plt.scatter(
        data['total_size'],
        data['avg_comm_time'],
        alpha=0.6,
        s=50,
        label='Actual Data',
    )

    # Disabled feature: overlay the C-LoP model line y = O + L * x using the
    # O/L parameters looked up from config_map. Kept for future re-enabling:
    # config_info = config_map.get(config_key)
    # if config_info:
    #     O, L = config_info['o'], config_info['l']
    #     x_line = np.linspace(data['total_size'].min(), data['total_size'].max(), 100)
    #     plt.plot(x_line, O + L * x_line, 'r-', linewidth=2,
    #              label=f'C-LoP Model: y = {O:.6f} + {L:.6f}x')
    # else:
    #     print(f"未找到配置 {config_key} 的O和L参数")

    plt.xlabel('Total Size', fontsize=12)
    plt.ylabel('avg_comm_time', fontsize=12)
    plt.title(f'avg_comm_time - total_size\nConfiguration: {config_key}', fontsize=14)
    plt.legend()
    plt.grid(True, alpha=0.3)

    output_path = os.path.join(target_dir, f"{config_key}_scatter_plot.png")
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()

    print(f"已保存图片: {output_path}")


def create_summary_report(grouped_files, target_dir):
    """Write a plain-text summary of the discovered configuration groups.

    Emits one section per configuration, listing the number of CSV files
    and their base names, into ``<target_dir>/analysis_summary.txt``.
    """
    report_path = os.path.join(target_dir, "analysis_summary.txt")

    with open(report_path, 'w', encoding='utf-8') as report:
        report.write("=== 数据分析汇总报告 ===\n\n")
        report.write(f"总共发现 {len(grouped_files)} 个配置组:\n\n")

        for config_key, files in grouped_files.items():
            section = [
                f"配置: {config_key}\n",
                f"  文件数量: {len(files)}\n",
                "  文件列表:\n",
            ]
            section.extend(f"    - {os.path.basename(fp)}\n" for fp in files)
            section.append("\n")
            report.writelines(section)

    print(f"已生成汇总报告: {report_path}")


def inspect_sample_csv(grouped_files):
    """Print structural details for one sample CSV per configuration group.

    Only the first three groups are inspected; for each, the first file is
    read and its shape, columns, key value ranges and head rows are printed.
    Read/summary failures are reported and the next group is processed.
    """
    print("\n=== 检查样本CSV文件结构 ===")
    for config_key, csv_files in list(grouped_files.items())[:3]:
        if not csv_files:
            continue
        sample_file = csv_files[0]
        try:
            df = pd.read_csv(sample_file)
            print(f"\n配置 {config_key} 的样本文件: {os.path.basename(sample_file)}")
            print(f"  数据形状: {df.shape}")
            print(f"  列名: {list(df.columns)}")
            if 'comm_type' in df.columns:
                print(f"  comm_type唯一值: {sorted(df['comm_type'].unique())}")
            if 'total_size' in df.columns:
                print(f"  total_size范围: {df['total_size'].min()} - {df['total_size'].max()}")
            print(f"  前5行数据:")
            print(df.head())
        except Exception as e:
            print(f"  读取样本文件失败: {e}")


def main(base_dir, target_dir, excel_path):
    """Run the full analysis pipeline.

    Reads the O/L configuration from ``excel_path``, groups the CSV files
    found under ``base_dir``, writes a summary report, and renders one
    scatter plot per configuration group into ``target_dir`` (created if
    missing).
    """
    print("开始分析...")

    os.makedirs(target_dir, exist_ok=True)

    print("读取Excel配置文件...")
    config_map = read_excel_config(excel_path)
    print(f"读取到 {len(config_map)} 个配置")

    print("收集CSV文件...")
    grouped_files = collect_csv_files(base_dir)
    print(f"发现 {len(grouped_files)} 个配置组")

    # Show sample file structure and write the textual summary up front.
    inspect_sample_csv(grouped_files)
    create_summary_report(grouped_files, target_dir)

    # Filter each group's data and plot it.
    for config_key, csv_files in grouped_files.items():
        print(f"\n处理配置组: {config_key}")
        filtered = process_csv_group(csv_files, config_key)
        create_scatter_plot(filtered, config_key, config_map, target_dir)

    print(f"\n分析完成！结果已保存到: {target_dir}")


if __name__ == "__main__":
    # Input/output locations; adjust for the local environment.
    BASE_DIR = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_data\analysis_for_all_type_data\1-16nodes-data"
    TARGET_DIR = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_for_non_block\scatter_plots_for_non_block_data"
    EXCEL_PATH = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\static\dataset.xlsx"

    # Fail fast when required inputs are missing. Use `raise SystemExit(1)`
    # rather than the exit() builtin: exit() is an interactive helper added
    # by the site module and is not guaranteed to exist (e.g. python -S).
    if not os.path.exists(BASE_DIR):
        print(f"错误: 基础目录不存在 - {BASE_DIR}")
        raise SystemExit(1)

    if not os.path.exists(EXCEL_PATH):
        print(f"错误: Excel文件不存在 - {EXCEL_PATH}")
        raise SystemExit(1)

    # Run the pipeline.
    main(BASE_DIR, TARGET_DIR, EXCEL_PATH)