import pandas as pd
import matplotlib.pyplot as plt
import os
import glob
import numpy as np
import re
from pathlib import Path
from sklearn.linear_model import LinearRegression
from scipy import stats


def read_excel_config(excel_path):
    """Load per-configuration o/l parameters from an Excel sheet.

    The sheet is expected to provide the columns ``config``, ``o`` and
    ``l``.  Returns a mapping such as
    ``{'1node-2proc': {'o': value, 'l': value}, ...}``; an empty dict is
    returned when the file cannot be read or parsed.
    """
    try:
        sheet = pd.read_excel(excel_path)
        # One entry per row, keyed by the configuration label.
        return {
            row['config']: {'o': row['o'], 'l': row['l']}
            for _, row in sheet.iterrows()
        }
    except Exception as e:
        print(f"读取Excel配置文件时出错: {e}")
        return {}


def extract_node_proc_from_filename(filename):
    """Parse node and process counts out of a result-file name.

    Recognised shapes include ``xnode-yproc-cnXXXX-...`` and
    ``xnode-yproc-cn[...]-...``.  Returns ``(nodes, procs, key)`` where
    ``key`` is the normalised ``"{nodes}node-{procs}proc"`` string, or
    ``(None, None, None)`` when the pattern is absent.
    """
    found = re.search(r'(\d+)node-(\d+)proc', filename)
    if found is None:
        return None, None, None

    nodes = int(found.group(1))
    procs = int(found.group(2))
    return nodes, procs, f"{nodes}node-{procs}proc"


def collect_csv_files(base_dir):
    """Group every CSV under *base_dir* by its node/proc configuration.

    Only immediate sub-directories whose names start with ``<N>node`` are
    scanned.  Returns ``{'1node-2proc': [csv_path, ...], ...}``; files
    whose names do not contain a recognisable configuration are ignored.
    """
    grouped_files = {}

    for entry in os.listdir(base_dir):
        entry_path = os.path.join(base_dir, entry)

        # Skip plain files and folders that do not follow the xnode naming.
        if not os.path.isdir(entry_path):
            continue
        if not re.match(r'\d+node', entry):
            continue

        print(f"处理子文件夹: {entry}")

        for csv_path in glob.glob(os.path.join(entry_path, "*.csv")):
            _, _, key = extract_node_proc_from_filename(os.path.basename(csv_path))
            if key:
                grouped_files.setdefault(key, []).append(csv_path)

    return grouped_files


def process_csv_group_for_average(csv_files, config_key):
    """Merge one configuration's CSV files and average the comm time.

    Each file must carry the columns ``comm_type``, ``total_size`` and
    ``avg_comm_time``; files missing them (or unreadable) are skipped with
    a message.  Rows are kept only when ``comm_type`` equals 55 (int or
    string form) and ``total_size`` exceeds 20; survivors are grouped by
    ``total_size`` and their ``avg_comm_time`` averaged.

    Returns the sorted per-size average DataFrame, or ``None`` when no
    usable data remains.
    """
    frames = []
    needed = ['comm_type', 'total_size', 'avg_comm_time']

    for csv_file in csv_files:
        try:
            frame = pd.read_csv(csv_file)

            if not all(col in frame.columns for col in needed):
                print(f"  文件 {csv_file} 缺少必要的列，跳过")
                continue

            # Remember where each row came from for later inspection.
            frame['source_file'] = os.path.basename(csv_file)
            frames.append(frame)
        except Exception as e:
            print(f"  读取文件 {csv_file} 时出错: {e}")
            continue

    if not frames:
        print(f"  配置 {config_key}: 没有有效的CSV文件")
        return None

    merged = pd.concat(frames, ignore_index=True)
    print(f"  配置 {config_key}: 合并后总数据量 {len(merged)} 条")

    # comm_type may arrive as int or str depending on the CSV contents.
    kept = merged[(merged['comm_type'] == 55) | (merged['comm_type'] == '55')]
    print(f"  步骤1筛选后(comm_type == 55 或 '55'): {len(kept)} 条数据")

    kept = kept[kept['total_size'] > 20]
    print(f"  步骤2筛选后(total_size > 20): {len(kept)} 条数据")

    if kept.empty:
        print(f"  配置 {config_key} 筛选后没有数据")
        return None

    averaged = kept.groupby('total_size')['avg_comm_time'].mean().reset_index()
    averaged = averaged.sort_values('total_size')

    print(f"  配置 {config_key}: 计算平均值后有 {len(averaged)} 个数据点")
    return averaged


def perform_linear_regression(data, config_key):
    """Fit ``avg_comm_time = o + l * total_size`` by ordinary least squares.

    Uses ``scipy.stats.linregress`` (scipy is already imported at module
    level) instead of pulling in scikit-learn for a simple 1-D fit — same
    closed-form OLS solution, one fewer heavy dependency.

    Parameters
    ----------
    data : pandas.DataFrame or None
        Must contain ``total_size`` and ``avg_comm_time`` columns.
    config_key : str
        Label used only for log messages and the result record.

    Returns
    -------
    dict or None
        ``{'config', 'o', 'l', 'r2_score', 'rmse', 'mae', 'data_points'}``
        where ``o`` is the intercept and ``l`` the slope, or ``None`` when
        fewer than two data points are available.
    """
    if data is None or data.empty or len(data) < 2:
        print(f"  配置 {config_key}: 数据不足，无法进行线性拟合")
        return None

    x = data['total_size'].values.astype(float)
    y = data['avg_comm_time'].values.astype(float)

    # Closed-form least-squares fit: slope, intercept and correlation.
    fit = stats.linregress(x, y)
    o = fit.intercept        # intercept (value at total_size == 0)
    l = fit.slope            # slope (cost per byte)
    r2_score = fit.rvalue ** 2  # R² equals the squared correlation for 1-D OLS

    # Fit-quality metrics computed from the residuals.
    y_pred = o + l * x
    rmse = np.sqrt(np.mean((y - y_pred) ** 2))
    mae = np.mean(np.abs(y - y_pred))

    print(f"  配置 {config_key}: 线性拟合完成")
    print(f"    o (截距) = {o:.6f}")
    print(f"    l (斜率) = {l:.6f}")
    print(f"    R² = {r2_score:.4f}")
    print(f"    RMSE = {rmse:.6f}")
    print(f"    MAE = {mae:.6f}")

    return {
        'config': config_key,
        'o': o,
        'l': l,
        'r2_score': r2_score,
        'rmse': rmse,
        'mae': mae,
        'data_points': len(data)
    }


def create_line_plot_with_regression(all_config_data, regression_results, target_dir):
    """Draw every configuration's averaged data plus its fitted line.

    One scatter series (and, when a fit exists, a dashed regression line)
    is drawn per configuration on shared log-log axes, then the figure is
    saved as a single PNG inside *target_dir*.
    """
    plt.figure(figsize=(16, 12))

    palette = ['blue', 'red', 'green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan']
    shapes = ['o', 's', '^', 'D', 'v', '<', '>', 'p', '*', 'h']

    series_no = 0
    for config_key, data in all_config_data.items():
        if data is None or data.empty:
            continue

        color = palette[series_no % len(palette)]
        shape = shapes[series_no % len(shapes)]

        # Raw averaged measurement points for this configuration.
        plt.scatter(data['total_size'], data['avg_comm_time'],
                   color=color, marker=shape, s=50, alpha=0.7,
                   label=f'{config_key} (data)')

        # Overlay the fitted line when a regression result is available.
        fit = next((r for r in regression_results if r and r['config'] == config_key), None)
        if fit:
            xs = np.linspace(data['total_size'].min(), data['total_size'].max(), 100)
            plt.plot(xs, fit['o'] + fit['l'] * xs, color=color, linewidth=2,
                    alpha=0.8, linestyle='--',
                    label=f'{config_key} (fit: R²={fit["r2_score"]:.3f})')

        series_no += 1

    plt.xlabel('Total Size (Bytes)', fontsize=14)
    plt.ylabel('Average Communication Time (μs)', fontsize=14)
    plt.title('Communication Time vs Total Size with Linear Regression Fits\n(comm_type=55, total_size>20)', fontsize=16)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=8)
    plt.grid(True, alpha=0.3)

    # Data spans several orders of magnitude -> log-log axes.
    plt.xscale('log')
    plt.yscale('log')

    output_path = os.path.join(target_dir, "all_configs_with_regression_lines.png")
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()

    print(f"已保存回归分析图: {output_path}")

def create_individual_regression_plots(all_config_data, regression_results, target_dir):
    """Save one standalone regression plot per configuration.

    Images land in an ``individual_regression_plots`` sub-directory of
    *target_dir*; configurations without data or without a fit result are
    skipped.
    """
    individual_dir = os.path.join(target_dir, "individual_regression_plots")
    os.makedirs(individual_dir, exist_ok=True)

    for config_key, data in all_config_data.items():
        if data is None or data.empty:
            continue

        fit = next((r for r in regression_results if r and r['config'] == config_key), None)
        if not fit:
            continue

        plt.figure(figsize=(10, 8))

        # Measured (averaged) points.
        plt.scatter(data['total_size'], data['avg_comm_time'],
                   color='blue', s=60, alpha=0.7, label='Actual Data')

        # Fitted line, drawn over the observed size range only.
        xs = np.linspace(data['total_size'].min(), data['total_size'].max(), 100)
        plt.plot(xs, fit['o'] + fit['l'] * xs, 'r-', linewidth=2,
                label=f'Linear Fit: y = {fit["o"]:.6f} + {fit["l"]:.6f}x')

        plt.xlabel('Total Size (Bytes)', fontsize=12)
        plt.ylabel('Average Communication Time (μs)', fontsize=12)
        plt.title(f'Linear Regression Analysis\nConfiguration: {config_key}\n'
                 f'R² = {fit["r2_score"]:.4f}, RMSE = {fit["rmse"]:.6f}',
                 fontsize=14)
        plt.legend()
        plt.grid(True, alpha=0.3)

        output_filename = f"{config_key}_regression_analysis.png"
        plt.savefig(os.path.join(individual_dir, output_filename), dpi=300, bbox_inches='tight')
        plt.close()

        print(f"已保存单独回归分析图: {output_filename}")

def save_regression_results_to_excel(regression_results, output_path):
    """Persist the per-configuration fit parameters to an Excel file.

    ``None`` entries (failed fits) are dropped.  Columns are written in a
    fixed order and floats rendered with six decimals.  Prints a summary
    of the saved parameters; returns nothing.
    """
    # Drop failed fits (perform_linear_regression returns None for those).
    valid_results = [r for r in regression_results if r is not None]

    if not valid_results:
        print("没有有效的回归分析结果可保存")
        return

    # Fixed column order for the output sheet.
    df = pd.DataFrame(valid_results)
    df = df[['config', 'o', 'l', 'r2_score', 'rmse', 'mae', 'data_points']]

    # Bug fix: os.makedirs('') raises FileNotFoundError when output_path
    # is a bare filename — only create the parent when one exists.
    parent = os.path.dirname(output_path)
    if parent:
        os.makedirs(parent, exist_ok=True)

    df.to_excel(output_path, index=False, float_format='%.6f')

    print(f"回归分析结果已保存到: {output_path}")
    print(f"共保存了 {len(valid_results)} 个配置的拟合参数")

    # Console summary of the fitted parameters.
    print("\n=== 回归分析结果摘要 ===")
    for result in valid_results:
        print(f"{result['config']}: o={result['o']:.6f}, l={result['l']:.6f}, R²={result['r2_score']:.4f}")


def create_summary_report(all_config_data, regression_results, target_dir):
    """Write a plain-text summary of every fit into *target_dir*.

    The report lists per-configuration intercept/slope/R²/RMSE in a fixed
    width table, followed by aggregate R² statistics.  Failed fits
    (``None`` entries) are excluded from the table.
    """
    report_path = os.path.join(target_dir, "regression_analysis_summary.txt")
    valid_results = [r for r in regression_results if r is not None]

    with open(report_path, 'w', encoding='utf-8') as f:
        f.write("=== 线性回归分析汇总报告 ===\n\n")
        f.write(f"总共处理 {len(all_config_data)} 个配置组:\n\n")
        f.write(f"成功拟合 {len(valid_results)} 个配置\n\n")

        rule = "-" * 80 + "\n"
        f.write("详细结果:\n")
        f.write(rule)
        f.write(f"{'配置':<15} {'截距(o)':<12} {'斜率(l)':<12} {'R²':<8} {'RMSE':<10} {'数据点':<8}\n")
        f.write(rule)

        for r in valid_results:
            f.write(f"{r['config']:<15} {r['o']:<12.6f} {r['l']:<12.6f} "
                   f"{r['r2_score']:<8.4f} {r['rmse']:<10.6f} {r['data_points']:<8}\n")

        f.write("\n\n拟合质量分析:\n")
        if valid_results:
            r2_values = [r['r2_score'] for r in valid_results]
            f.write(f"R²平均值: {np.mean(r2_values):.4f}\n")
            f.write(f"R²最大值: {np.max(r2_values):.4f}\n")
            f.write(f"R²最小值: {np.min(r2_values):.4f}\n")

            good_fits = [r for r in valid_results if r['r2_score'] > 0.8]
            f.write(f"R² > 0.8 的配置数量: {len(good_fits)}\n")

    print(f"已生成详细汇总报告: {report_path}")


def process_all_data_combined(grouped_files):
    """Pool every configuration's CSVs into one global dataset and average it.

    Applies the same filters as the per-configuration path
    (``comm_type == 55``, ``total_size > 20``), then averages
    ``avg_comm_time`` per ``total_size`` across all configurations.

    Parameters
    ----------
    grouped_files : dict
        ``{'1node-2proc': [csv_path, ...], ...}`` as produced by
        ``collect_csv_files``.

    Returns
    -------
    pandas.DataFrame or None
        Sorted per-size averages, or ``None`` when no usable data remains.
    """
    print("\n处理全局数据集: all-node-all-proc")
    all_combined_data = []
    required_columns = ['comm_type', 'total_size', 'avg_comm_time']

    for config_key, csv_files in grouped_files.items():
        for csv_file in csv_files:
            try:
                df = pd.read_csv(csv_file)
            except Exception as e:
                # Fix: report the failure instead of silently dropping the
                # file (consistent with process_csv_group_for_average).
                print(f"  读取文件 {csv_file} 时出错: {e}")
                continue

            if not all(col in df.columns for col in required_columns):
                continue

            # Tag each row with its origin for later inspection.
            df['config'] = config_key
            df['source_file'] = os.path.basename(csv_file)
            all_combined_data.append(df)

    if not all_combined_data:
        print("  全局数据集: 没有有效的CSV文件")
        return None

    combined_df = pd.concat(all_combined_data, ignore_index=True)
    print(f"  全局数据集: 合并后总数据量 {len(combined_df)} 条")

    # comm_type may arrive as int or str depending on the CSV contents.
    filtered_df = combined_df[(combined_df['comm_type'] == 55) | (combined_df['comm_type'] == '55')]
    print(f"  步骤1筛选后(comm_type == 55 或 '55'): {len(filtered_df)} 条数据")

    filtered_df = filtered_df[filtered_df['total_size'] > 20]
    print(f"  步骤2筛选后(total_size > 20): {len(filtered_df)} 条数据")

    if filtered_df.empty:
        print("  全局数据集筛选后没有数据")
        return None

    avg_data = filtered_df.groupby('total_size')['avg_comm_time'].mean().reset_index()
    avg_data = avg_data.sort_values('total_size')

    print(f"  全局数据集: 计算平均值后有 {len(avg_data)} 个数据点")
    return avg_data


def main(base_dir, target_dir, excel_path, output_excel_path):
    """Run the full pipeline: collect CSVs, fit each configuration, report.

    NOTE(review): *excel_path* is currently unused in this body
    (``read_excel_config`` is never called) — parameter kept for
    interface compatibility; confirm whether it is still needed.
    """
    print("开始线性回归分析...")

    os.makedirs(target_dir, exist_ok=True)

    # Discover and group the input files by configuration.
    print("收集CSV文件...")
    grouped_files = collect_csv_files(base_dir)
    print(f"发现 {len(grouped_files)} 个配置组")

    # Per-configuration averaged datasets (may map to None on failure).
    all_config_data = {}
    for config_key, csv_files in grouped_files.items():
        print(f"\n处理配置组: {config_key}")
        all_config_data[config_key] = process_csv_group_for_average(csv_files, config_key)

    # Pooled dataset across every configuration.
    global_data = process_all_data_combined(grouped_files)
    if global_data is not None:
        all_config_data['all-node-all-proc'] = global_data

    # Keep only configurations that produced data.
    valid_configs = {k: v for k, v in all_config_data.items() if v is not None and not v.empty}
    print(f"\n有效配置数量: {len(valid_configs)}")

    # Fit every valid configuration.
    print("\n开始线性回归分析...")
    regression_results = []
    for config_key, data in valid_configs.items():
        print(f"\n分析配置: {config_key}")
        regression_results.append(perform_linear_regression(data, config_key))

    # Persist parameters, then render the figures and the text report.
    print("\n保存回归分析结果...")
    save_regression_results_to_excel(regression_results, output_excel_path)

    print("\n创建可视化图表...")
    create_line_plot_with_regression(valid_configs, regression_results, target_dir)
    create_individual_regression_plots(valid_configs, regression_results, target_dir)

    create_summary_report(all_config_data, regression_results, target_dir)

    print(f"\n线性回归分析完成！结果已保存到: {target_dir}")
    print(f"Excel结果文件: {output_excel_path}")


if __name__ == "__main__":
    # Script configuration: Windows paths to the measurement dataset,
    # the output directory, and the Excel parameter files.
    BASE_DIR = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_data\analysis_for_all_type_data\1-16nodes-data"
    TARGET_DIR = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\analysis_for_non_block\regression_analysis"
    EXCEL_PATH = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\static\dataset.xlsx"
    OUTPUT_EXCEL_PATH = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\static\non_block_dataset.xlsx"
    
    # Abort early when the input directory is missing; the output
    # directories are created by main() as needed.
    if not os.path.exists(BASE_DIR):
        print(f"错误: 基础目录不存在 - {BASE_DIR}")
        exit(1)
    
    # Run the full analysis pipeline.
    main(BASE_DIR, TARGET_DIR, EXCEL_PATH, OUTPUT_EXCEL_PATH)