import os
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')  # 使用非交互式后端，避免Qt问题
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import glob
import re
from typing import Tuple, List, Optional, Dict, Any
import warnings
warnings.filterwarnings('ignore')

import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import shared_vars


def find_data_files_by_proc(base_dir: str) -> Dict[int, List[str]]:
    """
    Recursively search all subdirectories of base_dir for folders whose names
    match the ``*node-*proc*`` pattern and group every ``log-0_processed.csv``
    file found inside them by process count.

    Args:
        base_dir: Root directory containing arbitrarily named subfolders.

    Returns:
        Dict[int, List[str]]: Maps each process count to the list of
        log-0_processed.csv file paths discovered for that count.
    """
    # Compile once instead of re-parsing the pattern for every directory.
    dir_name_pattern = re.compile(r'(\d+)node-(\d+)proc')
    proc_files_dict: Dict[int, List[str]] = {}
    total_files_found = 0

    print(f"开始递归搜索目录: {base_dir}")
    print("搜索模式: 任意子文件夹/*node-*proc-*/log-0_processed.csv")

    # Walk the whole tree; only directories that actually contain the target
    # file AND encode a process count in their name are collected.
    for root, dirs, files in os.walk(base_dir):
        if "log-0_processed.csv" not in files:
            continue

        # The process count is encoded in the directory name, e.g.
        # "2node-8proc-...". Group 1 (node count) is validated by the pattern
        # but not needed here, so it is intentionally not extracted.
        proc_match = dir_name_pattern.search(os.path.basename(root))
        if not proc_match:
            continue

        proc_count = int(proc_match.group(2))
        file_path = os.path.join(root, "log-0_processed.csv")

        # Group by process count; setdefault avoids the manual "key missing"
        # dance of the original implementation.
        proc_files_dict.setdefault(proc_count, []).append(file_path)
        total_files_found += 1

        relative_path = os.path.relpath(file_path, base_dir)
        print(f"  找到文件: {relative_path} -> {proc_count} procs")

    # Report discovery statistics before returning.
    print(f"\n=== 文件发现统计 ===")
    print(f"总共找到 {total_files_found} 个 log-0_processed.csv 文件")
    print(f"涉及 {len(proc_files_dict)} 种不同的进程数:")

    for proc_count in sorted(proc_files_dict.keys()):
        print(f"  {proc_count} procs: {len(proc_files_dict[proc_count])} 个文件")

    return proc_files_dict


def load_and_filter_data(file_paths: List[str], comm_type: int = 51, min_appearance: int = 20) -> pd.DataFrame:
    """
    Read every CSV in file_paths, keep only rows matching the filter criteria,
    and return them concatenated into a single DataFrame.

    Args:
        file_paths: Paths of the CSV files to read.
        comm_type: Only rows with this communication type are kept (default: 51).
        min_appearance: Only rows whose appearance_time strictly exceeds this
            value are kept (default: 20).

    Returns:
        DataFrame holding the filtered rows of all readable files, with an
        extra ``source_file`` column recording provenance; empty DataFrame if
        nothing survives filtering.
    """
    # Explicit narrow dtypes keep memory usage down when logs are large.
    column_dtypes = {
        'comm_id': 'int32',
        'comm_size': 'int32',
        'comm_type': 'int16',
        'rank': 'int32',
        'root': 'int32',
        'time_stamp': 'float64',
        'comm_time(us)': 'float64',
        'sendsize': 'int64',
        'sendcount': 'int32',
        'recvsize': 'int64',
        'recvcount': 'int32',
        'src': 'int32',
        'dst': 'int32',
        'total_size': 'int64',
        'appearance_time': 'int32',
    }

    frames = []
    processed_files = 0
    total_records_before = 0
    total_records_after = 0

    print(f"Processing {len(file_paths)} files...")

    for idx, file_path in enumerate(file_paths, start=1):
        try:
            frame = pd.read_csv(file_path, dtype=column_dtypes)
            total_records_before += len(frame)

            # Keep only the requested communication type above the
            # appearance-time threshold.
            keep = (frame['comm_type'] == comm_type) & (frame['appearance_time'] > min_appearance)
            subset = frame[keep]

            if not subset.empty:
                subset = subset.copy()
                subset['source_file'] = file_path  # remember where the rows came from
                frames.append(subset)
                total_records_after += len(subset)

            processed_files += 1

            # Progress indicator every 10 files and on the last file.
            if idx % 10 == 0 or idx == len(file_paths):
                print(f"Processed {idx}/{len(file_paths)} files...")

        except Exception as e:
            # Best-effort: a single unreadable file must not abort the batch.
            print(f"Error processing {file_path}: {e}")
            continue

    if not frames:
        print("Warning: No valid data found after filtering")
        return pd.DataFrame()

    print("Combining data...")
    combined_data = pd.concat(frames, ignore_index=True)

    print(f"Data processing summary:")
    print(f"- Files processed: {processed_files}/{len(file_paths)}")
    print(f"- Total records before filtering: {total_records_before:,}")
    print(f"- Total records after filtering: {total_records_after:,}")
    print(f"- Filter criteria: comm_type={comm_type}, appearance_time>{min_appearance}")
    print(f"- Combined data shape: {combined_data.shape}")

    return combined_data


def perform_linear_regression(data: pd.DataFrame) -> Tuple[float, float, float, LinearRegression]:
    """
    Fit an ordinary-least-squares line comm_time(us) = L * total_size + O.

    Args:
        data: DataFrame containing 'total_size' and 'comm_time(us)' columns.

    Returns:
        Tuple of (L, O, R², fitted_model) where L is the slope, O the
        intercept, and R² the coefficient of determination on the same data.

    Raises:
        ValueError: If no finite (total_size, comm_time) pairs remain.
    """
    x_raw = data['total_size'].values
    y_raw = data['comm_time(us)'].values

    # Drop rows where either coordinate is NaN or infinite.
    valid = np.isfinite(x_raw) & np.isfinite(y_raw)
    X = x_raw[valid].reshape(-1, 1)  # sklearn expects a 2-D feature matrix
    y = y_raw[valid]

    if len(X) == 0:
        raise ValueError("No valid data points for regression")

    model = LinearRegression().fit(X, y)

    slope = model.coef_[0]
    intercept = model.intercept_
    r2 = r2_score(y, model.predict(X))

    return slope, intercept, r2, model


def analyze_single_proc_group(file_paths: List[str], proc_count: int, comm_type: int = 51, 
                             min_appearance: int = 20) -> Tuple[pd.DataFrame, float, float, float]:
    """
    Analyze all files belonging to a single process-count group.

    Args:
        file_paths: All log-0_processed.csv paths for this process count.
        proc_count: Number of processes in this group (used in log messages).
        comm_type: Communication-type filter forwarded to load_and_filter_data.
        min_appearance: Appearance-time filter forwarded to load_and_filter_data.

    Returns:
        Tuple[DataFrame, L, O, R²]: The filtered data plus the regression
        results; (data, 0.0, 0.0, 0.0) when no data survives or the fit fails.
    """
    print(f"\n=== 分析 {proc_count} procs 组 ===")
    print(f"包含 {len(file_paths)} 个文件")

    # Load and filter every file of this group into one DataFrame.
    dataset = load_and_filter_data(file_paths, comm_type, min_appearance)

    if dataset.empty:
        print(f"警告: {proc_count} procs 组没有有效数据")
        return dataset, 0.0, 0.0, 0.0

    # Fit the linear model; failures are reported but never propagated so the
    # caller can continue with the remaining groups.
    try:
        slope, intercept, r2, _model = perform_linear_regression(dataset)
    except Exception as e:
        print(f"错误: {proc_count} procs 组线性回归失败: {e}")
        return dataset, 0.0, 0.0, 0.0

    print(f"{proc_count} procs 线性回归结果: L={slope:.6f}, O={intercept:.6f}, R²={r2:.6f}")
    return dataset, slope, intercept, r2


def plot_regression_results(data: pd.DataFrame, L: float, O: float, r2: float, 
                          target_dir: str, proc_count: int, filename: str = "regression_plot.png",
                          sample_size: int = 10000) -> None:
    """
    Render the filtered data as a scatter plot with the fitted regression line
    overlaid, and save the figure under target_dir.

    Any failure is reported as a warning rather than raised, so plotting
    problems never abort the analysis pipeline.
    """
    try:
        os.makedirs(target_dir, exist_ok=True)

        # Down-sample very large datasets so the scatter stays fast/readable;
        # the fixed random_state keeps the figure reproducible.
        if len(data) > sample_size:
            plot_data = data.sample(n=sample_size, random_state=42)
            print(f"Sampling {sample_size} points from {len(data)} total points for plotting")
        else:
            plot_data = data

        fig, ax = plt.subplots(figsize=(14, 10))

        ax.scatter(plot_data['total_size'], plot_data['comm_time(us)'], 
                   alpha=0.7, s=25, color='blue', label='Data Points')

        # Draw the fitted line from x=0 so the intercept is visible.
        xs = np.linspace(0, data['total_size'].max(), 100)
        ax.plot(xs, L * xs + O, 'r-', linewidth=3, 
                label=f'Fitted Line: y = {L:.6f}x + {O:.6f}')

        ax.set_xlabel('Total Size (bytes)', fontsize=12)
        ax.set_ylabel('Communication Time (μs)', fontsize=12)
        ax.set_title(f'Linear Regression: {proc_count} Procs - Communication Time vs Total Size\nR² = {r2:.4f}', fontsize=14)
        ax.legend(fontsize=11)
        ax.grid(True, alpha=0.3)

        # Anchor both axes at the origin.
        ax.set_xlim(left=0)
        ax.set_ylim(bottom=0)

        stats_text = f'Proc Count: {proc_count}\nData Points: {len(data):,}\nL = {L:.6f}\nO = {O:.6f}\nR² = {r2:.6f}'
        ax.text(0.02, 0.98, stats_text, transform=ax.transAxes, 
                verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.8))

        output_path = os.path.join(target_dir, filename)
        fig.savefig(output_path, dpi=300, bbox_inches='tight')
        plt.close(fig)

        print(f"Regression plot saved to: {output_path}")

    except Exception as e:
        print(f"Warning: Could not create regression plot for {proc_count} procs: {e}")

def save_analysis_results(data: pd.DataFrame, L: float, O: float, r2: float, 
                         target_dir: str, proc_count: int, filename: str = "analysis_results.txt",
                         comm_type: int = 51, min_appearance: int = 20) -> None:
    """
    Save the regression analysis results for one process group to a text file.

    Args:
        data: Filtered data used for the fit; must contain 'total_size' and
            'comm_time(us)' columns.
        L: Fitted slope (us per byte).
        O: Fitted intercept (us).
        r2: R² score of the fit.
        target_dir: Output directory (created if missing).
        proc_count: Process count of this group, shown in the report header.
        filename: Output file name inside target_dir.
        comm_type: comm_type filter that produced ``data``. The report used to
            hard-code "comm_type=51" regardless of the actual filter; the
            default keeps old output byte-identical.
        min_appearance: appearance_time filter that produced ``data``.
    """
    os.makedirs(target_dir, exist_ok=True)
    
    output_path = os.path.join(target_dir, filename)
    
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(f"Communication Data Analysis Results ({proc_count} Procs)\n")
        f.write("=" * 60 + "\n\n")
        
        f.write(f"Process Configuration:\n")
        f.write(f"- Process count: {proc_count}\n")
        f.write(f"- Total data points: {len(data)}\n")
        # Report the actual filter values instead of a hard-coded string.
        f.write(f"- Filter criteria: comm_type={comm_type} AND appearance_time>{min_appearance}\n")
        f.write(f"- Total size range: {data['total_size'].min()} - {data['total_size'].max()} bytes\n")
        f.write(f"- Comm time range: {data['comm_time(us)'].min():.3f} - {data['comm_time(us)'].max():.3f} us\n\n")
        
        f.write(f"Linear Regression Results:\n")
        f.write(f"- Equation: comm_time(us) = total_size * L + O\n")
        f.write(f"- L (slope): {L:.6f}\n")
        f.write(f"- O (intercept): {O:.6f}\n")
        f.write(f"- R^2 score: {r2:.6f}\n\n")
        
        f.write(f"Model Interpretation:\n")
        f.write(f"- Each byte of data adds approximately {L:.6f} us to communication time\n")
        f.write(f"- Base communication overhead is approximately {O:.6f} us\n")
        f.write(f"- The model explains {r2*100:.2f}% of the variance in communication time\n")
    
    print(f"Analysis results saved to: {output_path}")


def create_summary_statistics(data: pd.DataFrame, target_dir: str) -> None:
    """
    Write descriptive statistics and a 10-bin total_size breakdown to CSV
    files inside target_dir. Failures are reported as warnings only.
    """
    try:
        os.makedirs(target_dir, exist_ok=True)

        # describe() gives count/mean/std/min/quartiles/max per column.
        summary_path = os.path.join(target_dir, "summary_statistics.csv")
        data[['total_size', 'comm_time(us)', 'appearance_time']].describe().to_csv(summary_path)

        # Bucket rows into 10 equal-width total_size bins and aggregate each.
        binned = data.copy()
        binned['size_bin'] = pd.cut(binned['total_size'], bins=10, labels=False)
        per_bin = binned.groupby('size_bin').agg({
            'total_size': ['min', 'max', 'mean'],
            'comm_time(us)': ['mean', 'std', 'count']
        }).round(3)

        bin_path = os.path.join(target_dir, "size_bin_analysis.csv")
        per_bin.to_csv(bin_path)

        print(f"Summary statistics saved to: {summary_path}")
        print(f"Size bin analysis saved to: {bin_path}")

    except Exception as e:
        print(f"Warning: Could not create summary statistics: {e}")


def plot_total_size(data: pd.DataFrame, target_dir: str) -> None:
    """Plot and save a histogram of the total_size distribution."""
    try:
        os.makedirs(target_dir, exist_ok=True)
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.hist(data['total_size'], bins=50, alpha=0.7, color='blue', edgecolor='black')
        ax.set_xlabel('Total Size (bytes)')
        ax.set_ylabel('Frequency')
        ax.set_title('Distribution of Total Size')
        ax.grid(True, alpha=0.3)
        fig.tight_layout()
        plot_path = os.path.join(target_dir, "total_size_distribution.png")
        fig.savefig(plot_path, dpi=300, bbox_inches='tight')
        plt.close(fig)
        print(f"Total size distribution saved to: {plot_path}")
    except Exception as e:
        print(f"Warning: Could not create total size distribution plot: {e}")


def plot_comm_time(data: pd.DataFrame, target_dir: str) -> None:
    """Plot and save a histogram of the communication-time distribution."""
    try:
        os.makedirs(target_dir, exist_ok=True)
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.hist(data['comm_time(us)'], bins=50, alpha=0.7, color='green', edgecolor='black')
        ax.set_xlabel('Communication Time (us)')
        ax.set_ylabel('Frequency')
        ax.set_title('Distribution of Communication Time')
        ax.grid(True, alpha=0.3)
        fig.tight_layout()
        plot_path = os.path.join(target_dir, "comm_time_distribution.png")
        fig.savefig(plot_path, dpi=300, bbox_inches='tight')
        plt.close(fig)
        print(f"Communication time distribution saved to: {plot_path}")
    except Exception as e:
        print(f"Warning: Could not create communication time distribution plot: {e}")


def plot_appearance_time(data: pd.DataFrame, target_dir: str) -> None:
    """Plot and save a histogram of the appearance_time distribution."""
    try:
        os.makedirs(target_dir, exist_ok=True)
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.hist(data['appearance_time'], bins=30, alpha=0.7, color='orange', edgecolor='black')
        ax.set_xlabel('Appearance Time')
        ax.set_ylabel('Frequency')
        ax.set_title('Distribution of Appearance Time')
        ax.grid(True, alpha=0.3)
        fig.tight_layout()
        plot_path = os.path.join(target_dir, "appearance_time_distribution.png")
        fig.savefig(plot_path, dpi=300, bbox_inches='tight')
        plt.close(fig)
        print(f"Appearance time distribution saved to: {plot_path}")
    except Exception as e:
        print(f"Warning: Could not create appearance time distribution plot: {e}")


def plot_log_scatter(data: pd.DataFrame, target_dir: str) -> None:
    """Plot and save a log-log scatter of communication time vs. total size."""
    try:
        os.makedirs(target_dir, exist_ok=True)
        fig, ax = plt.subplots(figsize=(8, 6))
        # Cap the scatter at 5000 points; fixed seed keeps it reproducible.
        subset = data.sample(n=min(5000, len(data)), random_state=42)

        ax.scatter(subset['total_size'], subset['comm_time(us)'], 
                   alpha=0.6, s=10, color='purple')

        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel('Total Size (bytes)')
        ax.set_ylabel('Communication Time (us)')
        ax.set_title('Communication Time vs. Total Size (Log Scale)')
        ax.grid(True, alpha=0.3)
        fig.tight_layout()
        plot_path = os.path.join(target_dir, "log_scale_scatter.png")
        fig.savefig(plot_path, dpi=300, bbox_inches='tight')
        plt.close(fig)
        print(f"Log-scale scatter plot saved to: {plot_path}")
    except Exception as e:
        print(f"Warning: Could not create log-scale scatter plot: {e}")


def create_detailed_plots(data: pd.DataFrame, L: float, O: float, target_dir: str) -> None:
    """Create all detailed analysis charts (histograms + log-log scatter).

    L and O are accepted for interface compatibility but are not used by any
    of the individual chart helpers.
    """
    for renderer in (plot_total_size, plot_comm_time, plot_appearance_time, plot_log_scatter):
        renderer(data, target_dir)


def validate_data_quality(data: pd.DataFrame) -> Dict[str, Any]:
    """
    Compute data-quality metrics for the filtered dataset.

    Returns a dict with missing-value counts per column, negative/zero value
    counts, IQR-based outlier counts, and the [min, max] range of the size
    and time columns.
    """
    def _iqr_outlier_count(series: pd.Series) -> int:
        # Standard 1.5*IQR fences around the interquartile range.
        q1 = series.quantile(0.25)
        q3 = series.quantile(0.75)
        spread = q3 - q1
        lower = q1 - 1.5 * spread
        upper = q3 + 1.5 * spread
        return ((series < lower) | (series > upper)).sum()

    sizes = data['total_size']
    times = data['comm_time(us)']

    return {
        'missing_values': data.isnull().sum().to_dict(),
        'negative_total_size': (sizes < 0).sum(),
        'negative_comm_time': (times < 0).sum(),
        'zero_total_size': (sizes == 0).sum(),
        'zero_comm_time': (times == 0).sum(),
        'outliers_total_size': _iqr_outlier_count(sizes),
        'outliers_comm_time': _iqr_outlier_count(times),
        'total_size_range': [sizes.min(), sizes.max()],
        'comm_time_range': [times.min(), times.max()],
    }


def save_data_quality_report(quality_metrics: Dict[str, Any], target_dir: str) -> None:
    """
    Write the metrics produced by validate_data_quality to a plain-text
    report in target_dir. Failures are reported as warnings only.
    """
    try:
        os.makedirs(target_dir, exist_ok=True)

        report_path = os.path.join(target_dir, "data_quality_report.txt")

        # Assemble the report in memory, then write it in one shot.
        sections = []
        sections.append("Data Quality Report\n")
        sections.append("=" * 30 + "\n\n")

        sections.append("Missing Values:\n")
        for col, count in quality_metrics['missing_values'].items():
            sections.append(f"  {col}: {count}\n")
        sections.append("\n")

        sections.append("Data Validation:\n")
        sections.append(f"  Negative total_size values: {quality_metrics['negative_total_size']}\n")
        sections.append(f"  Negative comm_time values: {quality_metrics['negative_comm_time']}\n")
        sections.append(f"  Zero total_size values: {quality_metrics['zero_total_size']}\n")
        sections.append(f"  Zero comm_time values: {quality_metrics['zero_comm_time']}\n\n")

        sections.append("Outlier Detection (IQR method):\n")
        sections.append(f"  Total size outliers: {quality_metrics['outliers_total_size']}\n")
        sections.append(f"  Communication time outliers: {quality_metrics['outliers_comm_time']}\n\n")

        sections.append("Data Ranges:\n")
        size_lo, size_hi = quality_metrics['total_size_range']
        time_lo, time_hi = quality_metrics['comm_time_range']
        sections.append(f"  Total size: {size_lo} - {size_hi} bytes\n")
        sections.append(f"  Communication time: {time_lo:.3f} - {time_hi:.3f} us\n")

        with open(report_path, 'w', encoding='utf-8') as f:
            f.writelines(sections)

        print(f"Data quality report saved to: {report_path}")

    except Exception as e:
        print(f"Warning: Could not create data quality report: {e}")


def save_overall_summary(analysis_results: Dict[int, Tuple[float, float, float]], target_dir: str) -> None:
    """
    Save a cross-group summary of the regression results for all process
    counts.

    Args:
        analysis_results: Maps process count -> (L, O, R²).
        target_dir: Output directory; now created if missing, matching the
            other save_* helpers (previously a missing directory made the
            open() fail and the summary was silently skipped).
    """
    try:
        # Create the directory like every other save_* helper does, so this
        # function also works when called standalone.
        os.makedirs(target_dir, exist_ok=True)

        summary_path = os.path.join(target_dir, "overall_summary.txt")
        
        with open(summary_path, 'w', encoding='utf-8') as f:
            f.write("Overall Analysis Summary - Proc Aggregation\n")
            f.write("=" * 60 + "\n\n")
            
            f.write(f"Analysis completed for {len(analysis_results)} process groups:\n\n")
            
            f.write("Process Group Results:\n")
            f.write("-" * 50 + "\n")
            f.write(f"{'Proc Count':<12} {'L (slope)':<15} {'O (intercept)':<15} {'R² Score':<10}\n")
            f.write("-" * 50 + "\n")
            
            for proc_count in sorted(analysis_results.keys()):
                L, O, r2 = analysis_results[proc_count]
                f.write(f"{proc_count:<12} {L:<15.6f} {O:<15.6f} {r2:<10.6f}\n")
            
            f.write("\n")
            # Document the per-group output tree so readers of the summary know
            # where each artifact lives.
            f.write("Output Structure:\n")
            for proc_count in sorted(analysis_results.keys()):
                f.write(f"  {proc_count}proc/\n")
                f.write(f"    ├── analysis_results.txt\n")
                f.write(f"    ├── regression_plot.png\n")
                f.write(f"    ├── data_quality_report.txt\n")
                f.write(f"    ├── summary_statistics.csv\n")
                f.write(f"    ├── size_bin_analysis.csv\n")
                f.write(f"    ├── total_size_distribution.png\n")
                f.write(f"    ├── comm_time_distribution.png\n")
                f.write(f"    ├── appearance_time_distribution.png\n")
                f.write(f"    └── log_scale_scatter.png\n")
        
        print(f"Overall summary saved to: {summary_path}")
        
    except Exception as e:
        print(f"Warning: Could not create overall summary: {e}")


def analyze_all_proc_groups(base_dir: str, target_dir: str, comm_type: int = 51, 
                           min_appearance: int = 20) -> Dict[int, Tuple[float, float, float]]:
    """
    Main driver: analyze all communication data grouped by process count,
    creating an independent output folder for each process count.

    Args:
        base_dir: Input directory, searched recursively for data files.
        target_dir: Output directory; one "<N>proc" subfolder per group.
        comm_type: Communication-type filter applied to every file.
        min_appearance: Minimum appearance-time filter applied to every file.

    Returns:
        Dict[int, Tuple[float, float, float]]: Per process count, the fitted
        (L, O, R²) results; groups that failed or had no valid data are
        omitted.

    Raises:
        ValueError: If no matching data files are found under base_dir.
    """
    print("=" * 80)
    print("开始按进程数分组的通信数据分析")
    print("=" * 80)
    print(f"输入目录: {base_dir}")
    print(f"输出目录: {target_dir}")
    print(f"过滤条件: comm_type={comm_type}, min_appearance={min_appearance}")
    
    # Step 1: discover data files and group them by process count.
    print("\n1. 按进程数发现和分组文件...")
    proc_files_dict = find_data_files_by_proc(base_dir)
    
    if not proc_files_dict:
        raise ValueError(f"在 {base_dir} 中没有找到任何匹配的数据文件")
    
    # Create the main output directory.
    os.makedirs(target_dir, exist_ok=True)
    
    # Step 2: run an independent analysis for each process-count group.
    analysis_results = {}
    
    for proc_count in sorted(proc_files_dict.keys()):
        file_paths = proc_files_dict[proc_count]
        
        print(f"\n2.{proc_count}. 分析 {proc_count} procs 组...")
        
        # Create the output directory for this process count.
        proc_output_dir = os.path.join(target_dir, f"{proc_count}proc")
        os.makedirs(proc_output_dir, exist_ok=True)
        
        try:
            # Load, filter, and fit the regression for this group.
            filtered_data, L, O, r2 = analyze_single_proc_group(
                file_paths, proc_count, comm_type, min_appearance
            )
            
            if not filtered_data.empty:
                # Persist the textual analysis results.
                save_analysis_results(filtered_data, L, O, r2, proc_output_dir, proc_count)
                
                # Regression scatter + fitted-line figure.
                plot_regression_results(filtered_data, L, O, r2, proc_output_dir, proc_count)
                
                # Distribution histograms and log-log scatter.
                create_detailed_plots(filtered_data, L, O, proc_output_dir)
                
                # Data-quality validation report.
                quality_metrics = validate_data_quality(filtered_data)
                save_data_quality_report(quality_metrics, proc_output_dir)
                
                # Descriptive statistics CSVs.
                create_summary_statistics(filtered_data, proc_output_dir)
                
                # Record the fitted coefficients for the overall summary.
                analysis_results[proc_count] = (L, O, r2)
                
                print(f"✓ {proc_count} procs 分析完成，结果保存到: {proc_output_dir}")
            else:
                print(f"✗ {proc_count} procs 没有有效数据，跳过")
                
        except Exception as e:
            # Best-effort: a failing group must not abort the remaining groups.
            print(f"✗ {proc_count} procs 分析失败: {e}")
            continue
    
    # Step 3: write the cross-group summary file.
    print(f"\n3. 生成总体分析摘要...")
    save_overall_summary(analysis_results, target_dir)
    
    print(f"\n" + "=" * 80)
    print(f"按进程数分组分析完成!")
    print(f"成功分析了 {len(analysis_results)} 个进程数组")
    print(f"结果保存在: {target_dir}")
    for proc_count in sorted(analysis_results.keys()):
        L, O, r2 = analysis_results[proc_count]
        print(f"  {proc_count}proc/: L={L:.6f}, O={O:.6f}, R²={r2:.6f}")
    print("=" * 80)
    
    return analysis_results


if __name__ == "__main__":
    # Input/output locations (author-specific absolute Windows paths).
    base_dir = r"F:\PostGraduate\Point-to-Point-DATA\WEAK_SCALING\weak_scaling_data_from_new_config"
    target_dir = r"F:\PostGraduate\Point-to-Point-Code\C-lop-Prediction\code\get_lammps_gap\weak_scaling_form_proc_FOR_500atom_per_proc"

    # Configuration parameters
    comm_type = 51     # Communication type to filter
    min_appearance = 20  # Minimum appearance time to filter
    
    try:
        print("Communication Data Analysis Tool - Proc Aggregation")
        print("=" * 80)
        print(f"Base directory: {base_dir}")
        print(f"Target directory: {target_dir}")
        print(f"Communication type filter: {comm_type}")
        print(f"Minimum appearance time: {min_appearance}")
        print("=" * 80)
        
        # Run the full per-process-count analysis pipeline.
        analysis_results = analyze_all_proc_groups(
            base_dir=base_dir, 
            target_dir=target_dir,
            comm_type=comm_type,
            min_appearance=min_appearance
        )
        
        print(f"\n分析完成! 共处理了 {len(analysis_results)} 个进程数组")
        
    except Exception as e:
        # Print the full traceback before re-raising so failures can be
        # diagnosed from console output alone.
        print(f"Error during analysis: {e}")
        import traceback
        traceback.print_exc()
        raise