import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import glob
import re
from typing import Tuple, List, Optional
import warnings
warnings.filterwarnings('ignore')

import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import shared_vars



def find_data_files(base_dir: str, node_limit: Optional[int] = None, proc_limit: Optional[int] = None) -> List[str]:
    """
    Find all log-0_processed.csv files in subdirectories matching the pattern.

    Three directory layouts are supported:
    1. Parent-directory mode: files under parent/cn[...]/Xnode-Yproc-cn[...]-...
       directories (directory names are pattern-filtered)
    2. cn-directory mode: base_dir itself is a cn[...] directory; its
       subdirectories are searched (with per-directory name parsing)
    3. Direct-folder mode: base_dir itself contains log-0_processed.csv
       (the file is added with no filtering)

    NOTE(review): node_limit and proc_limit are currently NOT enforced -- every
    limit check below is commented out, so the parameters only appear in the
    final summary print. Confirm whether the limits should be re-enabled.

    Args:
        base_dir: Base directory to search (a parent directory, a cn[...]
            directory, or a concrete run folder)
        node_limit: Intended minimum node-count threshold (presently unused)
        proc_limit: Intended minimum process-count threshold (presently unused)

    Returns:
        List of file paths to log-0_processed.csv files
    """
    data_files = []
    base_dir_name = os.path.basename(base_dir)

    # Direct-folder mode: base_dir itself holds the processed log file.
    log_file_path = os.path.join(base_dir, "log-0_processed.csv")
    if os.path.exists(log_file_path):
        print(f"检测到直接文件夹模式: {base_dir_name}")
        print(f"直接添加文件: {log_file_path}")
        data_files.append(log_file_path)
        print(f"Found {len(data_files)} data files (直接文件夹模式，无过滤)")
        return data_files

    # cn-directory mode: base_dir is named like cn[...] or cn<digits>.
    elif re.match(r'cn(\[.*\]|\d+)', base_dir_name):
        print(f"检测到 cn 目录模式: {base_dir_name}")
        print("在该目录下搜索子目录中的 log-0_processed.csv 文件...")

        # Walk the cn directory looking for processed logs in subdirectories.
        for root, dirs, files in os.walk(base_dir):
            for file in files:
                if file == "log-0_processed.csv":
                    parent_dir = os.path.basename(root)

                    # Only consider files found in a subdirectory, not files
                    # sitting in the cn directory itself.
                    if root != base_dir:
                        print(f"找到文件在子目录: {parent_dir}")

                        # Parse node/process counts out of the subdirectory name.
                        node_match = re.search(r'(\d+)node', parent_dir)
                        proc_match = re.search(r'(\d+)proc', parent_dir)

                        if node_match and proc_match:
                            node_count = int(node_match.group(1))
                            proc_count = int(proc_match.group(1))

                            print(f"提取到节点数: {node_count}, 进程数: {proc_count}")

                            # Limit checks intentionally disabled (kept for reference):
                            # if node_limit is not None and node_count <= node_limit:
                            #     print(f"跳过：节点数 {node_count} <= 限制 {node_limit}")
                            #     continue
                            #############################################################
                            #                     process-count limit
                            # if proc_limit is not None and proc_count <= proc_limit:
                            #     print(f"跳过：进程数 {proc_count} <= 限制 {proc_limit}")
                            #     continue

                            # Directory-name pattern check also disabled:
                            # if (re.match(r'\d+node-\d+proc-cn.*-\d+iteration-.*', parent_dir)):
                            #     data_files.append(os.path.join(root, file))
                            #     print(f"添加文件: {os.path.join(root, file)}")
                            # else:
                            #     print(f"目录格式不匹配: {parent_dir}")
                            data_files.append(os.path.join(root, file))
                        else:
                            # Subdirectory name lacks node/proc info: warn but
                            # still include the file.
                            print(f"警告：子目录 {parent_dir} 不包含节点/进程信息，仍添加文件")
                            data_files.append(os.path.join(root, file))

    # Parent-directory mode: recursively search all subdirectories.
    else:
        print(f"检测到父目录模式，开始递归搜索: {base_dir}")
        for root, dirs, files in os.walk(base_dir):
            for file in files:
                if file == "log-0_processed.csv":
                    parent_dir = os.path.basename(root)

                    # Parse node/process counts out of the directory name.
                    node_match = re.search(r'(\d+)node', parent_dir)
                    proc_match = re.search(r'(\d+)proc', parent_dir)

                    if node_match and proc_match:
                        node_count = int(node_match.group(1))
                        proc_count = int(proc_match.group(1))

                        # Limit checks intentionally disabled (kept for reference):
                        # if node_limit is not None and node_count <= node_limit:
                        #     continue
                        # if proc_limit is not None and proc_count <= proc_limit:
                        #     continue

                        # Keep only directories named like "<N>node-<M>proc-...".
                        # if (re.match(r'\d+node-\d+proc-cn.*-\d+iteration-.*', parent_dir)):
                        if (re.match(r'\d+node-\d+proc-.*', parent_dir)):
                            data_files.append(os.path.join(root, file))

    print(f"Found {len(data_files)} data files (node_limit: {node_limit}, proc_limit: {proc_limit})")
    return data_files


def load_and_filter_data(file_paths: List[str], comm_type: int = 51, min_appearance: int = 20) -> pd.DataFrame:
    """
    Read every CSV in *file_paths*, keep only rows matching the filter
    criteria, and return them as one combined DataFrame.

    Args:
        file_paths: List of file paths to CSV files
        comm_type: Communication type to keep (default: 51)
        min_appearance: Rows must have appearance_time strictly greater than
            this value (default: 20)

    Returns:
        Combined DataFrame of all surviving rows, with an added 'source_file'
        column recording each row's file of origin.

    Raises:
        ValueError: If no rows survive the filter across all files.
    """
    # Explicit dtypes keep memory usage down when reading large logs.
    column_dtypes = {
        'comm_id': 'int32',
        'comm_size': 'int32', 
        'comm_type': 'int16',
        'rank': 'int32',
        'root': 'int32',
        'time_stamp': 'float64',
        'comm_time(us)': 'float64',
        'sendsize': 'int64',
        'sendcount': 'int32',
        'recvsize': 'int64', 
        'recvcount': 'int32',
        'src': 'int32',
        'dst': 'int32',
        'total_size': 'int64',
        'appearance_time': 'int32'
    }

    kept_frames = []
    processed_files = 0
    records_before = 0
    records_after = 0
    total = len(file_paths)

    print(f"Processing {total} files...")

    for index, file_path in enumerate(file_paths, start=1):
        try:
            frame = pd.read_csv(file_path, dtype=column_dtypes)
            records_before += len(frame)

            # Keep only the requested communication type with enough samples.
            keep = (frame['comm_type'] == comm_type) & (frame['appearance_time'] > min_appearance)
            subset = frame[keep]

            if not subset.empty:
                subset = subset.copy()
                # Remember where each surviving row came from.
                subset['source_file'] = file_path
                kept_frames.append(subset)
                records_after += len(subset)

            processed_files += 1
        except Exception as e:
            # A bad file is reported and skipped; the rest still load.
            print(f"Error processing {file_path}: {e}")
            continue

        # Progress indicator every 10 files and at the end.
        if index % 10 == 0 or index == total:
            print(f"Processed {index}/{total} files...")

    if not kept_frames:
        raise ValueError("No valid data found after filtering")

    print("Combining data...")
    combined = pd.concat(kept_frames, ignore_index=True)

    print("Data processing summary:")
    print(f"- Files processed: {processed_files}/{total}")
    print(f"- Total records before filtering: {records_before:,}")
    print(f"- Total records after filtering: {records_after:,}")
    print(f"- Filter criteria: comm_type={comm_type}, appearance_time>{min_appearance}")
    print(f"- Combined data shape: {combined.shape}")

    return combined


def perform_linear_regression(data: pd.DataFrame) -> Tuple[float, float, float, LinearRegression]:
    """
    Fit comm_time(us) = L * total_size + O via ordinary least squares.

    Args:
        data: DataFrame containing 'total_size' and 'comm_time(us)' columns

    Returns:
        Tuple of (L, O, R², fitted_model)

    Raises:
        ValueError: If no finite data points remain to fit on.
    """
    # Feature matrix must be 2-D for sklearn; target stays 1-D.
    sizes = data['total_size'].values.reshape(-1, 1)
    times = data['comm_time(us)'].values

    # Drop rows where either value is NaN or infinite.
    finite = np.isfinite(sizes.flatten()) & np.isfinite(times)
    sizes = sizes[finite]
    times = times[finite]

    if len(sizes) == 0:
        raise ValueError("No valid data points for regression")

    model = LinearRegression()
    model.fit(sizes, times)

    slope = model.coef_[0]
    intercept = model.intercept_

    # Goodness of fit on the same data the model was fit on.
    r2 = r2_score(times, model.predict(sizes))

    print("Linear Regression Results:")
    print(f"L (slope): {slope:.6f}")
    print(f"O (intercept): {intercept:.6f}")
    print(f"R² score: {r2:.6f}")
    print(f"Equation: comm_time(us) = {slope:.6f} * total_size + {intercept:.6f}")

    return slope, intercept, r2, model


def plot_regression_results(data: pd.DataFrame, L: float, O: float, r2: float, 
                          target_dir: str, filename: str = "regression_plot.png",
                          sample_size: int = 10000) -> None:
    """
    Create and save a scatter plot of the data with the fitted line overlaid.

    Args:
        data: DataFrame with 'total_size' and 'comm_time(us)' columns
        L: Slope coefficient
        O: Intercept coefficient
        r2: R² score (accepted for interface compatibility; not currently
            rendered on the plot)
        target_dir: Directory to save the plot (created if missing)
        filename: Name of the output file
        sample_size: Maximum number of points to draw (larger datasets are
            randomly down-sampled for plotting performance)
    """
    os.makedirs(target_dir, exist_ok=True)

    # Down-sample for rendering speed; the fitted line still spans the
    # full-data x-range below.
    plot_data = data
    if len(data) > sample_size:
        plot_data = data.sample(n=sample_size, random_state=42)
        print(f"Sampling {sample_size} points from {len(data)} total points for plotting")

    # Fix: the original called plt.figure() and then plt.subplots(), leaking
    # one orphaned figure per call (plt.close() only closes the current one).
    fig, ax1 = plt.subplots(1, 1, figsize=(14, 10))

    # Scatter of the (possibly sampled) observations.
    ax1.scatter(plot_data['total_size'], plot_data['comm_time(us)'], 
               alpha=0.6, s=15, color='blue', label='Data Points')

    # Draw the fitted line from x=0 so the intercept is visible.
    x_max = data['total_size'].max()
    x_range = np.linspace(0, x_max, 100)
    y_fitted = L * x_range + O
    ax1.plot(x_range, y_fitted, 'r-', linewidth=3, 
             label=f'Fitted Line: y = {L:.6f}x + {O:.6f}')

    ax1.set_xlabel('Total Size (bytes)', fontsize=12)
    ax1.set_ylabel('Communication Time (μs)', fontsize=12)
    ax1.set_title(f'Linear Regression: Communication Time vs Total Size', fontsize=14)
    ax1.legend(fontsize=11)
    ax1.grid(True, alpha=0.3)

    # Anchor both axes at the origin.
    ax1.set_xlim(left=0)
    ax1.set_ylim(bottom=0)

    output_path = os.path.join(target_dir, filename)
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    # Close the specific figure we created so repeated calls don't
    # accumulate open figures.
    plt.close(fig)

    print(f"Regression plot saved to: {output_path}")

def save_analysis_results(data: pd.DataFrame, L: float, O: float, r2: float, 
                         target_dir: str, filename: str = "analysis_results.txt",
                         comm_type: int = 51, min_appearance: int = 20) -> None:
    """
    Save analysis results to a text file.

    The filter criteria line previously hard-coded "comm_type=51 AND
    appearance_time>20"; it is now parameterized (defaults preserve the old
    output) so the report cannot disagree with the actual filter used.

    Args:
        data: DataFrame containing the analyzed data
        L: Slope coefficient
        O: Intercept coefficient
        r2: R² score
        target_dir: Directory to save the results (created if missing)
        filename: Name of the output file
        comm_type: Communication type that was filtered on (for the report)
        min_appearance: Minimum appearance time that was filtered on (for the report)
    """
    os.makedirs(target_dir, exist_ok=True)

    output_path = os.path.join(target_dir, filename)

    with open(output_path, 'w', encoding='utf-8') as f:
        f.write("Communication Data Analysis Results\n")
        f.write("=" * 40 + "\n\n")

        f.write(f"Data Summary:\n")
        f.write(f"- Total data points: {len(data)}\n")
        # Report the actual filter parameters instead of hard-coded values.
        f.write(f"- Filter criteria: comm_type={comm_type} AND appearance_time>{min_appearance}\n")
        f.write(f"- Total size range: {data['total_size'].min()} - {data['total_size'].max()} bytes\n")
        f.write(f"- Comm time range: {data['comm_time(us)'].min():.3f} - {data['comm_time(us)'].max():.3f} us\n\n")

        f.write(f"Linear Regression Results:\n")
        f.write(f"- Equation: comm_time(us) = total_size * L + O\n")
        f.write(f"- L (slope): {L:.6f}\n")
        f.write(f"- O (intercept): {O:.6f}\n")
        f.write(f"- R^2 score: {r2:.6f}\n\n")

        f.write(f"Model Interpretation:\n")
        f.write(f"- Each byte of data adds approximately {L:.6f} us to communication time\n")
        f.write(f"- Base communication overhead is approximately {O:.6f} us\n")
        f.write(f"- The model explains {r2*100:.2f}% of the variance in communication time\n")
    
    print(f"Analysis results saved to: {output_path}")


def analyze_communication_data(base_dir: str, target_dir: str, node_limit: Optional[int] = None, 
                             proc_limit: Optional[int] = None, comm_type: int = 51, 
                             min_appearance: int = 20) -> Tuple[float, float]:
    """
    Run the full analysis pipeline: discover files, load and filter data,
    fit the linear model, then save the plot and a text report.

    Args:
        base_dir: Base directory containing the data files
        target_dir: Directory to save results and plots
        node_limit: Node-count limit forwarded to find_data_files (None for no limit)
        proc_limit: Process-count limit forwarded to find_data_files (None for no limit)
        comm_type: Communication type to filter (default: 51)
        min_appearance: Minimum appearance time to filter (default: 20)

    Returns:
        Tuple of (L, O) coefficients

    Raises:
        ValueError: If no data files are found under base_dir.
    """
    print("Starting communication data analysis...")
    print(f"Configuration: node_limit={node_limit}, proc_limit={proc_limit}")
    print(f"Filter criteria: comm_type={comm_type}, min_appearance={min_appearance}")

    print("\n1. Finding data files...")
    file_paths = find_data_files(base_dir, node_limit, proc_limit)
    if not file_paths:
        raise ValueError(f"No data files found in {base_dir}")

    print("\n2. Loading and filtering data...")
    filtered_data = load_and_filter_data(file_paths, comm_type, min_appearance)

    print("\n3. Performing linear regression...")
    slope, intercept, r2, _model = perform_linear_regression(filtered_data)

    print("\n4. Creating visualization...")
    plot_regression_results(filtered_data, slope, intercept, r2, target_dir)

    print("\n5. Saving analysis results...")
    save_analysis_results(filtered_data, slope, intercept, r2, target_dir)

    print("\nAnalysis completed successfully!")
    print(f"Final Results: L = {slope:.6f}, O = {intercept:.6f}")

    return slope, intercept


def create_summary_statistics(data: pd.DataFrame, target_dir: str) -> None:
    """
    Write descriptive statistics and a per-size-bin breakdown to CSV files.

    Args:
        data: DataFrame containing the analyzed data
        target_dir: Directory to save the summaries (created if missing)
    """
    os.makedirs(target_dir, exist_ok=True)

    # Overall descriptive statistics for the key numeric columns.
    summary_path = os.path.join(target_dir, "summary_statistics.csv")
    data[['total_size', 'comm_time(us)', 'appearance_time']].describe().to_csv(summary_path)

    # Bucket total_size into 10 equal-width bins and aggregate per bin.
    binned = data.copy()
    binned['size_bin'] = pd.cut(binned['total_size'], bins=10, labels=False)
    per_bin = binned.groupby('size_bin').agg({
        'total_size': ['min', 'max', 'mean'],
        'comm_time(us)': ['mean', 'std', 'count']
    }).round(3)

    bin_path = os.path.join(target_dir, "size_bin_analysis.csv")
    per_bin.to_csv(bin_path)

    print(f"Summary statistics saved to: {summary_path}")
    print(f"Size bin analysis saved to: {bin_path}")

def plot_total_size(data: pd.DataFrame, target_dir: str) -> None:
    """Save a histogram of the total_size column to *target_dir*."""
    os.makedirs(target_dir, exist_ok=True)
    out_path = os.path.join(target_dir, "total_size_distribution.png")

    plt.figure(figsize=(8, 6))
    plt.hist(data['total_size'], bins=50, alpha=0.7, color='blue', edgecolor='black')
    plt.title('Distribution of Total Size')
    plt.xlabel('Total Size (bytes)')
    plt.ylabel('Frequency')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Total size distribution saved to: {out_path}")

def plot_comm_time(data: pd.DataFrame, target_dir: str) -> None:
    """Save a histogram of the comm_time(us) column to *target_dir*."""
    os.makedirs(target_dir, exist_ok=True)
    out_path = os.path.join(target_dir, "comm_time_distribution.png")

    plt.figure(figsize=(8, 6))
    plt.hist(data['comm_time(us)'], bins=50, alpha=0.7, color='green', edgecolor='black')
    plt.title('Distribution of Communication Time')
    plt.xlabel('Communication Time (us)')
    plt.ylabel('Frequency')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Communication time distribution saved to: {out_path}")

def plot_appearance_time(data: pd.DataFrame, target_dir: str) -> None:
    """Save a histogram of the appearance_time column to *target_dir*."""
    os.makedirs(target_dir, exist_ok=True)
    out_path = os.path.join(target_dir, "appearance_time_distribution.png")

    plt.figure(figsize=(8, 6))
    plt.hist(data['appearance_time'], bins=30, alpha=0.7, color='orange', edgecolor='black')
    plt.title('Distribution of Appearance Time')
    plt.xlabel('Appearance Time')
    plt.ylabel('Frequency')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Appearance time distribution saved to: {out_path}")

def plot_log_scatter(data: pd.DataFrame, target_dir: str) -> None:
    """Save a log-log scatter of comm_time(us) vs total_size to *target_dir*."""
    os.makedirs(target_dir, exist_ok=True)
    out_path = os.path.join(target_dir, "log_scale_scatter.png")

    # Cap at 5000 points for rendering speed; fixed seed keeps output stable.
    shown = data.sample(n=min(5000, len(data)), random_state=42)

    plt.figure(figsize=(8, 6))
    plt.scatter(shown['total_size'], shown['comm_time(us)'], 
                alpha=0.6, s=10, color='purple')
    plt.xscale('log')
    plt.yscale('log')
    plt.title('Communication Time vs. Total Size')
    plt.xlabel('Total Size (bytes)')
    plt.ylabel('Communication Time (us)')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Log-scale scatter plot saved to: {out_path}")

# Convenience wrapper: generate all detail plots in one call
def create_detailed_plots(data: pd.DataFrame, L: float, O: float, target_dir: str) -> None:
    """
    Generate the full set of distribution/scatter plots for *data*.

    L and O are accepted for interface compatibility with callers but are not
    used by any of the individual plots.
    """
    for renderer in (plot_total_size, plot_comm_time, plot_appearance_time, plot_log_scatter):
        renderer(data, target_dir)


def validate_data_quality(data: pd.DataFrame) -> dict:
    """
    Validate data quality and return quality metrics.

    Args:
        data: DataFrame with 'total_size' and 'comm_time(us)' columns

    Returns:
        Dictionary containing quality metrics: missing values per column,
        counts of negative/zero values, IQR-based outlier counts, and
        [min, max] ranges for the two key columns.
    """

    def _iqr_outlier_count(series: pd.Series) -> int:
        """Count values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]."""
        q1 = series.quantile(0.25)
        q3 = series.quantile(0.75)
        iqr = q3 - q1
        lower = q1 - 1.5 * iqr
        upper = q3 + 1.5 * iqr
        return ((series < lower) | (series > upper)).sum()

    quality_metrics = {}

    # Missing values per column.
    quality_metrics['missing_values'] = data.isnull().sum().to_dict()

    # Sanity checks: sizes and times should never be negative.
    quality_metrics['negative_total_size'] = (data['total_size'] < 0).sum()
    quality_metrics['negative_comm_time'] = (data['comm_time(us)'] < 0).sum()

    # Zero values may indicate logging artifacts.
    quality_metrics['zero_total_size'] = (data['total_size'] == 0).sum()
    quality_metrics['zero_comm_time'] = (data['comm_time(us)'] == 0).sum()

    # Outliers via the 1.5*IQR rule (previously duplicated inline for both
    # columns; now factored into one helper).
    quality_metrics['outliers_total_size'] = _iqr_outlier_count(data['total_size'])
    quality_metrics['outliers_comm_time'] = _iqr_outlier_count(data['comm_time(us)'])

    # Observed [min, max] ranges.
    quality_metrics['total_size_range'] = [data['total_size'].min(), data['total_size'].max()]
    quality_metrics['comm_time_range'] = [data['comm_time(us)'].min(), data['comm_time(us)'].max()]

    return quality_metrics


def save_data_quality_report(quality_metrics: dict, target_dir: str) -> None:
    """
    Write the metrics produced by validate_data_quality to a text report.

    Args:
        quality_metrics: Dictionary containing quality metrics
        target_dir: Directory to save the report (created if missing)
    """
    os.makedirs(target_dir, exist_ok=True)
    report_path = os.path.join(target_dir, "data_quality_report.txt")

    # Assemble the whole report first, then write it in one go.
    lines = ["Data Quality Report\n", "=" * 30 + "\n\n"]

    lines.append("Missing Values:\n")
    for col, count in quality_metrics['missing_values'].items():
        lines.append(f"  {col}: {count}\n")
    lines.append("\n")

    lines.append("Data Validation:\n")
    lines.append(f"  Negative total_size values: {quality_metrics['negative_total_size']}\n")
    lines.append(f"  Negative comm_time values: {quality_metrics['negative_comm_time']}\n")
    lines.append(f"  Zero total_size values: {quality_metrics['zero_total_size']}\n")
    lines.append(f"  Zero comm_time values: {quality_metrics['zero_comm_time']}\n\n")

    lines.append("Outlier Detection (IQR method):\n")
    lines.append(f"  Total size outliers: {quality_metrics['outliers_total_size']}\n")
    lines.append(f"  Communication time outliers: {quality_metrics['outliers_comm_time']}\n\n")

    size_lo, size_hi = quality_metrics['total_size_range']
    time_lo, time_hi = quality_metrics['comm_time_range']
    lines.append("Data Ranges:\n")
    lines.append(f"  Total size: {size_lo} - {size_hi} bytes\n")
    lines.append(f"  Communication time: {time_lo:.3f} - {time_hi:.3f} us\n")

    with open(report_path, 'w', encoding='utf-8') as f:
        f.writelines(lines)

    print(f"Data quality report saved to: {report_path}")


if __name__ == "__main__":
    # Define directories and parameters
    # base_dir = r"F:\PostGraduate\Point-to-Point-DATA\1-16nodes-data"  # Directory containing the data folders
    # target_dir = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\code\get_lammps_gap\analysis_results"  # Directory to save results

    # Data location -> alternatively point base_dir straight at one run folder
    # base_dir = r"F:\PostGraduate\Point-to-Point-DATA\big-nodes\LAMMPS\1024node-16384proc-cn99999-100iteration-512-s-100-3427930"  # Directory containing the data folders
    # target_dir = r"F:\PostGraduate\Point-to-Point-DATA\deal-data-code\C-lop-Prediction\code\get_lammps_gap\big_node\analysis_results_for_1024_node_100_iteration"  # Directory to save results

    # Data location -> originally meant to come from shared_vars.py.
    # NOTE(review): shared_vars is imported at the top of the file but not
    # referenced here -- the paths below are hard-coded; confirm which source
    # is authoritative.
    base_dir = r"F:\PostGraduate\Point-to-Point-DATA\1-128-data"
    target_dir = r"F:\PostGraduate\Point-to-Point-Code\C-lop-Prediction\code\get_lammps_gap\strong_scaling\1-128node_include_2proc"  # Directory to save results


    # Configuration parameters
    # NOTE(review): find_data_files currently ignores both limits (its
    # filtering code is commented out), so these values only affect log text.
    node_limit = 4  # Set to limit nodes (e.g., 16 for min 16 nodes), None for no limit
    proc_limit = 4  # Set to limit processes (e.g., 64 for max 64 processes), None for no limit
    
    comm_type = 51     # Communication type to filter
    min_appearance = 20  # Minimum appearance time to filter
    
    try:
        print("Communication Data Analysis Tool")
        print("=" * 50)
        print(f"Base directory: {base_dir}")
        print(f"Target directory: {target_dir}")
        print(f"Node limit: {node_limit}")
        print(f"Process limit: {proc_limit}")
        print(f"Communication type filter: {comm_type}")
        print(f"Minimum appearance time: {min_appearance}")
        print("=" * 50)
        
        # Perform the complete analysis
        L, O = analyze_communication_data(
            base_dir=base_dir, 
            target_dir=target_dir,
            node_limit=node_limit,
            proc_limit=proc_limit,
            comm_type=comm_type,
            min_appearance=min_appearance
        )
        
        # Load the filtered data again for additional analysis
        # NOTE(review): this re-discovers and re-reads every file already
        # processed inside analyze_communication_data -- data is loaded twice.
        print("\n6. Creating additional analysis...")
        file_paths = find_data_files(base_dir, node_limit, proc_limit)
        filtered_data = load_and_filter_data(file_paths, comm_type, min_appearance)
        
        # Data quality validation
        print("\n7. Validating data quality...")
        quality_metrics = validate_data_quality(filtered_data)
        save_data_quality_report(quality_metrics, target_dir)
        
        # Create summary statistics
        print("\n8. Creating summary statistics...")
        create_summary_statistics(filtered_data, target_dir)
        
        # Create detailed plots
        print("\n9. Creating detailed plots...")
        create_detailed_plots(filtered_data, L, O, target_dir)
        
        # Final summary
        print(f"\n" + "="*60)
        print(f"ANALYSIS COMPLETED SUCCESSFULLY")
        print(f"="*60)
        print(f"Data processed:")
        print(f"  - Total data points: {len(filtered_data):,}")
        print(f"  - Files processed: {len(file_paths)}")
        print(f"  - Filter: comm_type={comm_type}, appearance_time>{min_appearance}")
        print(f"")
        print(f"Linear regression results:")
        print(f"  - L (slope): {L:.6f}")
        print(f"  - O (intercept): {O:.6f}")
        print(f"  - Equation: comm_time(us) = {L:.6f} * total_size + {O:.6f}")
        print(f"")
        print(f"Output files saved to: {target_dir}")
        print(f"  - regression_plot.png")
        print(f"  - distribution_plots.png")
        print(f"  - analysis_results.txt")
        print(f"  - data_quality_report.txt")
        print(f"  - summary_statistics.csv")
        print(f"  - size_bin_analysis.csv")
        print(f"="*60)
        
    except Exception as e:
        # Print the full traceback for diagnosis, then re-raise so the
        # process exits non-zero.
        print(f"Error during analysis: {e}")
        import traceback
        traceback.print_exc()
        raise