# -*- coding: utf-8 -*-
"""
LHS Sampling Validation: Correct Coverage & Independence Analysis for Multiple Datasets
Author: Tongguang Si
"""

import os
from pathlib import Path

import pandas as pd
import numpy as np
import ast
from scipy.stats import kstest, beta, truncnorm, pearsonr, spearmanr, t
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist
import matplotlib.pyplot as plt
import seaborn as sns
import dcor  # pip install dcor
from sklearn.feature_selection import mutual_info_regression
from sklearn.neighbors import NearestNeighbors
from scipy.spatial import Voronoi
import argparse

from osc05.static.constants import Commons

# ===============================
# 0. Setup
# ===============================
# Directory the sampling result CSVs are read from
output_dir_sampling = Path("output") / "sampling"
# Create the full directory tree; an existing directory is reused without error
output_dir_sampling.mkdir(parents=True, exist_ok=True)


# Directory the validation plots and CSV outputs are written to
output_dir_validation_lhs = Path("output") / "validation_lhs"
# Create the full directory tree; an existing directory is reused without error
output_dir_validation_lhs.mkdir(parents=True, exist_ok=True)


# ===============================
# 1. Load Data Functions for Multiple Datasets
# ===============================

def get_dataset_files():
    """Collect the CSV files for every sample-size/seed dataset combination.

    Scans ``output_dir_sampling`` for files named
    ``lhs_sampling_results_<size>_<seed>.csv`` over the cross product of
    ``Commons.sample_sizes`` and ``Commons.seeds``.

    Returns:
        list[dict]: One entry per file that exists on disk, with keys
        ``filename`` (Path), ``sample_size``, ``seed`` and ``label``.
    """
    files = []
    for size in Commons.sample_sizes:
        for seed in Commons.seeds:
            filename = output_dir_sampling / f"lhs_sampling_results_{size}_{seed}.csv"
            # Skip combinations that were never generated
            if filename.exists():
                files.append({
                    'filename': filename,
                    'sample_size': size,
                    'seed': seed,
                    'label': f"Size{size}_Seed{seed}"
                })

    print(f"Found {len(files)} dataset files")
    return files


def load_single_dataset(file_info):
    """Load one sampling-results CSV and split it into analysis inputs.

    Args:
        file_info: Dict with at least a ``'filename'`` key pointing at the
            CSV to read (as produced by ``get_dataset_files``).

    Returns:
        Tuple ``(df, dur_cols, dly_cols, crn, rain, wind)`` where ``df`` is
        the raw DataFrame, ``dur_cols``/``dly_cols`` are the column names
        ending in ``_DUR`` / ``_DLV_DLY``, and ``crn``/``rain``/``wind`` are
        the ``CRN_FAL`` / ``RAIN_HOLD`` / ``WIND_HOLD`` columns parsed from
        stringified Python lists (malformed or non-list cells become ``[]``).
    """
    df = pd.read_csv(file_info['filename'])

    dur_cols = [c for c in df.columns if c.endswith("_DUR")]
    dly_cols = [c for c in df.columns if c.endswith("_DLV_DLY")]

    def parse_list_column(series):
        """Parse each cell as a Python literal; fall back to [] on failure."""
        out = []
        for v in series:
            try:
                lst = ast.literal_eval(str(v))
            except (ValueError, SyntaxError):
                # ast.literal_eval raises these for malformed/non-literal text
                lst = None
            out.append(lst if isinstance(lst, list) else [])
        return out

    crn = parse_list_column(df["CRN_FAL"])
    rain = parse_list_column(df["RAIN_HOLD"])
    wind = parse_list_column(df["WIND_HOLD"])

    return df, dur_cols, dly_cols, crn, rain, wind


def print_with_info(message, file_info, level=0):
    """Print *message* tagged with the dataset's seed/size and indented by *level*."""
    tag = f"[Seed:{file_info['seed']}, Size:{file_info['sample_size']}]"
    print(tag + "  " * level + message)


# ===============================
# 2. Define True Distributions (MUST match sampling_val_gpt.py)
# ===============================

def verify_distribution_parameters(file_info):
    """Echo the distribution parameters for manual verification.

    Prints the PERT->Beta parameters for a representative subset of the
    duration columns (first 5 only) and the truncated-normal parameters for
    delivery delays. These MUST match the sampling program
    (sampling_val_gpt.py); the printout exists so a reviewer can compare.

    Args:
        file_info: Dataset descriptor dict used only for the print prefix.
    """
    print_with_info("=== Distribution Parameters Verification ===", file_info)

    # Duration distribution verification
    print_with_info("Duration distributions (PERT -> Beta):", file_info, 1)
    special_floors = ['FLR01_DUR', 'FLR02_DUR', 'FLR18_DUR']
    for col in ['FLR01_DUR', 'FLR02_DUR', 'FLR03_DUR', 'FLR04_DUR', 'FLR18_DUR']:  # Show first 5 only
        if col in special_floors:
            # Special floors use a shifted PERT support
            a_pert, c_pert = 5.0, 9.0
            status = "Special"
        else:
            a_pert, c_pert = 4.0, 8.0
            status = "Standard"
        print_with_info(f"{col}: {status} PERT[{a_pert}, {c_pert}] -> Beta(α=2, β=4)", file_info, 2)

    # Delay distribution verification
    print_with_info("Delay distribution:", file_info, 1)
    print_with_info("TruncNormal(μ=-4, σ=2, trunc=[-10, 10])", file_info, 2)
    print_with_info("Standardized: a_trunc=-3, b_trunc=7", file_info, 2)


def transform_duration_to_prob_space(X, dur_cols):
    """Map duration samples to probability space U = F(X).

    Each column is rescaled from its PERT support [a, c] to [0, 1] and
    pushed through the Beta(2, 4) CDF — the Beta form equivalent to the
    PERT distribution used for sampling.

    Args:
        X: (n_samples, n_features) array of sampled durations.
        dur_cols: Column names aligned with the columns of ``X``; the name
            decides which PERT support applies.

    Returns:
        Float array of the same shape as ``X`` with values in [0, 1].
    """
    U = np.zeros_like(X, dtype=float)

    for j, col in enumerate(dur_cols):
        # Determine PERT parameters based on column name
        if col in ('FLR01_DUR', 'FLR02_DUR', 'FLR18_DUR'):
            a_pert, c_pert = 5.0, 9.0
        else:
            a_pert, c_pert = 4.0, 8.0

        # PERT -> Beta(α=2, β=4) scaled to [a, c]; clip guards against
        # tiny numerical excursions outside the support
        z = np.clip((X[:, j] - a_pert) / (c_pert - a_pert), 0.0, 1.0)
        U[:, j] = beta.cdf(z, 2.0, 4.0)

    return U


def transform_delay_to_prob_space(X):
    """Map delay samples to probability space U = F(X).

    Delays are modelled as TruncNormal(mu=-4, sigma=2) truncated to
    [-10, 10]; this evaluates that distribution's CDF elementwise.
    """
    mu, sigma = -4.0, 2.0
    lower, upper = -10.0, 10.0
    # scipy's truncnorm expects the truncation bounds in standardized units
    return truncnorm.cdf(
        X,
        a=(lower - mu) / sigma,  # -3
        b=(upper - mu) / sigma,  # 7
        loc=mu,
        scale=sigma,
    )


# ===============================
# 3. Enhanced LHS Coverage Analysis in Probability Space
# ===============================

def enhanced_spatial_metrics(U, label, file_info):
    """Report nearest-neighbour statistics for a point set in the unit hypercube.

    Prints the mean/std/CV of each point's distance to its nearest other
    point and compares the mean to the heuristic ideal spacing
    ``(1/n)**(1/d)`` for an LHS design.

    Returns:
        dict with ``mean_nn_distance``, ``cv_nn_distance`` and
        ``ideal_nn_distance``.
    """
    print_with_info(f"Enhanced Spatial Metrics for {label}:", file_info, 1)

    # k=2 because column 0 of the result is each point's distance to itself
    neighbours = NearestNeighbors(n_neighbors=2).fit(U)
    dist_matrix, _ = neighbours.kneighbors(U)
    nearest = dist_matrix[:, 1]

    avg_nn = np.mean(nearest)
    sd_nn = np.std(nearest)
    cv = sd_nn / avg_nn  # dispersion relative to the mean

    print_with_info(f"Nearest Neighbor distances:", file_info, 2)
    print_with_info(f"Mean = {avg_nn:.4f}, Std = {sd_nn:.4f}", file_info, 3)
    print_with_info(f"Coefficient of Variation = {cv:.4f}", file_info, 3)

    # Heuristic ideal NN spacing in a d-dimensional unit hypercube
    n_points, dims = U.shape
    ideal = (1.0 / n_points) ** (1.0 / dims)
    print_with_info(f"Ideal NN distance for LHS: {ideal:.4f}", file_info, 2)
    print_with_info(f"Actual/Ideal ratio: {avg_nn / ideal:.4f}", file_info, 2)

    return {
        'mean_nn_distance': avg_nn,
        'cv_nn_distance': cv,
        'ideal_nn_distance': ideal
    }


def plot_voronoi_diagram(U, label, file_info):
    """Project points to 2-D with PCA and plot their Voronoi tessellation.

    Does nothing when the data has fewer than two dimensions. The figure is
    saved under the dataset's subdirectory of ``output_dir_validation_lhs``
    and also shown interactively.
    """
    if U.shape[1] < 2:
        return

    # Reduce to two principal components for a plottable 2-D view
    projector = PCA(n_components=2)
    points_2d = projector.fit_transform(U)

    vor = Voronoi(points_2d)

    plt.figure(figsize=(10, 8))
    from scipy.spatial import voronoi_plot_2d
    voronoi_plot_2d(vor, show_vertices=False, show_points=True, point_size=15)
    # Overlay the sample points on top of the tessellation
    plt.scatter(points_2d[:, 0], points_2d[:, 1], c='red', s=20, alpha=0.8)
    plt.title(f"{label} - Voronoi Diagram\n(Seed: {file_info['seed']}, Sample Size: {file_info['sample_size']})")
    plt.xlabel("PC1")
    plt.ylabel("PC2")
    plt.grid(True, alpha=0.3)

    plt.savefig(output_dir_validation_lhs / f"{file_info['label']}/{label}_Voronoi.png", dpi=300, bbox_inches='tight')
    plt.show()


def validate_lhs_coverage(U, label, file_info):
    """
    Validate LHS coverage in probability space [0,1]^d
    Only for LHS-sampled variables: Duration and Delivery Delay

    Runs per-dimension KS uniformity tests, pairwise-distance and
    nearest-neighbour spatial metrics, and produces a three-panel figure
    (PCA projection, marginal histograms, distance histogram) plus an
    optional Voronoi diagram. Figures are saved under the dataset's
    subdirectory of ``output_dir_validation_lhs`` and shown interactively.

    Args:
        U: (n_samples, n_features) array of CDF-transformed samples; each
            column should be ~Uniform(0, 1) if the LHS design is sound.
        file_info: Dataset descriptor dict with 'seed', 'sample_size' and
            'label' keys (see get_dataset_files).

    Returns:
        dict with the KS p-values, their mean, a quality grade string,
        the raw pairwise distances and the nearest-neighbour metrics.
    """
    print_with_info(f"=== LHS Coverage Validation for {label} ===", file_info)
    n_samples, n_features = U.shape
    print_with_info(f"LHS Sampling Dimensions: {n_features}, Samples: {n_samples}", file_info, 1)

    # 1. KS test for uniformity in probability space (each column vs U(0,1))
    ks_pvals = [kstest(U[:, i], 'uniform').pvalue for i in range(n_features)]
    mean_p = np.mean(ks_pvals)
    min_p = np.min(ks_pvals)
    max_p = np.max(ks_pvals)
    prop_pass = np.mean(np.array(ks_pvals) > 0.05)

    print_with_info(f"KS Test Results (Probability Space Uniformity):", file_info, 1)
    print_with_info(f"Mean p-value = {mean_p:.4f}, Range = [{min_p:.4f}, {max_p:.4f}]", file_info, 2)
    print_with_info(f"Proportion passing (p > 0.05): {prop_pass:.1%}", file_info, 2)

    # Assess LHS sampling quality
    # NOTE(review): thresholds (0.1/0.05 mean p, 90%/80% pass rate) are
    # heuristic grading cut-offs, not formal significance levels.
    if mean_p > 0.1 and prop_pass > 0.9:
        quality = "EXCELLENT"
    elif mean_p > 0.05 and prop_pass > 0.8:
        quality = "GOOD"
    else:
        quality = "NEEDS IMPROVEMENT"
    print_with_info(f"LHS Sampling Quality: {quality}", file_info, 2)

    # 2. Distance analysis in probability space (all pairwise distances)
    dists = pdist(U)
    print_with_info(f"Spatial Distribution in Probability Space:", file_info, 1)
    print_with_info(
        f"Pairwise distances - Min: {np.min(dists):.4f}, Mean: {np.mean(dists):.4f}, Max: {np.max(dists):.4f}",
        file_info, 2)

    # 3. Enhanced spatial metrics (nearest-neighbour statistics)
    spatial_metrics = enhanced_spatial_metrics(U, label, file_info)

    # 4. Visualizations for LHS coverage validation
    plt.figure(figsize=(15, 5))

    # (a) PCA projection - check multidimensional space uniformity
    plt.subplot(1, 3, 1)
    pca = PCA(n_components=2)
    proj = pca.fit_transform(U)
    plt.scatter(proj[:, 0], proj[:, 1], s=12, alpha=0.6, edgecolor='none')
    plt.title(f"LHS: {label}\nPCA Projection (Seed: {file_info['seed']}, N={file_info['sample_size']})")
    plt.xlabel(f"PC1 ({pca.explained_variance_ratio_[0]:.1%})")
    plt.ylabel(f"PC2 ({pca.explained_variance_ratio_[1]:.1%})")
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, '(a)', transform=plt.gca().transAxes,
             fontsize=14, fontweight='bold', va='top', ha='left')

    # (b) Marginal distributions - verify uniformity in each dimension
    plt.subplot(1, 3, 2)
    for i in range(min(5, n_features)):  # plot at most 5 dimensions for legibility
        plt.hist(U[:, i], bins=20, alpha=0.6, density=True, label=f'Dim {i + 1}')
    plt.axhline(1.0, color='red', linestyle='--', linewidth=1.5, label='Ideal Uniform')
    plt.xlabel('CDF Value')
    plt.ylabel('Density')
    plt.title(f'Marginal Distributions\n(Seed: {file_info["seed"]}, N={file_info["sample_size"]})')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, '(b)', transform=plt.gca().transAxes,
             fontsize=14, fontweight='bold', va='top', ha='left')

    # (c) Distance distribution - check spatial filling
    plt.subplot(1, 3, 3)
    plt.hist(dists, bins=30, alpha=0.7, density=True, edgecolor='black')
    plt.xlabel('Pairwise Distance')
    plt.ylabel('Density')
    plt.title(f'Inter-point Distance Distribution\n(Seed: {file_info["seed"]}, N={file_info["sample_size"]})')
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, '(c)', transform=plt.gca().transAxes,
             fontsize=14, fontweight='bold', va='top', ha='left')

    plt.tight_layout()
    plt.savefig(output_dir_validation_lhs / f"{file_info['label']}/LHS_Coverage_{label}.png", dpi=300, bbox_inches='tight')
    plt.show()

    # 5. Voronoi diagram - advanced spatial analysis (optional)
    if n_features >= 2:
        plot_voronoi_diagram(U, f"LHS_{label}", file_info)

    return {
        'ks_pvals': ks_pvals,
        'mean_ks_p': mean_p,
        'ks_quality': quality,
        'pairwise_distances': dists,
        'spatial_metrics': spatial_metrics
    }


# ===============================
# 4. Enhanced Independence Analysis with Sampling Error Consideration
# ===============================
def enhanced_independence_analysis(df, n_samples=1000, file_info=None):
    """Enhanced independence analysis with sampling error consideration.

    Computes four pairwise association measures (Pearson, Spearman,
    distance correlation, mutual information) between all columns of
    ``df``, flags Pearson correlations exceeding the sampling-error
    critical values, prints a pair-by-pair report, and saves annotated
    heatmaps plus CSV matrices per measure.

    Args:
        df: DataFrame of derived features. The hard-coded ``pairs`` list
            below assumes the 5-column order Duration_mean, Delay_mean,
            Tower_count, Rain_ratio, Wind_ratio built by the caller.
        n_samples: Sample size used for the correlation standard error.
        file_info: Dataset descriptor dict. NOTE(review): although this
            defaults to None, ``file_info['label']`` is dereferenced
            unconditionally when saving figures/CSVs below, so passing
            None would raise — confirm all callers supply it.

    Returns:
        dict with the correlation matrices, Pearson p-values, the
        standard error, 95%/99% critical values and the significant pairs.
    """
    print_with_info("=== Enhanced Independence Analysis with Sampling Error Consideration ===", file_info)

    # Calculate sampling error metrics: under H0 (zero correlation) the
    # sample correlation has SE ≈ 1/sqrt(N-1); 1.96 and 2.576 are the
    # two-sided normal quantiles for 95% and 99% confidence.
    se_correlation = 1.0 / np.sqrt(n_samples - 1)
    critical_value_95 = 1.96 * se_correlation
    critical_value_99 = 2.576 * se_correlation

    print_with_info(f"Sample size: {n_samples}", file_info, 1)
    print_with_info(f"Correlation standard error: {se_correlation:.4f}", file_info, 1)
    print_with_info(f"95% confidence interval for zero correlation: ±{critical_value_95:.4f}", file_info, 1)
    print_with_info(f"99% confidence interval for zero correlation: ±{critical_value_99:.4f}", file_info, 1)

    cols = df.columns.tolist()
    n = len(cols)

    # Symmetric matrices; np.eye gives the correct unit diagonal for the
    # correlation measures. For p_values/mi_mat the diagonal 1.0 entries
    # are placeholders that are never read below.
    pearson_mat = np.eye(n)
    spearman_mat = np.eye(n)
    dcor_mat = np.eye(n)
    mi_mat = np.eye(n)
    p_values = np.eye(n)

    # Calculate all correlation metrics for each unordered pair (i < j)
    for i in range(n):
        for j in range(i + 1, n):
            x = df.iloc[:, i].values
            y = df.iloc[:, j].values

            # Pearson correlation with p-value
            r, p_val = pearsonr(x, y)
            pearson_mat[i, j] = pearson_mat[j, i] = r
            p_values[i, j] = p_values[j, i] = p_val

            # Spearman (rank-based, monotone dependence)
            rho, _ = spearmanr(x, y)
            spearman_mat[i, j] = spearman_mat[j, i] = rho

            # Distance Correlation (0 iff independent, captures nonlinearity)
            dc = dcor.distance_correlation(x, y)
            dcor_mat[i, j] = dcor_mat[j, i] = dc

            # Mutual Information
            # NOTE(review): mutual_info_regression uses a randomized k-NN
            # estimator; without a fixed random_state results can vary
            # slightly between runs — confirm if reproducibility matters.
            mi = mutual_info_regression(x.reshape(-1, 1), y)[0]
            mi_mat[i, j] = mi_mat[j, i] = mi

    # Statistical significance analysis: compare |r| to the critical values
    print_with_info(f"📊 Statistical Significance Analysis:", file_info, 1)
    significant_pairs = []
    for i in range(n):
        for j in range(i + 1, n):
            r = pearson_mat[i, j]
            p_val = p_values[i, j]

            significant_95 = abs(r) > critical_value_95
            significant_99 = abs(r) > critical_value_99

            if significant_95:
                sig_level = "**" if significant_99 else "*"
                status = f"r = {r:.3f} {sig_level} (p = {p_val:.3f})"
                significant_pairs.append((cols[i], cols[j], r, p_val))
            else:
                status = f"r = {r:.3f} (p = {p_val:.3f}) - not significant"

            print_with_info(f"{cols[i]:15} vs {cols[j]:15}: {status}", file_info, 2)

    # Detailed analysis of 10 variable pairs
    # (index pairs assume the fixed 5-column feature order noted above)
    print_with_info(f"🔍 Detailed Analysis of 10 Variable Pairs:", file_info, 1)
    pairs = [
        (0, 1, "Duration_mean vs Delay_mean", "Construction progress vs Material supply"),
        (0, 2, "Duration_mean vs Tower_count", "Construction progress vs Equipment reliability"),
        (0, 3, "Duration_mean vs Rain_ratio", "Construction progress vs Rain impact"),
        (0, 4, "Duration_mean vs Wind_ratio", "Construction progress vs Wind impact"),
        (1, 2, "Delay_mean vs Tower_count", "Material supply vs Equipment reliability"),
        (1, 3, "Delay_mean vs Rain_ratio", "Material supply vs Rain impact"),
        (1, 4, "Delay_mean vs Wind_ratio", "Material supply vs Wind impact"),
        (2, 3, "Tower_count vs Rain_ratio", "Equipment failure vs Rain weather"),
        (2, 4, "Tower_count vs Wind_ratio", "Equipment failure vs Wind weather"),
        (3, 4, "Rain_ratio vs Wind_ratio", "Different weather type impacts")
    ]

    for i, j, label, meaning in pairs:
        r = pearson_mat[i, j]
        p_val = p_values[i, j]
        significant = abs(r) > critical_value_95

        if significant:
            status = "⚠️ POTENTIALLY CORRELATED" if abs(r) > critical_value_99 else "⚠️ WEAKLY CORRELATED"
        else:
            status = "✅ INDEPENDENT (within sampling error)"

        print_with_info(f"{label:35} | {meaning:45} | r = {r:6.3f} | {status}", file_info, 2)

    # Plot and save correlation matrices with significance markers
    mats = [pearson_mat, spearman_mat, dcor_mat, mi_mat]
    names = ["Pearson", "Spearman", "DistanceCorr", "MutualInfo"]

    for name, mat in zip(names, mats):
        plt.figure(figsize=(8, 6))
        # Mask the strict upper triangle so only the lower half is drawn
        mask = np.triu(np.ones_like(mat, dtype=bool), k=1)

        # Create annotation matrix with significance stars
        # (stars only on the Pearson matrix; note annotations are built
        # for the upper triangle i < j while the heatmap shows the lower
        # half — seaborn pairs annot/mask cell-by-cell)
        annot_mat = np.empty_like(mat, dtype=object)
        for i in range(n):
            for j in range(n):
                if i == j:
                    annot_mat[i, j] = "1.000"
                elif i < j:
                    if name == "Pearson" and abs(mat[i, j]) > critical_value_95:
                        star = "**" if abs(mat[i, j]) > critical_value_99 else "*"
                        annot_mat[i, j] = f"{mat[i, j]:.3f}{star}"
                    else:
                        annot_mat[i, j] = f"{mat[i, j]:.3f}"
                else:
                    annot_mat[i, j] = ""

        sns.heatmap(mat, annot=annot_mat, fmt="", cmap="coolwarm", center=0,
                    xticklabels=cols, yticklabels=cols, mask=mask,
                    cbar_kws={'label': f'{name} Correlation'})

        # Add seed and sample-size information to the title
        if file_info:
            title_suffix = f"\n(Seed: {file_info['seed']}, Sample Size: {file_info['sample_size']})"
        else:
            title_suffix = f"\n(Sample Size: {n_samples})"

        plt.title(f"{name} Correlation Matrix\n(* p<0.05, ** p<0.01){title_suffix}")
        plt.tight_layout()
        # NOTE(review): file_info is dereferenced here even when the
        # title branch above allowed it to be None.
        plt.savefig(output_dir_validation_lhs / f"{file_info['label']}/{name}_matrix_enhanced.png", dpi=300)
        plt.show()

        pd.DataFrame(mat, index=cols, columns=cols).to_csv(output_dir_validation_lhs / f"{file_info['label']}/{name}_matrix.csv")

    return {
        'correlation_matrices': dict(zip(names, mats)),
        'p_values': p_values,
        'standard_error': se_correlation,
        'critical_values': {
            '95%': critical_value_95,
            '99%': critical_value_99
        },
        'significant_pairs': significant_pairs
    }


# ===============================
# 5. Enhanced Results Interpretation
# ===============================
def interpret_results(summary, independence_results, res_dur, res_dly, file_info):
    """Enhanced results interpretation with clear scope separation.

    Prints a human-readable assessment of (a) LHS sampling quality for the
    Duration/Delay variables and (b) independence of the five derived
    features, ending with overall recommendations.

    Args:
        summary: Flat metrics dict built in analyze_multiple_datasets.
        independence_results: Return value of enhanced_independence_analysis.
        res_dur, res_dly: Coverage-validation results; currently unused but
            kept for interface compatibility with existing callers.
        file_info: Dataset descriptor dict used for the print prefix.
    """
    print_with_info("=" * 70, file_info)
    print_with_info("ENHANCED RESULTS INTERPRETATION", file_info)
    print_with_info("=" * 70, file_info)

    print_with_info(f"Dataset: {file_info['filename']}", file_info, 1)
    print_with_info(f"Seed: {file_info['seed']}, Sample Size: {file_info['sample_size']}", file_info, 1)
    print_with_info("NOTE: Independence analysis includes non-LHS derived features.", file_info, 1)
    print_with_info("      Only Duration and Delay are subject to LHS quality assessment.", file_info, 1)
    print_with_info("=" * 70, file_info)

    # 🟢 LHS COVERAGE QUALITY ASSESSMENT (Duration & Delay only)
    print_with_info(f"🟢 LHS COVERAGE QUALITY ASSESSMENT", file_info)
    print_with_info(f"Scope: Duration & Delivery Delay sampling only", file_info, 1)
    print_with_info(f"Duration Sampling (18 dimensions):", file_info, 1)
    ks_dur = summary['LHS_Mean_KS_p_Duration']
    dur_quality = "EXCELLENT" if ks_dur > 0.1 else "GOOD" if ks_dur > 0.05 else "NEEDS IMPROVEMENT"
    print_with_info(f"• Quality: {dur_quality} (mean KS p = {ks_dur:.4f})", file_info, 2)
    print_with_info(f"• {summary['LHS_Prop_KS_gt_0.05_Duration']:.1%} of dimensions pass uniformity test", file_info, 2)

    print_with_info(f"Delay Sampling (18 dimensions):", file_info, 1)
    ks_dly = summary['LHS_Mean_KS_p_Delay']
    dly_quality = "EXCELLENT" if ks_dly > 0.1 else "GOOD" if ks_dly > 0.05 else "NEEDS IMPROVEMENT"
    print_with_info(f"• Quality: {dly_quality} (mean KS p = {ks_dly:.4f})", file_info, 2)
    print_with_info(f"• {summary['LHS_Prop_KS_gt_0.05_Delay']:.1%} of dimensions pass uniformity test", file_info, 2)

    # Spatial filling assessment (nearest-neighbour distance ratios)
    print_with_info(f"📈 SPATIAL FILLING ASSESSMENT (LHS Sampling):", file_info)
    dur_ratio = summary['LHS_NN_ratio_Duration']
    dly_ratio = summary['LHS_NN_ratio_Delay']
    print_with_info(f"Duration: Actual/Ideal NN distance ratio = {dur_ratio:.3f}", file_info, 1)
    print_with_info(f"Delay:    Actual/Ideal NN distance ratio = {dly_ratio:.3f}", file_info, 1)
    print_with_info(f"Note: Ratio close to 1.0 indicates good spatial distribution", file_info, 1)

    # 🔵 INDEPENDENCE ASSESSMENT (All 5 variable types)
    print_with_info(f"🔵 INDEPENDENCE ASSESSMENT", file_info)
    print_with_info(f"Scope: All 5 variable types (including derived features)", file_info, 1)

    max_abs_corr = summary['CORR_Max_abs_correlation']
    se_corr = summary['CORR_Standard_Error']

    print_with_info(f"Maximum absolute Pearson correlation: {max_abs_corr:.3f}", file_info, 1)
    print_with_info(f"Correlation standard error (N={file_info['sample_size']}): ±{se_corr:.3f}", file_info, 1)
    print_with_info(
        f"95% confidence interval for zero correlation: ±{independence_results['critical_values']['95%']:.3f}",
        file_info, 1)

    # Independence judgment based on sampling error
    if max_abs_corr < se_corr:
        indep_quality = "VERY HIGH"
        explanation = "All correlations within 1 standard error"
    elif max_abs_corr < independence_results['critical_values']['95%']:
        indep_quality = "HIGH"
        explanation = "All correlations within 95% confidence interval"
    elif max_abs_corr < 0.3:
        indep_quality = "MODERATE"
        explanation = "Some small correlations detected"
    else:
        indep_quality = "LOW"
        explanation = "Substantial correlations present"

    print_with_info(f"Independence quality: {indep_quality}", file_info, 1)
    print_with_info(f"Explanation: {explanation}", file_info, 1)

    # Significant pairs analysis
    sig_pairs = independence_results['significant_pairs']
    if len(sig_pairs) == 0:
        print_with_info(f"No statistically significant correlations found at 95% confidence level", file_info, 1)
    else:
        print_with_info(f"Statistically significant pairs ({len(sig_pairs)}):", file_info, 1)
        for var1, var2, r, p in sig_pairs:
            print_with_info(f"• {var1} vs {var2}: r = {r:.3f}, p = {p:.3f}", file_info, 2)

    # Mutual information analysis
    mi_mat = independence_results['correlation_matrices']['MutualInfo']
    # Subtract the identity to drop the placeholder 1.0 diagonal; size it
    # from the matrix itself rather than hard-coding 5 features.
    max_mi = np.max(mi_mat - np.eye(mi_mat.shape[0]))
    print_with_info(f"💡 MUTUAL INFORMATION ANALYSIS:", file_info)
    print_with_info(f"Maximum mutual information: {max_mi:.4f}", file_info, 1)
    if max_mi < 0.01:
        print_with_info(f"→ Mutual information close to 0, confirming no real information dependency", file_info, 1)

    # 🎯 OVERALL RECOMMENDATIONS
    print_with_info(f"🎯 OVERALL RECOMMENDATIONS:", file_info)
    if (ks_dur > 0.05 and ks_dly > 0.05 and
            max_abs_corr < independence_results['critical_values']['95%'] and
            len(sig_pairs) == 0):
        print_with_info("✅ EXCELLENT: Both LHS sampling and variable independence are ACCEPTABLE", file_info, 1)
        print_with_info("✅ LHS sampling quality meets requirements for simulation", file_info, 1)
        print_with_info("✅ Variables are effectively independent for practical purposes", file_info, 1)
    else:
        print_with_info("⚠️  Some aspects may need attention:", file_info, 1)
        if ks_dur <= 0.05 or ks_dly <= 0.05:
            print_with_info("- LHS Sampling: Consider increasing samples for duration/delay variables", file_info, 2)
        if len(sig_pairs) > 0 or max_abs_corr >= 0.3:
            print_with_info("- Independence: Review potentially correlated variable pairs", file_info, 2)


# ===============================
# 6. Main Execution for Multiple Datasets
# ===============================
def analyze_multiple_datasets():
    """Run the full validation pipeline over every available dataset file.

    For each sample-size/seed CSV found on disk: loads the data, validates
    LHS coverage of the Duration and Delay variables in probability space,
    runs the derived-feature independence analysis, prints an
    interpretation, and writes per-dataset plus combined summary CSVs.
    Errors in a single dataset are reported and skipped.
    """
    # Collect every dataset file that exists on disk
    dataset_files = get_dataset_files()

    if not dataset_files:
        print("No dataset files found! Please generate datasets first.")
        return

    all_summaries = []

    for file_info in dataset_files:
        print(f"\n{'=' * 80}")
        print(f"ANALYZING DATASET: {file_info['filename']}")
        print(f"Sample Size: {file_info['sample_size']}, Seed: {file_info['seed']}")
        print(f"{'=' * 80}")

        try:
            # Create a dedicated output subdirectory for this dataset
            os.makedirs(output_dir_validation_lhs / f"{file_info['label']}", exist_ok=True)

            # Load the data and parsed list columns
            df, dur_cols, dly_cols, crn, rain, wind = load_single_dataset(file_info)

            # Echo the distribution parameters for manual verification
            verify_distribution_parameters(file_info)

            print_with_info("=" * 60, file_info)
            print_with_info("ANALYSIS SCOPE DEFINITION", file_info)
            print_with_info("=" * 60, file_info)
            print_with_info("🟢 LHS Sampling Quality Validation: Duration & Delivery Delay only", file_info, 1)
            print_with_info("🔵 Derived Feature Correlation Analysis: All 5 variable types", file_info, 1)
            print_with_info("=" * 60, file_info)

            # 🟢 LHS SAMPLING QUALITY VALIDATION (ONLY for true LHS variables)
            print_with_info("🟢 " + "=" * 50, file_info)
            print_with_info("🟢 LHS SAMPLING QUALITY VALIDATION", file_info)
            print_with_info("🟢 Scope: Duration (18D) & Delivery Delay (18D) only", file_info, 1)
            print_with_info("🟢 " + "=" * 50, file_info)

            X_dur = df[dur_cols].values
            X_dly = df[dly_cols].values

            # Map samples to probability space via their assumed CDFs
            U_dur = transform_duration_to_prob_space(X_dur, dur_cols)
            U_dly = transform_delay_to_prob_space(X_dly)

            # Validate ONLY these two LHS-sampled variables
            res_dur = validate_lhs_coverage(U_dur, "Duration_LHS", file_info)
            res_dly = validate_lhs_coverage(U_dly, "Delay_LHS", file_info)

            # 🔵 DERIVED FEATURE CORRELATION ANALYSIS (NOT part of LHS validation!)
            print_with_info("🔵 " + "=" * 50, file_info)
            print_with_info("🔵 DERIVED FEATURE CORRELATION ANALYSIS", file_info)
            print_with_info("🔵 Scope: All 5 variable types (including non-LHS features)", file_info, 1)
            print_with_info("🔵 " + "=" * 50, file_info)

            # Feature extraction for correlation analysis
            # (empty lists contribute 0.0 to the ratio features)
            tower_count = np.array([len(x) for x in crn])
            rain_ratio = np.array([np.mean(x) if len(x) > 0 else 0.0 for x in rain])
            wind_ratio = np.array([np.mean(x) if len(x) > 0 else 0.0 for x in wind])

            features = pd.DataFrame({
                "Duration_mean": df[dur_cols].mean(axis=1),
                "Delay_mean": df[dly_cols].mean(axis=1),
                "Tower_count": tower_count,
                "Rain_ratio": rain_ratio,
                "Wind_ratio": wind_ratio
            })

            independence_results = enhanced_independence_analysis(features, n_samples=file_info['sample_size'],
                                                                  file_info=file_info)

            # Save summary metrics with clear scope labels
            summary = {
                'filename': file_info['filename'],
                'sample_size': file_info['sample_size'],
                'seed': file_info['seed'],
                # 🟢 LHS Sampling Quality Metrics
                "LHS_Mean_KS_p_Duration": res_dur['mean_ks_p'],
                "LHS_Mean_KS_p_Delay": res_dly['mean_ks_p'],
                "LHS_Min_KS_p_Duration": np.min(res_dur['ks_pvals']),
                "LHS_Min_KS_p_Delay": np.min(res_dly['ks_pvals']),
                "LHS_Prop_KS_gt_0.05_Duration": np.mean(np.array(res_dur['ks_pvals']) > 0.05),
                "LHS_Prop_KS_gt_0.05_Delay": np.mean(np.array(res_dly['ks_pvals']) > 0.05),
                "LHS_NN_ratio_Duration": res_dur['spatial_metrics']['mean_nn_distance'] / res_dur['spatial_metrics'][
                    'ideal_nn_distance'],
                "LHS_NN_ratio_Delay": res_dly['spatial_metrics']['mean_nn_distance'] / res_dly['spatial_metrics'][
                    'ideal_nn_distance'],

                # 🔵 Correlation Analysis Metrics
                # (subtracting eye(5) removes the unit diagonal before taking the max)
                "CORR_Max_abs_correlation": np.max(
                    np.abs(independence_results['correlation_matrices']['Pearson'] - np.eye(5))),
                "CORR_Significant_pairs_count": len(independence_results['significant_pairs']),
                "CORR_Standard_Error": independence_results['standard_error']
            }

            all_summaries.append(summary)

            # Enhanced results interpretation with scope clarification
            interpret_results(summary, independence_results, res_dur, res_dly, file_info)

            # Persist this dataset's summary metrics
            pd.DataFrame([summary]).to_csv(output_dir_validation_lhs / f"{file_info['label']}/summary_metrics_enhanced.csv", index=False)

            print_with_info(
                f"Enhanced summary metrics saved to output/{file_info['label']}/summary_metrics_enhanced.csv",
                file_info)

        except Exception as e:
            # Boundary handler: report and move on to the next dataset
            print(f"❌ Error analyzing {file_info['filename']}: {e}")
            continue

    # Save the combined summary across all datasets
    if all_summaries:
        all_summaries_df = pd.DataFrame(all_summaries)
        all_summaries_df.to_csv("output/all_datasets_summary.csv", index=False)
        print(f"\n✅ All datasets analysis completed!")
        print(f"✅ Individual results saved to respective output directories")
        print(f"✅ Combined summary saved to output/all_datasets_summary.csv")
    else:
        print("❌ No datasets were successfully analyzed")


def main():
    """Command-line entry point: dispatch on the requested analysis mode."""
    parser = argparse.ArgumentParser(description='LHS Sampling Validation for Multiple Datasets')
    parser.add_argument('--mode', choices=['single', 'multiple'], default='multiple',
                        help='Analysis mode: single (analyze one file) or multiple (analyze 16 datasets)')
    args = parser.parse_args()

    if args.mode != 'single':
        analyze_multiple_datasets()
    else:
        # Single-file mode is a stub in this version
        print("Single file analysis mode not implemented in this version.")
        print("Please use multiple mode to analyze all 16 datasets.")


# Script entry point.
if __name__ == "__main__":
    main()