# -*- coding: utf-8 -*-
"""
LHS Sampling Validation: Correct Coverage & Independence Analysis
Author: Tongguang Si
"""

import os
import pandas as pd
import numpy as np
import ast
from scipy.stats import kstest, beta, truncnorm, pearsonr, spearmanr, t
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist
import matplotlib.pyplot as plt
import seaborn as sns
import dcor  # pip install dcor
from sklearn.feature_selection import mutual_info_regression
from sklearn.neighbors import NearestNeighbors
from scipy.spatial import Voronoi

# ===============================
# 0. Setup
# ===============================
# Create the output directory up front so every savefig/to_csv below succeeds.
os.makedirs("output", exist_ok=True)
INPUT_FILE = "lhs_sampling_results.csv"

# ===============================
# 1. Load Data
# ===============================
# Expected columns: per-floor durations (*_DUR), delivery delays (*_DLV_DLY),
# and stringified-list columns CRN_FAL / RAIN_HOLD / WIND_HOLD.
df = pd.read_csv(INPUT_FILE)

# Column groups for the two LHS-sampled variable families.
dur_cols = [c for c in df.columns if c.endswith("_DUR")]
dly_cols = [c for c in df.columns if c.endswith("_DLV_DLY")]


def parse_list_column(series):
    """Parse a column of stringified Python lists into actual lists.

    Each value is run through ``ast.literal_eval``; anything that fails to
    parse or does not evaluate to a list (NaN, scalars, malformed text) is
    replaced by an empty list so downstream ``len``/``mean`` calls never crash.

    Parameters
    ----------
    series : iterable
        Values to parse, typically a pandas Series of strings like "[1, 2]".

    Returns
    -------
    list[list]
        One (possibly empty) list per input element, in order.
    """
    out = []
    for v in series:
        try:
            lst = ast.literal_eval(str(v))
        # Only the parse errors literal_eval documents; a bare `except:`
        # would also swallow KeyboardInterrupt/SystemExit.
        except (ValueError, TypeError, SyntaxError):
            lst = None
        out.append(lst if isinstance(lst, list) else [])
    return out


# Parse the three list-valued columns into per-run Python lists.
# Presumably crane-failure events and rain/wind hold flags — TODO confirm
# semantics against the sampling program that wrote the CSV.
crn = parse_list_column(df["CRN_FAL"])
rain = parse_list_column(df["RAIN_HOLD"])
wind = parse_list_column(df["WIND_HOLD"])


# ===============================
# 2. Define True Distributions (MUST match sampling_val_gpt.py)
# ===============================

def verify_distribution_parameters():
    """Print a verification report of the assumed sampling distributions.

    Echoes the PERT/Beta parameters assumed for the duration columns and the
    truncated-normal parameters assumed for delivery delays, then prints the
    observed min/max of the loaded data so a reviewer can check consistency
    with the sampling program (must match sampling_val_gpt.py).
    Reads module-level globals ``df``, ``dur_cols`` and ``dly_cols``.
    """
    print("\n=== Distribution Parameters Verification ===")

    # Duration distribution verification: three floors use the wider [5, 9]
    # PERT range; everything else uses [4, 8].
    print("Duration distributions (PERT -> Beta):")
    special_floors = {'FLR01_DUR', 'FLR02_DUR', 'FLR18_DUR'}
    for col in dur_cols[:5]:  # Show first 5 only
        if col in special_floors:
            a_pert, c_pert = 5.0, 9.0
            status = "Special"
        else:
            a_pert, c_pert = 4.0, 8.0
            status = "Standard"
        print(f"  {col}: {status} PERT[{a_pert}, {c_pert}] -> Beta(α=2, β=4)")

    # Delay distribution verification
    print("\nDelay distribution:")
    print("  TruncNormal(μ=-4, σ=2, trunc=[-10, 10])")
    print("  Standardized: a_trunc=-3, b_trunc=7")

    # Observed ranges should fall inside the theoretical supports above.
    print(f"\nData range verification:")
    print(f"  Duration: min={df[dur_cols].min().min():.2f}, max={df[dur_cols].max().max():.2f}")
    print(f"  Delay: min={df[dly_cols].min().min():.2f}, max={df[dly_cols].max().max():.2f}")


def transform_duration_to_prob_space(X, dur_cols):
    """Map duration samples to probability space via their PERT/Beta CDF.

    Each column is assumed drawn from a PERT distribution on [a, c] whose
    standardized form is Beta(α=2, β=4); applying that CDF gives U = F(X),
    which should be Uniform(0, 1) per dimension if sampling was correct.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Raw duration samples, columns aligned with ``dur_cols``.
    dur_cols : sequence of str
        Column names; the three special floors use PERT[5, 9], all others
        PERT[4, 8] (must match the sampling program).

    Returns
    -------
    ndarray of shape (n_samples, n_features)
        CDF-transformed samples in [0, 1].
    """
    U = np.zeros_like(X, dtype=float)
    special_floors = {'FLR01_DUR', 'FLR02_DUR', 'FLR18_DUR'}
    # Beta shape parameters used by the sampling program for every column.
    alpha, beta_param = 2.0, 4.0

    for j, col in enumerate(dur_cols):
        # Select PERT support [a, c] for this column.
        a_pert, c_pert = (5.0, 9.0) if col in special_floors else (4.0, 8.0)
        # Rescale to [0, 1]; clip guards tiny numerical excursions outside [a, c].
        z = np.clip((X[:, j] - a_pert) / (c_pert - a_pert), 0.0, 1.0)
        U[:, j] = beta.cdf(z, alpha, beta_param)

    return U


def transform_delay_to_prob_space(X):
    """Map delivery-delay samples to probability space U = F(X).

    Delays are assumed ~ TruncNormal(mu=-4, sigma=2) truncated to [-10, 10];
    applying that CDF should yield per-dimension Uniform(0, 1) values.
    """
    mu = -4.0
    sigma = 2.0
    lower_bound, upper_bound = -10.0, 10.0

    # scipy's truncnorm expects the truncation limits in standardized units.
    a_std = (lower_bound - mu) / sigma  # -3
    b_std = (upper_bound - mu) / sigma  # 7

    return truncnorm.cdf(X, a=a_std, b=b_std, loc=mu, scale=sigma)


# ===============================
# 3. Enhanced LHS Coverage Analysis in Probability Space
# ===============================

def enhanced_spatial_metrics(U, label):
    """Report nearest-neighbour statistics for a point set in [0, 1]^d.

    Computes each point's distance to its nearest neighbour and compares the
    mean against the n^(-1/d) spacing expected from an evenly spread design,
    printing a short report along the way.

    Parameters
    ----------
    U : ndarray of shape (n_samples, n_dims)
        Points in probability space.
    label : str
        Name used in the printed report.

    Returns
    -------
    dict
        Keys 'mean_nn_distance', 'cv_nn_distance', 'ideal_nn_distance'.
    """
    # scipy KD-tree instead of sklearn NearestNeighbors: identical Euclidean
    # nearest-neighbour distances with one fewer dependency.
    from scipy.spatial import cKDTree

    print(f"\n  Enhanced Spatial Metrics for {label}:")

    # k=2 because each point's closest match (distance 0) is itself.
    distances, _ = cKDTree(U).query(U, k=2)
    nn_dist = distances[:, 1]  # Nearest neighbor distances

    # Spatial uniformity metrics
    mean_nn = np.mean(nn_dist)
    std_nn = np.std(nn_dist)
    cv_nn = std_nn / mean_nn  # Coefficient of variation (spread of spacings)

    print(f"    Nearest Neighbor distances:")
    print(f"      Mean = {mean_nn:.4f}, Std = {std_nn:.4f}")
    print(f"      Coefficient of Variation = {cv_nn:.4f}")

    # Ideal LHS nearest neighbor distance (in d-dimensional unit hypercube)
    n_samples, n_dims = U.shape
    ideal_nn_distance = (1.0 / n_samples) ** (1.0 / n_dims)
    print(f"    Ideal NN distance for LHS: {ideal_nn_distance:.4f}")
    print(f"    Actual/Ideal ratio: {mean_nn / ideal_nn_distance:.4f}")

    return {
        'mean_nn_distance': mean_nn,
        'cv_nn_distance': cv_nn,
        'ideal_nn_distance': ideal_nn_distance
    }


def plot_voronoi_diagram(U, label):
    """Visualize spatial spread of a point set via a Voronoi tessellation.

    Projects the points onto two principal components, overlays the Voronoi
    diagram, and saves the figure to output/{label}_Voronoi.png.
    Does nothing for 1-D input (a Voronoi diagram needs >= 2 dimensions).
    """
    if U.shape[1] < 2:
        return

    # 2D projection for plotting.
    projector = PCA(n_components=2)
    points_2d = projector.fit_transform(U)

    vor = Voronoi(points_2d)

    plt.figure(figsize=(10, 8))
    from scipy.spatial import voronoi_plot_2d
    voronoi_plot_2d(vor, show_vertices=False, show_points=True, point_size=15)
    plt.scatter(points_2d[:, 0], points_2d[:, 1], c='red', s=20, alpha=0.8)
    plt.title(f"{label} - Voronoi Diagram in 2D Projection")
    plt.xlabel("PC1")
    plt.ylabel("PC2")
    plt.grid(True, alpha=0.3)
    plt.savefig(f"output/{label}_Voronoi.png", dpi=300, bbox_inches='tight')
    plt.show()


def validate_lhs_coverage(U, label):
    """
    Validate LHS coverage in probability space [0,1]^d
    Only for LHS-sampled variables: Duration and Delivery Delay

    Parameters
    ----------
    U : ndarray of shape (n_samples, n_features)
        CDF-transformed samples; ideally Uniform(0, 1) in every dimension.
    label : str
        Name used in printed reports and saved figure file names.

    Returns
    -------
    dict
        KS p-values, mean p-value, quality verdict, pairwise distances and
        the metrics dict from enhanced_spatial_metrics().
    """
    print(f"\n=== LHS Coverage Validation for {label} ===")
    n_samples, n_features = U.shape
    print(f"  LHS Sampling Dimensions: {n_features}, Samples: {n_samples}")

    # 1. KS test for uniformity in probability space: after the CDF
    # transform, each marginal of a correct design should be Uniform(0, 1).
    ks_pvals = [kstest(U[:, i], 'uniform').pvalue for i in range(n_features)]
    mean_p = np.mean(ks_pvals)
    min_p = np.min(ks_pvals)
    max_p = np.max(ks_pvals)
    prop_pass = np.mean(np.array(ks_pvals) > 0.05)

    print(f"  KS Test Results (Probability Space Uniformity):")
    print(f"    Mean p-value = {mean_p:.4f}, Range = [{min_p:.4f}, {max_p:.4f}]")
    print(f"    Proportion passing (p > 0.05): {prop_pass:.1%}")

    # Assess LHS sampling quality — heuristic thresholds on mean p-value
    # and pass rate (not a formal multiple-testing procedure).
    if mean_p > 0.1 and prop_pass > 0.9:
        quality = "EXCELLENT"
    elif mean_p > 0.05 and prop_pass > 0.8:
        quality = "GOOD"
    else:
        quality = "NEEDS IMPROVEMENT"
    print(f"    LHS Sampling Quality: {quality}")

    # 2. Distance analysis in probability space (all pairwise Euclidean distances)
    dists = pdist(U)
    print(f"  Spatial Distribution in Probability Space:")
    print(f"    Pairwise distances - Min: {np.min(dists):.4f}, Mean: {np.mean(dists):.4f}, Max: {np.max(dists):.4f}")

    # 3. Enhanced spatial metrics (nearest-neighbour spacing report)
    spatial_metrics = enhanced_spatial_metrics(U, label)

    # 4. Visualizations for LHS coverage validation (three-panel figure)
    plt.figure(figsize=(15, 5))

    # PCA projection - check multidimensional space uniformity
    plt.subplot(1, 3, 1)
    pca = PCA(n_components=2)
    proj = pca.fit_transform(U)
    plt.scatter(proj[:, 0], proj[:, 1], s=12, alpha=0.6, edgecolor='none')
    plt.title(f"LHS: {label}\nPCA Projection in Probability Space")
    plt.xlabel(f"PC1 ({pca.explained_variance_ratio_[0]:.1%})")
    plt.ylabel(f"PC2 ({pca.explained_variance_ratio_[1]:.1%})")
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, '(a)', transform=plt.gca().transAxes,
             fontsize=14, fontweight='bold', va='top', ha='left')

    # Marginal distributions - verify uniformity in each dimension
    # (only the first 5 dimensions, to keep the legend readable)
    plt.subplot(1, 3, 2)
    for i in range(min(5, n_features)):
        plt.hist(U[:, i], bins=20, alpha=0.6, density=True, label=f'Dim {i + 1}')
    plt.axhline(1.0, color='red', linestyle='--', linewidth=1.5, label='Ideal Uniform')
    plt.xlabel('CDF Value')
    plt.ylabel('Density')
    plt.title('Marginal Distributions')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, '(b)', transform=plt.gca().transAxes,
             fontsize=14, fontweight='bold', va='top', ha='left')

    # Distance distribution - check spatial filling
    plt.subplot(1, 3, 3)
    plt.hist(dists, bins=30, alpha=0.7, density=True, edgecolor='black')
    plt.xlabel('Pairwise Distance')
    plt.ylabel('Density')
    plt.title('Inter-point Distance Distribution')
    plt.grid(True, alpha=0.3)
    plt.text(0.02, 0.98, '(c)', transform=plt.gca().transAxes,
             fontsize=14, fontweight='bold', va='top', ha='left')

    plt.tight_layout()
    plt.savefig(f"output/LHS_Coverage_{label}.png", dpi=300, bbox_inches='tight')
    plt.show()

    # 5. Voronoi diagram - advanced spatial analysis (optional)
    if n_features >= 2:
        plot_voronoi_diagram(U, f"LHS_{label}")

    return {
        'ks_pvals': ks_pvals,
        'mean_ks_p': mean_p,
        'ks_quality': quality,
        'pairwise_distances': dists,
        'spatial_metrics': spatial_metrics
    }


# ===============================
# 4. Feature Extraction for Independence Test
# ===============================
# Collapse the list-valued columns into one scalar summary per run.
tower_count = np.array([len(x) for x in crn])  # number of CRN_FAL entries per run
rain_ratio = np.array([np.mean(x) if len(x) > 0 else 0.0 for x in rain])  # 0.0 for empty lists
wind_ratio = np.array([np.mean(x) if len(x) > 0 else 0.0 for x in wind])  # 0.0 for empty lists

# NOTE(review): this module-level `features` frame is shadowed by an
# identical one constructed inside main(); it is redundant for the script
# run but harmless (kept for interactive/REPL use).
features = pd.DataFrame({
    "Duration_mean": df[dur_cols].mean(axis=1),
    "Delay_mean": df[dly_cols].mean(axis=1),
    "Tower_count": tower_count,
    "Rain_ratio": rain_ratio,
    "Wind_ratio": wind_ratio
})


# ===============================
# 5. Enhanced Independence Analysis with Sampling Error Consideration
# ===============================
def enhanced_independence_analysis(df, n_samples=1000):
    """Enhanced independence analysis with sampling error consideration

    For every pair of columns, computes four dependence measures (Pearson,
    Spearman, distance correlation, mutual information), flags pairs whose
    Pearson |r| exceeds the sampling-error confidence bounds, renders an
    annotated heatmap per measure, and writes each matrix to
    output/<name>_matrix.csv.

    Parameters
    ----------
    df : pandas.DataFrame
        One column per derived feature (this script passes 5 columns).
    n_samples : int
        Sample count used for the correlation standard error; main() passes
        len(features), so the 1000 default is only a fallback.

    Returns
    -------
    dict
        Correlation matrices by name, Pearson p-value matrix, standard
        error, 95%/99% critical values, and the significant pairs list.
    """
    print("\n=== Enhanced Independence Analysis with Sampling Error Consideration ===")

    # Standard error of a sample correlation under the null r = 0.
    se_correlation = 1.0 / np.sqrt(n_samples - 1)
    critical_value_95 = 1.96 * se_correlation
    critical_value_99 = 2.576 * se_correlation

    print(f"Sample size: {n_samples}")
    print(f"Correlation standard error: {se_correlation:.4f}")
    print(f"95% confidence interval for zero correlation: ±{critical_value_95:.4f}")
    print(f"99% confidence interval for zero correlation: ±{critical_value_99:.4f}")

    cols = df.columns.tolist()
    n = len(cols)

    # NOTE(review): np.eye puts 1.0 on every diagonal, which is correct for
    # the correlation matrices but not a meaningful self-value for MI or the
    # p-value matrix; downstream code masks/subtracts the diagonal, so this
    # only matters if these matrices are reused elsewhere.
    pearson_mat = np.eye(n)
    spearman_mat = np.eye(n)
    dcor_mat = np.eye(n)
    mi_mat = np.eye(n)
    p_values = np.eye(n)

    # Calculate all correlation metrics (upper triangle, mirrored to lower)
    for i in range(n):
        for j in range(i + 1, n):
            x = df.iloc[:, i].values
            y = df.iloc[:, j].values

            # Pearson correlation with p-value
            r, p_val = pearsonr(x, y)
            pearson_mat[i, j] = pearson_mat[j, i] = r
            p_values[i, j] = p_values[j, i] = p_val

            # Spearman (rank-based, captures monotone dependence)
            rho, _ = spearmanr(x, y)
            spearman_mat[i, j] = spearman_mat[j, i] = rho

            # Distance Correlation (zero iff independent, captures nonlinearity)
            dc = dcor.distance_correlation(x, y)
            dcor_mat[i, j] = dcor_mat[j, i] = dc

            # Mutual Information (kNN estimator; non-deterministic unless seeded)
            mi = mutual_info_regression(x.reshape(-1, 1), y)[0]
            mi_mat[i, j] = mi_mat[j, i] = mi

    # Statistical significance analysis: flag |r| beyond the sampling-error
    # confidence bounds computed above (not the pearsonr p-value itself).
    print(f"\n📊 Statistical Significance Analysis:")
    significant_pairs = []
    for i in range(n):
        for j in range(i + 1, n):
            r = pearson_mat[i, j]
            p_val = p_values[i, j]

            significant_95 = abs(r) > critical_value_95
            significant_99 = abs(r) > critical_value_99

            if significant_95:
                sig_level = "**" if significant_99 else "*"
                status = f"r = {r:.3f} {sig_level} (p = {p_val:.3f})"
                significant_pairs.append((cols[i], cols[j], r, p_val))
            else:
                status = f"r = {r:.3f} (p = {p_val:.3f}) - not significant"

            print(f"  {cols[i]:15} vs {cols[j]:15}: {status}")

    # Detailed analysis of 10 variable pairs
    # NOTE(review): indices and meanings assume the exact 5-column order
    # built in main(): Duration_mean, Delay_mean, Tower_count, Rain_ratio,
    # Wind_ratio — keep in sync if that frame changes.
    print(f"\n🔍 Detailed Analysis of 10 Variable Pairs:")
    pairs = [
        (0, 1, "Duration_mean vs Delay_mean", "Construction progress vs Material supply"),
        (0, 2, "Duration_mean vs Tower_count", "Construction progress vs Equipment reliability"),
        (0, 3, "Duration_mean vs Rain_ratio", "Construction progress vs Rain impact"),
        (0, 4, "Duration_mean vs Wind_ratio", "Construction progress vs Wind impact"),
        (1, 2, "Delay_mean vs Tower_count", "Material supply vs Equipment reliability"),
        (1, 3, "Delay_mean vs Rain_ratio", "Material supply vs Rain impact"),
        (1, 4, "Delay_mean vs Wind_ratio", "Material supply vs Wind impact"),
        (2, 3, "Tower_count vs Rain_ratio", "Equipment failure vs Rain weather"),
        (2, 4, "Tower_count vs Wind_ratio", "Equipment failure vs Wind weather"),
        (3, 4, "Rain_ratio vs Wind_ratio", "Different weather type impacts")
    ]

    for i, j, label, meaning in pairs:
        r = pearson_mat[i, j]
        p_val = p_values[i, j]
        significant = abs(r) > critical_value_95

        if significant:
            status = "⚠️ POTENTIALLY CORRELATED" if abs(r) > critical_value_99 else "⚠️ WEAKLY CORRELATED"
        else:
            status = "✅ INDEPENDENT (within sampling error)"

        print(f"  {label:35} | {meaning:45} | r = {r:6.3f} | {status}")

    # Plot and save correlation matrices with significance markers
    mats = [pearson_mat, spearman_mat, dcor_mat, mi_mat]
    names = ["Pearson", "Spearman", "DistanceCorr", "MutualInfo"]

    for name, mat in zip(names, mats):
        plt.figure(figsize=(8, 6))
        # Mask the strict upper triangle so each value is shown once.
        mask = np.triu(np.ones_like(mat, dtype=bool), k=1)

        # Create annotation matrix with significance stars
        # (stars only make sense for Pearson, whose bounds were computed above)
        annot_mat = np.empty_like(mat, dtype=object)
        for i in range(n):
            for j in range(n):
                if i == j:
                    annot_mat[i, j] = "1.000"
                elif i < j:
                    if name == "Pearson" and abs(mat[i, j]) > critical_value_95:
                        star = "**" if abs(mat[i, j]) > critical_value_99 else "*"
                        annot_mat[i, j] = f"{mat[i, j]:.3f}{star}"
                    else:
                        annot_mat[i, j] = f"{mat[i, j]:.3f}"
                else:
                    annot_mat[i, j] = ""

        sns.heatmap(mat, annot=annot_mat, fmt="", cmap="coolwarm", center=0,
                    xticklabels=cols, yticklabels=cols, mask=mask,
                    cbar_kws={'label': f'{name} Correlation'})
        plt.title(f"{name} Correlation Matrix\n(* p<0.05, ** p<0.01)")
        plt.tight_layout()
        plt.savefig(f"output/{name}_matrix_enhanced.png", dpi=300)
        plt.show()

        # Persist the full (symmetric) matrix for offline inspection.
        pd.DataFrame(mat, index=cols, columns=cols).to_csv(f"output/{name}_matrix.csv")

    return {
        'correlation_matrices': dict(zip(names, mats)),
        'p_values': p_values,
        'standard_error': se_correlation,
        'critical_values': {
            '95%': critical_value_95,
            '99%': critical_value_99
        },
        'significant_pairs': significant_pairs
    }


# ===============================
# 6. Enhanced Results Interpretation
# ===============================
def interpret_results(summary, independence_results, res_dur, res_dly):
    """Enhanced results interpretation with clear scope separation

    Prints a human-readable verdict built from the `summary` dict assembled
    in main() and the dict returned by enhanced_independence_analysis().

    Parameters
    ----------
    summary : dict
        Flat metrics dict; keys referenced here must match main() exactly.
    independence_results : dict
        Output of enhanced_independence_analysis().
    res_dur, res_dly : dict
        Coverage results from validate_lhs_coverage(). Currently unused in
        this function — kept for interface stability with main().
    """
    print("\n" + "=" * 70)
    print("ENHANCED RESULTS INTERPRETATION")
    print("=" * 70)

    print("NOTE: Independence analysis includes non-LHS derived features.")
    print("      Only Duration and Delay are subject to LHS quality assessment.")
    print("=" * 70)

    # 🟢 LHS COVERAGE QUALITY ASSESSMENT (Duration & Delay only)
    # Quality thresholds mirror those inside validate_lhs_coverage(), but are
    # recomputed here from mean KS p alone (pass-rate is not re-checked).
    print(f"\n🟢 LHS COVERAGE QUALITY ASSESSMENT")
    print(f"  Scope: Duration & Delivery Delay sampling only")
    print(f"  Duration Sampling (18 dimensions):")
    ks_dur = summary['LHS_Mean_KS_p_Duration']
    dur_quality = "EXCELLENT" if ks_dur > 0.1 else "GOOD" if ks_dur > 0.05 else "NEEDS IMPROVEMENT"
    print(f"    • Quality: {dur_quality} (mean KS p = {ks_dur:.4f})")
    print(f"    • {summary['LHS_Prop_KS_gt_0.05_Duration']:.1%} of dimensions pass uniformity test")

    print(f"  Delay Sampling (18 dimensions):")
    ks_dly = summary['LHS_Mean_KS_p_Delay']
    dly_quality = "EXCELLENT" if ks_dly > 0.1 else "GOOD" if ks_dly > 0.05 else "NEEDS IMPROVEMENT"
    print(f"    • Quality: {dly_quality} (mean KS p = {ks_dly:.4f})")
    print(f"    • {summary['LHS_Prop_KS_gt_0.05_Delay']:.1%} of dimensions pass uniformity test")

    # Spatial filling assessment (actual vs ideal nearest-neighbour spacing)
    print(f"\n📈 SPATIAL FILLING ASSESSMENT (LHS Sampling):")
    dur_ratio = summary['LHS_NN_ratio_Duration']
    dly_ratio = summary['LHS_NN_ratio_Delay']
    print(f"  Duration: Actual/Ideal NN distance ratio = {dur_ratio:.3f}")
    print(f"  Delay:    Actual/Ideal NN distance ratio = {dly_ratio:.3f}")
    print(f"  Note: Ratio close to 1.0 indicates good spatial distribution")

    # 🔵 INDEPENDENCE ASSESSMENT (All 5 variable types)
    print(f"\n🔵 INDEPENDENCE ASSESSMENT")
    print(f"  Scope: All 5 variable types (including derived features)")

    max_abs_corr = summary['CORR_Max_abs_correlation']
    se_corr = summary['CORR_Standard_Error']

    print(f"  Maximum absolute Pearson correlation: {max_abs_corr:.3f}")
    print(f"  Correlation standard error (N=1000): ±{se_corr:.3f}")
    print(f"  95% confidence interval for zero correlation: ±{independence_results['critical_values']['95%']:.3f}")

    # Independence judgment based on sampling error (tiered thresholds:
    # 1 SE, then the 95% bound, then an absolute 0.3 cutoff)
    if max_abs_corr < se_corr:
        indep_quality = "VERY HIGH"
        explanation = "All correlations within 1 standard error"
    elif max_abs_corr < independence_results['critical_values']['95%']:
        indep_quality = "HIGH"
        explanation = "All correlations within 95% confidence interval"
    elif max_abs_corr < 0.3:
        indep_quality = "MODERATE"
        explanation = "Some small correlations detected"
    else:
        indep_quality = "LOW"
        explanation = "Substantial correlations present"

    print(f"  Independence quality: {indep_quality}")
    print(f"  Explanation: {explanation}")

    # Significant pairs analysis
    sig_pairs = independence_results['significant_pairs']
    if len(sig_pairs) == 0:
        print(f"  No statistically significant correlations found at 95% confidence level")
    else:
        print(f"  Statistically significant pairs ({len(sig_pairs)}):")
        for var1, var2, r, p in sig_pairs:
            print(f"    • {var1} vs {var2}: r = {r:.3f}, p = {p:.3f}")

    # Mutual information analysis
    # (subtracting eye(5) removes the artificial 1.0 diagonal; assumes the
    # 5-feature layout built in main())
    mi_mat = independence_results['correlation_matrices']['MutualInfo']
    max_mi = np.max(mi_mat - np.eye(5))
    print(f"\n💡 MUTUAL INFORMATION ANALYSIS:")
    print(f"  Maximum mutual information: {max_mi:.4f}")
    if max_mi < 0.01:
        print(f"  → Mutual information close to 0, confirming no real information dependency")

    # 🎯 OVERALL RECOMMENDATIONS
    print(f"\n🎯 OVERALL RECOMMENDATIONS:")
    if (ks_dur > 0.05 and ks_dly > 0.05 and
            max_abs_corr < independence_results['critical_values']['95%'] and
            len(sig_pairs) == 0):
        print("  ✅ EXCELLENT: Both LHS sampling and variable independence are ACCEPTABLE")
        print("  ✅ LHS sampling quality meets requirements for simulation")
        print("  ✅ Variables are effectively independent for practical purposes")
    else:
        print("  ⚠️  Some aspects may need attention:")
        if ks_dur <= 0.05 or ks_dly <= 0.05:
            print("    - LHS Sampling: Consider increasing samples for duration/delay variables")
        if len(sig_pairs) > 0 or max_abs_corr >= 0.3:
            print("    - Independence: Review potentially correlated variable pairs")

# ===============================
# 7. Main Execution with Clear Scope Separation
# ===============================
def main():
    """Run the full validation pipeline.

    Steps: (1) echo assumed distribution parameters, (2) validate LHS
    coverage for the Duration and Delay columns in probability space,
    (3) run the derived-feature correlation analysis, (4) write a summary
    CSV to output/ and print an interpretation report.
    """
    # Verify distribution parameters
    verify_distribution_parameters()

    print("\n" + "=" * 60)
    print("ANALYSIS SCOPE DEFINITION")
    print("=" * 60)
    print("🟢 LHS Sampling Quality Validation: Duration & Delivery Delay only")
    print("🔵 Derived Feature Correlation Analysis: All 5 variable types")
    print("=" * 60)

    # 🟢 LHS SAMPLING QUALITY VALIDATION (ONLY for true LHS variables)
    print("\n" + "🟢 " + "=" * 50)
    print("🟢 LHS SAMPLING QUALITY VALIDATION")
    print("🟢 Scope: Duration (18D) & Delivery Delay (18D) only")
    print("🟢 " + "=" * 50)

    X_dur = df[dur_cols].values
    X_dly = df[dly_cols].values

    # Transform raw samples to probability space U = F(X) so uniformity
    # can be tested per dimension.
    U_dur = transform_duration_to_prob_space(X_dur, dur_cols)
    U_dly = transform_delay_to_prob_space(X_dly)

    # Validate ONLY these two LHS-sampled variables
    res_dur = validate_lhs_coverage(U_dur, "Duration_LHS")
    res_dly = validate_lhs_coverage(U_dly, "Delay_LHS")

    # 🔵 DERIVED FEATURE CORRELATION ANALYSIS (NOT part of LHS validation!)
    print("\n" + "🔵 " + "=" * 50)
    print("🔵 DERIVED FEATURE CORRELATION ANALYSIS")
    print("🔵 Scope: All 5 variable types (including non-LHS features)")
    print("🔵 " + "=" * 50)

    # Feature extraction for correlation analysis (local frame; shadows the
    # identical module-level `features`)
    features = pd.DataFrame({
        "Duration_mean": df[dur_cols].mean(axis=1),
        "Delay_mean": df[dly_cols].mean(axis=1),
        "Tower_count": tower_count,
        "Rain_ratio": rain_ratio,
        "Wind_ratio": wind_ratio
    })

    independence_results = enhanced_independence_analysis(features, n_samples=len(features))

    # Save summary metrics with clear scope labels
    # (key names here must match those read by interpret_results)
    summary = {
        # 🟢 LHS Sampling Quality Metrics
        "LHS_Mean_KS_p_Duration": res_dur['mean_ks_p'],
        "LHS_Mean_KS_p_Delay": res_dly['mean_ks_p'],
        "LHS_Min_KS_p_Duration": np.min(res_dur['ks_pvals']),
        "LHS_Min_KS_p_Delay": np.min(res_dly['ks_pvals']),
        "LHS_Prop_KS_gt_0.05_Duration": np.mean(np.array(res_dur['ks_pvals']) > 0.05),
        "LHS_Prop_KS_gt_0.05_Delay": np.mean(np.array(res_dly['ks_pvals']) > 0.05),
        "LHS_NN_ratio_Duration": res_dur['spatial_metrics']['mean_nn_distance'] / res_dur['spatial_metrics'][
            'ideal_nn_distance'],
        "LHS_NN_ratio_Delay": res_dly['spatial_metrics']['mean_nn_distance'] / res_dly['spatial_metrics'][
            'ideal_nn_distance'],

        # 🔵 Correlation Analysis Metrics
        # (eye(5) removes the artificial unit diagonal before taking the max)
        "CORR_Max_abs_correlation": np.max(np.abs(independence_results['correlation_matrices']['Pearson'] - np.eye(5))),
        "CORR_Significant_pairs_count": len(independence_results['significant_pairs']),
        "CORR_Standard_Error": independence_results['standard_error']
    }

    pd.DataFrame([summary]).to_csv("output/summary_metrics_enhanced.csv", index=False)

    # Enhanced results interpretation with scope clarification
    interpret_results(summary, independence_results, res_dur, res_dly)

    print(f"\n✅ Enhanced summary metrics saved to output/summary_metrics_enhanced.csv")
    print("✅ All figures saved to 'output/' directory.")
    print("✅ Analysis completed successfully!")


# Script entry point: run the full validation pipeline when executed directly.
if __name__ == "__main__":
    main()