# -*- coding: utf-8 -*-
"""
Optimized LHS Sampling and Validation Program (with controlled jitter and full seed control)
- 所有随机数生成都与 self.config.SEED 相关
- 保留原始程序功能（PERT、截断正态、泊松、伯努利、验证与绘图等）
- 在 LHS 生成处引入受控抖动，防止分层与逆CDF映射导致局部堆积
- 在 iman_conover_joint 之后加入极小后置扰动以打破严格秩产生的完全一致性
- 添加KS检验统计量输出功能
"""
import argparse
from collections import Counter
from pathlib import Path

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import qmc
from scipy.linalg import cholesky
from typing import Dict, List, Tuple
import warnings

# If osc04a.static.constants is unavailable, fall back to a default configuration.
try:
    from osc04a.static.constants import DirectoryConfig
except ImportError:
    class DirectoryConfig:
        # Default output directory used when the project constants are absent.
        OUTPUT_DIR = "output"

# Filter specific font missing warnings
warnings.filterwarnings("ignore", message="Glyph .* missing from font.*")


class LHSConfig:
    """Configuration parameters class"""
    SAMPLE_SIZE = 1000                   # default number of LHS samples per batch
    PLOT_SIZE = (18, 12)                 # default figure size in inches
    DPI = 150                            # figure resolution
    SEED = 44                            # base seed; every RNG stream derives from it
    OUTPUT_PRECISION = 2                 # decimal places for rounded outputs
    CRANE_FAILURE_RATE = 1 / 3000        # crane failures per hour (lambda)
    MTBF = 1 / CRANE_FAILURE_RATE        # mean time between failures, in hours
    SAMPLE_DAYS = 153                    # horizon length: Mar 1 - Jul 31 (see load_daily_prob defaults)
    SAMPLE_HOURS = SAMPLE_DAYS * 10      # 10 hours counted per day — presumably the working shift; confirm
    SIGNIFICANCE_LEVEL = 0.05            # alpha used by the statistical tests
    # Jitter parameters
    # JITTER_STRENGTH: in [0, 1]; the weight kept on the original stratified LHS
    # position (1.0 keeps pure LHS, 0.0 is fully randomized toward stratum centers).
    JITTER_STRENGTH = 0.95
    # POST_JITTER_EPS: tiny perturbation added to U after the Iman-Conover
    # adjustment, to avoid the artificially perfect fit that strict ranks produce.
    POST_JITTER_EPS = 1e-6


def lhs_with_jitter(d: int, n: int, seed: int = None, jitter_strength: float = 0.9) -> np.ndarray:
    """Draw a Latin Hypercube sample of shape (n, d) with optional smoothing.

    Each of the d columns receives exactly one point per stratum
    [i/n, (i+1)/n), placed uniformly inside the stratum and then shuffled
    across rows.  When jitter_strength < 1, every point is blended towards a
    randomly chosen stratum center, trading strict stratification for a
    smoother spread.

    Args:
        d: number of dimensions (columns).
        n: number of samples (rows, i.e. strata per column).
        seed: seed for the dedicated RNG; None yields a nondeterministic draw.
        jitter_strength: weight in [0, 1] on the stratified position
            (1.0 = pure LHS; smaller = more randomized).

    Returns:
        ndarray of shape (n, d) with every entry strictly inside (0, 1).
    """
    rng = np.random.default_rng(seed)
    # Stratum boundaries 0, 1/n, 2/n, ..., 1.
    edges = np.linspace(0.0, 1.0, n + 1)
    lower_edges = edges[:-1]

    sample = np.zeros((n, d))
    for col in range(d):
        # One uniform point per stratum, then shuffle rows so that the
        # stratum ordering differs per dimension.
        sample[:, col] = lower_edges + rng.random(n) * (1.0 / n)
        rng.shuffle(sample[:, col])

    if jitter_strength < 1.0:
        # Blend each point towards a randomly drawn stratum center.
        centers = (edges[:-1] + edges[1:]) / 2.0
        for col in range(d):
            anchors = rng.choice(centers, size=n, replace=True)
            sample[:, col] = jitter_strength * sample[:, col] + (1.0 - jitter_strength) * anchors

    # Keep every value strictly inside the open unit interval for later PPF calls.
    tiny = 1e-12
    np.clip(sample, tiny, 1.0 - tiny, out=sample)
    return sample


def iman_conover_joint(U: np.ndarray, target_corr: np.ndarray, post_jitter_eps: float = 1e-12,
                       rng_seed: int = None) -> np.ndarray:
    """Iman-Conover rank adjustment: impose ``target_corr`` on the columns of
    ``U`` while leaving each column's marginal values untouched.

    Args:
        U: (n, d) array with entries strictly inside (0, 1).
        target_corr: (d, d) desired correlation matrix.
        post_jitter_eps: half-width of a tiny uniform perturbation applied at
            the end to break exact rank ties (None or 0 disables it).
        rng_seed: seed for the perturbation RNG.

    Returns:
        (n, d) array whose columns approximately exhibit ``target_corr``.
    """

    def _nearest_pd_correlation(C: np.ndarray) -> np.ndarray:
        """Project a matrix onto the nearest positive-definite correlation matrix."""
        sym = (C + C.T) / 2
        np.fill_diagonal(sym, 1.0)
        w, V = np.linalg.eigh(sym)
        w[w < 1e-12] = 1e-12
        pd_mat = V @ np.diag(w) @ V.T
        rescale = np.diag(1.0 / np.sqrt(np.diag(pd_mat)))
        return rescale @ pd_mat @ rescale

    # Cholesky factor of the target; repair the matrix first if it is not PD.
    try:
        T = cholesky(target_corr, lower=True)
    except np.linalg.LinAlgError:
        print("Warning: Target correlation matrix is not positive definite. Finding the nearest valid matrix.")
        T = cholesky(_nearest_pd_correlation(target_corr), lower=True)

    # Map the uniforms to standard normals and factor their empirical correlation.
    Z = stats.norm.ppf(U)
    R = np.corrcoef(Z, rowvar=False)
    try:
        C_emp = cholesky(R, lower=True)
    except np.linalg.LinAlgError:
        # Near-singular empirical correlation: regularize the diagonal slightly.
        C_emp = cholesky(R + np.eye(R.shape[0]) * 1e-8, lower=True)

    # De-correlate with the empirical factor, then re-correlate toward the target.
    Z_new = Z @ np.linalg.inv(C_emp) @ T

    # Reorder each original column by the ranks of the transformed scores;
    # this preserves the marginals exactly.
    ranks = Z_new.argsort(axis=0).argsort(axis=0)
    U_new = np.take_along_axis(np.sort(U, axis=0), ranks, axis=0)

    # Tiny uniform noise to break perfectly deterministic rank patterns.
    if post_jitter_eps is not None and post_jitter_eps > 0:
        rng = np.random.default_rng(rng_seed)
        U_new = U_new + rng.uniform(-post_jitter_eps, post_jitter_eps, size=U_new.shape)
        np.clip(U_new, 0.0 + 1e-12, 1.0 - 1e-12, out=U_new)

    return U_new


def sample_crane_failures(failure_rate, max_hours=1530.0, sample_size=1, seed: int = None) -> tuple[
    list[list[float]], list, list]:
    """Generate ``sample_size`` realizations of a homogeneous Poisson failure process.

    For each realization the failure count is drawn from
    Poisson(failure_rate * max_hours), and each failure time is uniform on
    [0, max_hours] (the conditional-uniformity property of a homogeneous
    Poisson process).  All randomness comes from a dedicated seeded RNG.

    Args:
        failure_rate: failures per hour (lambda).
        max_hours: observation window length in hours.
        sample_size: number of independent realizations.
        seed: seed for the dedicated RNG.

    Returns:
        A tuple of
        - per-realization failure times, rounded to 2 decimals (draw order);
        - per-realization failure counts;
        - all failure times pooled across realizations, globally sorted
          (unrounded).
    """
    rng = np.random.default_rng(seed)
    failure_counts = []
    pooled_failure_times = []
    failure_times_per_sampling = []

    for _ in range(sample_size):
        # Failure count for this realization's window.
        n_failures = rng.poisson(failure_rate * max_hours)
        failure_counts.append(n_failures)

        if n_failures > 0:
            times = rng.uniform(0, max_hours, size=n_failures)
            failure_times_per_sampling.append(list(np.round(times, 2)))
            pooled_failure_times.extend(times)
        else:
            failure_times_per_sampling.append([])

    # BUGFIX: the pooled list was previously only sorted within each
    # realization (per-sample sorted arrays were concatenated), contradicting
    # its "sorted" name; sort it globally before returning.
    pooled_failure_times.sort()

    return failure_times_per_sampling, failure_counts, pooled_failure_times


def lhs_transform(u: np.ndarray, dist, *args, **kwargs) -> np.ndarray:
    """Map 1-D uniform LHS draws onto ``dist`` via its inverse CDF (PPF).

    The unit interval is shrunk to [eps, 1 - eps] first, so draws equal to
    exactly 0 or 1 never reach the PPF — those would land on the support
    boundary (or +/-inf) and pile up there.

    Args:
        u: 1-D array of values in [0, 1].
        dist: a scipy.stats distribution (frozen or unfrozen).
        *args, **kwargs: forwarded to ``dist.ppf``.

    Returns:
        Array of samples from ``dist``, same shape as ``u``.

    Raises:
        ValueError: if ``u`` is not one-dimensional.
    """
    if u.ndim != 1:
        raise ValueError("Input must be a 1D array")

    # Shrink [0, 1] to [eps, 1 - eps] to keep the PPF away from the endpoints.
    eps = 1e-12
    return dist.ppf(eps + u * (1 - 2 * eps), *args, **kwargs)


class LHSSampler:
    """LHS Sampler Class with full seed control"""

    def __init__(self, config: LHSConfig = None):
        # Fall back to the default configuration when none is supplied.
        self.config = config or LHSConfig()
        self._validate_config()
        # Use a reproducible RNG for all random operations
        self._rng = np.random.default_rng(self.config.SEED)
        # NOTE(review): _seed_sequence is never read anywhere in this class —
        # derived seeds come from _get_derived_seed() instead; confirm before removing.
        self._seed_sequence = np.random.SeedSequence(self.config.SEED)

    def _validate_config(self):
        """Reject configuration values that would make sampling impossible."""
        if self.config.SAMPLE_SIZE <= 0:
            raise ValueError("Sample size must be a positive integer")
        if self.config.CRANE_FAILURE_RATE <= 0:
            raise ValueError("Failure rate must be positive")

    def _get_derived_seed(self, offset: int) -> int:
        """Generate a derived seed from the base seed with offset to avoid correlation issues."""
        # Simple additive derivation keeps every stream reproducible from SEED.
        return self.config.SEED + offset

    @staticmethod
    def pert_to_alpha_beta(a: float, b: float, c: float) -> Tuple[float, float]:
        """Convert PERT parameters (min a, mode b, max c) to Beta shape parameters.

        Raises:
            ValueError: if c <= a (degenerate range).
        """
        if c <= a:
            raise ValueError("For PERT distribution, parameter 'c' must be greater than 'a'")
        # Using the standard PERT formulation with gamma = 4
        alpha_param = 1 + 4 * (b - a) / (c - a)
        beta_param = 1 + 4 * (c - b) / (c - a)
        return alpha_param, beta_param

    def pert_rvs(self, a: float, b: float, c: float, size: int = None, u: np.ndarray = None) -> np.ndarray:
        """
        【OPTIMIZED】Samples from a PERT distribution using LHS samples.
        This version also squeezes the [0, 1] interval to prevent pile-up at the
        maximum value 'c' of the distribution.
        """
        alpha_param, beta_param = self.pert_to_alpha_beta(a, b, c)
        if u is None:
            # Fallback for non-LHS random sampling with controlled RNG
            return a + (c - a) * stats.beta.rvs(alpha_param, beta_param, size=size, random_state=self._rng)

        # Squeeze the [0, 1] interval to [epsilon, 1-epsilon] to avoid boundary pile-up
        epsilon = 1e-12
        u_squeezed = epsilon + u * (1 - 2 * epsilon)

        # Scale the Beta quantiles from [0, 1] back onto [a, c].
        return a + (c - a) * stats.beta.ppf(u_squeezed, alpha_param, beta_param)

    @staticmethod
    def load_daily_prob(
            csv_file: str,
            start_date: Tuple[int, int] = (3, 1),
            end_date: Tuple[int, int] = (7, 31)
    ) -> Dict[Tuple[int, int], float]:
        """Load per-day event probabilities from a CSV with Month/Day/Frequency columns.

        Returns a {(month, day): probability} dict restricted to the inclusive
        [start_date, end_date] window.

        Raises:
            ValueError: on missing file/columns, out-of-range probabilities,
                or an empty date window.
        """
        try:
            df = pd.read_csv(csv_file, usecols=['Month', 'Day', 'Frequency'])
            df['Month'] = df['Month'].astype(int)
            df['Day'] = df['Day'].astype(int)
            if (df['Frequency'] < 0).any() or (df['Frequency'] > 1).any():
                raise ValueError("Probability values must be in the [0, 1] range")
            df['Frequency'] = df['Frequency'].clip(0, 1)

            def date_in_range(row):
                # Inclusive (month, day) comparison that also handles ranges spanning months.
                md = (row['Month'], row['Day'])
                if start_date[0] == end_date[0]:
                    return start_date[0] == md[0] and start_date[1] <= md[1] <= end_date[1]
                return (md[0] > start_date[0] or (md[0] == start_date[0] and md[1] >= start_date[1])) and \
                    (md[0] < end_date[0] or (md[0] == end_date[0] and md[1] <= end_date[1]))

            filtered_df = df[df.apply(date_in_range, axis=1)].copy()
            if filtered_df.empty:
                raise ValueError(f"No valid data found in date range {start_date}-{end_date}")
            return {(r.Month, r.Day): r.Frequency for _, r in filtered_df.iterrows()}
        except (KeyError, FileNotFoundError) as e:
            raise ValueError(f"CSV file must contain Month/Day/Frequency columns: {e}")
        except Exception as e:
            # NOTE(review): this also re-wraps the ValueErrors raised above.
            raise ValueError(f"Failed to load probability file: {str(e)}")

    def sample_daily_strong_wind_events(
            self, prob_dict: Dict[Tuple[int, int], float],
            sample_size: int, rho: float = 0.2, min_prob: float = 1e-4
    ) -> np.ndarray:
        """Sample daily strong wind events with full seed control."""
        strong_wind = np.zeros((sample_size, self.config.SAMPLE_DAYS), dtype=bool)
        # Use the class-internal controlled RNG
        u_matrix = self._rng.random((sample_size, self.config.SAMPLE_DAYS))
        date_range = pd.date_range('2025-03-01', periods=self.config.SAMPLE_DAYS, freq='D')
        # Base daily probability, floored at min_prob for days missing from the dict.
        p_base = np.array([max(prob_dict.get((d.month, d.day), 0.0), min_prob) for d in date_range])

        for day_idx, pi in enumerate(p_base):
            if day_idx > 0:
                # First-order persistence: yesterday's state shifts today's probability.
                prev = strong_wind[:, day_idx - 1]
                p_given_wind = min(pi + rho * (1 - pi), 1.0 - min_prob)
                p_given_no_wind = max(pi * (1 - rho), min_prob)
                p = np.where(prev, p_given_wind, p_given_no_wind)
            else:
                p = pi
            strong_wind[:, day_idx] = u_matrix[:, day_idx] < p
        return strong_wind

    def sample_daily_precipitation(
            self, prob_dict: Dict[Tuple[int, int], float],
            sample_size: int, rho: float = 0.015, min_prob: float = 1e-4
    ) -> np.ndarray:
        """Sample daily precipitation events with full seed control."""
        precip = np.zeros((sample_size, self.config.SAMPLE_DAYS), dtype=bool)
        # Use the class-internal controlled RNG
        u_matrix = self._rng.random((sample_size, self.config.SAMPLE_DAYS))
        date_range = pd.date_range('2025-03-01', periods=self.config.SAMPLE_DAYS, freq='D')
        # Base daily probability, floored at min_prob for days missing from the dict.
        p_base = np.array([max(prob_dict.get((d.month, d.day), 0.0), min_prob) for d in date_range])

        for day_idx, pi in enumerate(p_base):
            p = pi
            if day_idx > 0:
                # First-order persistence: yesterday's state shifts today's probability.
                prev = precip[:, day_idx - 1]
                p_given_rain = min(pi + rho * (1 - pi), 1.0 - min_prob)
                p_given_no_rain = max(pi * (1 - rho), min_prob)
                p = np.where(prev, p_given_rain, p_given_no_rain)
            precip[:, day_idx] = u_matrix[:, day_idx] < p
        return precip

    def sample(self, sample_size: int = None) -> pd.DataFrame:
        """Generate one batch of samples with full seed control."""
        sample_size = sample_size or self.config.SAMPLE_SIZE
        n_floors = 18

        # --- Step 1: LHS for durations with jitter ---
        u_dur = lhs_with_jitter(
            d=n_floors, n=sample_size,
            seed=self._get_derived_seed(0),  # use a derived seed
            jitter_strength=self.config.JITTER_STRENGTH
        )

        # --- Step 2: LHS for delivery delays with jitter ---
        u_dly = lhs_with_jitter(
            d=n_floors, n=sample_size,
            seed=self._get_derived_seed(1),  # use a derived seed
            jitter_strength=self.config.JITTER_STRENGTH
        )

        # --- Step 3: Construct intra-group correlation matrices ---
        # AR(1)-style structure: correlation decays as rho_floor ** |i - j|.
        rho_floor = 0.9
        target_corr_dur = np.eye(n_floors)
        target_corr_dly = np.eye(n_floors)
        for i in range(n_floors):
            for j in range(n_floors):
                if i != j:
                    target_corr_dur[i, j] = rho_floor ** abs(i - j)
                    target_corr_dly[i, j] = rho_floor ** abs(i - j)

        # --- Step 4: Induce correlation separately for each group (Iman-Conover) ---
        u_dur = iman_conover_joint(
            u_dur, target_corr_dur,
            post_jitter_eps=self.config.POST_JITTER_EPS,
            rng_seed=self._get_derived_seed(100)  # use a derived seed
        )
        u_dly = iman_conover_joint(
            u_dly, target_corr_dly,
            post_jitter_eps=self.config.POST_JITTER_EPS,
            rng_seed=self._get_derived_seed(200)  # use a derived seed
        )

        # --- Step 5: Sample durations (PERT distribution) ---
        # Floors 1, 2 and 18 get the wider (5, 6, 9) PERT parameters.
        flr_params = {
            f'FLR{i:02d}': (5, 6, 9) if i in [1, 2, 18] else (4, 5, 8)
            for i in range(1, 19)
        }
        # sorted() keeps FLR01..FLR18 aligned with the u_dur columns.
        durations = {
            f'{flr}_DUR': self.pert_rvs(*params, u=u_dur[:, i])
            for i, (flr, params) in enumerate(sorted(flr_params.items()))
        }

        # --- Step 6: Sample delivery delays (Truncated Normal) ---
        a_trunc, b_trunc = (-10 + 4) / 2, (10 + 4) / 2  # bounds scaled by std
        delivery_delays = {
            f'FLR{i + 1:02d}_DLV_DLY': lhs_transform(
                u_dly[:, i], stats.truncnorm,
                a_trunc, b_trunc, loc=-4, scale=2
            )
            for i in range(n_floors)
        }

        # --- Step 7: Crane failures (Poisson process) with seed control ---
        crane_failures, _, _ = sample_crane_failures(
            self.config.CRANE_FAILURE_RATE,
            self.config.SAMPLE_HOURS,
            sample_size,
            seed=self._get_derived_seed(300)  # use a derived seed
        )

        # --- Step 8: Weather (Daily rain & strong wind) with seed control ---
        rain_prob = self.load_daily_prob(csv_file='Precipitation_stat.csv')
        wind_prob = self.load_daily_prob(csv_file='strong_wind_stat.csv')

        rain_hold = self.sample_daily_precipitation(rain_prob, sample_size, rho=0.015)
        wind_hold = self.sample_daily_strong_wind_events(wind_prob, sample_size, rho=0.2)

        # --- Step 9: Assemble into DataFrame ---
        # Convert numpy containers to plain Python lists for clean CSV round-trips.
        crane_failures_clean = [[float(x) for x in sublist] for sublist in crane_failures]
        rain_hold_clean = [arr.tolist() for arr in rain_hold]
        wind_hold_clean = [arr.tolist() for arr in wind_hold]

        return pd.DataFrame({
            **durations,
            **delivery_delays,
            'CRN_FAL': crane_failures_clean,
            'RAIN_HOLD': rain_hold_clean,
            'WIND_HOLD': wind_hold_clean
        })


def _print_correlation_stats(corr_matrix: pd.DataFrame, label: str) -> None:
    """Prints detailed correlation statistics."""
    values = corr_matrix.values[np.triu_indices_from(corr_matrix, k=1)]
    print(f"{label} Correlation Statistics:")
    print(f" Mean: {values.mean():.4f}, Std Dev: {values.std():.4f}")
    print(f" Range: [{values.min():.4f}, {values.max():.4f}]")
    if "Duration" in label and values.mean() > 0.6:
        print(" ✅ Business insight: High correlation suggests consistent construction team performance.")
    elif "Delay" in label and values.mean() > 0.6:
        print(" ✅ Business insight: High correlation suggests consistent supplier delivery patterns.")


class LHSValidator:
    """Enhanced LHS Validator Class"""

    def __init__(self, config: LHSConfig = None):
        self.config = config or LHSConfig()
        plt.rcParams['font.sans-serif'] = ['Arial']
        plt.rcParams["axes.unicode_minus"] = False
        # Store KS test results.  NOTE(review): this dict is never written to
        # in this class — validate_univariate returns its results instead.
        self.ks_results = {}
        # Create output directory if it doesn't exist
        self.output_dir = Path(DirectoryConfig.OUTPUT_DIR)
        self.output_dir.mkdir(exist_ok=True)

    def _get_output_path(self, filename: str) -> Path:
        """Get full output path for a file"""
        return self.output_dir / filename

    @staticmethod
    def binom_test(k, n, p):
        """Two-sided exact binomial test p-value for k successes out of n with probability p."""
        return stats.binomtest(k, n, p).pvalue

    def validate_univariate(self, df: pd.DataFrame, plot: bool = False) -> Dict[str, Tuple[float, float]]:
        """Enhanced univariate validation with flatter plots for academic papers."""
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        # Group columns by suffix convention established in LHSSampler.sample().
        groups = {
            'Floor Construction Duration': [c for c in numeric_cols if c.endswith('_DUR')],
            'Component Delivery Delay': [c for c in numeric_cols if c.endswith('_DLV_DLY')]
        }

        ks_results = {}  # per-variable KS test results (stat, p-value)

        for group_title, group_cols in groups.items():
            if not group_cols:
                print(f"No columns found for group: {group_title}")
                continue

            # Lay out the subplots in rows of 6.
            n_cols = len(group_cols)
            ncols_per_row = 6
            nrows = (n_cols + ncols_per_row - 1) // ncols_per_row

            fig, axes = plt.subplots(nrows, ncols_per_row, figsize=(24, 3.5 * nrows), dpi=self.config.DPI)
            fig.suptitle(f'Univariate Validation: {group_title}', fontsize=16, y=1.0)
            axes = axes.flatten()

            for i, col in enumerate(group_cols):
                ax = axes[i]
                sample = df[col].dropna().values
                if sample.size == 0:
                    ax.text(0.5, 0.5, "No Data", ha='center', va='center')
                    ax.set_title(col, fontsize=10)
                    continue

                # Rebuild the theoretical distribution that generated this column
                # (parameters mirror those hard-coded in LHSSampler.sample()).
                if 'DUR' in col:
                    a, b, c = (5, 6, 9) if col in ['FLR01_DUR', 'FLR02_DUR', 'FLR18_DUR'] else (4, 5, 8)
                    alpha, beta_param = LHSSampler.pert_to_alpha_beta(a, b, c)
                    dist = stats.beta(alpha, beta_param, loc=a, scale=c - a)
                    dist_name, params_str = 'PERT', f'a={a}, b={b}, c={c}'
                elif 'DLY' in col:
                    mu, sigma, lower, upper = -4.0, 2.0, -10.0, 10.0
                    a_trunc, b_trunc = (lower - mu) / sigma, (upper - mu) / sigma
                    dist = stats.truncnorm(a_trunc, b_trunc, loc=mu, scale=sigma)
                    dist_name, params_str = 'TruncNorm', f'loc={mu}, scale={sigma}\nclip=[{lower}, {upper}]'
                else:
                    continue

                sns.histplot(sample, ax=ax, stat='density', bins='auto', color='skyblue', label='Empirical')
                x_min, x_max = ax.get_xlim()
                x_vals = np.linspace(x_min, x_max, 400)
                # safeguard for domain issues
                try:
                    ax.plot(x_vals, dist.pdf(x_vals), 'r-', lw=2, label='Theoretical PDF')
                except Exception:
                    # if dist.pdf fails for domain values, compute only on valid slice
                    xv = x_vals[
                        (x_vals >= (a if 'DUR' in col else -np.inf)) & (x_vals <= (c if 'DUR' in col else np.inf))]
                    if len(xv) > 0:
                        ax.plot(xv, dist.pdf(xv), 'r-', lw=2, label='Theoretical PDF')

                ax.set_title(col, fontsize=11)
                ax.set_xlabel('Value', fontsize=9)
                ax.set_ylabel('Density', fontsize=9)

                # Q-Q plot rendered as an inset in the upper-right of each panel.
                inset_ax = ax.inset_axes([0.58, 0.55, 0.4, 0.4])
                # stats.probplot supports dist param being a scipy distribution instance
                try:
                    stats.probplot(sample, dist=dist, plot=inset_ax)
                except Exception:
                    # fallback to comparing with a normal Q-Q if custom dist fails
                    stats.probplot(sample, plot=inset_ax)
                inset_ax.set_title('Q-Q Plot', fontsize=8)
                inset_ax.get_lines()[0].set_markerfacecolor('steelblue')
                try:
                    inset_ax.get_lines()[0].set_markeredgecolor('steelblue')
                except Exception:
                    pass
                try:
                    inset_ax.get_lines()[0].set_markersize(3.0)
                except Exception:
                    pass
                try:
                    inset_ax.get_lines()[1].set_color('darkred')
                except Exception:
                    pass
                inset_ax.set_xlabel('Theoretical Quantiles', fontsize=7)
                inset_ax.set_ylabel('Sample Quantiles', fontsize=7)
                inset_ax.tick_params(axis='both', which='major', labelsize=6)

                # KS test
                try:
                    ks_stat, p_value = stats.kstest(sample, dist.cdf)
                except Exception:
                    # fallback: compare empirical CDF to theoretical via internal numeric mapping
                    clipped = np.clip(sample, a if 'DUR' in col else -1e9, c if 'DUR' in col else 1e9)
                    try:
                        ks_stat, p_value = stats.kstest(clipped, dist.cdf)
                    except Exception:
                        ks_stat, p_value = np.nan, np.nan

                # Record the KS test result for this variable.
                ks_results[col] = (ks_stat, p_value)

                stats_text = (
                    f"Distribution: {dist_name}\n"
                    f"Params: {params_str}\n"
                    f"Sample Size: {len(sample)}\n"
                    f"{'─' * 25}\n"
                    f"{'Stat':<5} {'Empirical':<10} {'Theoretical':<10}\n"
                    f"{'Mean':<5} {np.mean(sample):<10.2f} {dist.mean():<10.2f}\n"
                    f"{'Std':<5} {np.std(sample):<10.2f} {dist.std():<10.2f}\n"
                    f"{'─' * 25}\n"
                    f"KS Test: D={ks_stat:.3f}, p={p_value:.3f}"
                )
                ax.text(0.02, 0.98, stats_text, transform=ax.transAxes, fontsize=7,
                        verticalalignment='top', bbox=dict(boxstyle='round,pad=0.3', fc='wheat', alpha=0.6))
                ax.legend(loc='upper right', fontsize=8)

            # Hide any unused panels in the last row.
            for j in range(n_cols, len(axes)):
                axes[j].set_visible(False)
            fig.tight_layout(rect=[0, 0.03, 1, 0.98])

            filename = f"validation_{group_title.lower().replace(' ', '_')}.png"
            output_path = self._get_output_path(filename)
            plt.savefig(output_path, bbox_inches='tight', dpi=self.config.DPI)
            if not plot:
                plt.close(fig)

        return ks_results

    def validate_multivariate(self, df: pd.DataFrame, plot: bool = False) -> None:
        """Check the induced intra-group correlations for durations and delays."""
        numeric_df = df.select_dtypes(include=[np.number])
        duration_cols = [c for c in numeric_df.columns if c.endswith('_DUR')]
        delay_cols = [c for c in numeric_df.columns if c.endswith('_DLV_DLY')]

        if len(duration_cols) >= 2:
            print("\n=== Duration Correlation Analysis ===")
            corr = numeric_df[duration_cols].corr()
            self._plot_correlation_matrix(numeric_df[duration_cols], "Duration Correlation Matrix", plot)
            _print_correlation_stats(corr, "Duration")
        if len(delay_cols) >= 2:
            print("\n=== Delivery Delay Correlation Analysis ===")
            corr = numeric_df[delay_cols].corr()
            self._plot_correlation_matrix(numeric_df[delay_cols], "Delivery Delay Correlation Matrix", plot)
            _print_correlation_stats(corr, "Delivery Delay")

    def _plot_correlation_matrix(self, data: pd.DataFrame, title: str, plot: bool) -> None:
        """Plots correlation matrix with non-overlapping significance stars."""
        # Skipped entirely when plotting is disabled (nothing is saved either).
        if not plot:
            return
        corr_matrix = data.corr()

        fig = plt.figure(figsize=(14, 12), dpi=self.config.DPI)
        # Mask the upper triangle so each pair is annotated only once.
        mask = np.triu(np.ones_like(corr_matrix, dtype=bool))

        sns.heatmap(corr_matrix, mask=mask, annot=True, cmap='coolwarm', fmt=".2f",
                    annot_kws={"size": 8}, center=0, vmin=-1, vmax=1)

        # Overlay significance stars on the lower-triangle cells.
        for i in range(1, corr_matrix.shape[0]):
            for j in range(i):
                _, p_val = stats.pearsonr(data.iloc[:, i], data.iloc[:, j])
                star = ""
                if p_val < 0.01:
                    star = "**"
                elif p_val < 0.05:
                    star = "*"
                else:
                    continue
                plt.text(j + 0.8, i + 0.25, star, ha='center', va='center', color='black', fontsize=10)

        plt.title(f"{title} (*p<0.05, **p<0.01)", fontsize=14)
        plt.xticks(rotation=45, ha='right')
        plt.tight_layout()

        filename = f"validation_{title.lower().replace(' ', '_')}_events.png"
        output_path = self._get_output_path(filename)
        plt.savefig(output_path, bbox_inches='tight', dpi=self.config.DPI)
        if not plot:
            plt.close(fig)

    def validate_crane_failure_process(self, failure_lists: List[List[float]], plot: bool = False) -> Tuple[
        float, float]:
        """Validate crane failure process and return KS statistics"""
        # Expected Poisson mean over the whole observation window.
        mu = self.config.CRANE_FAILURE_RATE * self.config.SAMPLE_HOURS
        failure_counts = [len(s) for s in failure_lists]
        counts_freq = Counter(failure_counts)
        max_obs_count = max(counts_freq.keys()) if counts_freq else 0
        k_values = np.arange(0, max_obs_count + 5)
        theoretical_probs = stats.poisson.pmf(k_values, mu)

        # Pool and sort all failure times across realizations.
        all_failure_times = sorted([t for sublist in failure_lists for t in sublist])

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6), dpi=self.config.DPI)
        fig.suptitle('Validation of Crane Failure (Poisson Process)', fontsize=16)

        # Left panel: empirical failure-count frequencies vs Poisson pmf.
        ax1.bar(counts_freq.keys(), np.array(list(counts_freq.values())) / len(failure_counts),
                alpha=0.7, color='blue', label='Empirical Freq.')
        ax1.plot(k_values, theoretical_probs, 'ro-', label=f'Theoretical Poisson (λt={mu:.2f})')
        ax1.set_xlabel('Number of Failures per 1530 hours')
        ax1.set_ylabel('Probability')
        ax1.set_title('Failure Count Distribution')
        ax1.legend()
        ax1.grid(axis='y', linestyle=':')

        # Right panel: failure times should be uniform over the window.
        sns.histplot(all_failure_times, bins=30, stat="density", alpha=0.7, color='blue',
                     label='Empirical Density', ax=ax2)
        ax2.axhline(y=1 / self.config.SAMPLE_HOURS, color='r', linestyle='--',
                    label='Theoretical Uniform Density')
        ax2.set_xlabel('Failure Time (in hours)')
        ax2.set_ylabel('Probability Density')
        ax2.set_title('Failure Time Distribution')

        ks_stat, p_value = np.nan, np.nan
        if all_failure_times:
            # Rescale to [0, 1] and test against the standard uniform.
            u_values = np.array(all_failure_times) / self.config.SAMPLE_HOURS
            ks_stat, p_value = stats.kstest(u_values, 'uniform')
            result = "Fail to reject H0" if p_value >= 0.05 else "Reject H0"
            ks_text = f"KS Test (Uniformity):\nD={ks_stat:.4f}, p={p_value:.4f}\nResult at α=0.05: {result}"
            ax2.text(0.05, 0.15, ks_text, transform=ax2.transAxes, fontsize=9, verticalalignment='top',
                     bbox=dict(boxstyle='round,pad=0.3', fc='wheat', alpha=0.6))
        ax2.legend()
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])

        filename = "validation_crane_failure.png"
        output_path = self._get_output_path(filename)
        plt.savefig(output_path, bbox_inches='tight', dpi=self.config.DPI)
        print(f"Saved: {output_path}")
        if not plot:
            plt.close(fig)

        return ks_stat, p_value

    def validate_bernoulli_process(self, events: np.ndarray, prob_dict: Dict[Tuple[int, int], float],
                                   event_name: str, plot: bool = False) -> None:
        """Compare empirical daily event frequencies against their theoretical probabilities."""
        daily_freq = np.mean(events, axis=0)
        dates = pd.date_range('2025-03-01', periods=self.config.SAMPLE_DAYS, freq='D')
        theoretical_probs = np.array([prob_dict.get((d.month, d.day), 0.0) for d in dates])
        mae = np.mean(np.abs(daily_freq - theoretical_probs))

        fig = plt.figure(figsize=(12, 6), dpi=self.config.DPI)
        plt.plot(dates, daily_freq, label=f'Empirical Daily Frequency', alpha=0.8, lw=2)
        plt.plot(dates, theoretical_probs, label='Theoretical Daily Probability', linestyle='--', color='red')
        plt.title(f'{event_name} Probability Validation')
        plt.xlabel('Date')
        plt.ylabel('Probability')
        plt.ylim(0, max(1.0, np.max(daily_freq) * 1.1))

        # Count the days whose empirical frequency deviates significantly.
        sig_days_count = 0
        for i, p_theory in enumerate(theoretical_probs):
            if p_theory > 0:
                k = np.sum(events[:, i])
                p_val = self.binom_test(k, events.shape[0], p_theory)
                if p_val < self.config.SIGNIFICANCE_LEVEL:
                    sig_days_count += 1

        info_text = (f"Mean Absolute Error (MAE): {mae:.4f}\n"
                     f"Days with significant deviation (p<0.05): {sig_days_count}")
        plt.text(0.02, 0.98, info_text, transform=plt.gca().transAxes, fontsize=10,
                 verticalalignment='top', bbox=dict(boxstyle='round,pad=0.3', fc='wheat', alpha=0.6))

        plt.legend()
        plt.xticks(rotation=45)
        plt.tight_layout()

        filename = f"validation_{event_name.lower().replace(' ', '_')}_events.png"
        output_path = self._get_output_path(filename)
        plt.savefig(output_path, bbox_inches='tight', dpi=self.config.DPI)
        print(f"Saved: {output_path}")
        if not plot:
            plt.close(fig)

    def _print_ks_summary(self, ks_results: Dict[str, Tuple[float, float]]):
        """Print a summary of the KS test results."""
        if not ks_results:
            print("No KS test results available.")
            return

        print("\n" + "=" * 60)
        print("KS TEST SUMMARY STATISTICS")
        print("=" * 60)

        # Extract all valid (non-NaN) D statistics and p-values.
        d_values = [result[0] for result in ks_results.values() if not np.isnan(result[0])]
        p_values = [result[1] for result in ks_results.values() if not np.isnan(result[1])]

        if d_values and p_values:
            max_d = max(d_values)
            min_p = min(p_values)
            avg_d = np.mean(d_values)
            avg_p = np.mean(p_values)

            print(f"Maximum D statistic: {max_d:.6f}")
            print(f"Minimum p-value:     {min_p:.6f}")
            print(f"Average D statistic: {avg_d:.6f}")
            print(f"Average p-value:     {avg_p:.6f}")

            # Identify which variables produced the extreme statistics.
            max_d_var = [var for var, (d, p) in ks_results.items()
                         if not np.isnan(d) and abs(d - max_d) < 1e-6][0]
            min_p_var = [var for var, (d, p) in ks_results.items()
                         if not np.isnan(p) and abs(p - min_p) < 1e-6][0]

            print(f"Variable with max D:  {max_d_var} (D={max_d:.6f}, p={ks_results[max_d_var][1]:.6f})")
            print(f"Variable with min p:  {min_p_var} (D={ks_results[min_p_var][0]:.6f}, p={min_p:.6f})")

            # Count the statistically significant deviations.
            significant_count = sum(1 for d, p in ks_results.values()
                                    if not np.isnan(p) and p < 0.05)
            total_count = len([p for p in p_values if not np.isnan(p)])

            print(f"Significant results (p < 0.05): {significant_count}/{total_count}")

            # Overall quality assessment based on the worst-case statistics.
            if max_d < 0.05 and min_p > 0.05:
                print("✅ Overall quality: EXCELLENT - All distributions fit well")
            elif max_d < 0.08 and min_p > 0.01:
                print("✅ Overall quality: GOOD - Most distributions fit well")
            elif max_d < 0.10:
                print("⚠️  Overall quality: ACCEPTABLE - Some distributions show minor deviations")
            else:
                print("❌ Overall quality: POOR - Significant deviations detected")

        else:
            print("No valid KS test results available.")
        print("=" * 60)

    def validate_all(self, df: pd.DataFrame, plot: bool = False) -> None:
        """Runs all validation checks and prints KS summary."""
        print("=== Univariate Distribution Validation ===")
        ks_results = self.validate_univariate(df, plot)

        # Print the KS test summary
        self._print_ks_summary(ks_results)

        print("\n=== Multivariate Correlation Validation ===")
        self.validate_multivariate(df, plot)

        if 'CRN_FAL' in df.columns:
            print("\n=== Poisson Process Validation (Crane Failure) ===")
            crane_ks_stat, crane_p_value = self.validate_crane_failure_process(df['CRN_FAL'].tolist(), plot)
            print(f"Crane Failure KS Test: D={crane_ks_stat:.6f}, p={crane_p_value:.6f}")

        if 'RAIN_HOLD' in df.columns:
            print("\n=== Bernoulli Process Validation (Daily Rain Hold) ===")
            rain_prob = LHSSampler.load_daily_prob('Precipitation_stat.csv')
            self.validate_bernoulli_process(np.vstack(df['RAIN_HOLD'].values), rain_prob, "Daily Rain Hold", plot)

        if 'WIND_HOLD' in df.columns:
            print("\n=== Bernoulli Process Validation (Daily Strong Wind Hold) ===")
            wind_prob = LHSSampler.load_daily_prob('strong_wind_stat.csv')
            self.validate_bernoulli_process(np.vstack(df['WIND_HOLD'].values), wind_prob,
                                            "Daily Strong Wind Hold", plot)

        if plot:
            print("\nDisplaying all generated plots. Close plot windows to exit the program.")
            plt.show()


def main():
    """Command-line entry point: sample, persist to CSV, then validate."""
    config = LHSConfig()
    parser = argparse.ArgumentParser(description='Enhanced LHS sampling and validation program')
    parser.add_argument('--plot', action='store_true', help='Display validation plots after saving them.')
    parser.add_argument('--sample-size', type=int, default=config.SAMPLE_SIZE,
                        help=f'Set the sample size (default: {config.SAMPLE_SIZE})')
    parser.add_argument('--seed', type=int, default=config.SEED,
                        help=f'Set the random seed (default: {config.SEED})')
    args = parser.parse_args()

    # Propagate CLI overrides onto the config instance.
    config.SAMPLE_SIZE = args.sample_size
    config.SEED = args.seed

    try:
        print(f"Starting LHS sampling with sample size: {args.sample_size}, seed: {args.seed}")
        df = LHSSampler(config).sample(args.sample_size)
        print("\nFirst 5 rows of sampling results:")
        print(df.head())

        output_csv = "lhs_sampling_results.csv"
        df.to_csv(output_csv, index=False)
        print(f"\nSampling results saved to {output_csv}")

        print("\nStarting validation...")
        LHSValidator(config).validate_all(df, args.plot)
        print("\nValidation complete.")
    except Exception as e:
        # Report, then re-raise so callers still see the full traceback.
        print(f"\nAn error occurred: {e}")
        raise


if __name__ == '__main__':
    # Note: To run this script, you will need the following data files in the same directory:
    # - Precipitation_stat.csv
    # - strong_wind_stat.csv
    # Each must contain 'Month', 'Day', and 'Frequency' columns
    # (they are parsed by LHSSampler.load_daily_prob).
    main()