# -*- coding: utf-8 -*-
"""
Optimized LHS Sampling and Validation Program
- The core sampling logic from the original script is preserved.
- The LHSValidator class is enhanced for more robust and informative validation,
including the addition of Q-Q plots and more detailed statistical summaries on each figure.
- The LHSSampler.sample method is fixed to handle non-positive-definite correlation matrices.
- Plotting functions are adjusted for better academic presentation.
"""
import argparse
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import qmc
from scipy.linalg import cholesky
from typing import Dict, List, Tuple
import warnings

# Filter specific font missing warnings
warnings.filterwarnings("ignore", message="Glyph .* missing from font.*")


class LHSConfig:
    """Configuration parameters class.

    Central collection of tunable constants shared by the sampler and the
    validator. They are plain class attributes, so callers may override
    them at runtime (main() overwrites SAMPLE_SIZE from the CLI).
    """
    SAMPLE_SIZE = 1000  # Number of LHS scenarios to draw per batch
    PLOT_SIZE = (18, 12)  # Default figure size in inches (width, height)
    DPI = 150  # Resolution used when saving validation figures
    SEED = 42  # Global RNG seed for reproducibility
    OUTPUT_PRECISION = 2  # Decimal places for rounded outputs
    CRANE_FAILURE_RATE = 1 / 3000  # Poisson failure rate (failures per hour)
    MTBF = 1 / CRANE_FAILURE_RATE  # Mean time between failures (hours)
    SAMPLE_DAYS = 153  # Simulation horizon in days (Mar 1 - Jul 31; see date ranges below)
    SAMPLE_HOURS = SAMPLE_DAYS * 10  # Horizon in hours (presumably 10 working hours/day — TODO confirm)
    SIGNIFICANCE_LEVEL = 0.05  # Alpha used by the statistical validation tests


def iman_conover_joint(U: np.ndarray, target_corr: np.ndarray) -> np.ndarray:
    """Induces correlation while preserving marginal distributions via a rank-based adjustment procedure.

    Iman-Conover method: the uniform LHS matrix ``U`` (shape
    (n_samples, d)) is mapped to normal scores, linearly transformed so
    the scores' correlation matches ``target_corr``, and finally each
    column of ``U`` is re-ordered to follow the ranks of the transformed
    scores. Each output column is an exact permutation of the
    corresponding input column, so marginals are preserved.

    Args:
        U: LHS sample in (0, 1), one column per variable.
        target_corr: Desired (d x d) correlation matrix.

    Returns:
        Array with the same shape as ``U`` carrying the induced rank
        correlation structure.
    """

    def _fix_correlation_matrix(C: np.ndarray) -> np.ndarray:
        """Finds the nearest valid positive-definite correlation matrix."""
        # Symmetrize and force a unit diagonal before the eigen repair.
        C_symm = (C + C.T) / 2
        np.fill_diagonal(C_symm, 1.0)
        eigvals, eigvecs = np.linalg.eigh(C_symm)
        # Clip non-positive eigenvalues so the Cholesky below succeeds.
        eigvals[eigvals < 1e-12] = 1e-12
        C_pd = eigvecs @ np.diag(eigvals) @ eigvecs.T
        # Rescale back to a proper correlation matrix (unit diagonal).
        D_inv = np.diag(1.0 / np.sqrt(np.diag(C_pd)))
        return D_inv @ C_pd @ D_inv

    try:
        T = cholesky(target_corr, lower=True)
    except np.linalg.LinAlgError:
        print("Warning: Target correlation matrix is not positive definite. Finding the nearest valid matrix.")
        T = cholesky(_fix_correlation_matrix(target_corr), lower=True)

    # Normal scores of the uniforms and their empirical correlation.
    Z = stats.norm.ppf(U)
    R = np.corrcoef(Z, rowvar=False)
    try:
        C_emp = cholesky(R, lower=True)
    except np.linalg.LinAlgError:
        # Empirical correlation can be numerically semi-definite for
        # small sample sizes; repair it the same way as the target.
        C_emp = cholesky(_fix_correlation_matrix(R), lower=True)

    # 【FIX】With samples in rows, corr(Z @ A) = A.T @ R @ A.  Whitening
    # therefore needs A = inv(C_emp).T (giving A.T R A = I), and
    # re-correlating needs a final @ T.T (giving T @ T.T = target).
    # The previous `Z @ inv(C_emp) @ T` used the transposed factors and
    # induced a noticeably weaker correlation than requested.
    Z_white = np.linalg.solve(C_emp, Z.T).T  # == Z @ inv(C_emp).T, numerically stabler
    Z_new = Z_white @ T.T

    # Re-order the original uniforms to follow the ranks of the adjusted scores.
    ranks = Z_new.argsort(axis=0).argsort(axis=0)
    U_sorted = np.sort(U, axis=0)
    return np.array([U_sorted[ranks[:, j], j] for j in range(U.shape[1])]).T


def sample_crane_failures(failure_rate, max_hours=1530.0, sample_size=1) -> tuple[
    list[list[float]], list, list]:
    """Simulates failure-time sequences for a Poisson failure process.

    For each of ``sample_size`` scenarios, draws a Poisson failure count
    (mean ``failure_rate * max_hours``) and then uniform failure times on
    [0, max_hours].

    Returns:
        A 3-tuple of
        - per-scenario lists of failure times rounded to 2 decimals,
        - per-scenario failure counts,
        - all raw failure times pooled across scenarios (each scenario's
          segment sorted before being appended).
    """
    per_scenario_times = []
    counts = []
    pooled_times = []
    for _ in range(sample_size):
        n_failures = np.random.poisson(failure_rate * max_hours)
        counts.append(n_failures)
        if n_failures == 0:
            per_scenario_times.append([])
            continue
        times = np.random.uniform(0, max_hours, size=n_failures)
        per_scenario_times.append(list(np.round(times, 2)))
        times.sort()
        pooled_times.extend(times)
    return per_scenario_times, counts, pooled_times


def lhs_transform(u: np.ndarray, dist, *args, **kwargs) -> np.ndarray:
    """
    【OPTIMIZED】Maps stratified LHS uniforms through a distribution's
    percent-point function (inverse CDF).

    The uniforms are first compressed from [0, 1] into
    [epsilon, 1 - epsilon] so the exact endpoints never reach the PPF;
    feeding 0 or 1 would otherwise pile samples up at the distribution's
    boundaries (or yield infinities).
    """
    if u.ndim != 1:
        raise ValueError("Input must be a 1D array")

    eps = 1e-9
    # Affine squeeze: preserves ordering, hence the LHS stratification.
    return dist.ppf(eps + u * (1 - 2 * eps), *args, **kwargs)


class LHSSampler:
    """LHS Sampler Class

    Draws one batch of correlated simulation inputs: PERT floor
    durations, truncated-normal delivery delays, Poisson crane-failure
    times, and persistent Bernoulli daily weather holds.
    """

    def __init__(self, config: LHSConfig = None):
        # Use the supplied configuration, or fall back to the defaults.
        self.config = config or LHSConfig()
        self._validate_config()
        # Seed the global NumPy RNG once so all downstream draws are reproducible.
        np.random.seed(self.config.SEED)

    def _validate_config(self):
        """Fails fast on configuration values that would make sampling meaningless."""
        if self.config.SAMPLE_SIZE <= 0:
            raise ValueError("Sample size must be a positive integer")
        if self.config.CRANE_FAILURE_RATE <= 0:
            raise ValueError("Failure rate must be positive")

    @staticmethod
    def pert_to_alpha_beta(a: float, b: float, c: float) -> Tuple[float, float]:
        """Converts PERT parameters (min=a, mode=b, max=c) to Beta shape parameters.

        Raises:
            ValueError: If c <= a (empty or inverted range).
        """
        if c <= a:
            raise ValueError("For PERT distribution, parameter 'c' must be greater than 'a'")
        # Using the standard PERT formulation with gamma = 4
        alpha_param = 1 + 4 * (b - a) / (c - a)
        beta_param = 1 + 4 * (c - b) / (c - a)
        return alpha_param, beta_param

    def pert_rvs(self, a: float, b: float, c: float, size: int = None, u: np.ndarray = None) -> np.ndarray:
        """
        【OPTIMIZED】Samples from a PERT distribution using LHS samples.
        This version also squeezes the [0, 1] interval to prevent pile-up at the
        maximum value 'c' of the distribution.

        When ``u`` is given it is treated as LHS uniforms and mapped through
        the Beta PPF; otherwise ``size`` i.i.d. Beta variates are drawn.
        """
        alpha_param, beta_param = self.pert_to_alpha_beta(a, b, c)
        if u is None:
            # Fallback for non-LHS random sampling
            return a + (c - a) * stats.beta.rvs(alpha_param, beta_param, size=size)

        # Squeeze the [0, 1] interval to [epsilon, 1-epsilon] to avoid boundary pile-up
        epsilon = 1e-9
        u_squeezed = epsilon + u * (1 - 2 * epsilon)

        # Rescale the standard Beta sample from [0, 1] onto [a, c].
        return a + (c - a) * stats.beta.ppf(u_squeezed, alpha_param, beta_param)

    @staticmethod
    def load_daily_prob(
            csv_file: str,
            start_date: Tuple[int, int] = (3, 1),
            end_date: Tuple[int, int] = (7, 31)
    ) -> Dict[Tuple[int, int], float]:
        """Loads daily event probabilities from a Month/Day/Frequency CSV.

        Keeps only rows whose (month, day) falls inside the inclusive
        [start_date, end_date] window and returns a
        {(month, day): probability} mapping.

        Raises:
            ValueError: On a missing file/columns, probabilities outside
                [0, 1], an empty date range, or any other load failure.
        """
        try:
            df = pd.read_csv(csv_file, usecols=['Month', 'Day', 'Frequency'])
            df['Month'] = df['Month'].astype(int)
            df['Day'] = df['Day'].astype(int)
            if (df['Frequency'] < 0).any() or (df['Frequency'] > 1).any():
                raise ValueError("Probability values must be in the [0, 1] range")
            df['Frequency'] = df['Frequency'].clip(0, 1)

            def date_in_range(row):
                # Compare (month, day) tuples against the inclusive window;
                # same-month and cross-month windows are handled separately.
                md = (row['Month'], row['Day'])
                if start_date[0] == end_date[0]:
                    return start_date[0] == md[0] and start_date[1] <= md[1] <= end_date[1]
                return (md[0] > start_date[0] or (md[0] == start_date[0] and md[1] >= start_date[1])) and \
                    (md[0] < end_date[0] or (md[0] == end_date[0] and md[1] <= end_date[1]))

            filtered_df = df[df.apply(date_in_range, axis=1)].copy()
            if filtered_df.empty:
                raise ValueError(f"No valid data found in date range {start_date}-{end_date}")
            return {(r.Month, r.Day): r.Frequency for _, r in filtered_df.iterrows()}
        except (KeyError, FileNotFoundError) as e:
            raise ValueError(f"CSV file must contain Month/Day/Frequency columns: {e}")
        except Exception as e:
            # Normalize every other failure mode to ValueError for callers.
            raise ValueError(f"Failed to load probability file: {str(e)}")

    def sample_daily_strong_wind_events(
            self, prob_dict: Dict[Tuple[int, int], float],
            sample_size: int, rho: float = 0.2, min_prob: float = 1e-4
    ) -> np.ndarray:
        """Samples daily strong-wind flags with day-to-day persistence.

        Each day's base probability comes from ``prob_dict`` (floored at
        ``min_prob``); ``rho`` raises the probability after a windy day
        and lowers it after a calm one (first-order chain).

        Returns:
            Boolean array of shape (sample_size, SAMPLE_DAYS).
        """
        strong_wind = np.zeros((sample_size, self.config.SAMPLE_DAYS), dtype=bool)
        u_matrix = np.random.rand(sample_size, self.config.SAMPLE_DAYS)
        date_range = pd.date_range('2025-03-01', periods=self.config.SAMPLE_DAYS, freq='D')
        p_base = np.array([max(prob_dict.get((d.month, d.day), 0.0), min_prob) for d in date_range])
        for day_idx, pi in enumerate(p_base):
            if day_idx > 0:
                prev = strong_wind[:, day_idx - 1]
                # Conditional probabilities given yesterday's state, kept
                # strictly inside (0, 1) via min_prob.
                p_given_wind = min(pi + rho * (1 - pi), 1.0 - min_prob)
                p_given_no_wind = max(pi * (1 - rho), min_prob)
                p = np.where(prev, p_given_wind, p_given_no_wind)
            else:
                p = pi
            strong_wind[:, day_idx] = u_matrix[:, day_idx] < p
        return strong_wind

    def sample_daily_precipitation(
            self, prob_dict: Dict[Tuple[int, int], float],
            sample_size: int, rho: float = 0.015, min_prob: float = 1e-4
    ) -> np.ndarray:
        """Samples daily precipitation flags.

        Same persistence scheme as sample_daily_strong_wind_events, with
        a weaker default ``rho``.

        Returns:
            Boolean array of shape (sample_size, SAMPLE_DAYS).
        """
        precip = np.zeros((sample_size, self.config.SAMPLE_DAYS), dtype=bool)
        u_matrix = np.random.rand(sample_size, self.config.SAMPLE_DAYS)
        date_range = pd.date_range('2025-03-01', periods=self.config.SAMPLE_DAYS, freq='D')
        p_base = np.array([max(prob_dict.get((d.month, d.day), 0.0), min_prob) for d in date_range])
        for day_idx, pi in enumerate(p_base):
            p = pi
            if day_idx > 0:
                prev = precip[:, day_idx - 1]
                p_given_rain = min(pi + rho * (1 - pi), 1.0 - min_prob)
                p_given_no_rain = max(pi * (1 - rho), min_prob)
                p = np.where(prev, p_given_rain, p_given_no_rain)
            precip[:, day_idx] = u_matrix[:, day_idx] < p
        return precip

    def sample(self, sample_size: int = None) -> pd.DataFrame:
        """Generate one batch of samples.

        Returns:
            DataFrame with one row per scenario: 18 ``*_DUR`` PERT
            duration columns, 18 ``*_DLV_DLY`` truncated-normal delay
            columns, ``CRN_FAL`` (list of failure times per scenario),
            and ``RAIN_HOLD`` / ``WIND_HOLD`` (per-day boolean vectors).
        """
        sample_size = sample_size or self.config.SAMPLE_SIZE
        n_floors = 18

        # --- Step 1: LHS for durations ---
        lhs_sampler_dur = qmc.LatinHypercube(d=n_floors, seed=self.config.SEED)
        u_dur = lhs_sampler_dur.random(sample_size)

        # --- Step 2: LHS for delivery delays ---
        # A different seed keeps the two LHS designs distinct.
        lhs_sampler_dly = qmc.LatinHypercube(d=n_floors, seed=self.config.SEED + 1)
        u_dly = lhs_sampler_dly.random(sample_size)

        # --- Step 3: Construct intra-group correlation matrices ---
        # AR(1)-style decay: correlation between floors i and j is rho_floor**|i-j|.
        rho_floor = 0.9
        target_corr_dur = np.eye(n_floors)
        target_corr_dly = np.eye(n_floors)
        for i in range(n_floors):
            for j in range(n_floors):
                if i != j:
                    target_corr_dur[i, j] = rho_floor ** abs(i - j)
                    target_corr_dly[i, j] = rho_floor ** abs(i - j)

        # --- Step 4: Induce correlation separately for each group ---
        u_dur = iman_conover_joint(u_dur, target_corr_dur)
        u_dly = iman_conover_joint(u_dly, target_corr_dly)

        # --- Step 5: Sample durations (PERT distribution) ---
        # Floors 1, 2 and 18 use the larger (5, 6, 9) parameter set.
        flr_params = {
            f'FLR{i:02d}': (5, 6, 9) if i in [1, 2, 18] else (4, 5, 8)
            for i in range(1, 19)
        }
        durations = {
            f'{flr}_DUR': self.pert_rvs(*params, u=u_dur[:, i])
            for i, (flr, params) in enumerate(sorted(flr_params.items()))
        }

        # --- Step 6: Sample delivery delays (Truncated Normal) ---
        a_trunc, b_trunc = (-10 + 4) / 2, (10 + 4) / 2  # bounds scaled by std
        delivery_delays = {
            f'FLR{i + 1:02d}_DLV_DLY': lhs_transform(
                u_dly[:, i], stats.truncnorm,
                a_trunc, b_trunc, loc=-4, scale=2
            )
            for i in range(n_floors)
        }

        # --- Step 7: Crane failures (Poisson process) ---
        crane_failures, _, _ = sample_crane_failures(
            self.config.CRANE_FAILURE_RATE,
            self.config.SAMPLE_HOURS,
            sample_size
        )

        # --- Step 8: Weather (Daily rain & strong wind) ---
        # NOTE: reads the two CSV files from the current working directory.
        rain_prob = self.load_daily_prob(csv_file='Precipitation_stat.csv')
        wind_prob = self.load_daily_prob(csv_file='strong_wind_stat.csv')

        rain_hold = self.sample_daily_precipitation(
            rain_prob, sample_size, rho=0.015
        )
        wind_hold = self.sample_daily_strong_wind_events(
            wind_prob, sample_size, rho=0.2
        )

        # --- Step 9: Assemble into DataFrame ---
        # List-valued columns store one sequence object per scenario row.
        return pd.DataFrame({
            **durations,
            **delivery_delays,
            'CRN_FAL': crane_failures,
            'RAIN_HOLD': list(rain_hold),
            'WIND_HOLD': list(wind_hold),
        })


def _print_correlation_stats(corr_matrix: pd.DataFrame, label: str) -> None:
    """Prints detailed correlation statistics."""
    values = corr_matrix.values[np.triu_indices_from(corr_matrix, k=1)]
    print(f"{label} Correlation Statistics:")
    print(f" Mean: {values.mean():.4f}, Std Dev: {values.std():.4f}")
    print(f" Range: [{values.min():.4f}, {values.max():.4f}]")
    if "Duration" in label and values.mean() > 0.6:
        print(" ✅ Business insight: High correlation suggests consistent construction team performance.")
    elif "Delay" in label and values.mean() > 0.6:
        print(" ✅ Business insight: High correlation suggests consistent supplier delivery patterns.")


class LHSValidator:
    """Enhanced LHS Validator Class

    Runs statistical checks on each sampled component and renders
    figures. Every figure is saved to a PNG; figures are shown on screen
    only when ``plot=True`` is passed down through validate_all().
    """

    def __init__(self, config: LHSConfig = None):
        # Fall back to the default configuration when none is supplied.
        self.config = config or LHSConfig()
        # Plain western font; keep the minus sign renderable with it.
        plt.rcParams['font.sans-serif'] = ['Arial']
        plt.rcParams["axes.unicode_minus"] = False

    @staticmethod
    def binom_test(k, n, p):
        """Returns the two-sided exact binomial-test p-value for k successes in n trials."""
        return stats.binomtest(k, n, p).pvalue

    def validate_univariate(self, df: pd.DataFrame, plot: bool = False) -> None:
        """Enhanced univariate validation with flatter plots for academic papers.

        For every *_DUR and *_DLV_DLY column: histogram vs. theoretical
        PDF, an inset Q-Q plot, and a KS-test summary box. One figure is
        saved per column group.
        """
        numeric_cols = df.select_dtypes(include=[np.number]).columns
        # Split columns into the two figure groups by their name suffix.
        groups = {
            'Floor Construction Duration': [c for c in numeric_cols if c.endswith('_DUR')],
            'Component Delivery Delay': [c for c in numeric_cols if c.endswith('_DLV_DLY')]
        }

        for group_title, group_cols in groups.items():
            if not group_cols:
                print(f"No columns found for group: {group_title}")
                continue

            n_cols = len(group_cols)
            ncols_per_row = 6
            # Ceiling division: enough rows to hold every subplot.
            nrows = (n_cols + ncols_per_row - 1) // ncols_per_row

            fig, axes = plt.subplots(nrows, ncols_per_row, figsize=(24, 3.5 * nrows), dpi=self.config.DPI)
            fig.suptitle(f'Univariate Validation: {group_title}', fontsize=16, y=1.0)
            axes = axes.flatten()

            for i, col in enumerate(group_cols):
                ax = axes[i]
                sample = df[col].dropna().values
                if sample.size == 0:
                    ax.text(0.5, 0.5, "No Data", ha='center', va='center')
                    ax.set_title(col, fontsize=10)
                    continue

                # Rebuild the theoretical distribution for this column;
                # the parameters mirror those used in LHSSampler.sample.
                if 'DUR' in col:
                    a, b, c = (5, 6, 9) if col in ['FLR01_DUR', 'FLR02_DUR', 'FLR18_DUR'] else (4, 5, 8)
                    alpha, beta_param = LHSSampler.pert_to_alpha_beta(a, b, c)
                    dist = stats.beta(alpha, beta_param, loc=a, scale=c - a)
                    dist_name, params_str = 'PERT', f'a={a}, b={b}, c={c}'
                elif 'DLY' in col:
                    mu, sigma, lower, upper = -4.0, 2.0, -10.0, 10.0
                    a_trunc, b_trunc = (lower - mu) / sigma, (upper - mu) / sigma
                    dist = stats.truncnorm(a_trunc, b_trunc, loc=mu, scale=sigma)
                    dist_name, params_str = 'TruncNorm', f'loc={mu}, scale={sigma}\nclip=[{lower}, {upper}]'
                else:
                    continue

                sns.histplot(sample, ax=ax, stat='density', bins='auto', color='skyblue', label='Empirical')
                x_min, x_max = ax.get_xlim()
                x_vals = np.linspace(x_min, x_max, 400)
                ax.plot(x_vals, dist.pdf(x_vals), 'r-', lw=2, label='Theoretical PDF')
                ax.set_title(col, fontsize=11)
                ax.set_xlabel('Value', fontsize=9)
                ax.set_ylabel('Density', fontsize=9)

                # Inset Q-Q plot against the same theoretical distribution.
                inset_ax = ax.inset_axes([0.58, 0.55, 0.4, 0.4])
                stats.probplot(sample, dist=dist, plot=inset_ax)
                inset_ax.set_title('Q-Q Plot', fontsize=8)
                # Line 0 holds the sample markers, line 1 the reference line.
                inset_ax.get_lines()[0].set_markerfacecolor('steelblue')
                inset_ax.get_lines()[0].set_markeredgecolor('steelblue')
                inset_ax.get_lines()[0].set_markersize(3.0)
                inset_ax.get_lines()[1].set_color('darkred')
                inset_ax.set_xlabel('Theoretical Quantiles', fontsize=7)
                inset_ax.set_ylabel('Sample Quantiles', fontsize=7)
                inset_ax.tick_params(axis='both', which='major', labelsize=6)

                # Kolmogorov-Smirnov goodness-of-fit vs. the theoretical CDF.
                ks_stat, p_value = stats.kstest(sample, dist.cdf)
                stats_text = (
                    f"Distribution: {dist_name}\n"
                    f"Params: {params_str}\n"
                    f"Sample Size: {len(sample)}\n"
                    f"{'─' * 25}\n"
                    f"{'Stat':<5} {'Empirical':<10} {'Theoretical':<10}\n"
                    f"{'Mean':<5} {np.mean(sample):<10.2f} {dist.mean():<10.2f}\n"
                    f"{'Std':<5} {np.std(sample):<10.2f} {dist.std():<10.2f}\n"
                    f"{'─' * 25}\n"
                    f"KS Test: D={ks_stat:.3f}, p={p_value:.3f}"
                )
                ax.text(0.02, 0.98, stats_text, transform=ax.transAxes, fontsize=7,
                        verticalalignment='top', bbox=dict(boxstyle='round,pad=0.3', fc='wheat', alpha=0.6))
                ax.legend(loc='upper right', fontsize=8)

            # Hide any unused trailing subplots in the last row.
            for j in range(n_cols, len(axes)):
                axes[j].set_visible(False)
            fig.tight_layout(rect=[0, 0.03, 1, 0.98])

            filename = f"validation_{group_title.lower().replace(' ', '_')}.png"
            plt.savefig(filename, bbox_inches='tight', dpi=self.config.DPI)
            # 【FIX】If not plotting to screen, close the figure to save memory
            if not plot:
                plt.close(fig)

    def validate_multivariate(self, df: pd.DataFrame, plot: bool = False) -> None:
        """Validates the induced correlation structure of durations and delays."""
        numeric_df = df.select_dtypes(include=[np.number])
        duration_cols = [c for c in numeric_df.columns if c.endswith('_DUR')]
        delay_cols = [c for c in numeric_df.columns if c.endswith('_DLV_DLY')]

        if len(duration_cols) >= 2:
            print("\n=== Duration Correlation Analysis ===")
            corr = numeric_df[duration_cols].corr()
            self._plot_correlation_matrix(numeric_df[duration_cols], "Duration Correlation Matrix", plot)
            _print_correlation_stats(corr, "Duration")
        if len(delay_cols) >= 2:
            print("\n=== Delivery Delay Correlation Analysis ===")
            corr = numeric_df[delay_cols].corr()
            self._plot_correlation_matrix(numeric_df[delay_cols], "Delivery Delay Correlation Matrix", plot)
            _print_correlation_stats(corr, "Delivery Delay")

    def _plot_correlation_matrix(self, data: pd.DataFrame, title: str, plot: bool) -> None:
        """Plots correlation matrix with non-overlapping significance stars."""
        # Unlike the other validators, this figure is only built when it
        # will actually be displayed.
        if not plot:
            return
        corr_matrix = data.corr()

        fig = plt.figure(figsize=(14, 12), dpi=self.config.DPI)
        # Mask the upper triangle (incl. diagonal) — it mirrors the lower half.
        mask = np.triu(np.ones_like(corr_matrix, dtype=bool))

        sns.heatmap(corr_matrix, mask=mask, annot=True, cmap='coolwarm', fmt=".2f",
                    annot_kws={"size": 8}, center=0, vmin=-1, vmax=1)

        # Overlay significance stars next to each annotated lower-triangle cell.
        for i in range(1, corr_matrix.shape[0]):
            for j in range(i):
                _, p_val = stats.pearsonr(data.iloc[:, i], data.iloc[:, j])
                star = ""
                if p_val < 0.01:
                    star = "**"
                elif p_val < 0.05:
                    star = "*"
                else:
                    continue
                plt.text(j + 0.8, i + 0.25, star, ha='center', va='center', color='black', fontsize=10)

        plt.title(f"{title} (*p<0.05, **p<0.01)", fontsize=14)
        plt.xticks(rotation=45, ha='right')
        plt.tight_layout()

        filename = f"validation_{title.lower().replace(' ', '_')}_events.png"
        plt.savefig(filename, bbox_inches='tight', dpi=self.config.DPI)
        # 【FIX】If not plotting to screen, close the figure to save memory
        # NOTE(review): dead branch — the early `return` above already exits
        # when plot is False, so this close can never run.
        if not plot:
            plt.close(fig)

    def validate_crane_failure_process(self, failure_lists: List[List[float]], plot: bool = False) -> None:
        """Validates crane failures against the homogeneous Poisson model:
        per-scenario counts vs. the Poisson PMF, and pooled event times
        vs. the uniform density (with a KS test for uniformity).
        """
        # Expected failures per scenario: rate * observation hours.
        mu = self.config.CRANE_FAILURE_RATE * self.config.SAMPLE_HOURS
        failure_counts = [len(s) for s in failure_lists]
        counts_freq = Counter(failure_counts)
        max_obs_count = max(counts_freq.keys()) if counts_freq else 0
        k_values = np.arange(0, max_obs_count + 5)
        theoretical_probs = stats.poisson.pmf(k_values, mu)

        all_failure_times = sorted([t for sublist in failure_lists for t in sublist])

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 6), dpi=self.config.DPI)
        fig.suptitle('Validation of Crane Failure (Poisson Process)', fontsize=16)

        ax1.bar(counts_freq.keys(), np.array(list(counts_freq.values())) / len(failure_counts),
                alpha=0.7, color='blue', label='Empirical Freq.')
        ax1.plot(k_values, theoretical_probs, 'ro-', label=f'Theoretical Poisson (λt={mu:.2f})')
        ax1.set_xlabel('Number of Failures per 1530 hours')
        ax1.set_ylabel('Probability')
        ax1.set_title('Failure Count Distribution')
        ax1.legend()
        ax1.grid(axis='y', linestyle=':')

        # For a homogeneous Poisson process, event times conditioned on
        # the count are uniform over the observation window.
        sns.histplot(all_failure_times, bins=30, stat="density", alpha=0.7, color='blue',
                     label='Empirical Density', ax=ax2)
        ax2.axhline(y=1 / self.config.SAMPLE_HOURS, color='r', linestyle='--',
                    label='Theoretical Uniform Density')
        ax2.set_xlabel('Failure Time (in hours)')
        ax2.set_ylabel('Probability Density')
        ax2.set_title('Failure Time Distribution')

        if all_failure_times:
            # Rescale times to [0, 1] and KS-test against the standard uniform.
            u_values = np.array(all_failure_times) / self.config.SAMPLE_HOURS
            ks_stat, p_value = stats.kstest(u_values, 'uniform')
            result = "Fail to reject H0" if p_value >= 0.05 else "Reject H0"
            ks_text = f"KS Test (Uniformity):\nD={ks_stat:.4f}, p={p_value:.4f}\nResult at α=0.05: {result}"
            ax2.text(0.05, 0.95, ks_text, transform=ax2.transAxes, fontsize=9, verticalalignment='top',
                     bbox=dict(boxstyle='round,pad=0.3', fc='wheat', alpha=0.6))
        ax2.legend()
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])

        filename = "validation_crane_failure.png"
        plt.savefig(filename, bbox_inches='tight', dpi=self.config.DPI)
        # 【FIX】If not plotting to screen, close the figure to save memory
        if not plot:
            plt.close(fig)

    def validate_bernoulli_process(self, events: np.ndarray, prob_dict: Dict[Tuple[int, int], float],
                                   event_name: str, plot: bool = False) -> None:
        """Compares empirical daily event frequencies with the theoretical
        daily probabilities (MAE plus a per-day exact binomial test).

        Args:
            events: Boolean matrix, one row per scenario, one column per day.
            prob_dict: {(month, day): probability} as loaded from CSV.
            event_name: Label used in the figure title and output filename.
        """
        daily_freq = np.mean(events, axis=0)
        dates = pd.date_range('2025-03-01', periods=self.config.SAMPLE_DAYS, freq='D')
        theoretical_probs = np.array([prob_dict.get((d.month, d.day), 0.0) for d in dates])
        mae = np.mean(np.abs(daily_freq - theoretical_probs))

        fig = plt.figure(figsize=(12, 6), dpi=self.config.DPI)
        plt.plot(dates, daily_freq, label=f'Empirical Daily Frequency', alpha=0.8, lw=2)
        plt.plot(dates, theoretical_probs, label='Theoretical Daily Probability', linestyle='--', color='red')
        plt.title(f'{event_name} Probability Validation')
        plt.xlabel('Date')
        plt.ylabel('Probability')
        plt.ylim(0, max(1.0, np.max(daily_freq) * 1.1))

        # Count days whose empirical frequency deviates significantly from theory.
        sig_days_count = 0
        for i, p_theory in enumerate(theoretical_probs):
            if p_theory > 0:
                k = np.sum(events[:, i])
                p_val = self.binom_test(k, events.shape[0], p_theory)
                if p_val < self.config.SIGNIFICANCE_LEVEL:
                    sig_days_count += 1

        info_text = (f"Mean Absolute Error (MAE): {mae:.4f}\n"
                     f"Days with significant deviation (p<0.05): {sig_days_count}")
        plt.text(0.02, 0.98, info_text, transform=plt.gca().transAxes, fontsize=10,
                 verticalalignment='top', bbox=dict(boxstyle='round,pad=0.3', fc='wheat', alpha=0.6))

        plt.legend()
        plt.xticks(rotation=45)
        plt.tight_layout()

        filename = f"validation_{event_name.lower().replace(' ', '_')}_events.png"
        plt.savefig(filename, bbox_inches='tight', dpi=self.config.DPI)
        # 【FIX】If not plotting to screen, close the figure to save memory
        if not plot:
            plt.close(fig)

    def validate_all(self, df: pd.DataFrame, plot: bool = False) -> None:
        """Runs all validation checks.

        Note: re-reads the two weather CSVs from the working directory to
        obtain the theoretical probabilities for the Bernoulli checks.
        """
        print("=== Univariate Distribution Validation ===")
        self.validate_univariate(df, plot)

        print("\n=== Multivariate Correlation Validation ===")
        self.validate_multivariate(df, plot)

        if 'CRN_FAL' in df.columns:
            print("\n=== Poisson Process Validation (Crane Failure) ===")
            self.validate_crane_failure_process(df['CRN_FAL'].tolist(), plot)

        if 'RAIN_HOLD' in df.columns:
            print("\n=== Bernoulli Process Validation (Daily Rain Hold) ===")
            rain_prob = LHSSampler.load_daily_prob('Precipitation_stat.csv')
            self.validate_bernoulli_process(np.vstack(df['RAIN_HOLD'].values), rain_prob, "Daily Rain Hold", plot)

        if 'WIND_HOLD' in df.columns:
            print("\n=== Bernoulli Process Validation (Daily Strong Wind Hold) ===")
            wind_prob = LHSSampler.load_daily_prob('strong_wind_stat.csv')
            self.validate_bernoulli_process(np.vstack(df['WIND_HOLD'].values), wind_prob,
                                            "Daily Strong Wind Hold", plot)

        # 【FIX】After all plots are generated in memory, show them all at once.
        if plot:
            print("\nDisplaying all generated plots. Close plot windows to exit the program.")
            plt.show()


def main():
    """CLI entry point: parse arguments, sample, persist, then validate."""
    config = LHSConfig()
    parser = argparse.ArgumentParser(description='Enhanced LHS sampling and validation program')
    parser.add_argument('--plot', action='store_true', help='Display validation plots after saving them.')
    parser.add_argument('--sample-size', type=int, default=config.SAMPLE_SIZE,
                        help=f'Set the sample size (default: {config.SAMPLE_SIZE})')
    args = parser.parse_args()
    config.SAMPLE_SIZE = args.sample_size

    try:
        print(f"Starting LHS sampling with sample size: {args.sample_size}...")
        df = LHSSampler(config).sample(args.sample_size)
        print("\nFirst 5 rows of sampling results:")
        print(df.head())

        # Persist the raw samples next to the script before validating.
        output_csv = "lhs_sampling_results.csv"
        df.to_csv(output_csv, index=False)
        print(f"\nSampling results saved to {output_csv}")

        print("\nStarting validation...")
        LHSValidator(config).validate_all(df, args.plot)
        print("\nValidation complete.")
    except Exception as e:
        # Surface the failure to the console, then re-raise for a real traceback.
        print(f"\nAn error occurred: {e}")
        raise


if __name__ == '__main__':
    # Note: To run this script, you will need the following data files in the same directory:
    # - Precipitation_stat.csv
    # - strong_wind_stat.csv
    # These files should contain 'Month', 'Day', and 'Frequency' columns
    # (see LHSSampler.load_daily_prob for the expected format).
    main()