import numpy as np
from scipy import stats
from scipy.stats import qmc
import matplotlib.pyplot as plt
import seaborn as sns

# --- 1. Target distribution, identical to the one in the main program ---
# Component delivery delay: a normal(mu, sigma) truncated to [lower, upper].
mu, sigma, lower, upper = -4.0, 2.0, -10.0, 10.0
# scipy's truncnorm takes its truncation bounds in standard-normal units.
a_trunc = (lower - mu) / sigma
b_trunc = (upper - mu) / sigma
target_dist = stats.truncnorm(a_trunc, b_trunc, loc=mu, scale=sigma)

SAMPLE_SIZE = 1000
np.random.seed(42)  # make the rvs() draws below reproducible

# --- 2. Plain Monte Carlo (pseudo-random) sampling ---
random_samples = target_dist.rvs(size=SAMPLE_SIZE)
ks_stat_random, p_value_random = stats.kstest(random_samples, target_dist.cdf)

# --- 3. Latin Hypercube Sampling (same logic as the main program) ---
# Draw stratified uniforms on [0, 1), then push them through the inverse CDF
# (PPF) so the result follows the target distribution.
sampler = qmc.LatinHypercube(d=1, seed=42)
u_lhs = sampler.random(n=SAMPLE_SIZE)
lhs_samples = target_dist.ppf(u_lhs[:, 0])  # (n, 1) -> (n,) before transforming
ks_stat_lhs, p_value_lhs = stats.kstest(lhs_samples, target_dist.cdf)


# --- 4. Report the goodness-of-fit results ---
# NOTE: the headers are plain literals — the original used f-string prefixes
# with no placeholders (ruff F541); removed here with identical output.
print("--- Standard Random Sampling ---")
print(f"KS Statistic (D): {ks_stat_random:.6f}")
print(f"P-value: {p_value_random:.6f}\n")  # typically unremarkable, e.g. > 0.05

print("--- Latin Hypercube Sampling (LHS) ---")
print(f"KS Statistic (D): {ks_stat_lhs:.6f}")  # D will be extremely small
print(f"P-value: {p_value_lhs:.6f}\n")  # p will be very close to 1.0

# Top row of the figure: empirical density histograms vs. the theoretical PDF.
fig, axes = plt.subplots(2, 2, figsize=(16, 10), dpi=120)
fig.suptitle("LHS vs. Standard Random Sampling Comparison", fontsize=16)

# Dense grid over the distribution's support, used for the PDF/CDF overlays.
x_vals = np.linspace(-10, 10, 400)

# One spec per panel: (sample, target axis, bar color, panel title).
hist_panels = [
    (random_samples, axes[0, 0], 'skyblue',
     f"Random Sampling Histogram\np-value = {p_value_random:.4f}"),
    (lhs_samples, axes[0, 1], 'salmon',
     f"LHS Histogram\np-value = {p_value_lhs:.4f}"),
]
for data, ax, color, title in hist_panels:
    sns.histplot(data, bins=40, stat='density', ax=ax, color=color, label='Empirical')
    ax.plot(x_vals, target_dist.pdf(x_vals), 'r-', lw=2, label='Theoretical PDF')
    ax.legend()
    ax.set_title(title)

# 绘制ECDF与理论CDF的对比 (这才是K-S检验的核心)
def plot_cdf_comparison(sample, ax, title):
    """Overlay the empirical CDF of *sample* with the theoretical CDF on *ax*.

    The maximum vertical gap between these two curves is exactly the K-S
    statistic D, which is why this view is the heart of the K-S test.
    Relies on the module-level ``x_vals`` grid and ``target_dist``.
    """
    n = len(sample)
    # ECDF: a step function rising by 1/n at each sorted observation.
    ax.plot(np.sort(sample), np.arange(1, n + 1) / n,
            label='Empirical CDF (ECDF)', color='blue')
    ax.plot(x_vals, target_dist.cdf(x_vals),
            label='Theoretical CDF', color='red', linestyle='--')
    ax.set_title(title)
    ax.legend()
    ax.grid(alpha=0.3)

# Bottom row: ECDF vs. theoretical CDF — the comparison the K-S test performs.
cdf_panels = [
    (random_samples, axes[1, 0],
     f"Random Sampling CDF vs. Theoretical\nMax Distance (D) = {ks_stat_random:.4f}"),
    (lhs_samples, axes[1, 1],
     f"LHS CDF vs. Theoretical\nMax Distance (D) = {ks_stat_lhs:.4f}"),
]
for data, ax, title in cdf_panels:
    plot_cdf_comparison(data, ax, title)

plt.tight_layout(rect=[0, 0, 1, 0.96])  # reserve headroom for the suptitle
plt.show()