import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use(backend="TkAgg")

# --- Test parameters: one-sided z-test with known population sigma ---
alpha = 0.12  # significance level
n = 30        # sample size
effect = 1    # effect size (mean under the alternative hypothesis)
sigma = 1     # population standard deviation (assumed known)

# Echo the configuration so the printed report is self-describing.
print("=== 参数设置 ===")
for setting in (f"α = {alpha}",
                f"样本大小 n = {n}",
                f"效应大小 = {effect}",
                f"总体标准差 σ = {sigma}"):
    print(setting)
print()

# Formula 1: z_critical = InverseNormal(0, 1, 1 - alpha)
# Upper-tail critical value: the point of N(0, 1) with 1 - alpha mass to its left.
print("=== 计算临界值 z_critical ===")
print("公式: z_critical = stats.norm.ppf(1 - alpha, 0, 1)")

# ppf defaults to the standard normal (loc=0, scale=1).
z_critical = stats.norm.ppf(1 - alpha)
print(f"z_critical = {z_critical:.4f}")

# Sanity checks: the CDF at the critical value must recover 1 - alpha,
# and the upper tail (the rejection region) must carry exactly alpha.
cdf_at_crit = stats.norm.cdf(z_critical)
tail_mass = 1 - cdf_at_crit
print(f"验证: P(Z ≤ {z_critical:.4f}) = {cdf_at_crit:.4f} (应该等于 {1-alpha:.4f})")
print(f"拒绝域概率: P(Z > {z_critical:.4f}) = {tail_mass:.4f} (等于 α)")
print()

# Formula 2: power = P(reject H0 | H1 true).
# Under H1 the sample mean x̄ ~ N(effect, sigma/sqrt(n)).  The test rejects
# when the z statistic exceeds z_critical, i.e. when x̄ exceeds the critical
# value mapped back to the raw scale: x̄ > z_critical * sigma/sqrt(n).
# BUG FIX: the original compared the raw-scale distribution N(effect, SE)
# against z_critical itself (a z-scale quantity), mixing scales.  The
# visualization and the sensitivity analysis below both use the raw-scale
# cutoff z_critical * SE, so the same cutoff is used here for consistency.
print("=== 计算检验效能 power ===")
print("公式: power = 1 - stats.norm.cdf(z_critical * sigma/np.sqrt(n), effect, sigma/np.sqrt(n))")

se = sigma / np.sqrt(n)   # standard error of the sample mean
x_crit = z_critical * se  # critical value expressed on the raw x̄ scale

# Type II error beta: probability, under H1, that x̄ falls below the cutoff.
beta = stats.norm.cdf(x_crit, effect, se)
power = 1 - beta

print(f"备择假设分布: N({effect}, {se:.4f})")
print(f"在备择假设下，P(检验统计量 ≤ {x_crit:.4f}) = {beta:.4f} (这是第二类错误β)")
print(f"检验效能 power = 1 - {beta:.4f} = {power:.4f}")
print()

# --- Visual explanation of alpha, beta and power ---
print("=== 可视化解释 ===")

fig, (panel_null, panel_power) = plt.subplots(1, 2, figsize=(15, 6))

# Left panel: the standard normal null distribution with its
# upper-tail rejection region of mass alpha.
z_grid = np.linspace(-3, 3, 1000)
null_pdf = stats.norm.pdf(z_grid, 0, 1)

panel_null.plot(z_grid, null_pdf, 'b-', linewidth=2, label='零假设分布 N(0,1)')
panel_null.fill_between(z_grid, null_pdf, where=(z_grid > z_critical), alpha=0.3, color='red',
                        label=f'拒绝域 (α={alpha:.3f})')
panel_null.axvline(z_critical, color='red', linestyle='--',
                   label=f'临界值 z={z_critical:.3f}')
panel_null.set_title('零假设分布和拒绝域')
panel_null.set_xlabel('z值')
panel_null.set_ylabel('概率密度')
panel_null.legend()
panel_null.grid(True, alpha=0.3)

# Right panel: null vs alternative sampling distributions of the mean.
# The cutoff on the x̄ scale is the z critical value times the standard error.
std_err = sigma / np.sqrt(n)
xbar_cut = z_critical * std_err
xbar_grid = np.linspace(-1, 3, 1000)

panel_power.plot(xbar_grid, stats.norm.pdf(xbar_grid, 0, std_err), 'b-', linewidth=2,
                 label='零假设分布 N(0,SE)')
panel_power.plot(xbar_grid, stats.norm.pdf(xbar_grid, effect, std_err), 'r-', linewidth=2,
                 label=f'备择假设分布 N({effect},SE)')
panel_power.axvline(xbar_cut, color='red', linestyle='--',
                    label=f'临界值 x̄={xbar_cut:.3f}')

# Shade the type-II error region: alternative mass left of the cutoff.
miss_grid = np.linspace(-1, xbar_cut, 500)
panel_power.fill_between(miss_grid, stats.norm.pdf(miss_grid, effect, std_err),
                         alpha=0.3, color='orange',
                         label=f'第二类错误区域 (β={beta:.3f})')

# Shade the power region: alternative mass right of the cutoff.
detect_grid = np.linspace(xbar_cut, 3, 500)
panel_power.fill_between(detect_grid, stats.norm.pdf(detect_grid, effect, std_err),
                         alpha=0.3, color='green',
                         label=f'检验效能 (power={power:.3f})')

panel_power.set_title('零假设 vs 备择假设分布')
panel_power.set_xlabel('样本均值 x̄')
panel_power.set_ylabel('概率密度')
panel_power.legend()
panel_power.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

# --- Sensitivity analysis: how each parameter drives the power ---
print("=== 参数对检验效能的影响 ===")


def _power(alpha_lvl, eff, n_obs, sd):
    """Power of the one-sided z-test: P(x̄ > z_crit * SE | mean = eff).

    alpha_lvl -- significance level; eff -- true mean under H1;
    n_obs -- sample size; sd -- known population standard deviation.
    """
    se_loc = sd / np.sqrt(n_obs)
    z_crit = stats.norm.ppf(1 - alpha_lvl)
    return 1 - stats.norm.cdf(z_crit * se_loc, eff, se_loc)


# 1. Sample size: larger n shrinks the standard error, separating the
#    null and alternative distributions and raising power.
#    (Header interpolates the live alpha/effect values instead of the
#    original hard-coded "0.12"/"1", which would go stale if edited above.)
sample_sizes = [10, 30, 50, 100]
print(f"\n1. 样本大小对效能的影响 (α={alpha}, effect={effect}):")
for n_test in sample_sizes:
    print(f"   n={n_test}: power={_power(alpha, effect, n_test, sigma):.4f}")

# 2. Effect size: a larger true mean shift is easier to detect.
effects = [0.5, 1.0, 1.5, 2.0]
print(f"\n2. 效应大小对效能的影响 (α={alpha}, n={n}):")
for eff in effects:
    print(f"   effect={eff}: power={_power(alpha, eff, n, sigma):.4f}")

# 3. Significance level: a looser alpha lowers the cutoff, raising power
#    at the cost of more type-I errors.
alphas = [0.01, 0.05, 0.10, 0.20]
print(f"\n3. α水平对效能的影响 (effect={effect}, n={n}):")
for a in alphas:
    print(f"   α={a}: power={_power(a, effect, n, sigma):.4f}")