import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from sklearn.calibration import calibration_curve
from sklearn.inspection import permutation_importance
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score, roc_curve, precision_score, recall_score
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler

# Configure matplotlib so Chinese glyphs and minus signs render correctly.
plt.rcParams.update({'font.sans-serif': ['SimHei'], 'axes.unicode_minus': False})

# Load the female-fetus NIPT sheet from the workbook.
df_female = pd.read_excel('附件.xlsx', sheet_name='女胎检测数据')

# Binary target: 1 when an aneuploidy result was recorded, 0 otherwise.
df_female['abnormal'] = df_female['染色体的非整倍体'].notna().astype(int)

# Predictor columns: chromosome Z-scores, sequencing QC metrics, maternal covariates.
features = [
    '13号染色体的Z值', '18号染色体的Z值', '21号染色体的Z值', 'X染色体的Z值',
    'GC含量', '原始读段数', '在参考基因组上比对的比例', '重复读段的比例', '唯一比对的读段数',
    '13号染色体的GC含量', '18号染色体的GC含量', '21号染色体的GC含量',
    '被过滤掉读段数的比例', '孕妇BMI', '年龄', '身高', '体重',
]
target = 'abnormal'

# Median-impute missing values — numeric feature columns only.
numeric_feats = [c for c in features if df_female[c].dtype != 'object']
for c in numeric_feats:
    df_female[c] = df_female[c].fillna(df_female[c].median())

# Train/test split, stratified to preserve the (rare) positive-class ratio.
X = df_female[features]
y = df_female[target]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)

# Fit the scaler on the training fold only, then apply to both folds.
scaler = StandardScaler().fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)

# Handle class imbalance with class weights instead of resampling (e.g. SMOTE).
class_weights = 'balanced'  # let sklearn derive weights inversely to class frequency

# L1-regularized logistic regression; choose C by 5-fold CV on ROC-AUC.
model = LogisticRegression(penalty='l1', solver='liblinear', max_iter=1000,
                           class_weight=class_weights)
Cs = np.logspace(-4, 4, 20)


def _cv_auc(c):
    # Mean cross-validated AUC for one inverse-regularization strength.
    model.set_params(C=c)
    return cross_val_score(model, X_train_scaled, y_train,
                           cv=5, scoring='roc_auc').mean()


scores = [_cv_auc(c) for c in Cs]

# Refit on the full training fold with the best C.
best_C = Cs[np.argmax(scores)]
model.set_params(C=best_C)
model.fit(X_train_scaled, y_train)

# Held-out evaluation: hard predictions and positive-class probabilities.
y_pred = model.predict(X_test_scaled)
y_prob = model.predict_proba(X_test_scaled)[:, 1]

# Scalar performance metrics on the test fold.
accuracy = accuracy_score(y_test, y_pred)
auc_score = roc_auc_score(y_test, y_prob)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)

# Report results to the console.
print(f"最佳C: {best_C}")
print(f"准确率: {accuracy}")
print(f"AUC: {auc_score}")
print(f"精确率: {precision}")
print(f"召回率: {recall}")
print("混淆矩阵:\n", confusion_matrix(y_test, y_pred))

# ROC curve on the held-out test set.
fpr, tpr, _ = roc_curve(y_test, y_prob)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC曲线 (AUC = {auc_score:.2f})')
ax.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')  # chance diagonal
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('假阳性率')
ax.set_ylabel('真阳性率')
ax.set_title('接收者操作特征(ROC)曲线')
ax.legend(loc="lower right")
fig.savefig('接收者操作特征(ROC)曲线.png')
plt.close(fig)

# Reliability (calibration) curve: predicted vs observed positive rate.
prob_true, prob_pred = calibration_curve(y_test, y_prob, n_bins=10)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(prob_pred, prob_true, marker='o', linewidth=1, label='模型')
ax.plot([0, 1], [0, 1], linestyle='--', label='完美校准')  # perfect-calibration line
ax.set_xlabel('预测概率')
ax.set_ylabel('真实概率')
ax.set_title('校准曲线')
ax.legend()
fig.savefig('校准曲线.png')
plt.close(fig)

# Model-agnostic feature ranking via permutation importance on the test fold.
perm_importance = permutation_importance(model, X_test_scaled, y_test,
                                         n_repeats=10, random_state=42)
order = perm_importance.importances_mean.argsort()
positions = range(len(order))
fig, ax = plt.subplots(figsize=(10, 8))
ax.barh(positions, perm_importance.importances_mean[order])
ax.set_yticks(positions)
ax.set_yticklabels(np.array(features)[order])
ax.set_xlabel("置换重要性")
ax.set_title("特征重要性分析")
fig.tight_layout()
fig.savefig('特征重要性分析.png')
plt.close(fig)


# SIMEX (simulation-extrapolation) measurement-error correction.
def simex_correction(model, X, y, noise_levels=(0.5, 1.0, 1.5, 2.0), n_boot=50):
    """Estimate noise-corrected coefficients via SIMEX-style extrapolation.

    For each noise level, repeatedly refits a *copy* of ``model`` on data
    perturbed with zero-mean Gaussian noise, averages the resulting
    coefficients, then extrapolates each coefficient back to the
    zero-noise point with a polynomial fit.

    Parameters
    ----------
    model : estimator exposing ``fit`` and ``coef_`` (e.g. LogisticRegression)
        NOT modified: an internal deep copy is refit. (The previous version
        refit the caller's model in place, corrupting downstream use.)
    X : ndarray of shape (n_samples, n_features)
        Feature matrix (already standardized by the caller).
    y : array-like of shape (n_samples,)
        Binary labels.
    noise_levels : sequence of float, default (0.5, 1.0, 1.5, 2.0)
        Standard deviations of the added noise (immutable default on purpose).
    n_boot : int, default 50
        Number of noisy replicates per noise level.

    Returns
    -------
    ndarray of shape (n_features,)
        Per-feature coefficients extrapolated to sigma = 0 (no intercept).
    """
    import copy

    # Work on a deep copy so the caller's fitted model is never clobbered
    # by the repeated refits on noise-perturbed data.
    work_model = copy.deepcopy(model)
    noise_levels = list(noise_levels)

    coefs_by_noise = []
    for sigma in noise_levels:
        coefs = []
        for _ in range(n_boot):
            # Simulate additional measurement error.
            X_noisy = X + np.random.normal(0, sigma, X.shape)
            work_model.fit(X_noisy, y)
            coefs.append(work_model.coef_[0])
        # Average coefficients across replicates at this noise level.
        coefs_by_noise.append(np.mean(coefs, axis=0))

    # Extrapolate each coefficient back to sigma = 0. A quadratic needs at
    # least three points; degrade the degree gracefully for shorter grids.
    degree = min(2, len(noise_levels) - 1)
    final_coefs = []
    for i in range(X.shape[1]):
        coef_vals = [c[i] for c in coefs_by_noise]
        poly = np.poly1d(np.polyfit(noise_levels, coef_vals, degree))
        final_coefs.append(poly(0))

    return np.array(final_coefs)


# Apply the SIMEX correction on the training data.
print("正在进行SIMEX校正...")
simex_coefs = simex_correction(model, X_train_scaled, y_train)
# simex_correction (as written above) refits the estimator on noise-perturbed
# copies of the data; refit on the clean training fold so every later use of
# `model` (bootstrap CI, etc.) is based on the real fit, not a noisy one.
model.fit(X_train_scaled, y_train)
print("SIMEX校正完成")


# Bootstrap confidence interval for the AUC.
def bootstrap_auc(model, X, y, n_boot=1000):
    """Bootstrap the AUC of ``model`` on (X, y) and its 95% percentile CI.

    Parameters
    ----------
    model : fitted estimator with ``predict_proba`` or ``decision_function``
    X : ndarray of shape (n_samples, n_features)
    y : array-like of shape (n_samples,)
        Binary labels; pandas Series or ndarray both accepted.
    n_boot : int, default 1000
        Number of bootstrap resamples.

    Returns
    -------
    (list of float, (float, float))
        Valid bootstrap AUC scores and their (2.5%, 97.5%) percentile bounds.

    Raises
    ------
    ValueError
        If no resample contained both classes (AUC undefined everywhere).
    """
    auc_scores = []
    y_arr = np.asarray(y)  # works uniformly for Series and ndarrays
    n_samples = len(y_arr)

    for _ in range(n_boot):
        # Resample with replacement.
        indices = np.random.choice(n_samples, n_samples, replace=True)
        y_boot = y_arr[indices]
        # A single-class resample makes the AUC undefined and would raise
        # inside roc_auc_score — skip it (this matters for rare-positive data).
        if np.unique(y_boot).size < 2:
            continue
        X_boot = X[indices]

        # Score with probabilities when available, otherwise decision values.
        if hasattr(model, 'predict_proba'):
            y_prob_boot = model.predict_proba(X_boot)[:, 1]
        else:
            y_prob_boot = model.decision_function(X_boot)

        auc_scores.append(roc_auc_score(y_boot, y_prob_boot))

    if not auc_scores:
        raise ValueError("no bootstrap resample contained both classes")

    # 95% percentile confidence interval.
    ci_lower = np.percentile(auc_scores, 2.5)
    ci_upper = np.percentile(auc_scores, 97.5)

    return auc_scores, (ci_lower, ci_upper)


# Compute and report the bootstrap 95% CI for the test-set AUC.
print("正在计算Bootstrap置信区间...")
auc_scores, auc_ci = bootstrap_auc(model, X_test_scaled, y_test)
# The original message was truncated to "AUC的95%" — state what the
# interval actually is.
print(f"AUC的95%置信区间: ({auc_ci[0]:.3f}, {auc_ci[1]:.3f})")

# Histogram of the bootstrap AUC distribution with mean and CI markers.
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(auc_scores, bins=30, alpha=0.7)
ax.axvline(np.mean(auc_scores), color='r', linestyle='--', label=f'均值 = {np.mean(auc_scores):.3f}')
ax.axvline(auc_ci[0], color='g', linestyle='--', label=f'95% CI下限 = {auc_ci[0]:.3f}')
ax.axvline(auc_ci[1], color='g', linestyle='--', label=f'95% CI上限 = {auc_ci[1]:.3f}')
ax.set_xlabel('AUC值')
ax.set_ylabel('频数')
ax.set_title('Bootstrap AUC分布')
ax.legend()
fig.savefig('Bootstrap AUC分布.png')
plt.close(fig)

# Detailed coefficient statistics via statsmodels (L1-penalized logit).
X_train_sm = sm.add_constant(pd.DataFrame(X_train_scaled, columns=features))
y_train_sm = y_train.reset_index(drop=True)  # realign with the rebuilt frame's index
# sklearn's C is the inverse regularization strength, hence alpha = 1 / C.
logit_result = sm.Logit(y_train_sm, X_train_sm).fit_regularized(method='l1', alpha=1 / best_C)
print(logit_result.summary())

# Collect the key results (model selection, metrics, importances, SIMEX).
results = {
    '最佳C值': best_C,
    '准确率': accuracy,
    'AUC': auc_score,
    '精确率': precision,
    '召回率': recall,
    'AUC置信区间': auc_ci,
    '特征重要性': dict(zip(features, perm_importance.importances_mean)),
    # simex_coefs has exactly one entry per feature (model.coef_[0] carries
    # no intercept), so the label list must be `features` alone. The original
    # ['截距'] + features prefix shifted every label by one and silently
    # dropped the last feature from the report.
    'SIMEX校正系数': dict(zip(features, simex_coefs))
}

# Build the formatted report string.
result_str = f"""
{'='*50}
模型评估结果
{'='*50}

1. 最佳参数
   - 最佳C值: {results['最佳C值']:.4f}

2. 性能指标
   - 准确率: {results['准确率']:.4f}
   - AUC值: {results['AUC']:.4f} (95%置信区间: {results['AUC置信区间'][0]:.4f}-{results['AUC置信区间'][1]:.4f})
   - 精确率: {results['精确率']:.4f}
   - 召回率: {results['召回率']:.4f}

3. 特征重要性
"""

# Append one line per feature importance.
for feature, importance in results['特征重要性'].items():
    result_str += f"   - {feature}: {importance:.4f}\n"

result_str += "\n4. SIMEX校正系数\n"

# Append one line per SIMEX-corrected coefficient.
for coef_name, coef_value in results['SIMEX校正系数'].items():
    result_str += f"   - {coef_name}: {coef_value:.4f}\n"

# Print the full report to the console.
print(result_str)


