import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from xgboost import XGBClassifier, plot_importance, cv, DMatrix
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import roc_auc_score, roc_curve, classification_report, confusion_matrix
from sklearn.feature_selection import SelectKBest, f_classif, VarianceThreshold
from imblearn.over_sampling import SMOTE
import seaborn as sns
import xgboost as xgb
from matplotlib.font_manager import FontProperties

# ======================
# 1. 系统配置与字体设置
# ======================
def get_available_chinese_font():
    """Return the name of the first installed Chinese-capable font family.

    The previous implementation searched for system fonts (SimHei, KaiTi,
    ...) inside matplotlib's bundled ``fonts/ttf`` data directory, which only
    ships the DejaVu family — so the lookup always failed and the function
    unconditionally returned 'sans-serif'.  Instead, check the candidates
    against the fonts matplotlib actually registered from the OS.

    Returns:
        str: a usable font family name, or 'sans-serif' if none of the
        preferred Chinese fonts is installed.
    """
    from matplotlib import font_manager  # local import; file only imports FontProperties

    candidate_fonts = ['SimHei', 'Microsoft YaHei', 'Heiti TC', 'KaiTi', 'sans-serif']
    installed = {f.name for f in font_manager.fontManager.ttflist}
    for font in candidate_fonts:
        if font in installed:
            return font
    return 'sans-serif'

# Global plotting defaults: CJK-capable font, correct minus-sign glyphs,
# and a fixed 100 dpi for all figures.
plt.rcParams.update({
    "font.family": get_available_chinese_font(),
    "axes.unicode_minus": False,
    "figure.dpi": 100,
})

# ======================
# 2. 数据加载与基础探索
# ======================
# Load the master dataset (GB18030-encoded CSV) and abort on failure.
try:
    df_Master = pd.read_csv('F:\\数据\\df_Master.csv', encoding='gb18030')
    print(f"[1/7] 数据加载成功，形状: {df_Master.shape}")
except Exception as e:
    print(f"数据加载失败: {e}")
    # `exit()` is injected by the `site` module and is not guaranteed to
    # exist in every interpreter mode; raise SystemExit explicitly instead.
    raise SystemExit(1)

print("\n[2/7] 数据探索")
print("目标变量分布:")
# Relative class frequencies of the binary target.
print(df_Master['target'].value_counts(normalize=True).to_string())

# ======================
# 3. 数据清洗与预处理
# ======================
print("\n[3/7] 数据预处理 - 独热编码")
# Separate target from features, then one-hot encode categoricals
# (drop_first avoids the dummy-variable trap).
y = df_Master['target']
X = df_Master.drop(['target', 'sample_status'], axis=1)
X_encoded = pd.get_dummies(X, drop_first=True)
print(f"独热编码后特征数量: {X_encoded.shape[1]}")

print("\n[3/7] 数据预处理 - 缺失值处理")
# Drop rows with any missing value; features and target are concatenated
# first so the two stay row-aligned, then split apart again.
data_combined = pd.concat([X_encoded, y], axis=1)
data_clean = data_combined.dropna()
y_clean = data_clean['target']
X_clean = data_clean.drop('target', axis=1)
n_dropped = len(data_combined) - len(data_clean)
print(f"处理后数据集形状: {X_clean.shape}（删除了 {n_dropped} 条缺失值样本）")

# ======================
# 4. 特征工程 - 筛选与降维
# ======================
print("\n[4/7] 特征工程 - 特征筛选")

# Stage 1 — variance filter: discard near-constant columns.
selector_variance = VarianceThreshold(threshold=0.01)
X_variance = selector_variance.fit_transform(X_clean)
filtered_features = X_clean.columns[selector_variance.get_support()]
n_low_variance = len(X_clean.columns) - X_variance.shape[1]
print(f"方差过滤后特征数量: {X_variance.shape[1]}（移除了 {n_low_variance} 个低方差特征）")

# Stage 2 — univariate ANOVA F-test: keep at most the 200 best features.
k_best = min(200, X_variance.shape[1])
selector_kbest = SelectKBest(score_func=f_classif, k=k_best)
X_selected = selector_kbest.fit_transform(X_variance, y_clean)
selected_features = filtered_features[selector_kbest.get_support()]  # names of the surviving features
print(f"最终筛选后特征数量: {X_selected.shape[1]}（基于ANOVA选择Top{k_best}）")

# ======================
# 5. 数据集划分与样本平衡
# ======================
print("\n[5/7] 数据集划分与样本平衡")

# Stratified 80/20 split.  X_clean.index is split alongside the data so the
# test rows can later be traced back to the original dataframe.
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(
    X_selected, y_clean, X_clean.index,
    test_size=0.2,
    random_state=0,
    stratify=y_clean,
    shuffle=True
)
print(f"训练集形状: {X_train.shape}, 测试集形状: {X_test.shape}")
print("训练集正负样本比例:", y_train.value_counts(normalize=True).to_string())

# SMOTE over-sampling of the minority class to a 1:4 minority/majority ratio.
# NOTE: `n_jobs` was removed from SMOTE in imbalanced-learn 0.10, so passing
# it made every modern install raise TypeError and fall into the legacy
# branch, whose pre-0.6 `ratio=` keyword then also raised TypeError.  Use the
# current API first and keep the `ratio=` spelling only as a fallback for
# very old installs, with k_neighbors consistent between the two paths.
try:
    smote = SMOTE(
        random_state=0,
        sampling_strategy=0.25,
        k_neighbors=5
    )
except TypeError:
    smote = SMOTE(
        random_state=0,
        ratio=0.25,
        k_neighbors=5
    )

X_train_resampled, y_train_resampled = smote.fit_resample(X_train, y_train)
print(f"平衡后训练集样本分布: {dict(y_train_resampled.value_counts())}")

# ======================
# 6. 模型训练 - 交叉验证与参数优化
# ======================
# Message fixed: the cv() call below uses nfold=5, not 9.
print("\n[6/7] 模型训练 - 5折交叉验证")

# Fixed XGBoost hyper-parameters: shallow trees, mild L1/L2 regularisation,
# and row/column subsampling to curb overfitting.
xgb_params = {
    'objective': 'binary:logistic',
    'eval_metric': 'auc',
    'booster': 'gbtree',
    'learning_rate': 0.05,
    'max_depth': 4,
    'min_child_weight': 3,
    'gamma': 0.2,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
    'reg_alpha': 0.1,
    'reg_lambda': 0.1,
    'random_state': 0,
    'nthread': -1,
    'verbosity': 0,
}

# DMatrix wrappers carry the real column names so importance plots and
# exports show feature names instead of f0, f1, ...
feature_name_list = selected_features.tolist()
dtrain = DMatrix(X_train_resampled, label=y_train_resampled, feature_names=feature_name_list)
dtest = DMatrix(X_test, label=y_test, feature_names=feature_name_list)

# Stratified 5-fold CV with early stopping to pick the boosting-round count.
cv_results = cv(
    params=xgb_params,
    dtrain=dtrain,
    num_boost_round=1000,
    nfold=5,
    stratified=True,
    early_stopping_rounds=20,
    verbose_eval=10,
    seed=0
)

# With early stopping, cv() truncates its result frame at the best
# iteration, so its length IS the optimal number of boosting rounds.
best_ntree = len(cv_results['test-auc-mean'])
cv_auc_mean = cv_results['test-auc-mean'].iloc[-1]
cv_auc_std = cv_results['test-auc-std'].iloc[-1]
print(f"交叉验证最佳迭代次数: {best_ntree}（验证集AUC均值: {cv_auc_mean:.4f} ± {cv_auc_std:.4f}）")

# ======================
# 7. 模型训练与评估
# ======================
print("\n[7/7] 模型训练 - 最终模型")

evals_result = {}
# Train for exactly the CV-selected number of rounds.  The test set stays in
# `evals` for monitoring only — the original also passed
# early_stopping_rounds=20 here, which selects the model on TEST-set AUC and
# leaks test information into training; the round count is already fixed by
# cross-validation, so early stopping is dropped.
model = xgb.train(
    params=xgb_params,
    dtrain=dtrain,
    num_boost_round=best_ntree,
    evals=[(dtrain, 'train'), (dtest, 'test')],
    evals_result=evals_result,
    verbose_eval=10
)

# ======================
# 8. 模型评估与可视化
# ======================
print("\n模型评估 - 概率预测")
# Predicted probabilities on the (resampled) training set and the test set.
y_train_probs = model.predict(dtrain)
y_test_probs = model.predict(dtest)

# AUC scores and ROC operating points for both sets.
train_auc = roc_auc_score(y_train_resampled, y_train_probs)
test_auc = roc_auc_score(y_test, y_test_probs)
fpr_train, tpr_train, _ = roc_curve(y_train_resampled, y_train_probs)
fpr_test, tpr_test, _ = roc_curve(y_test, y_test_probs)
print(f"训练集AUC: {train_auc:.4f}, 测试集AUC: {test_auc:.4f}")

print("\n模型评估 - 分类报告")
# Hard labels at a 0.3 probability cutoff (NOTE(review): presumably lowered
# below 0.5 to favour minority-class recall — confirm with the author).
hard_labels = (y_test_probs > 0.3).astype(int)
print(classification_report(y_test, hard_labels, digits=4, zero_division=0))

cm = confusion_matrix(y_test, hard_labels)

# Composite evaluation figure: ROC (top-left), confusion matrix (top-right),
# feature importance (bottom half).
plt.figure(figsize=(18, 12))

# Panel 1: ROC curves with the chance diagonal.
plt.subplot(2, 2, 1)
plt.plot(fpr_train, tpr_train, 'b--', label=f'训练集 (AUC={train_auc:.4f})')
plt.plot(fpr_test, tpr_test, 'r-', label=f'测试集 (AUC={test_auc:.4f})')
plt.plot([0, 1], [0, 1], 'k--', alpha=0.5)
plt.xlabel('假正率 (FPR)')
plt.ylabel('真正率 (TPR)')
plt.title('ROC曲线')
plt.legend()
plt.grid(True)

# Panel 2: confusion matrix at the 0.3 threshold.
plt.subplot(2, 2, 2)
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', cbar=False)
plt.xlabel('预测类别')
plt.ylabel('实际类别')
plt.title('混淆矩阵')

# Panel 3: top-20 feature importances.  plot_importance opens a brand-new
# figure when no Axes is supplied, which abandoned this subplot in the
# original code — pass the subplot's Axes explicitly so the panel lands
# inside this figure.
ax_importance = plt.subplot(2, 1, 2)
plot_importance(
    model,
    ax=ax_importance,
    max_num_features=20,
    height=0.6,
    title='特征重要性排名',
    xlabel='重要性得分',
    ylabel='特征名称'
)
plt.tight_layout()
plt.show()

# ======================
# 9. 结果保存
# ======================
print("\n结果保存")

# Export feature importances in the exact order the booster was trained
# with; features never used in a split get a score of 0.
feature_names = model.feature_names
fscore = model.get_fscore()
importance_scores = [fscore.get(fname, 0) for fname in feature_names]

feature_importance = pd.DataFrame({
    '特征名称': feature_names,
    '重要性得分': importance_scores
}).sort_values('重要性得分', ascending=False)

feature_importance.to_csv('F:\\数据\\feature_importance.csv', index=False)
print("特征重要性已保存至 F:\\数据\\feature_importance.csv")

# `idx_test` already holds the original dataframe row labels (it was split
# from X_clean.index).  The original `df_Master.index[idx_test]` treated
# those labels as positions, which is only accidentally correct for a
# default RangeIndex — use the labels directly instead.
test_result = pd.DataFrame({
    '样本ID': idx_test,
    '真实标签': y_test.values,
    '预测概率': y_test_probs,
    '预测标签': (y_test_probs > 0.3).astype(int)
})
test_result.to_csv('F:\\数据\\model_predictions.csv', index=False)
print("预测结果已保存至 F:\\数据\\model_predictions.csv")

# ======================
# 10. 模型总结
# ======================
# Final console summary of the key metrics.
print("\n模型训练完成，关键指标：")
for summary_line in (
    f"- 测试集AUC: {test_auc:.4f}",
    f"- 筛选后特征数量: {len(selected_features)}",
    f"- 平衡后训练集大小: {len(y_train_resampled)}",
):
    print(summary_line)


print("\n===== 交叉验证结果 =====")
final_cv_mean = cv_results['test-auc-mean'].iloc[-1]
final_cv_std = cv_results['test-auc-std'].iloc[-1]
print(f"交叉验证最佳迭代次数: {best_ntree}")
print(f"验证集AUC（最佳迭代）: {final_cv_mean:.4f}")
print(f"验证集标准差: {final_cv_std:.4f}")


# 模型训练完成，关键指标：
# - 测试集AUC: 0.7607
# - 筛选后特征数量: 200
# - 平衡后训练集大小: 278

# ===== 交叉验证结果 =====
# 交叉验证最佳迭代次数: 67
# 验证集AUC（最佳迭代）: 0.9198
# 验证集标准差: 0.0512


#预处理：独热编码 + 缺失值处理
#特征选择：方差过滤 + ANOVA Top200
#样本处理：SMOTE过采样  解决不平衡问题
#模型训练：XGBoost + 5折交叉验证
#模型评估： AUC/ROC/混淆矩阵/分类报告
#结果输出：特征重要性 + 预测结果

