import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, roc_curve, \
    confusion_matrix
import matplotlib.pyplot as plt
import joblib
# 添加XGBoost和LightGBM
import xgboost as xgb
import lightgbm as lgb
import numpy as np

# Load the dataset
df = pd.read_csv('数据/new.csv')
print(df)

# Preprocessing: encode every categorical column as 0/1 integers.
df['Gender'] = df['Gender'].replace({'Male': 1, 'Female': 0})

# All symptom columns share the same Yes/No encoding, so map them in one
# loop instead of sixteen near-identical statements.
yes_no_columns = ['Polyuria', 'Polydipsia', 'sudden weight loss', 'weakness', 'Polyphagia',
                  'Genital thrush', 'visual blurring', 'Itching', 'Irritability',
                  'delayed healing', 'partial paresis', 'muscle stiffness', 'Alopecia', 'Obesity']
for column in yes_no_columns:
    df[column] = df[column].replace({'Yes': 1, 'No': 0})
df['class'] = df['class'].replace({'Positive': 1, 'Negative': 0})

# Ensure all columns are integer-typed (replace can leave object dtype behind).
df['Age'] = df['Age'].astype(int)
df['Gender'] = df['Gender'].astype(int)
categorical_columns = yes_no_columns + ['class']
df[categorical_columns] = df[categorical_columns].astype(int)

# Features (X) and binary target (y)
X = df.drop(['class'], axis=1)
y = df['class']

# Split into train/test sets (80/20, fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Folder for all optimization artifacts; exist_ok avoids the
# check-then-create race of `if not os.path.exists(...)`.
import os
results_dir = "模型优化结果"
os.makedirs(results_dir, exist_ok=True)

# ===== Decision tree hyper-parameter optimization =====
print("\n开始决策树参数优化...")
# Candidate hyper-parameters for the exhaustive grid search.
dt_param_grid = {
    'max_depth': [3, 5, 7, 10, None],
    'min_samples_split': [2, 5, 10],
    'min_samples_leaf': [1, 2, 4],
    'criterion': ['gini', 'entropy']
}

# 5-fold cross-validated grid search selecting by ROC AUC;
# n_jobs=-1 uses all available CPU cores.
dt_grid_search = GridSearchCV(
    estimator=DecisionTreeClassifier(random_state=42),
    param_grid=dt_param_grid,
    cv=5,
    scoring='roc_auc',
    n_jobs=-1,
    verbose=1
)

dt_grid_search.fit(X_train, y_train)
print("决策树最佳参数:", dt_grid_search.best_params_)
print("决策树最佳ROC AUC得分:", dt_grid_search.best_score_)

# Re-instantiate a decision tree with the best parameters found.
dt_model = DecisionTreeClassifier(random_state=42, **dt_grid_search.best_params_)

# ===== SVM hyper-parameter optimization =====
print("\n开始SVM参数优化...")
# Candidate hyper-parameters; gamma only affects the 'rbf' kernel.
svm_param_grid = {
    'C': [0.1, 1, 10, 100],
    'gamma': ['scale', 'auto', 0.1, 0.01],
    'kernel': ['rbf', 'linear']
}

# probability=True is required so predict_proba works for ROC AUC scoring.
svm_grid_search = GridSearchCV(
    estimator=SVC(probability=True, random_state=42),
    param_grid=svm_param_grid,
    cv=5,
    scoring='roc_auc',
    n_jobs=-1,
    verbose=1
)

svm_grid_search.fit(X_train, y_train)
print("SVM最佳参数:", svm_grid_search.best_params_)
print("SVM最佳ROC AUC得分:", svm_grid_search.best_score_)

# Re-instantiate the SVM with the best parameters found.
svm_model = SVC(probability=True, random_state=42, **svm_grid_search.best_params_)

# ===== XGBoost hyper-parameter optimization =====
print("\n开始XGBoost参数优化...")
# Small grid to keep the 5-fold search tractable.
xgb_param_grid = {
    'n_estimators': [100],
    'learning_rate': [0.05, 0.1],
    'max_depth': [3, 5],
    'min_child_weight': [1],
    'subsample': [0.8],
    'colsample_bytree': [0.8]
}

# NOTE: `use_label_encoder` was deprecated in XGBoost 1.3 and removed in 2.x
# (passing it warns or errors on modern versions), so it is dropped here.
xgb_grid_search = GridSearchCV(
    estimator=xgb.XGBClassifier(random_state=42, eval_metric='logloss'),
    param_grid=xgb_param_grid,
    cv=5,
    scoring='roc_auc',
    n_jobs=-1,
    verbose=1
)

xgb_grid_search.fit(X_train, y_train)
print("XGBoost最佳参数:", xgb_grid_search.best_params_)
print("XGBoost最佳ROC AUC得分:", xgb_grid_search.best_score_)

# Re-instantiate the XGBoost model with the best parameters found.
xgb_model = xgb.XGBClassifier(random_state=42, eval_metric='logloss', **xgb_grid_search.best_params_)

# ===== LightGBM hyper-parameter optimization =====
# Exhaustive parameter grid search via GridSearchCV.
print("\n开始LightGBM参数优化...")
# Candidate hyper-parameters (kept small to limit the 5-fold search cost).
lgb_param_grid = {
    'n_estimators': [100],
    'learning_rate': [0.05, 0.1],
    'max_depth': [3, 5],
    'num_leaves': [31],
    'min_child_samples': [10],
    'subsample': [0.8],
    'colsample_bytree': [0.8]
}

# 5-fold cross-validated search, selecting by ROC AUC on all CPU cores.
lgb_grid_search = GridSearchCV(
    estimator=lgb.LGBMClassifier(random_state=42),
    param_grid=lgb_param_grid,
    cv=5,
    scoring='roc_auc',
    n_jobs=-1,
    verbose=1
)

# Run the grid search on the training split.
lgb_grid_search.fit(X_train, y_train)

# Report the best parameter combination found.
print("LightGBM最佳参数:", lgb_grid_search.best_params_)
print("LightGBM最佳ROC AUC得分:", lgb_grid_search.best_score_)

# Re-instantiate the LightGBM model with the best parameters found.
lgb_model = lgb.LGBMClassifier(random_state=42, **lgb_grid_search.best_params_)

# Fit every tuned model on the training split, then persist each one.
tuned_models = [
    (dt_model, '数据/dt_model.joblib'),
    (svm_model, '数据/svm_model.joblib'),
    (xgb_model, '数据/xgb_model.joblib'),
    (lgb_model, '数据/lgb_model.joblib'),
]
for estimator, _ in tuned_models:
    estimator.fit(X_train, y_train)
for estimator, artifact_path in tuned_models:
    joblib.dump(estimator, artifact_path)

# Evaluation helper for a single fitted classifier
def evaluate_model(model, X_test, y_test):
    """Score a fitted binary classifier on the held-out test split.

    Returns a tuple of (accuracy, precision, recall, f1, roc_auc,
    confusion_matrix, fpr, tpr) where fpr/tpr trace the ROC curve.
    """
    predictions = model.predict(X_test)
    positive_scores = model.predict_proba(X_test)[:, 1]
    fpr, tpr, _ = roc_curve(y_test, positive_scores)
    return (
        accuracy_score(y_test, predictions),
        precision_score(y_test, predictions),
        recall_score(y_test, predictions),
        f1_score(y_test, predictions),
        roc_auc_score(y_test, positive_scores),
        confusion_matrix(y_test, predictions),
        fpr,
        tpr,
    )

# Evaluate every tuned model on the held-out test split. Each call returns
# (accuracy, precision, recall, f1, roc_auc, confusion_matrix, fpr, tpr).
dt_accuracy, dt_precision, dt_recall, dt_f1, dt_roc_auc, dt_confusion_mat, dt_fpr, dt_tpr = evaluate_model(dt_model, X_test, y_test)
svm_accuracy, svm_precision, svm_recall, svm_f1, svm_roc_auc, svm_confusion_mat, svm_fpr, svm_tpr = evaluate_model(svm_model, X_test, y_test)
xgb_accuracy, xgb_precision, xgb_recall, xgb_f1, xgb_roc_auc, xgb_confusion_mat, xgb_fpr, xgb_tpr = evaluate_model(xgb_model, X_test, y_test)
lgb_accuracy, lgb_precision, lgb_recall, lgb_f1, lgb_roc_auc, lgb_confusion_mat, lgb_fpr, lgb_tpr = evaluate_model(lgb_model, X_test, y_test)

# ===== Feature importance analysis =====
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # enable CJK glyphs in figures
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

def _save_feature_importance(model, title, png_path, csv_path):
    """Plot a model's feature_importances_ as a sorted horizontal bar chart.

    Saves the chart to png_path and the underlying table to csv_path.
    The original code repeated this verbatim for three models.
    """
    importance_df = pd.DataFrame({
        'Feature': X.columns,
        'Importance': model.feature_importances_
    }).sort_values(by='Importance', ascending=False)

    plt.figure(figsize=(10, 6))
    plt.barh(importance_df['Feature'], importance_df['Importance'])
    plt.xlabel('重要性')
    plt.ylabel('特征')
    plt.title(title)
    plt.tight_layout()
    plt.savefig(png_path)
    importance_df.to_csv(csv_path, index=False)

# SVM (rbf kernel) exposes no feature_importances_, so only these three models.
_save_feature_importance(dt_model, '决策树特征重要性',
                         os.path.join(results_dir, '决策树特征重要性.png'),
                         os.path.join(results_dir, '决策树特征重要性.csv'))
_save_feature_importance(xgb_model, 'XGBoost特征重要性',
                         os.path.join(results_dir, 'XGBoost特征重要性.png'),
                         os.path.join(results_dir, 'XGBoost特征重要性.csv'))
_save_feature_importance(lgb_model, 'LightGBM特征重要性',
                         os.path.join(results_dir, 'LightGBM特征重要性.png'),
                         os.path.join(results_dir, 'LightGBM特征重要性.csv'))

# ===== Model performance comparison visualization =====
# Collect the per-model metric values in display order.
models = ['决策树', 'SVM', 'XGBoost', 'LightGBM']
accuracy_scores = [dt_accuracy, svm_accuracy, xgb_accuracy, lgb_accuracy]
precision_scores = [dt_precision, svm_precision, xgb_precision, lgb_precision]
recall_scores = [dt_recall, svm_recall, xgb_recall, lgb_recall]
f1_scores = [dt_f1, svm_f1, xgb_f1, lgb_f1]
roc_auc_scores = [dt_roc_auc, svm_roc_auc, xgb_roc_auc, lgb_roc_auc]

# Grouped bar chart: one cluster per model, one bar per metric.
plt.figure(figsize=(12, 8))
x = np.arange(len(models))
width = 0.15

metric_series = [
    ('准确率', accuracy_scores),
    ('精确度', precision_scores),
    ('召回率', recall_scores),
    ('F1值', f1_scores),
    ('ROC AUC', roc_auc_scores),
]
# enumerate(..., start=-2) yields bar offsets -2, -1, 0, 1, 2 around each cluster center.
for offset, (metric_label, metric_values) in enumerate(metric_series, start=-2):
    plt.bar(x + offset * width, metric_values, width, label=metric_label)

plt.xlabel('模型')
plt.ylabel('得分')
plt.title('模型性能对比')
plt.xticks(x, models)
plt.legend()
plt.ylim(0, 1.1)
plt.tight_layout()
plt.savefig(os.path.join(results_dir, '模型性能对比.png'))

# Overlaid ROC curves for all four models plus the chance diagonal.
plt.figure(figsize=(10, 7))
roc_series = [
    ('决策树', dt_fpr, dt_tpr, dt_roc_auc),
    ('支持向量机', svm_fpr, svm_tpr, svm_roc_auc),
    ('XGBoost', xgb_fpr, xgb_tpr, xgb_roc_auc),
    ('LightGBM', lgb_fpr, lgb_tpr, lgb_roc_auc),
]
for curve_label, fpr_values, tpr_values, auc_value in roc_series:
    plt.plot(fpr_values, tpr_values, label='%s (AUC = %0.2f)' % (curve_label, auc_value))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('假正例率')
plt.ylabel('真正例率')
plt.title('ROC曲线对比')
plt.legend(loc="lower right")
plt.savefig(os.path.join(results_dir, 'ROC曲线对比.png'))

# Persist the evaluation table as CSV.
results_df = pd.DataFrame({
    '模型': models,
    '准确率': accuracy_scores,
    '精确度': precision_scores,
    '召回率': recall_scores,
    'F1值': f1_scores,
    'ROC AUC': roc_auc_scores
})
results_df.to_csv(os.path.join(results_dir, '模型评估结果.csv'), index=False)

# Confusion-matrix visualization helper
def plot_confusion_matrix(cm, title, filename):
    """Render a 2x2 confusion matrix as a heatmap and save it to *filename*.

    cm: matrix from sklearn.metrics.confusion_matrix;
    title: figure title; filename: output image path.
    """
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(2)
    plt.xticks(tick_marks, ['负例', '正例'])
    plt.yticks(tick_marks, ['负例', '正例'])
    
    # Annotate each cell with its count; use white text on dark cells.
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], 'd'),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.tight_layout()
    plt.savefig(filename)
    # Close the figure so repeated calls don't accumulate open figures
    # (matplotlib warns and holds memory after ~20 unclosed figures;
    # this script opens ten figures in total).
    plt.close()

# Render and save a confusion-matrix figure for each of the four models.
plot_confusion_matrix(dt_confusion_mat, '决策树混淆矩阵', os.path.join(results_dir, '决策树混淆矩阵.png'))
plot_confusion_matrix(svm_confusion_mat, 'SVM混淆矩阵', os.path.join(results_dir, 'SVM混淆矩阵.png'))
plot_confusion_matrix(xgb_confusion_mat, 'XGBoost混淆矩阵', os.path.join(results_dir, 'XGBoost混淆矩阵.png'))
plot_confusion_matrix(lgb_confusion_mat, 'LightGBM混淆矩阵', os.path.join(results_dir, 'LightGBM混淆矩阵.png'))

# Pair each display label with its fitted grid-search object, then build the
# summary mapping with a dict comprehension (insertion order is preserved,
# so the export loop below visits models in the same order as before).
_searches = [
    ('决策树', dt_grid_search),
    ('SVM', svm_grid_search),
    ('XGBoost', xgb_grid_search),
    ('LightGBM', lgb_grid_search),
]
grid_search_results = {
    label: {
        '最佳参数': search.best_params_,
        '最佳得分': search.best_score_,
        '所有结果': pd.DataFrame(search.cv_results_)
    }
    for label, search in _searches
}

# Export the full CV results table and the best parameters for every model.
for model_name, results in grid_search_results.items():
    results['所有结果'].to_csv(os.path.join(results_dir, f'{model_name}网格搜索结果.csv'), index=False)
    
    # Best parameters as a small UTF-8 text file.
    with open(os.path.join(results_dir, f'{model_name}最佳参数.txt'), 'w', encoding='utf-8') as f:
        f.write(f'最佳参数: {results["最佳参数"]}\n')
        f.write(f'最佳得分: {results["最佳得分"]}\n')

print(f"\n所有模型优化结果已保存到 '{results_dir}' 文件夹")

# Print the decision tree's evaluation summary to stdout.
print("Decision Tree 模型评估结果:")
_dt_report = [
    ("准确率:", dt_accuracy),
    ("精确度:", dt_precision),
    ("召回率:", dt_recall),
    ("F1 值:", dt_f1),
    ("ROC AUC:", dt_roc_auc),
]
for metric_name, metric_value in _dt_report:
    print(metric_name, metric_value)
print("混淆矩阵:\n", dt_confusion_mat)