# @Time : 2024/12/29 20:29
# @Author : ZHUYI
# @File : test3
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
import matplotlib.pyplot as plt
import seaborn as sns

# Configure matplotlib to render Chinese labels correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# 1. Load the dataset.
data = pd.read_csv('csv/framingham_heart_study.csv')

# 2. Preprocessing.
# Bug fix: the original mean-filled ALL columns first, which imputed the
# categorical 'education' column with a fractional mean; the subsequent
# dropna(subset=['education']) then removed nothing. Drop those rows FIRST,
# then mean-impute the remaining numeric missing values.
# NOTE(review): imputing from the full dataset before the train/test split
# leaks test-set statistics into training; ideally move imputation into the
# sklearn pipeline (SimpleImputer) — left as-is to keep the script's shape.
data.dropna(subset=['education'], inplace=True)
data.fillna(data.mean(numeric_only=True), inplace=True)

# Features and target: 'TenYearCHD' is the 10-year coronary-heart-disease label.
X = data.drop(columns=['TenYearCHD'])
y = data['TenYearCHD']

# 处理分类变量
categorical_features = ['male', 'education', 'currentSmoker', 'BPMeds', 'prevalentStroke', 'prevalentHyp', 'diabetes']
numeric_features = X.select_dtypes(include=['int64', 'float64']).columns.tolist()

# 创建预处理管道
preprocessor = ColumnTransformer(
    transformers=[('num', StandardScaler(), numeric_features),
                  ('cat', OneHotEncoder(), categorical_features)])

# 3. Logistic-regression pipeline.
# max_iter raised from the default 100: lbfgs routinely hits the iteration
# limit on this dataset and warns without converging.
logistic_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                                    ('classifier', LogisticRegression(max_iter=1000))])

# 4. Random-forest pipeline.
# random_state pinned so the reported metrics are reproducible run-to-run.
rf_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                              ('classifier', RandomForestClassifier(random_state=42))])

# 5. Train/test split.
# stratify=y preserves the (imbalanced) CHD-positive ratio in both splits.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)

# 6./7. Fit both pipelines, then report hold-out performance for each.
logistic_pipeline.fit(X_train, y_train)
rf_pipeline.fit(X_train, y_train)

y_pred_logistic = logistic_pipeline.predict(X_test)
y_pred_rf = rf_pipeline.predict(X_test)

# Confusion matrix plus per-class precision/recall/F1 for each model.
print("逻辑回归模型评估:")
print(confusion_matrix(y_test, y_pred_logistic))
print(classification_report(y_test, y_pred_logistic))

print("随机森林模型评估:")
print(confusion_matrix(y_test, y_pred_rf))
print(classification_report(y_test, y_pred_rf))


# 8. 可视化部分
# 8.1 混淆矩阵可视化
def plot_confusion_matrix(cm, model_name):
    """Render a 2x2 confusion matrix as an annotated heatmap.

    :param cm: confusion-matrix counts (rows = true class, cols = predicted)
    :param model_name: model name interpolated into the figure title
    """
    class_labels = ['No CHD', 'CHD']
    plt.figure(figsize=(6, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', cbar=False,
                xticklabels=class_labels, yticklabels=class_labels)
    plt.title(f'{model_name}的混淆矩阵')
    plt.xlabel('预测值')
    plt.ylabel('真实值')
    plt.show()


# Confusion matrices for both fitted models (logistic first, then RF).
cm_logistic = confusion_matrix(y_test, y_pred_logistic)
cm_rf = confusion_matrix(y_test, y_pred_rf)

plot_confusion_matrix(cm_logistic, '逻辑回归')
plot_confusion_matrix(cm_rf, '随机森林')


# 8.2 ROC曲线和AUC
def plot_roc_curve(fpr, tpr, auc_value, model_name, color='blue'):
    """Plot a single ROC curve with its AUC shown in the legend.

    :param fpr: false-positive rates from sklearn.metrics.roc_curve
    :param tpr: true-positive rates from sklearn.metrics.roc_curve
    :param auc_value: area under the curve
    :param model_name: model name for the title and legend
    :param color: curve color (new, optional; defaults to the original 'blue')
    """
    plt.figure()  # fresh figure so curves from earlier calls don't stack
    plt.plot(fpr, tpr, color=color, label=f'{model_name} AUC = {auc_value:.2f}')
    plt.plot([0, 1], [0, 1], color='gray', linestyle='--')  # chance diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假阳性率')
    # Bug fix: the y-axis plots the TRUE positive rate — the original label
    # '阳性率' ("positive rate") mislabeled it.
    plt.ylabel('真阳性率')
    plt.title(f'{model_name}的ROC曲线')
    plt.legend(loc='lower right')
    plt.show()


# ROC inputs: positive-class probability -> (fpr, tpr) -> AUC, per model.
y_prob_logistic = logistic_pipeline.predict_proba(X_test)[:, 1]
fpr_logistic, tpr_logistic, _ = roc_curve(y_test, y_prob_logistic)
auc_logistic = auc(fpr_logistic, tpr_logistic)

y_prob_rf = rf_pipeline.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_prob_rf)
auc_rf = auc(fpr_rf, tpr_rf)

# Draw both curves (logistic first, then random forest).
plot_roc_curve(fpr_logistic, tpr_logistic, auc_logistic, '逻辑回归')
plot_roc_curve(fpr_rf, tpr_rf, auc_rf, '随机森林')


# 8.3 随机森林特征重要性
def plot_feature_importance(model, feature_names=None):
    """Plot the random forest's feature importances from a fitted pipeline.

    :param model: fitted sklearn Pipeline with 'preprocessor' and 'classifier'
        steps, where the classifier exposes ``feature_importances_``
    :param feature_names: optional explicit names for the TRANSFORMED columns.
        The original version accepted this argument but silently ignored it,
        reading module-level globals instead. Now it is used when its length
        matches the importance vector; otherwise the names are derived from
        the pipeline's own fitted preprocessor.
    """
    importance = model.named_steps['classifier'].feature_importances_
    if feature_names is None or len(feature_names) != len(importance):
        # Names straight from the fitted ColumnTransformer line up one-to-one
        # with the importances, including the expanded one-hot columns.
        feature_names = model.named_steps['preprocessor'].get_feature_names_out()
    feature_importance_df = (
        pd.DataFrame({'feature': feature_names, 'importance': importance})
        .sort_values(by='importance', ascending=False))

    plt.figure(figsize=(10, 6))
    sns.barplot(x='importance', y='feature', data=feature_importance_df)
    plt.title('随机森林特征重要性')
    plt.xlabel('重要性')
    plt.show()


# Feature importances for the random forest; names come from the pipeline
# itself (the original passed X.columns, whose length never matched the
# transformed feature count and was ignored anyway).
plot_feature_importance(rf_pipeline)
