import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from sklearn.linear_model import LogisticRegression
import shap
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Load the BRFSS 2015 diabetes health-indicators dataset.
data = pd.read_csv("./diabetes_012_health_indicators_BRFSS2015.csv", index_col=None)

# The last column is the label; every other column is a feature.
# Use the whole frame rather than a hard-coded row count (the original
# sliced to 253,568 rows and silently dropped the tail of the file).
features = data.iloc[:, :-1].values
labels = data.iloc[:, -1].values

# Convert to PyTorch tensors.
features = torch.tensor(features, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.float32)

# NOTE(review): dataset/train_loader are never consumed below; kept only so
# any external code importing these module-level names keeps working.
dataset = TensorDataset(features, labels)
train_loader = DataLoader(dataset=dataset, batch_size=128, shuffle=True)

# Back to NumPy arrays for scikit-learn.
features_numpy = features.numpy()
labels_numpy = labels.numpy()

# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    features_numpy, labels_numpy, test_size=0.2, random_state=42
)

# Fit a logistic-regression classifier on the training split.
clf = LogisticRegression(max_iter=1000, random_state=42)
clf.fit(X_train, y_train)

# Evaluate on the held-out test split.
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f'Accuracy: {accuracy}')

# Explain the fitted linear model with SHAP values.
explainer = shap.LinearExplainer(clf, X_train)
shap_values = explainer.shap_values(X_test)

# Use the real column names from the CSV instead of synthetic
# "feature_i" placeholders so the summary plot is interpretable.
feature_names = data.columns[:-1].tolist()
shap.summary_plot(shap_values, X_test, feature_names=feature_names)
