import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV
import pandas as pd
from sklearn.preprocessing import StandardScaler

# Load the dataset. read_excel already returns a DataFrame, so the
# original's extra pd.DataFrame(...) wrap was redundant and is removed.
df = pd.read_excel("files/data.xlsx")
# Keep only the two diagnostic groups of interest (AD vs CN).
filtered_df = df[df['类别'].isin(['AD', 'CN'])]

# Feature matrix: every column except the class-label column.
X = filtered_df.drop('类别', axis=1)
# Binary target: AD -> 0, CN -> 1. The isin() filter above guarantees
# every remaining label is covered by the map (no NaNs introduced).
y = filtered_df['类别'].map({'AD': 0, 'CN': 1})

# Hold out 10% of the samples as a test set; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=15
)

# Standardize features. Statistics (mean/std) are fit on the training
# split only, then reused on the test split — this avoids data leakage.
scaler = StandardScaler()
scaled_X_train = scaler.fit_transform(X_train)
scaled_X_test = scaler.transform(X_test)

# Hyperparameter search space.
# BUG FIX: penalty/solver compatibility matters — of the solvers used here
# only 'saga' supports the 'l1' penalty; 'lbfgs', 'newton-cg' and 'sag'
# support 'l2' only. The original flat grid paired 'l1' with every solver,
# producing failed fits (FitFailedWarning / errors depending on the sklearn
# version). GridSearchCV accepts a list of sub-grids, so the space is split
# into the valid combinations.
param_grid = [
    {
        'C': [0.001, 0.01, 0.1, 1, 10, 100],
        'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'],
        'penalty': ['l2'],
    },
    {
        'C': [0.001, 0.01, 0.1, 1, 10, 100],
        'solver': ['saga'],
        'penalty': ['l1'],
    },
]

# 5-fold cross-validated grid search for the best hyperparameter combination.
# max_iter is raised well above the default so sag/saga converge on
# standardized data; random_state fixes the stochastic solvers' shuffling.
grid_search = GridSearchCV(
    LogisticRegression(max_iter=10000, random_state=40),
    param_grid,
    cv=5,
    scoring='accuracy',
)
grid_search.fit(scaled_X_train, y_train)

# Report the winning hyperparameters, then evaluate the refit best
# estimator on the held-out test split.
print("最优参数:", grid_search.best_params_)
best_log_model = grid_search.best_estimator_

predictions = best_log_model.predict(scaled_X_test)
print("准确率:", accuracy_score(y_test, predictions))
# Optional detailed per-class metrics:
# print("分类报告:\n", classification_report(y_test, predictions))


# # 从sklearn库中掏出逻辑回归的模型
# log_model = LogisticRegression()
# #训练模型
# log_model.fit(scaled_X_train,y_train)

# y_pred=log_model.predict(scaled_X_test)
# print(accuracy_score(y_test,y_pred))

# sns.scatterplot(x='特征', y='Unnamed: 1', data=filtered_df, hue='类别')

# plt.show()