import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV
import pandas as pd
from sklearn.preprocessing import StandardScaler

# Load the dataset. pd.read_excel already returns a DataFrame, so the
# previous redundant pd.DataFrame(...) re-wrap is dropped.
df = pd.read_excel("files/data.xlsx")
# Keep only the two classes of interest ('类别' is the class-label column).
filtered_df = df[df['类别'].isin(['AD', 'LMCI'])]

# Feature matrix: every column except the class label.
X = filtered_df.drop('类别', axis=1)
# Binary target: map AD -> 0, LMCI -> 1.
y = filtered_df['类别'].map({'AD': 0, 'LMCI': 1})


def test(random):
    """Grid-search a logistic-regression classifier on one train/test split.

    Parameters
    ----------
    random : int
        Seed forwarded to ``train_test_split`` so each call evaluates a
        different (but reproducible) 80/20 split of the module-level X, y.

    Returns
    -------
    float
        Test-set accuracy of the best model found by the grid search.
    """
    # Split the data (80% train / 20% test), seeded per call.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=random
    )
    # Fit the scaler on the training set only, then apply the same
    # transform to the test set — avoids leaking test statistics.
    scaler = StandardScaler()
    scaled_X_train = scaler.fit_transform(X_train)
    scaled_X_test = scaler.transform(X_test)

    # Hyper-parameter grid, split per solver: 'lbfgs' supports only the
    # l2 penalty, so pairing it with 'l1' (as the old flat grid did)
    # raises fit failures inside GridSearchCV. 'saga' supports both.
    param_grid = [
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100],
            'solver': ['lbfgs'],
            'penalty': ['l2'],
        },
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100],
            'solver': ['saga'],
            'penalty': ['l2', 'l1'],
        },
    ]

    # 5-fold cross-validated search for the best hyper-parameter combination.
    grid_search = GridSearchCV(
        LogisticRegression(max_iter=10000, random_state=40),
        param_grid,
        cv=5,
        scoring='accuracy',
    )
    grid_search.fit(scaled_X_train, y_train)

    # Report the winning parameters and evaluate on the held-out test set.
    print("最优参数:", grid_search.best_params_)
    best_log_model = grid_search.best_estimator_

    y_pred = best_log_model.predict(scaled_X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("准确率:", accuracy)
    return accuracy

# Repeat the evaluation over 45 distinct random train/test splits,
# echoing the seed after each run so progress is visible.
for seed in range(45):
    test(seed)
    print(seed)