import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler


# Selection of the dataset's binary classification tasks.
def classifier_data(item):
    """Return the pair of diagnostic class labels for classification task *item*.

    Parameters
    ----------
    item : int
        Index in the range 0-7 selecting one of the eight binary tasks.

    Returns
    -------
    list[str]
        The two class labels for the task, e.g. ``['AD', 'CN']``.

    Raises
    ------
    IndexError
        If *item* is outside 0-7.
    """
    # NOTE: do not reorder these pairs — the main loop merges EMCI/LMCI
    # into 'MCI' right before task index 6, so indices 6 and 7 must be
    # the tasks that use the merged 'MCI' label.
    items = [['AD', 'CN'], ['AD', 'EMCI'], ['AD', 'LMCI'], ['CN', 'EMCI'],
             ['CN', 'LMCI'], ['EMCI', 'LMCI'], ['MCI', 'CN'], ['AD', 'MCI']]
    return items[item]  # fixed: removed non-idiomatic trailing semicolon

# Label-merging helper: collapses EMCI and LMCI into a single 'MCI' class.
# (Do not change the position of values in `items` in classifier_data,
# otherwise later tasks will fail.)
def replace_data():
    """Merge the 'EMCI' and 'LMCI' labels of the global *df* into 'MCI'.

    Mutates the module-level DataFrame in place; every other label in the
    '类别' column is left unchanged.
    """
    merged = {'EMCI': 'MCI', 'LMCI': 'MCI'}
    df['类别'] = df['类别'].apply(lambda label: merged.get(label, label))

# Build, tune and evaluate one logistic-regression model for a binary task.
def fit_data(classification):
    """Train and evaluate a logistic-regression classifier for one task.

    Reads the module-level DataFrame *df*; rows whose '类别' label is in
    *classification* are selected, split 90/10, standardized, and a
    hyper-parameter grid search is run with 5-fold cross-validation.

    Parameters
    ----------
    classification : list[str]
        The two class labels of the task, e.g. ``['AD', 'CN']``.

    Returns
    -------
    float
        Accuracy of the best model on the held-out 10% test split.
    """
    # Keep only the rows belonging to the current pair of classes.
    filtered_df = df[df['类别'].isin(classification)]
    # Features are everything except the label column.
    X = filtered_df.drop('类别', axis=1)
    y = filtered_df['类别']

    # 90/10 split; the fixed seed keeps the split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=15)

    # Standardize features; fit the scaler on the training fold only to
    # avoid leaking test-set statistics into training.
    scaler = StandardScaler()
    scaled_X_train = scaler.fit_transform(X_train)
    scaled_X_test = scaler.transform(X_test)

    # Hyper-parameter grid, split into two sub-grids because the 'l1'
    # penalty is only supported by the 'saga' solver.  The original flat
    # grid paired 'l1' with lbfgs/newton-cg/sag, which cannot fit at all —
    # those candidates failed and were scored as NaN by GridSearchCV.
    param_grid = [
        {'C': [0.01, 0.1, 1, 10, 100],
         'solver': ['lbfgs', 'newton-cg', 'sag', 'saga'],
         'penalty': ['l2']},
        {'C': [0.01, 0.1, 1, 10, 100],
         'solver': ['saga'],
         'penalty': ['l1']},
    ]

    # Exhaustive grid search for the best hyper-parameter combination.
    grid_search = GridSearchCV(LogisticRegression(max_iter=10000, random_state=40),
                               param_grid, cv=5, scoring='accuracy')
    grid_search.fit(scaled_X_train, y_train)

    # Report the winning configuration.
    print("最优参数:", grid_search.best_params_)
    best_log_model = grid_search.best_estimator_

    # Compute the held-out accuracy once and reuse it (the original
    # called accuracy_score twice for the same prediction).
    y_pred = best_log_model.predict(scaled_X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("准确率:", accuracy)
    return accuracy


# Script entry point: run all eight binary classification tasks and print
# the mean held-out accuracy across them.
if __name__ == '__main__':
    NUM_TASKS = 8
    score = 0.0  # fixed: variable was misspelled as 'socre'
    # Load the dataset; the '类别' column holds the diagnosis label and the
    # remaining columns are features (assumed numeric — TODO confirm).
    data = pd.read_excel("files/data.xlsx")
    df = pd.DataFrame(data)
    for i in range(NUM_TASKS):
        # Before task 6 the EMCI/LMCI labels are merged into 'MCI' in
        # place, so task 7 also sees the merged data.
        if i == 6:
            replace_data()
        score += fit_data(classifier_data(i))

    print("最终的得分：", score / NUM_TASKS)