# Tune the regularization hyperparameters of a Logistic Regression model
# with 5-fold cross-validation, scored by log-loss and by accuracy.

# 导入工具包
import pandas as pd
import numpy as np

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

# 1. Load the dataset
dpath = "E:/VC_project/data/diabetes/"
train = pd.read_csv(dpath + "FE_diabets.csv")
print(train.head())

# Split the target column off from the feature matrix
y_train = train["Target"]
X_train = train.drop(columns=["Target"])

# Keep the feature names around for later reference
feat_names = X_train.columns

# 2. Tune hyperparameters, scoring with both log-loss and accuracy

# (1) Candidate grid: penalty type and inverse regularization strength C
tuned_parameters = {
    "penalty": ["l1", "l2"],        # L1 and L2 regularization
    "C": [0.001, 0.01, 0.1, 1, 10, 100, 1000],
}
scorings = ["neg_log_loss", "accuracy"]

# (2) Base estimator (liblinear supports both l1 and l2 penalties)
lr = LogisticRegression(solver="liblinear")

for scoring in scorings:
    # (3) Exhaustive 5-fold cross-validated grid search
    grid = GridSearchCV(lr, tuned_parameters, scoring=scoring, cv=5)
    # (4) Fit the search on the training data
    grid.fit(X_train, y_train)
    # Report the best score; neg_log_loss is negated back to a positive loss
    if scoring == "neg_log_loss":
        print(-grid.best_score_)
    else:
        print(grid.best_score_)
    print(grid.best_params_)
