# 首先 import 必要的模块
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt

# Load the dataset.
# Raw string (r"...") keeps the Windows backslashes literal; the original
# plain string only worked because every escape was unrecognized, which is a
# DeprecationWarning today and a SyntaxError in future Python versions.
train = pd.read_csv(r"E:\【❤】CSDN_AI_CLASS\第五周\logistic回归作业\FE_pima-indians-diabetes.csv")
print(train.head())
# Target column and feature matrix
y_train = train['Outcome']
X_train = train.drop(["Outcome"], axis=1)
from scipy.sparse import csr_matrix
# Keep the feature names for later use (visualization)
feat_names = X_train.columns
# Store the features as a sparse matrix for the estimator
X_train = csr_matrix(X_train)
## Hyper-parameters to tune.
# L1 and L2 regularization need a solver that supports both: 'liblinear'
# handles l1 as well as l2, whereas the default 'lbfgs' solver is l2-only
# and would make GridSearchCV raise on the l1 candidates (sklearn >= 0.22).
penaltys = ['l1', 'l2']
# Range of the inverse regularization strength C
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
tuned_parameters = dict(penalty=penaltys, C=Cs)  # combined parameter grid
# Tune for accuracy
lr_penalty = LogisticRegression(solver='liblinear')
grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5, return_train_score=True, scoring='accuracy')
grid.fit(X_train, y_train)
print(grid.best_score_)   # best cross-validated accuracy
print(grid.best_params_)  # parameter combination that achieved it
## Hyper-parameters to tune (same grid, scored by log-loss this time).
# As above, 'liblinear' is required so that the l1 candidates in the grid
# do not fail with the l2-only default solver (sklearn >= 0.22).
penaltys = ['l1', 'l2']

# Range of the inverse regularization strength C
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]

tuned_parameters = dict(penalty=penaltys, C=Cs)  # combined parameter grid
# Tune for (negative) log-likelihood loss
lr_penalty = LogisticRegression(solver='liblinear')
grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5, scoring='neg_log_loss', return_train_score=True)
grid.fit(X_train, y_train)
# best_score_ is negated log-loss, so flip the sign to report the loss itself
print(-grid.best_score_)
print(grid.best_params_)

# Plot the cross-validation error curves.
# print(grid.cv_results_)
cv_results = grid.cv_results_
test_means = cv_results['mean_test_score']
test_stds = cv_results['std_test_score']
train_means = cv_results['mean_train_score']
train_stds = cv_results['std_train_score']
# cv_results_ rows are ordered with C varying slowest and penalty fastest,
# so reshaping to (len(Cs), len(penaltys)) puts one penalty per column.
n_Cs = len(Cs)
number_penaltys = len(penaltys)
test_scores = np.array(test_means).reshape(n_Cs, number_penaltys)
train_scores = np.array(train_means).reshape(n_Cs, number_penaltys)
test_stds = np.array(test_stds).reshape(n_Cs, number_penaltys)
train_stds = np.array(train_stds).reshape(n_Cs, number_penaltys)
x_axis = np.log10(Cs)
# One test/train pair of curves per penalty; scores are negated because the
# grid was scored with neg_log_loss, so -score is the log-loss itself.
for col, penalty_name in enumerate(penaltys):
    plt.errorbar(x_axis, -test_scores[:, col], yerr=test_stds[:, col], label=penalty_name + ' Test')
    plt.errorbar(x_axis, -train_scores[:, col], yerr=train_stds[:, col], label=penalty_name + ' Train')
plt.legend()
plt.xlabel('log(C)')
plt.ylabel('logloss')
plt.savefig('LogisticGridSearchCV_C.png')
plt.show()

# Split the data into training and test sets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss

# Randomly hold out 10% of the data as the test sample
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, random_state=777, test_size=0.1)
# 'liblinear' is required for the l1 penalty — the default 'lbfgs' solver
# is l2-only and would raise here (sklearn >= 0.22)
lr = LogisticRegression(penalty='l1', solver='liblinear', tol=0.0001, C=1, random_state=777).fit(X_train, y_train)
y_pred = lr.predict(X_test)
accuracyScore = accuracy_score(y_test, y_pred)
# log_loss needs probability estimates, not hard 0/1 labels; feeding it
# y_pred produces a meaningless (near-saturated) loss value
y_proba = lr.predict_proba(X_test)
logLoss = log_loss(y_test, y_proba)
print("accuracy_score=", accuracyScore, ",log_loss=", logLoss)
print('coef_:\n', lr.coef_)