import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt

# Widen pandas console output so wide DataFrames print without truncation.
for _opt, _val in (('display.width', 1000),
                   ('display.max_rows', 500),
                   ('display.max_columns', 500)):
    pd.set_option(_opt, _val)

"""读取数据"""
df=pd.read_csv("FE_pima-indians-diabetes.csv")
print(df.head())

'''准备数据'''
y_df=df["Target"]
x_df=df.drop(["Target"],axis=1)
'''
print(y_df.head())
print(x_df.head())
'''
feat_names=x_df.columns

# Baseline: LogisticRegression with default hyper-parameters,
# evaluated via 5-fold cross-validation.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

lr = LogisticRegression(solver='liblinear')

# 'neg_log_loss' yields the *negative* log-loss (higher is better),
# so negate before printing to report the loss itself.
loss = cross_val_score(lr, x_df, y_df, cv=5, scoring='neg_log_loss')
print('logloss of each fold is :', -loss)
print('cv logloss is: ', -loss.mean())

# Tune the regularized LogisticRegression: search over the penalty type
# and the inverse regularization strength C.
penaltys = ['l1', 'l2']
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
tuned_parameters = {'penalty': penaltys, 'C': Cs}

lr_penalty = LogisticRegression(solver='liblinear')
grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5,
                    scoring='neg_log_loss', return_train_score=True)
grid.fit(x_df, y_df)
print(grid)
print(-grid.best_score_)       # best cv log-loss (sign flipped back)
print(grid.best_params_)
# Plot the cv error curves: mean log-loss (with std error bars) vs. log(C),
# one train and one test curve per penalty type.
test_means = grid.cv_results_['mean_test_score']
test_stds = grid.cv_results_['std_test_score']
train_means = grid.cv_results_['mean_train_score']
train_stds = grid.cv_results_['std_train_score']

# cv_results_ enumerates the parameter grid with 'penalty' varying fastest
# (ParameterGrid sorts keys: 'C' < 'penalty'), so reshaping to
# (n_Cs, n_penaltys) puts one penalty per column.
n_Cs = len(Cs)
n_penaltys = len(penaltys)
test_scores = np.array(test_means).reshape(n_Cs, n_penaltys)
train_scores = np.array(train_means).reshape(n_Cs, n_penaltys)
test_stds = np.array(test_stds).reshape(n_Cs, n_penaltys)
train_stds = np.array(train_stds).reshape(n_Cs, n_penaltys)

# Scores are negative log-loss; negate so the y-axis shows log-loss itself.
x_axis = np.log(Cs)
for i, penalty in enumerate(penaltys):
    # FIX: the train statistics were computed but never plotted, and the
    # loop variable was unused — plot both curves and use it directly.
    plt.errorbar(x_axis, -train_scores[:, i], yerr=train_stds[:, i],
                 label=penalty + ' Train')
    plt.errorbar(x_axis, -test_scores[:, i], yerr=test_stds[:, i],
                 label=penalty + ' Test')
plt.legend()
plt.xlabel('log(C)')
plt.ylabel('logloss')
plt.savefig('LogisticGridSearchCv_C.png')
plt.show()

