import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.metrics import accuracy_score,log_loss
from sklearn.preprocessing import StandardScaler,minmax_scale

# Load the Pima Indians diabetes dataset (last column 'Target' is the label).
data = pd.read_csv("pima-indians-diabetes.csv")
# print(data.head())
# print(data.info())
# print(data.describe())

# Diagnostic: count zero entries per column -- in this dataset a literal 0
# encodes a missing measurement in several physiological columns.
# The original bare expression discarded its result; print it so the check
# is actually visible.
print(data[data == 0].count())

# indices of the numerical (continuous) feature columns
# numberical = [1,2,3,4,5,6]
# fig,axs = plt.subplots(8,2,figsize=(18,48))
# columns = data.columns
# axs = axs.flatten()
# for i in range(data.shape[1]-1):
#     if i in numberical:
        # sns.distplot(data.iloc[:,i],bins=30,kde=False,ax=axs[2*i])
    # else:
    #     sns.countplot(data.iloc[:,i],ax=axs[2*i])
    # sns.violinplot(x='Target',y=columns[i],data=data,ax=axs[2 * i + 1])
# plt.show()
# sns.countplot(data['Target'])

# corr = data.corr()
# plt.figure(figsize=(14,10))
# sns.heatmap(corr,square=True,annot=True)

# Columns where a literal 0 encodes a missing measurement; replace the zeros
# with NaN and impute with each column's median.
median_imputed_cols = ['Plasma_glucose_concentration','blood_pressure','BMI']
# np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0.
data[median_imputed_cols] = data[median_imputed_cols].replace(0, np.nan)
data[median_imputed_cols] = data[median_imputed_cols].fillna(data[median_imputed_cols].median())
# Sanity check: no NaNs should remain, and the min of each column is now > 0.
print(data[median_imputed_cols].isnull().sum())
print(data[median_imputed_cols].describe())

# new_feature_col = ['Triceps_skin_fold_thickness','serum_insulin']
# for col in new_feature_col:
#     data[col+"_miss"] = data[col].apply(lambda x:1 if x==0 else 0) 
#     sns.countplot(hue = col+"_miss",x="Target",data=data)
#     plt.show()
# corr = data[['Triceps_skin_fold_thickness_miss','serum_insulin_miss','Target']].corr()
# sns.heatmap(corr,square=True,annot=True)

# Split off the target column and scale the features to the [0, 1] range.
y = data['Target']
X = minmax_scale(data.drop('Target', axis=1))

# Hyperparameter search space for the logistic regression grid search below.
logist = LogisticRegression()
penalty = ['l1', 'l2']
C = [0.1, 1, 10, 100, 1000]
solver = ['liblinear', 'saga']

# Grid search scored by negative log-loss.
param_grid = {"penalty": penalty, "C": C, "solver": solver}
clf1 = GridSearchCV(
    logist,
    param_grid,
    scoring="neg_log_loss",
    verbose=0,
    cv=5,
    return_train_score=True,
)
clf1.fit(X, y)
print("以负log评估的训练效果得分：", -clf1.best_score_)
print("以负log评估的最佳超参数", clf1.best_params_)
# Pull the cross-validation curves out of the search results.
results = clf1.cv_results_
test_means = results['mean_test_score']
test_stds = results['std_test_score']
train_means = results['mean_train_score']
train_stds = results['std_train_score']

# Plot neg-log-loss vs C, one curve per penalty, averaged over the solver axis.
# GridSearchCV enumerates candidates with parameter names sorted alphabetically
# (C, penalty, solver) and the last one varying fastest, so the flat score
# arrays reshape to (n_Cs, n_penalty, n_solver).
n_Cs = len(C)
number_penaltys = len(penalty)
n_solver = len(solver)
test_means_scores = np.array(test_means).reshape(n_Cs, number_penaltys, n_solver).mean(axis=2)
train_means_scores = np.array(train_means).reshape(n_Cs, number_penaltys, n_solver).mean(axis=2)
# NOTE(review): taking the mean of std-devs is only a rough aggregate; kept
# as-is since it is used purely for error-bar display.
test_stds_scores = np.array(test_stds).reshape(n_Cs, number_penaltys, n_solver).mean(axis=2)
train_stds_scores = np.array(train_stds).reshape(n_Cs, number_penaltys, n_solver).mean(axis=2)

x_axis = np.log10(C)
# Use the loop variable directly instead of re-indexing penalty[i].
for i, pen in enumerate(penalty):
    plt.errorbar(x_axis, test_means_scores[:, i], yerr=test_stds_scores[:, i], label=pen + ' Test')

plt.legend()
plt.xlabel('log(C)')
plt.ylabel('neg-log-loss')
plt.show()

# Plot neg-log-loss vs C, one curve per solver, averaged over the penalty axis.
# After mean(axis=1) the second dimension indexes solvers.
test_means_scores = np.array(test_means).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)
train_means_scores = np.array(train_means).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)
test_stds_scores = np.array(test_stds).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)
train_stds_scores = np.array(train_stds).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)

x_axis = np.log10(C)
# BUG FIX: the original iterated enumerate(penalty) while labelling with
# solver[i]; it only worked because len(penalty) == len(solver) == 2.
# The columns being plotted are solver columns, so iterate over solver.
for i, slv in enumerate(solver):
    plt.errorbar(x_axis, test_means_scores[:, i], yerr=test_stds_scores[:, i], label=slv + ' Test')

plt.legend()
plt.xlabel('log(C)')
plt.ylabel('neg-log-loss')
plt.show()

# Second grid search over the same space, scored by plain accuracy.
param_grid = {"penalty": penalty, "C": C, "solver": solver}
clf2 = GridSearchCV(
    logist,
    param_grid,
    scoring="accuracy",
    verbose=0,
    cv=5,
    return_train_score=True,
)
clf2.fit(X, y)
print("以准确度评估的训练效果得分：", clf2.best_score_)
print("以准确度评估的最佳超参数", clf2.best_params_)
# Pull the cross-validation curves out of the search results.
results = clf2.cv_results_
test_means = results['mean_test_score']
test_stds = results['std_test_score']
train_means = results['mean_train_score']
train_stds = results['std_train_score']


# plot results by C and penalty
n_Cs = len(C)
number_penaltys = len(penalty)
n_solver = len(solver)
test_means_scores = np.array(test_means).reshape(n_Cs,number_penaltys,n_solver).mean(axis=2)
train_means_scores = np.array(train_means).reshape(n_Cs,number_penaltys,n_solver).mean(axis=2)
test_stds_scores = np.array(test_stds).reshape(n_Cs,number_penaltys,n_solver).mean(axis=2)
train_stds_scores = np.array(train_stds).reshape(n_Cs,number_penaltys,n_solver).mean(axis=2)

x_axis = np.log10(C)
for i, value in enumerate(penalty):
    plt.errorbar(x_axis, test_means_scores[:,i], yerr=test_stds_scores[:,i] ,label = penalty[i] +' Test')
    
plt.legend()
plt.xlabel( 'log(C)' )                                                                                                      
plt.ylabel( 'accuracy' )
plt.show()

# Plot accuracy vs C, one curve per solver, averaged over the penalty axis.
# After mean(axis=1) the second dimension indexes solvers.
test_means_scores = np.array(test_means).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)
train_means_scores = np.array(train_means).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)
test_stds_scores = np.array(test_stds).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)
train_stds_scores = np.array(train_stds).reshape(n_Cs, number_penaltys, n_solver).mean(axis=1)

x_axis = np.log10(C)
# BUG FIX: the original iterated enumerate(penalty) while labelling with
# solver[i]; it only worked because len(penalty) == len(solver) == 2.
# The columns being plotted are solver columns, so iterate over solver.
for i, slv in enumerate(solver):
    plt.errorbar(x_axis, test_means_scores[:, i], yerr=test_stds_scores[:, i], label=slv + ' Test')

plt.legend()
plt.xlabel('log(C)')
plt.ylabel('accuracy')
plt.show()