'''
 数据说明： Pima Indians Diabetes Data Set（皮马印第安人糖尿病数据集）
 根据现有的医疗信息预测5年内皮马印第安人糖尿病发作的概率。
字段说明
数据集共9个字段:
pregnants：怀孕次数
Plasma_glucose_concentration：口服葡萄糖耐量试验中2小时后的血浆葡萄糖浓度
blood_pressure：舒张压，单位:mm Hg
Triceps_skin_fold_thickness：三头肌皮褶厚度，单位：mm
serum_insulin：餐后2小时血清胰岛素，单位: mu U/ml
BMI：体重指数（体重（公斤）/ 身高（米）^2）
Diabetes_pedigree_function：糖尿病家系作用
Age：年龄
Target：标签， 0表示不发病，1表示发病
'''
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import log_loss
from matplotlib import pyplot

# Load the feature-engineered Pima Indians diabetes data set
# (expects the CSV next to this script; columns per the header above).
train = pd.read_csv("./FE_pima-indians-diabetes.csv")
print(train.head(8))
# Split into features X and label y

y_train = train['Target']
X_train = train.drop(["Target"], axis=1)

# Keep the feature names for the feature-importance visualization later
feat_names = X_train.columns

# Baseline model: a LogisticRegression with default hyper-parameters,
# evaluated by cross-validation (used both for performance estimation
# and, later, for model selection).
# For classification tasks cross-validation defaults to StratifiedKFold.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

baseline_clf = LogisticRegression(solver='liblinear')
# 5-fold CV; sklearn returns the *negative* log-loss (greater is better),
# so flip the sign below to report the usual loss value.
fold_scores = cross_val_score(baseline_clf, X_train, y_train,
                              cv=5, scoring='neg_log_loss')
print('logloss of each fold is:', -fold_scores)
print('cv logloss is:', -fold_scores.mean())
# Observed on one run:
#   logloss of each fold is: [0.48797856 0.53011593 0.4562292  0.422546   0.48392885]
#   cv logloss is: 0.47615970944434044  (not great)


# Regularized logistic regression with hyper-parameter tuning.
# Parameters tuned: C (inverse regularization strength; candidates are
# spread uniformly in log-space) and the penalty type (l1 / l2).
# Objective: J = C * sum(logloss(f(xi), yi)) + penalty
# Standard sklearn tuning recipe: build a GridSearchCV over the candidate
# grid, then call its fit().
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression

# Hyper-parameter search space; 'liblinear' supports both l1 and l2.
penaltys = ['l1', 'l2']
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
tuned_parameters = dict(penalty=penaltys, C=Cs)

lr_penalty = LogisticRegression(solver='liblinear')
# verbose=5 prints search progress; return_train_score=True keeps the
# per-candidate training scores so train vs. test curves can be plotted.
# With scoring='neg_log_loss' the best score would need its sign flipped;
# with 'accuracy' (used here) greater is already better.
#gr_id = GridSearchCV(lr_penalty, tuned_parameters, cv=5, scoring='neg_log_loss', return_train_score=True, verbose=5)
gr_id = GridSearchCV(lr_penalty, tuned_parameters, cv=5, scoring='accuracy', return_train_score=True, verbose=5)

gr_id.fit(X_train, y_train)
# Best CV score and the hyper-parameters that achieved it.
# BUGFIX: accuracy must NOT be negated — the original printed
# -gr_id.best_score_, which is only correct for neg_log_loss scoring.
print(gr_id.best_score_)
print(gr_id.best_params_)

# Coefficients of the refit best estimator, one per feature.
# BUGFIX: ravel() flattens the (1, n_features) coef_ matrix to scalars;
# the original stored length-1 arrays per cell, which sort and print badly.
df = pd.DataFrame({"columns": list(feat_names),
                   "coeffient": gr_id.best_estimator_.coef_.ravel()})

# BUGFIX: sort_values returns a new frame — the original discarded it.
print(df.sort_values(by=['coeffient'], ascending=False))

# Plot the CV curves: mean score vs. log10(C), one test and one train
# curve per penalty type, with +/- one std error bars across folds.
# BUGFIX: scoring='accuracy' is "greater is better", so the scores are
# used as-is; the original negated them, which is only valid for
# neg_log_loss and flipped the curves' sign here.
test_means = gr_id.cv_results_['mean_test_score']
test_stds = gr_id.cv_results_['std_test_score']
train_means = gr_id.cv_results_['mean_train_score']
train_stds = gr_id.cv_results_['std_train_score']

# cv_results_ enumerates the (C, penalty) grid with penalty varying
# fastest, so reshaping to (n_Cs, n_penaltys) yields one penalty per
# column and one C value per row.
n_Cs = len(Cs)
number_penaltys = len(penaltys)
test_scores = np.array(test_means).reshape(n_Cs, number_penaltys)
train_scores = np.array(train_means).reshape(n_Cs, number_penaltys)
test_stds = np.array(test_stds).reshape(n_Cs, number_penaltys)
train_stds = np.array(train_stds).reshape(n_Cs, number_penaltys)

x_axis = np.log10(Cs)
for i, value in enumerate(penaltys):
    # BUGFIX: the original passed another pyplot.errorbar(...) call as
    # label=, which drew spurious extra curves and produced meaningless
    # legend entries; the label must be a plain string.
    pyplot.errorbar(x_axis, test_scores[:, i], yerr=test_stds[:, i],
                    label=value + ' test')
    pyplot.errorbar(x_axis, train_scores[:, i], yerr=train_stds[:, i],
                    label=value + ' train')

# Finish the figure once after all curves are drawn.  BUGFIX: the
# original ran legend/savefig/show inside the loop, saving and showing a
# half-finished figure on every iteration; the y-label also said
# 'logloss' although the plotted metric is accuracy.
pyplot.legend()
pyplot.xlabel('log(C)')
pyplot.ylabel('accuracy')
pyplot.savefig('LogisticGridSearchCV_C.png')
pyplot.show()











