# -*- coding:utf-8 -*-

# @Time    : 2018/10/18 4:22 PM

# @Author  : Swing


import pandas as pd
import numpy as np
import seaborn as  sn
from matplotlib import pyplot as plt
# Split into train/test sets: 20% held out for testing, the rest for training
from sklearn.model_selection import train_test_split
# Use grid search to pick the best regularization penalty and strength
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score

from sklearn.svm import LinearSVC, SVC

# Feature standardization
from sklearn.preprocessing import StandardScaler

# 读入数据
data = pd.read_csv('diabetes.csv')
print(data.head())

# 查看每个特征为0的个数
empty_col_names = ['Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI']
print((data[empty_col_names] == 0).sum())

y_data = data['Outcome'].values
x_data = data.drop(['Outcome', 'SkinThickness', 'Insulin'], axis=1)
# x_data.head()

empty_cols = ['Glucose', 'BloodPressure', 'BMI']
x_data[empty_cols] = x_data[empty_cols].replace(0, np.nan)
# (x_data.isnull()).sum()

# 使用中值填补缺失值
medians = x_data.median()
x_data = x_data.fillna(medians)
(x_data.isnull()).sum()


ss_x = StandardScaler()
x_data = ss_x.fit_transform(x_data)

x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, random_state=33, test_size=0.2)


penaltys = ['l1', 'l2']
cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
params = dict(penalty=penaltys, C=cs)

logisticReg = LogisticRegression(solver='liblinear')
grid = GridSearchCV(logisticReg, params, cv=5, return_train_score=True)
grid.fit(x_train, y_train)

# 最佳得分和最佳参数
print('最佳得分', -grid.best_score_)
print('最佳参数', grid.best_params_)



# cv误差曲线
test_means = grid.cv_results_['mean_test_score']
test_std = grid.cv_results_['std_test_score']
train_means = grid.cv_results_['mean_train_score']
train_std = grid.cv_results_['std_train_score']

n_cs = len(cs)
number_penatlys = len(penaltys)

# test_scores = np.array(test_means).reshape(n_cs, number_penatlys)
# train_scores = np.array(train_means).reshape(n_cs, number_penatlys)
# test_stds = np.array(test_std).reshape(n_cs, number_penatlys)
# train_stds = np.array(train_std).reshape(n_cs, number_penatlys)

# x_axis = np.log10(cs)
# for i, value in enumerate(penaltys):
#     # pyplot.plot(log(Cs), test_scores[i], label= 'penalty:'   + str(value))
#     plt.errorbar(x_axis, test_scores[:, i], yerr=test_stds[:, i], label=penaltys[i] + ' Test')
#     plt.errorbar(x_axis, train_scores[:, i], yerr=train_stds[:, i], label=penaltys[i] + ' Train')

# plt.legend()
# plt.xlabel('log(C)')
# plt.ylabel('neg-logloss')
# plt.savefig('LogisticGridSearchCV_C.pnpag')
#
# plt.show()


# yp = grid.predict_proba(x_test)
#
# log_loss = log_loss(y_test, yp)
#
# y_pred = grid.predict(x_test)
#
# acc = accuracy_score(y_test, grid.predict(x_test))

# svc = SVC(kernel='linear', probability=True)

# svc_params = {'C': cs}
# ls_grid = GridSearchCV(svc, svc_params, cv=5, scoring='neg_log_loss', return_train_score=True)
# ls_grid.fit(x_train, y_train)
#
# print('svc best score ', -ls_grid.best_score_)
# print('svc best params ', ls_grid.best_params_)


# svc_test_means = ls_grid.cv_results_['mean_test_score']
# svc_test_std = ls_grid.cv_results_['std_test_score']
#
# svc_train_means = ls_grid.cv_results_['mean_train_score']
# svc_train_std = ls_grid.cv_results_['std_train_score']

# n_cs = len(cs)
#
# svc_test_scores = np.array(svc_test_means).reshape(n_cs, 1)
# svc_train_scores = np.array(svc_train_means).reshape(n_cs, 1)
#
# svc_test_stds= np.array(svc_test_std).reshape(n_cs, 1)
# svc_train_stds= np.array(svc_train_std).reshape(n_cs, 1)
#
# x_axis = np.log10(cs)
# plt.errorbar(x_axis, svc_test_scores, yerr=svc_test_stds, label='Test')
# plt.errorbar(x_axis, svc_train_scores, yerr=svc_train_stds, label='Train')
#
# plt.legend()
# plt.xlabel('log(C)')
# plt.ylabel('neg-logloss')
# plt.show()

rbf_svc = SVC(kernel='rbf', probability=True)
gammas = [0.0001, 0.001, 0.01, 0.1, 1]
rbf_svc_params = {'C': cs, 'gamma': gammas}
rbf_svc_grid = GridSearchCV(rbf_svc, param_grid=rbf_svc_params, cv=5, scoring='neg_log_loss', return_train_score=True)
# rbf_svc_grid = GridSearchCV(rbf_svc, param_grid=rbf_svc_params, cv=5, return_train_score=True)
rbf_svc_grid.fit(x_train, y_train)

print('rbf best score ', -rbf_svc_grid.best_score_)
print('rbf best params', rbf_svc_grid.best_params_)

rbf_mean_test_score = rbf_svc_grid.cv_results_['mean_test_score']
rbf_mean_train_score = rbf_svc_grid.cv_results_['mean_train_score']

rbf_std_test = rbf_svc_grid.cv_results_['std_test_score']
rbf_std_train = rbf_svc_grid.cv_results_['std_train_score']

rbf_test_score = np.array(rbf_mean_test_score).reshape(n_cs, len(gammas))

x_axis = np.log10(cs)

for i, value in enumerate(gammas):
    plt.plot(x_axis, rbf_test_score[:, i], label=gammas[i])
plt.legend()
plt.xlabel('log(C)')
plt.ylabel('neg_log_loss')
plt.show()
