# Import the required modules first
import pandas as pd
import numpy as np

from sklearn.model_selection import GridSearchCV

# The competition's evaluation metric is log loss
from sklearn.metrics import log_loss

import matplotlib.pyplot as plt
import seaborn as sns

#####################################################################################
# 0 EDA: load the dataset, inspect it, and drop physiologically impossible rows.
data = pd.read_csv('diabetes.csv').copy()
print(data.head())
print(data.describe())
print(data.shape)

# Class imbalance: 500 samples with Outcome 0 vs. 268 with Outcome 1.
# sns.countplot(data.Outcome)

# Pregnancies vs. Outcome: 17 pregnancies looks extreme, but it is not
# clearly an outlier, so the row is kept for now.
# sns.jointplot(data.Pregnancies, data.Outcome)

# Glucose and BloodPressure cannot be zero in a living patient (confirmed
# with a physician), so zero readings are treated as bad records and dropped.
for zero_invalid_col in ('Glucose', 'BloodPressure'):
    data = data.loc[data[zero_invalid_col] > 0]
    print(data.shape)
# sns.jointplot(data.Glucose, data.Outcome)
# sns.jointplot(data.BloodPressure, data.Outcome)

# SkinThickness: one value above 90 exists; kept for now.
# sns.jointplot(data.SkinThickness, data.Outcome)

# Insulin: one value above 800 exists; median imputation of the zero
# (missing) values was considered but left disabled, so zeros are kept.
print('median is ', data['Insulin'].median())
print(data['Insulin'].head(100))
# sns.jointplot(data.Insulin, data.Outcome)

# BMI of zero is likewise impossible (confirmed with a physician) — drop it.
data = data.loc[data['BMI'] > 0]
print(data.shape)
# sns.jointplot(data.BMI, data.Outcome)

# DiabetesPedigreeFunction: a log transform may be worth considering.
# sns.jointplot(data.DiabetesPedigreeFunction, data.Outcome)
# Age: likewise a candidate for a log transform.
# sns.jointplot(data.Age, data.Outcome)

#####################################################################################
# 1 Feature engineering: bucket Age into 5 bands (20~33, 33~45, 45~57, 57~69, 69~81).
# pd.cut with right-closed intervals reproduces the <=33 / (33,45] / ... chain.
data['CategoricalAge'] = pd.cut(
    data['Age'],
    bins=[-np.inf, 33, 45, 57, 69, np.inf],
    labels=[1, 2, 3, 4, 5],
).astype(int)
# print(data['CategoricalAge'])

# Inspect the per-band feature means, then sort rows by the age band.
data_mean = data.groupby('CategoricalAge').mean()
print(data_mean)
data = data.sort_values(by='CategoricalAge')
print(type(data.groupby('CategoricalAge')['CategoricalAge'].count()))
# Pregnancies bucketed into low/medium/high, roughly the tertiles
# [(-0.017, 5.667] < (5.667, 11.333] < (11.333, 17.0]].
preg = data['Pregnancies']
data['CategoricalPregnancies'] = np.select(
    [preg <= 5.6, preg <= 11.3], [1, 2], default=3).astype(int)
# print(data['CategoricalPregnancies'])


# Glucose bucketed into low/medium/high, roughly the tertiles
# [(43.845, 95.667] < (95.667, 147.333] < (147.333, 199.0]].
glu = data['Glucose']
data['CategoricalGlucose'] = np.select(
    [glu <= 95.6, glu <= 147.3], [1, 2], default=3).astype(int)
# print(data['CategoricalGlucose'])
# BloodPressure bucketed into low/medium/high, roughly the tertiles
# [(23.902, 56.667] < (56.667, 89.333] < (89.333, 122.0]].
bp = data['BloodPressure']
data['CategoricalBloodPressure'] = np.select(
    [bp <= 56.6, bp <= 89.3], [1, 2], default=3).astype(int)
# print(data['CategoricalBloodPressure'])

# SkinThickness bucketed into three classes. Zero readings are very common
# (missing values), so they get their own category instead of a tertile.
skin = data['SkinThickness']
data['CategoricalSkinThickness'] = np.select(
    [skin == 0, skin <= 66], [1, 2], default=3).astype(int)
# print(data['CategoricalSkinThickness'])

# Insulin bucketed into three classes. Zero readings are very common
# (missing values), so they get their own category instead of a tertile.
ins = data['Insulin']
data['CategoricalInsulin'] = np.select(
    [ins == 0, ins <= 564], [1, 2], default=3).astype(int)
# print(data['CategoricalInsulin'])

# BMI bucketed into low/medium/high, roughly the tertiles
# [(18.151, 34.5] < (34.5, 50.8] < (50.8, 67.1]].
bmi = data['BMI']
data['CategoricalBMI'] = np.select(
    [bmi <= 34.5, bmi <= 50.8], [1, 2], default=3).astype(int)
# print(data['CategoricalBMI'])

# DiabetesPedigreeFunction bucketed into low/medium/high, roughly the
# tertiles [(0.0757, 0.859] < (0.859, 1.639] < (1.639, 2.42]].
dpf = data['DiabetesPedigreeFunction']
data['CategoricalDiabetes'] = np.select(
    [dpf <= 0.859, dpf <= 1.639], [1, 2], default=3).astype(int)
# print(data['CategoricalDiabetes'])

# Helper for building group-demeaned features (used with groupby().transform).
def demean(col):
    """Return *col* with its mean subtracted, i.e. centred on zero."""
    center = col.mean()
    return col - center
# Centre Glucose and Insulin within each age band so the new features
# capture deviation from the band's mean rather than the absolute level.
by_age = data.groupby('CategoricalAge')
data['Glucose_std'] = by_age['Glucose'].transform(demean)
data['Insulin_std'] = by_age['Insulin'].transform(demean)

# print(data.head())


# 从原始数据中分离输入特征x和输出y
from sklearn.utils import shuffle
data = shuffle(data)
y = data['Outcome'].values
X = data.drop('Outcome', axis = 1)

X_value_index_list_random = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin',
       'BMI', 'DiabetesPedigreeFunction', 'Age', 'Glucose_std',  'Insulin_std']
X_hot_index_list_random = ['CategoricalAge',
       'CategoricalPregnancies', 'CategoricalGlucose',
       'CategoricalBloodPressure', 'CategoricalSkinThickness',
       'CategoricalInsulin', 'CategoricalBMI', 'CategoricalDiabetes']
X_value = data[X_value_index_list_random]
X_hot = data[X_hot_index_list_random]

#将数据分割训练数据与测试数据
from sklearn.model_selection import train_test_split

# 随机采样20%的数据构建测试样本，其余作为训练样本
X_train, X_test,y_train, y_test = train_test_split(X, y, random_state=33, test_size=0.1,stratify=y)
# Preprocessing: standardise the numeric features and one-hot encode the
# categorical features, then merge the two blocks before model training.
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
# from sklearn.preprocessing import PolynomialFeatures
from scipy.sparse import hstack

ss_X = StandardScaler()
# ss_y = StandardScaler()
# handle_unknown='ignore' keeps transform() from raising if the test split
# contains a category level that never appeared in the training split.
one_hot_encoder = OneHotEncoder(handle_unknown='ignore')
# poly = PolynomialFeatures(degree=2)

# Fit scaler/encoder on the training split only, then apply to the test split.
# print(X_train[X_value_index_list_random].describe())
X_value_train = ss_X.fit_transform(X_train[X_value_index_list_random])
X_value_test = ss_X.transform(X_test[X_value_index_list_random])
#
# X_value_train = poly.fit_transform(X_value_train)
# X_value_test = poly.transform(X_value_test)
#
X_hot_train = one_hot_encoder.fit_transform(X_train[X_hot_index_list_random])
X_hot_test = one_hot_encoder.transform(X_test[X_hot_index_list_random])

# Concatenate the dense standardised block with the sparse one-hot block.
x_train = hstack((X_value_train, X_hot_train))
x_test = hstack((X_value_test, X_hot_test))

from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()

# Cross-validation to estimate baseline performance / tune parameters.
# For classification, cross_val_score defaults to StratifiedKFold.
from sklearn.model_selection import cross_val_score
loss = cross_val_score(lr, x_train, y_train, cv=5, scoring='neg_log_loss')
print('logloss of each fold is: ', -loss)
print('cv logloss is:', -loss.mean())

from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, classification_report

# Hyper-parameters to tune: penalty type and inverse regularisation strength C.
penaltys = ['l1', 'l2']
Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
# solvers=['newton-cg','lbfgs','liblinear','sag']
# multi_classes=['ovr','multinomial']
tuned_parameters = dict(penalty = penaltys, C = Cs)

# BUG FIX: the liblinear solver supports both l1 and l2 penalties; the
# modern default solver (lbfgs) raises a ValueError for penalty='l1'.
lr_penalty = LogisticRegression(solver='liblinear')
# BUG FIX: return_train_score=True is required for the mean/std train-score
# keys read when plotting the CV curves below (KeyError otherwise).
grid = GridSearchCV(lr_penalty, tuned_parameters, cv=5, scoring='neg_log_loss',
                    return_train_score=True)
grid.fit(x_train, y_train)
print(grid.cv_results_)
print(-grid.best_score_)
print(grid.best_params_)
y_train_pred = grid.predict(x_train)
y_test_pred = grid.predict(x_test)
print("accuracy score: ")
print(accuracy_score(y_test, y_test_pred))
print(classification_report(y_test, y_test_pred))

# Plot the CV error curves from the logistic-regression grid search.
test_means = grid.cv_results_['mean_test_score']
test_stds = grid.cv_results_['std_test_score']
train_means = grid.cv_results_['mean_train_score']
train_stds = grid.cv_results_['std_train_score']

# One shared figure: ax1 for logistic regression, ax2/ax3 for the SVM
# sections below; ax4 is created for the 2x2 layout but left unused here.
fig = plt.figure()
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)

# Reshape the flat cv_results_ arrays to (len(Cs), len(penaltys)) so each
# column holds the scores for one penalty across all C values.
shape = (len(Cs), len(penaltys))
test_scores = np.asarray(test_means).reshape(shape)
train_scores = np.asarray(train_means).reshape(shape)
test_stds = np.asarray(test_stds).reshape(shape)
train_stds = np.asarray(train_stds).reshape(shape)

x_axis = np.log10(Cs)
for col, penalty in enumerate(penaltys):
    ax1.errorbar(x_axis, test_scores[:, col], yerr=test_stds[:, col], label=penalty + ' Test')
    ax1.errorbar(x_axis, train_scores[:, col], yerr=train_stds[:, col], label=penalty + ' Train')

ax1.legend()
ax1.set_xlabel('log(C)')
ax1.set_ylabel('neg-logloss')
# pyplot.savefig('LogisticGridSearchCV_C.png')



# LinearSVC cannot produce per-class probabilities (no predict_proba); the
# Otto competition requires per-class probabilities, so this section only
# demonstrates SVM usage.
# https://xacecask2.gitbooks.io/scikit-learn-user-guide-chinese-version/content/sec1.4.html
# 1.4.1.2 Scores and probabilities
print('LinearSVC().fit(x_train, y_train)')
from sklearn.svm import LinearSVC
from sklearn import metrics

# Fit an untuned LinearSVC and evaluate it on the held-out split.
SVC1 = LinearSVC().fit(x_train, y_train)
y_predict = SVC1.predict(x_test)

print("Classification report for classifier %s:\n%s\n"
      % (SVC1, metrics.classification_report(y_test, y_predict)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, y_predict))

# Tune the regularisation strength C via cross-validated grid search, the
# same recipe as for logistic regression (candidate C values on a fine grid).
penaltys = ['l2']
C_s = [x / 10000 for x in range(1, 101)]
print(C_s)
# solvers=['newton-cg','lbfgs','liblinear','sag']
# multi_classes=['ovr','multinomial']
tuned_parameters = dict(C = C_s, penalty = penaltys)
linersvc_penalty = LinearSVC()
grid = GridSearchCV(linersvc_penalty, tuned_parameters, cv=5, scoring='accuracy')
grid.fit(x_train, y_train)
y_train_pred = grid.predict(x_train)
y_test_pred = grid.predict(x_test)
# BUG FIX: the original reprinted the untuned SVC1/y_predict results here;
# report the tuned best estimator and its predictions instead.
print("Classification report for classifier %s:\n%s\n"
      % (grid.best_estimator_, metrics.classification_report(y_test, y_test_pred)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, y_test_pred))
print('****************************************************************')
# print(grid.cv_results_)
print(grid.best_score_)
print(grid.best_params_)

x_axis = np.log10(C_s)
accuracy_s = grid.cv_results_['mean_test_score']
# BUG FIX: label the curve so ax2.legend() has a handle to display
# (the original called legend() on an unlabeled line).
ax2.plot(x_axis, np.array(accuracy_s), 'b-', label='LinearSVC CV accuracy')

ax2.legend()
ax2.set_xlabel('log(C)')
ax2.set_ylabel('accuracy')

# RBF-kernel SVM: joint grid search over the regularisation strength C and
# the kernel width gamma.
from sklearn.svm import SVC
C_s = [c / 10 for c in range(1, 21)]       # C in 0.1 .. 2.0
gamma_s = [g / 100 for g in range(1, 11)]  # gamma in 0.01 .. 0.1
kernel_s = ['rbf']
tuned_parameters = dict(C = C_s, gamma = gamma_s, kernel = kernel_s)
SVC2 = SVC()
grid = GridSearchCV(SVC2, tuned_parameters, cv=5, scoring='accuracy')
grid.fit(x_train, y_train)
y_train_pred = grid.predict(x_train)
y_test_pred = grid.predict(x_test)
print("Classification report for classifier %s:\n%s\n"
      % (SVC2, metrics.classification_report(y_test, y_test_pred)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(y_test, y_test_pred))
print('****************************************************************')
# print(grid.cv_results_)
# print(grid.best_score_)
print(grid.best_params_)

# Reshape the CV accuracies to (C, gamma) so each column is one gamma value.
accuracy_s = grid.cv_results_['mean_test_score']
accuracy_s1 = np.array(accuracy_s).reshape(len(C_s), len(gamma_s))
x_axis = np.log10(C_s)
for j, gamma in enumerate(gamma_s):
    # BUG FIX: label each curve so ax3.legend() has handles to display
    # (the original plotted unlabeled lines, yielding an empty legend).
    ax3.plot(x_axis, accuracy_s1[:, j], label='gamma=%g' % gamma)

ax3.legend(loc='best')
ax3.set_xlabel('log(C)')
ax3.set_ylabel('accuracy')

plt.show()