# Core data-handling and plotting libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
# NOTE(review): sklearn.preprocessing.Imputer was deprecated in 0.20 and
# removed in 0.22; modern code should use sklearn.impute.SimpleImputer
# (note: SimpleImputer has no `axis` parameter — it is column-wise only).
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler

# Model-selection utilities and the classifiers compared in this script.
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegressionCV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC

# NOTE(review): sklearn.cross_validation was removed in sklearn 0.20;
# cross_val_score now lives in sklearn.model_selection (imported above).
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report

# Load the diabetes dataset; "Outcome" is the binary target column.
df = pd.read_csv("./data/diabetes.csv")
features = df.drop("Outcome", axis=1)
labels = df["Outcome"].values
# Split off 20% of the rows as a held-out test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=54)
print(X_train.shape, X_test.shape)

# Treat zeros as missing values and replace them with the per-column mean.
# The imputer is fit on the training split only, then applied to both splits
# so the test set is filled with *training* statistics.
# NOTE(review): Imputer is removed in modern sklearn; the equivalent is
# sklearn.impute.SimpleImputer(missing_values=0, strategy='mean').
imp = Imputer(missing_values=0, strategy='mean', axis=0)
X_train = imp.fit_transform(X_train)
X_test = imp.transform(X_test)


# Standardize features: fit the scaler on the training split and reuse its
# learned mean/std for the test split. Fitting a second time on X_test (as
# the original code did with fit_transform) leaks test-set statistics and
# scales the two splits inconsistently.
ss_X = StandardScaler()
X_train = ss_X.fit_transform(X_train)
X_test = ss_X.transform(X_test)  # bug fix: was ss_X.fit_transform(X_test)


# Baseline: plain logistic regression scored by 5-fold cross-validated log-loss.
logreg = LogisticRegression()
fold_scores = cross_val_score(logreg, X_train, y_train, cv=5, scoring='neg_log_loss')
# cross_val_score returns *negated* log-loss, so flip the sign for display.
print('logloss of each fold is: %s' % -fold_scores)
print('cv logloss is:%s' % -fold_scores.mean())


# Sweep the inverse regularization strength C over 10 log-spaced values
# (1e-3 .. 1e6); Cs and n_Cs are reused by the L2 sweep below.
Cs = np.logspace(-3, 6, 10)
n_Cs = len(Cs)

# L1-penalized logistic regression with built-in 5-fold CV over Cs.
lrcv_L1 = LogisticRegressionCV(
    Cs=Cs, cv=5, scoring='neg_log_loss',
    penalty='l1', solver='liblinear', multi_class='ovr')
lrcv_L1.fit(X_train, y_train)
# scores_[1] holds the per-fold neg-log-loss for each candidate C;
# average over folds and negate to get the mean log-loss per C.
l1_mean_loss = -np.mean(lrcv_L1.scores_[1], axis=0)
print('L1 C is:', lrcv_L1.C_)
print("L1 loss:", l1_mean_loss)

# Mean log-loss as a function of log10(C).
plt.plot(np.log10(Cs), l1_mean_loss.reshape(n_Cs, 1))
plt.xlabel('L1 log(C)')
plt.ylabel('L1 neg-logloss')
plt.show()

# Repeat the same sweep with an L2 penalty for comparison.
lrcv_L2 = LogisticRegressionCV(
    Cs=Cs, cv=5, scoring='neg_log_loss',
    penalty='l2', solver='liblinear', multi_class='ovr')
lrcv_L2.fit(X_train, y_train)
# Fold-averaged log-loss per candidate C.
l2_mean_loss = -np.mean(lrcv_L2.scores_[1], axis=0)
print('L2 C is:', lrcv_L2.C_)
print("L2 loss:", l2_mean_loss)

plt.plot(np.log10(Cs), l2_mean_loss.reshape(n_Cs, 1))
plt.xlabel('L2 log(C)')
plt.ylabel('L2 neg-logloss')
plt.show()


# Joint grid search over penalty type and C with 5-fold cross-validation.
param_grid = {'penalty': ['l1', 'l2'], 'C': Cs}
grid = GridSearchCV(LogisticRegression(), param_grid, cv=5, scoring='neg_log_loss')
grid.fit(X_train, y_train)
# best_score_ is neg-log-loss, so negate it to report the actual loss.
print("GridSearchCV best_score:", -grid.best_score_)
print("GridSearchCV best_params:", grid.best_params_)

#
# SVC1 = LinearSVC().fit(X_train, y_train)
# y_predict = SVC1.predict(X_test)
# print(SVC1.score(X_test, y_test))
# print("Classification report for classifier %s:\n%s\n" % (SVC1, classification_report(y_test, y_predict)))
# print("Confusion matrix:\n%s" % confusion_matrix(y_test, y_predict))


def fit_grid_point_Linear(C, X_train, y_train, X_val, y_val):
    """Train a LinearSVC with penalty strength C on the training split
    and return its accuracy on the validation split."""
    model = LinearSVC(C=C)
    model.fit(X_train, y_train)
    # Mean accuracy on the held-out validation data.
    return model.score(X_val, y_val)


# Candidate C values: logspace(a, b, N) gives N points from 10**a to 10**b.
C_s = np.logspace(-3, 6, 10)

# Evaluate a LinearSVC at each C; here the test split doubles as the
# validation set used to pick C. (The enumerate index in the original
# loop was unused — a comprehension is the idiomatic form.)
accuracy_s = [fit_grid_point_Linear(oneC, X_train, y_train, X_test, y_test)
              for oneC in C_s]
print("LinearSVC max accuracy:", np.max(accuracy_s))

x_axis = np.log10(C_s)
# Bug fix: the plot had no label, so plt.legend() warned and rendered empty.
plt.plot(x_axis, np.array(accuracy_s), 'b-', label='Test accuracy')
plt.legend()
plt.xlabel('log(C)')
plt.ylabel('accuracy')
plt.show()



def fit_grid_point_RBF(C, gamma, X_train, y_train, X_val, y_val):
    """Train an RBF-kernel SVC with the given C and gamma on the training
    split and return its accuracy on the validation split."""
    clf = SVC(C=C, kernel='rbf', gamma=gamma)
    clf.fit(X_train, y_train)
    # Mean accuracy on the held-out validation data.
    return clf.score(X_val, y_val)

# Grid over C (1 .. 1e4) and gamma (1e-4 .. 1) for the RBF-kernel SVC.
# logspace(a, b, N) gives N points from 10**a to 10**b.
C_s = np.logspace(0, 4, 5)
gamma_s = np.logspace(-4, 0, 5)

# Score every (C, gamma) pair. Row-major comprehension order (C outer,
# gamma inner) matches the reshape below: rows = C values, cols = gammas.
# (Both enumerate indices in the original loops were unused.)
accuracy_s = [fit_grid_point_RBF(oneC, gamma, X_train, y_train, X_test, y_test)
              for oneC in C_s for gamma in gamma_s]
accuracy_s1 = np.array(accuracy_s).reshape(len(C_s), len(gamma_s))
print("RBF SVM max accuracy:", np.max(accuracy_s))

# One curve per gamma: accuracy as a function of log10(C).
x_axis = np.log10(C_s)
for j, gamma in enumerate(gamma_s):
    plt.plot(x_axis, accuracy_s1[:, j],
             label=' Test - log(gamma)' + str(np.log10(gamma)))

plt.legend()
plt.xlabel('log(C)')
plt.ylabel('accuracy')
plt.show()
