"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================

Toy example of 1D regression using linear, polynomial and RBF kernels.

"""
print(__doc__)

import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import preprocessing

# #############################################################################
# Generate sample data
# X = np.sort(5 * np.random.rand(40, 1), axis=0)
# y = np.sin(X).ravel()

# Load the training data: column 1 is the regression target, columns 0 and
# 2-6 are the input features.
# NOTE: np.float was removed in NumPy 1.24 -- the builtin float is the
# correct dtype here.  "Training.csv" (no backslash) is the portable spelling
# of the same relative path.
data = np.loadtxt("Training.csv", dtype=float, delimiter=',')

X = np.c_[data[:, 0:1], data[:, 2:7]]  # features: column 0 plus columns 2-6
Y = data[:, 1]                         # target: column 1

# 40% training; the remaining 60% is split evenly into test and validation.
arTrnX, arX, arTrnY, arY = train_test_split(X, Y, test_size=0.6, random_state=0)
arTstX, arVldX, arTstY, arVldY = train_test_split(arX, arY, test_size=0.5, random_state=0)

# Standardize features with statistics fitted on the training split only, so
# no information leaks from the test/validation splits into training.
scalerX = preprocessing.StandardScaler().fit(arTrnX)

arTrnX_Std = scalerX.transform(arTrnX)
arVldX_Std = scalerX.transform(arVldX)
arTstX_Std = scalerX.transform(arTstX)

# clf = svm.SVC(C=1).fit(X_Trn_transformed, y_Trn)
# X_test_transformed = scalerX.transform(X_test)
# clf.score(X_test_transformed, y_test)

# scalerY = preprocessing.StandardScaler().fit(arTrnY)
#
# arTrnY_Std = scalerX.transform(arTrnY)
# arTstY_Std = scalerX.transform(arTstY)



# arTstX = np.c_[data[0:200,0:1],data[0:200,2:7]]
# arTstY = data[0:200,1]




# #############################################################################
# Add noise to targets
# y[::5] += 3 * (0.5 - np.random.rand(8))

# #############################################################################
# Fit regression model
# #############################################################################
# Fit regression model
# Three SVR variants sharing the same regularization strength C; only the
# kernel-specific hyperparameters differ between them.
_shared = dict(C=100)
svr_rbf = SVR(kernel='rbf', gamma=0.1, epsilon=.1, **_shared)
svr_lin = SVR(kernel='linear', gamma='auto', **_shared)
svr_poly = SVR(kernel='poly', gamma='auto', degree=3, epsilon=.1, coef0=1,
               **_shared)

# #############################################################################
# Look at the results
# Fit the RBF-kernel SVR on the standardized training data, then compare
# predicted vs. actual targets on the held-out test and validation splits.
# (Dead variables lw / svrs / kernel_label / model_color, used only by the
# commented-out legacy plotting code below, have been removed.)
rg = svr_rbf.fit(arTrnX_Std, arTrnY)

arVldPreY = rg.predict(arVldX_Std)
arTstPreY = rg.predict(arTstX_Std)

# Predicted-vs-actual scatter: points on the y=x diagonal are perfect
# predictions.  sharey only shares the y axis, so apply identical limits to
# BOTH panels explicitly (the original set them on the first panel only).
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(15, 10), sharey=True)
ax[0].scatter(arTstY, arTstPreY)
ax[1].scatter(arVldY, arVldPreY)
ax[0].set_title('Test split')
ax[1].set_title('Validation split')

lo, hi = min(arY) - 0.5, max(arY) + 0.5
for axis in ax:
    axis.set_xlim(lo, hi)
    axis.set_ylim(lo, hi)
    axis.set_xlabel('actual')
ax[0].set_ylabel('predicted')
plt.show()

# for ix, svr in enumerate(svrs):
# #
# #     axes[ix].plot(data[200:298,6], svr.fit(arTrainX, arTrainY).predict(arTestX), color=model_color[ix], lw=lw,
# #                   label='{} model'.format(kernel_label[ix]))
# #     # axes[ix].scatter(X[svr.support_], y[svr.support_], facecolor="none",
# #     #                  edgecolor=model_color[ix], s=50,
# #     #                  label='{} support vectors'.format(kernel_label[ix]))
# #     # axes[ix].scatter(X[np.setdiff1d(np.arange(len(X)), svr.support_)],
# #     #                  y[np.setdiff1d(np.arange(len(X)), svr.support_)],
# #     #                  facecolor="none", edgecolor="k", s=50,
# #     #                  label='other training data')
# #     # axes[ix].legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),
# #     #                 ncol=1, fancybox=True, shadow=True)
# #
# # fig.text(0.5, 0.04, 'data', ha='center', va='center')
# # fig.text(0.06, 0.5, 'target', ha='center', va='center', rotation='vertical')
# # fig.suptitle("Support Vector Regression", fontsize=14)
# # plt.show()
