"""
1991年全国30个省、自治区、直辖市城镇居民月平均消费
"""

from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
from time import time
import datetime
import numpy as np

# Load the data (tab-separated consumption figures, one row per region).
data = np.loadtxt(
    'data/personal consumption of the town dwellers.txt', delimiter='\t')
# Keep the first 27 rows and assign hard-coded labels: 20 samples of
# class 1 followed by 7 of class 2.
# NOTE(review): the class split is hard-coded — verify against the data file.
X = data[:27]
y = np.array([1]*20 + [2]*7)
# Standardize features to zero mean / unit variance; SVMs are
# scale-sensitive, so this matters for every kernel tried below.
X = StandardScaler().fit_transform(X)

# Hold out 20% of the samples as a fixed test set.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, y, test_size=0.2, random_state=32)
# Fixed message wording (was "test on samples {}").
print("Train on {} samples, test on {} samples".format(
    X_train.shape[0], X_test.shape[0]))

print("Start to train")
kernel = ["linear", "poly", "rbf", "sigmoid"]
for kernel in kernel:
    t0 = time()
    clf = SVC(kernel=kernel,
              degree=1,
              gamma="auto",
              cache_size=50000).fit(X_train, Y_train)
    print("The accuracy under kernel %s is %f" %
          (kernel, clf.score(X_test, Y_test)))
    print(datetime.datetime.fromtimestamp(time()-t0).strftime("%M:%S:%f"))

print("Start to fine tune rbf parameter gamma")
time0 = time()
gamma_range = np.logspace(-10, 1, 20)
coef0_range = np.linspace(0, 5, 10)
param_grid = dict(gamma=gamma_range, coef0=coef0_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, random_state=420)
grid = GridSearchCV(SVC(kernel="poly", degree=1,
                        cache_size=50000), param_grid=param_grid, cv=cv)
grid.fit(X_train, Y_train)
print("The best parameters are %s with score %0.5f" %
      (grid.best_params_, grid.best_score_))
print(datetime.datetime.fromtimestamp(time()-time0).strftime("%M:%S:%f"))
