# -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 13:43:20 2018

@author: mojm
"""
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import sklearn.linear_model
import sklearn.metrics
import sklearn.svm

def plot_decision_boundary(pred_func, features=None, labels=None):
    """Plot a classifier's decision regions over a 2-D point cloud.

    Parameters
    ----------
    pred_func : callable
        Maps an (n, 2) array of points to predicted class labels.
    features : array-like of shape (n, 2), optional
        Points to plot. Defaults to the module-level ``X`` for backward
        compatibility with existing callers.
    labels : array-like of shape (n,), optional
        Class labels used to color the scatter points. Defaults to the
        module-level ``y``.
    """
    # Fall back to the module-level dataset so the original one-argument
    # call style keeps working.
    if features is None:
        features = X
    if labels is None:
        labels = y

    # Plot range: data extent plus a small margin on every side.
    x_min, x_max = features[:, 0].min() - .5, features[:, 0].max() + .5
    y_min, y_max = features[:, 1].min() - .5, features[:, 1].max() + .5
    h = 0.01  # grid resolution

    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    # Predict a label for every grid point, then reshape back to the grid.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Filled contour of the predicted regions, with the raw points on top.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(features[:, 0], features[:, 1], c=labels, cmap=plt.cm.Spectral)

n_samples = 600

# Two interleaving half-moons with Gaussian noise; make_moons returns the
# samples already shuffled, so a contiguous split below is a fair split.
X, y = sklearn.datasets.make_moons(n_samples, noise=0.20)

# Hold out the trailing 10% of the samples as the test set.
sampleRatio = 0.9
targetRatio = round(1 - sampleRatio, 2)
sampleBoundary = int(n_samples * sampleRatio)

training_features, test_features = X[:sampleBoundary], X[sampleBoundary:]
training_targets, test_targets = y[:sampleBoundary], y[sampleBoundary:]

#svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
#svr_lin = SVR(kernel='linear', C=1e3)
#svr_poly = SVR(kernel='poly', C=1e3, degree=2)
'''
SVC parameter notes
(1) C: penalty coefficient of the objective function, balancing the
    classification margin against misclassified samples; default C = 1.0.
(2) kernel: one of 'rbf' (Gaussian), 'linear', 'poly', 'sigmoid';
    default is 'rbf'.
(3) degree: effective only when kernel='poly'; sets the highest power
    of the polynomial.
(4) gamma: kernel coefficient for 'poly', 'rbf' and 'sigmoid';
    default gamma = 1 / n_features.
(5) coef0: independent term of the kernel function; used by 'poly'
    and 'sigmoid'.
(6) probability: whether to enable probability estimates (True/False).
(7) shrinking: whether to use the shrinking heuristic.
(8) tol (default = 1e-3): tolerance for the SVM stopping criterion.
(9) cache_size: kernel cache size for training, in MB.
(10) class_weight: per-class weights, i.e. a different penalty C per
     class; adaptive when omitted.
(11) verbose: enables verbose output (relies on per-process runtime
     settings in libsvm; behavior with multithreading is unclear).
(12) max_iter: hard limit on iterations; max_iter = -1 means no limit.
(13) decision_function_shape: 'ovo' (one-vs-one), 'ovr' (one-vs-rest),
     or None; default = None.
(14) random_state: seed of the pseudo-random number generator used when
     shuffling data for probability estimation.
 ps: (7), (8), (9) usually need no tuning.
'''
# Fit an RBF-kernel SVM and a logistic-regression baseline on the same split.
svc = sklearn.svm.SVC()
logisticRegression = sklearn.linear_model.LogisticRegression()

svc.fit(training_features, training_targets)
logisticRegression.fit(training_features, training_targets)

# Signed distances of the test points to the SVM separating hyperplane.
# (Originally the bound method itself was printed instead of its values,
# and a Python 2 `print` statement was used.)
decision_func = svc.decision_function(test_features)
print('decision func is ... ' + str(decision_func))

predict_targets = svc.predict(test_features)
predict_targets_lr = logisticRegression.predict(test_features)

# Visualize the SVM decision regions over the full dataset.
plot_decision_boundary(lambda x: svc.predict(x))
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

accuracy = sklearn.metrics.accuracy_score(test_targets, predict_targets)
print('accuracy is ...' + str(accuracy))