#!/usr/bin/env python 
# -*- coding: utf-8 -*-

from sklearn import svm
from matplotlib import pyplot as plt
import pickle


def SVM_Regression(X, Y):
    """Fit an SVR model on (X, Y), plot its self-prediction, return the pickled model.

    Parameters
    ----------
    X : list of feature vectors (one list per sample). When empty, a
        built-in 6-sample / 4-feature demo dataset is used instead.
    Y : list of target values, one per sample. Same empty fallback.

    Returns
    -------
    bytes : ``pickle.dumps()`` of the fitted model, consumable by
        ``prediction()``.

    Side effects: prints the training data and the self-verification
    results, and saves a scatter/line plot to 'pltfig'.
    """
    if not X or not Y:
        # Demo fallback: 6 samples x 4 features.
        X = [[1, 2, 1, 1], [3, 4, 3, 2], [3, 6, 4, 5],
             [2.5, 4, 4.5, 6.3], [6, 7, 6, 8.2], [1, 2.1, 3, 1.2]]
        Y = [3, 4, 4, 3, 6, 10]

    print('train dataset:-----\n', X)
    print('\ntrain target dataset:------\n', Y)

    # Alternative kernels -- swap into `regsvm` below to experiment:
    #   svm.SVR(kernel='rbf', C=1e3, gamma=0.1)
    #   svm.SVR(kernel='poly', C=1e3, degree=2)
    regsvm = svm.SVR(kernel='linear', C=1e3)

    reg_model = regsvm.fit(X, Y)

    # Self-verification: predict on the training set itself.
    res = reg_model.predict(X)
    print('\nresults of self-verification using train dataset X:\n', res)

    # NOTE: plt.hold() was removed in matplotlib 3.0; overlaying artists
    # on the current axes is the default behavior, so no call is needed.
    plt.scatter(Y, res, c='k', label='data')
    plt.plot(Y, res, c='g', label='model')
    plt.xlabel('data')
    plt.ylabel('target')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.savefig('pltfig')
    #plt.show()

    # Persist the fitted model as an in-memory pickle blob.
    dumped_model = pickle.dumps(reg_model)
    return dumped_model


def prediction(pred_feature_list, model_to_dump):
    """Unpickle a fitted model and predict the target for ONE sample.

    Parameters
    ----------
    pred_feature_list : flat list of feature values for a single sample.
        When empty, a built-in demo vector is used.
        NOTE(review): the demo vector has 32 features while the demo model
        from SVM_Regression() is trained on 4 -- confirm against the
        intended model before relying on the fallback.
    model_to_dump : bytes produced by ``pickle.dumps()`` on a fitted model.

    Returns
    -------
    The model's prediction array (one element for the single sample).
    """
    # SECURITY: pickle.loads() can execute arbitrary code -- only unpickle
    # model blobs that come from a trusted source.
    loads_model = pickle.loads(model_to_dump)
    if not pred_feature_list:
        pred_feature_list = [199, 36, 74, 149, 160, 799, 43, 156, 28, 40, 13, 26, 1, 1, 2, 163, 19, 24, 8, 64, 0, 112, 0, 73, 36, 103, 70, 53, 18, 9, 30, 126]

    # sklearn's predict() expects a 2-D array of shape (n_samples,
    # n_features); wrap the single flat sample in an outer list.
    return loads_model.predict([pred_feature_list])

if __name__ == '__main__':
    # Empty arguments trigger the built-in demo dataset fallback.
    SVM_Regression(X=[], Y=[])
