# -*- coding: utf-8 -*-
# __author__ = 'Yuanjiang Huang'
# yuanjiang.huang@socialcredits.cn

import sys, os
from pyspark.mllib.classification import SVMWithSGD, SVMModel
from pyspark.mllib.regression import LabeledPoint
from pyspark import SparkContext
from pyspark import SparkConf
# Resolve the directory containing this script, normalized to either ''
# (script launched from its own directory) or a path ending in '/'.
_script_dir = os.path.dirname(__file__)
CURRENT_PATH = _script_dir + '/' if _script_dir else _script_dir
# Point PySpark at the local Spark installation.
os.environ['SPARK_HOME'] = "/home/scdev/spark-1.5.2/"

def parsePoint(line):
    """Parse one whitespace-separated text line into a LabeledPoint.

    The first field is the label; the remaining fields form the feature
    vector. Using split() with no argument tolerates repeated spaces,
    tabs, and leading/trailing whitespace, whereas split(' ') would
    produce empty fields and make float('') raise ValueError.
    """
    values = [float(x) for x in line.split()]
    return LabeledPoint(values[0], values[1:])

if __name__ == "__main__":

    # Run Spark in-process with a single worker thread; no cluster is
    # needed for this demo.
    master_url = 'local'
    conf = SparkConf().setMaster(master_url).setAppName('Demo')
    sc = SparkContext(conf=conf)

    try:
        # Load the whitespace-delimited training data (label followed by
        # features on each line) and parse it into LabeledPoints.
        data = sc.textFile('file:///' + CURRENT_PATH + "data/sample_svm_data.txt")
        parsedData = data.map(parsePoint)

        # Build a linear SVM classifier with stochastic gradient descent.
        model = SVMWithSGD.train(parsedData, iterations=100)

        # Evaluate on the training set itself: the fraction of points
        # whose predicted label differs from the true label.
        # A single-argument lambda replaces the Python-2-only tuple
        # parameter form `lambda (v, p): ...` (removed by PEP 3113), so
        # the closure works on both Python 2 and Python 3 workers.
        labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
        trainErr = labelsAndPreds.filter(lambda vp: vp[0] != vp[1]).count() / float(parsedData.count())
        print('*' * 200)
        print("Training Error = " + str(trainErr))
        print('*' * 200)

        # Persist the model and load it back to verify round-tripping.
        # NOTE(review): save() raises if the target directory already
        # exists, so a re-run requires removing 'myModelPath' first.
        model.save(sc, 'file:///' + CURRENT_PATH + "myModelPath")
        sameModel = SVMModel.load(sc, 'file:///' + CURRENT_PATH + "myModelPath")
    finally:
        # Always release the SparkContext so the JVM backend shuts down
        # even if training or save/load fails.
        sc.stop()