# -*- coding: utf-8 -*-
# __author__ = 'Yuanjiang Huang'
# yuanjiang.huang@socialcredits.cn

import sys, os
from pyspark import SparkContext
from pyspark import SparkConf
from sklearn.tree import DecisionTreeClassifier


def model_a(para):
	"""Build the result record for "model a".

	Assumes *para* is a one-element sequence whose first item carries the
	score at index 2 (e.g. [[1, 2, 0.8]]) -- TODO confirm against callers.
	Returns a dict with the model name, its score, and a fresh
	(unfitted) DecisionTreeClassifier instance.
	"""
	record = {}
	record['model name'] = 'model a'
	record['score'] = para[0][2]
	record['model'] = DecisionTreeClassifier()
	return record


def model_b(para):
	"""Build the result record for "model b".

	Assumes *para* is a one-element sequence whose first item carries the
	score at index 2 (e.g. [[3, 4, 0.6]]) -- TODO confirm against callers.
	Returns a dict with the model name, its score, and a fresh
	(unfitted) DecisionTreeClassifier instance.
	"""
	score = para[0][2]
	return dict([
		('model name', 'model b'),
		('score', score),
		('model', DecisionTreeClassifier()),
	])


def select_model(para):
	"""Dispatch a single-entry {model_name: params} dict to its model builder.

	para: dict with exactly one key, 'model_a' or 'model_b', mapping to a
	      parameter list such as [1, 2, 0.8].
	Returns the record produced by model_a/model_b, or None for an
	unrecognized key (preserving the original fall-through behavior).

	Fix: the original used para.keys()[0] and passed para.values() straight
	through; on Python 3 dict views are not indexable, so both the key
	lookup here and the para[0][2] subscript in the callees would raise
	TypeError. next(iter(...)) and list(...) work on Python 2 and 3 alike.
	"""
	key = next(iter(para))        # sole key of the single-entry dict
	values = list(para.values())  # materialize so callees can index para[0]
	if key == 'model_a':
		return model_a(values)
	if key == 'model_b':
		return model_b(values)
	return None


def main(master_url='spark://172.17.0.3:7077'):
	"""Run a toy Spark job that maps model-parameter dicts to their scores.

	master_url: Spark master to connect to. Defaults to the original
	            hard-coded address, so existing callers are unaffected.

	Fixes:
	- Python 2 `print x` statements replaced with the parenthesized form,
	  which is valid on both Python 2 and Python 3.
	- sc.stop() moved into a finally block so the SparkContext is released
	  even if parallelize/map/collect raises.
	"""
	conf = SparkConf().setMaster(master_url).setAppName('Demo')
	sc = SparkContext(conf=conf)
	try:
		parameters = [{'model_a': [1, 2, 0.8]}, {'model_a': [5, 6, 0.9]}, {'model_b': [3, 4, 0.6]}]
		para_RDD = sc.parallelize(parameters)
		# Each element is a single-entry dict; select_model dispatches it
		# and we keep only the resulting score.
		precision = para_RDD.map(lambda a: select_model(a)['score'])
		print('*' * 200)
		print(precision.collect())
		print('*' * 200)
	finally:
		sc.stop()  # always release the Spark context

if __name__ == "__main__":
	# Run only when executed directly (e.g. via spark-submit), not on import.
	main()