from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.mllib.linalg import Vector
from pyspark.mllib.linalg import Vectors
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.mllib.regression import LabeledPoint
import matplotlib.pyplot

APP_NAME = "MySparkApplication"

def tokenize(item):
    """Convert one parsed CSV record into a LabeledPoint.

    Parameters
    ----------
    item : list[str]
        Fields 0-15 are numeric feature values; field 16 is the class
        label as a string.

    Returns
    -------
    LabeledPoint
        Dense 16-dimensional feature vector with a float label.
    """
    # Build the feature vector from the first 16 fields in one pass
    # instead of 16 hand-written float(item[i]) calls.
    vector = Vectors.dense([float(x) for x in item[:16]])

    # NOTE(review): the branches mapping labels '1' and '2' were commented
    # out by the author, so every non-'0' label collapses into class 3.0 —
    # yet the model is trained with numClasses=4. Confirm this two-class
    # mapping is intentional.
    if item[16] == '0':
        label = 0.0
    else:
        label = 3.0

    return LabeledPoint(label, vector)

def main(sc):
    """Train and evaluate a random-forest classifier on CSV data.

    Reads 'train.data' and 'test.data' (comma-separated; 16 numeric
    features followed by a label field), trains a RandomForest with 4
    classes / 5 trees, prints per-record predictions, the test accuracy,
    and the learned forest, then stops the Spark context.

    Parameters
    ----------
    sc : SparkContext
        Active Spark context; it is stopped before this function returns.
    """
    # Parse the training set: CSV line -> fields -> LabeledPoint.
    training = sc.textFile('train.data') \
        .map(lambda line: line.split(',')) \
        .map(tokenize)

    # Parse the test set the same way.
    testing = sc.textFile('test.data') \
        .map(lambda line: line.split(',')) \
        .map(tokenize)

    # count() is a Spark action (runs a full job); execute it once per
    # RDD and reuse the result instead of recomputing it for the
    # accuracy denominator below.
    test_count = testing.count()
    print('训练集的个数是；', training.count())
    print('测试集的个数是：', test_count)

    model = RandomForest.trainClassifier(training, numClasses=4,
                                         categoricalFeaturesInfo={},
                                         numTrees=5, featureSubsetStrategy="auto",
                                         impurity='gini', maxDepth=6, maxBins=32)

    # Evaluate on the test set: predict from features only, then pair
    # each true label with its prediction.
    predictions = model.predict(testing.map(lambda x: x.features))
    labelsAndPredictions = testing.map(lambda lp: lp.label).zip(predictions)
    for _, predicted in labelsAndPredictions.collect():
        print(predicted)

    # Accuracy = fraction of (label, prediction) pairs that match.
    testCrt = labelsAndPredictions.filter(
        lambda lp: lp[0] == lp[1]).count() / float(test_count)
    print('Test Correct = ' + str(testCrt))
    print('Learned classification forest model:')
    print(model.toDebugString())

    # Optional persistence (previously commented out):
    #   model.save(sc, "myRandomForestClassificationModel")
    #   RandomForestModel.load(sc, "myRandomForestClassificationModel")

    sc.stop()

if __name__ == "__main__":
    conf = SparkConf().setMaster('local[*]')
    conf = conf.setAppName(APP_NAME)
    sc = SparkContext(conf=conf)
    main(sc)
