from pyspark.mllib.linalg import Vector
from pyspark.mllib.linalg import Vectors
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.evaluation import MulticlassMetrics

APP_NAME = "MySparkApplication"

def tokenize(item):
    """Convert one parsed row of iris.data into a LabeledPoint.

    Parameters
    ----------
    item : list[str]
        Five CSV fields: four numeric feature strings (sepal length/width,
        petal length/width) followed by the species name.

    Returns
    -------
    LabeledPoint
        Dense 4-feature vector with label 0.0 for 'Iris-setosa',
        1.0 for 'Iris-versicolor', and 2.0 for anything else
        (Iris-virginica in this dataset), matching the original
        else-branch behavior.
    """
    features = Vectors.dense([float(value) for value in item[:4]])
    # Dict lookup replaces the if/elif chain; .get default preserves the
    # original catch-all else -> 2.0 for unrecognized species names.
    label = {'Iris-setosa': 0.0, 'Iris-versicolor': 1.0}.get(item[4], 2.0)
    return LabeledPoint(label, features)

def main(sc):
    """Train and evaluate a Naive Bayes classifier on the Iris dataset.

    Reads 'iris.data' (CSV) from the working directory, converts each row
    to a LabeledPoint, splits 70/30 into train/test with a fixed seed,
    trains NaiveBayes with smoothing 0.1, prints the test accuracy, and
    stops the SparkContext.

    Parameters
    ----------
    sc : SparkContext
        An already-started Spark context; it is stopped before returning.
    """
    iris_lines = sc.textFile('iris.data').map(lambda line: line.split(','))
    # Pass tokenize directly; the lambda wrapper added nothing.
    iris_points = iris_lines.map(tokenize)

    # Fixed seed (11) keeps the split reproducible across runs.
    training, testing = iris_points.randomSplit([0.7, 0.3], 11)

    # Count once and reuse: each .count() is a full Spark job, and the
    # original recomputed testing.count() inside the accuracy expression.
    testing_count = testing.count()
    print('训练集的个数是；', training.count())
    print('测试集的个数是：', testing_count)

    # 0.1 is the Laplace smoothing parameter (lambda).
    model = NaiveBayes.train(training, 0.1)

    prediction_and_label = testing.map(lambda p: (model.predict(p.features), p.label))
    correct = prediction_and_label.filter(lambda pair: pair[0] == pair[1]).count()
    # float() guards against integer division if ever run on Python 2,
    # mirroring the original's 1.0* multiplier.
    accuracy = correct / float(testing_count)
    print('正确率是:', accuracy)
    sc.stop()

if __name__ == "__main__":
    # Run Spark locally, using all available cores.
    spark_conf = SparkConf().setMaster('local[*]').setAppName(APP_NAME)
    main(SparkContext(conf=spark_conf))