import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors

/**
 * Author: LDL
 * Description: Spark MLlib Naive Bayes classification example — trains on a
 *              local sample file and prints test-set accuracy.
 * Created: 2015/7/21 10:49
 */
/**
 * Trains a Spark MLlib Naive Bayes classifier on a delimited text file and
 * prints the classification accuracy measured on a held-out split.
 *
 * Expected input format, one example per line: "<label>,<f1> <f2> <f3> ..."
 *
 * Uses an explicit `main` instead of the `App` trait: `App` defers field
 * initialization (DelayedInit), which is a known pitfall with Spark closures.
 */
object NaiveBayesTest {
  def main(args: Array[String]): Unit = {
    // Required on Windows so Hadoop/Spark can locate winutils.exe.
    System.setProperty("hadoop.home.dir", "F:\\tools\\hadoop-common-2.2.0-bin-master")
    val conf = new SparkConf().setMaster("local").setAppName("NativeBayes")
    val sc = new SparkContext(conf)
    try {
      // Parse each line: label before the comma, space-separated features after.
      val parsedData = sc
        .textFile("F:\\tools\\spark-1.4.1\\data\\mllib\\sample_naive_bayes_data.txt")
        .map { line =>
          val parts = line.split(",")
          LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).split(" ").map(_.toDouble)))
        }

      // 60/40 train/test split; the fixed seed keeps runs reproducible.
      val Array(training, test) = parsedData.randomSplit(Array(0.6, 0.4), seed = 11L)

      // lambda is the additive (Laplace) smoothing parameter.
      val model = NaiveBayes.train(training, lambda = 2.0)

      // Pair each test point's predicted label with its true label.
      val predictionAndLabel = test.map(p => (model.predict(p.features), p.label))

      // Accuracy = fraction of test points whose prediction matches the label.
      val accuracy =
        1.0 * predictionAndLabel.filter { case (pred, label) => pred == label }.count() / test.count()

      println(accuracy)
    } finally {
      // Always release the local Spark context, even if the job fails.
      sc.stop()
    }
  }
}
