package com.study.spark.scala.ml.classification

import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Naive Bayes classification demo.
  * Reference: https://www.cnblogs.com/wuwuwu/p/6162602.html
  *
  * @author: stephen.shen
  * @create: 2019-03-22 9:35
  */
object NaiveBayesDemo {

  /** Default location of the training data; override by passing a path as the first program argument. */
  private val DefaultDataPath =
    "D:\\CodeDir\\BigdataWork\\bigdata-study\\study-spark\\src\\main\\resource\\data\\baive-bayes.data"

  /**
    * Trains a Naive Bayes model on a 60/40 train/test split of the input data,
    * prints the test-set accuracy, and prints a prediction for one sample vector.
    *
    * Expected input format: one record per line, "label,f1 f2 f3 ..." — a numeric
    * label, a comma, then space-separated numeric features.
    *
    * @param args optional first element is the data file path (defaults to [[DefaultDataPath]])
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("Naive Bayes Demo")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is released even if any stage below throws.
    try {
      val dataPath = args.headOption.getOrElse(DefaultDataPath)
      val data = sc.textFile(dataPath)

      // Parse each line into LabeledPoint(label, denseFeatureVector).
      val parsedData = data.map { line =>
        val parts = line.split(",")
        LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).split(" ").map(_.toDouble)))
      }

      // 60% training / 40% test; fixed seed keeps the split reproducible across runs.
      val Array(training, test) = parsedData.randomSplit(Array(0.6, 0.4), seed = 11L)

      // Second argument is the additive (Laplace) smoothing parameter; 1.0 is the library default.
      val model = NaiveBayes.train(training, lambda = 1.0)

      // Accuracy = fraction of test points whose prediction matches the true label.
      val predictionAndLabel = test.map(p => (model.predict(p.features), p.label))
      val accuracy =
        1.0 * predictionAndLabel.filter { case (prediction, label) => prediction == label }.count() / test.count()

      println(s"accuracy ==> $accuracy")

      // Fix: the printed vector now matches the vector actually passed to predict
      // (the original message claimed (0.0, 2.0, 0.0, 1.0) while predicting (0.0, 1.0, 1.0, 0.0)).
      val sample = Vectors.dense(0.0, 1.0, 1.0, 0.0)
      println(s"Prediction of (0.0, 1.0, 1.0, 0.0) ==> ${model.predict(sample)}")
    } finally {
      sc.stop()
    }
  }
}
