package org.zjt.spark.book

import java.io.File

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.util.MLUtils
import org.zjt.spark.book.MyNaiveBayes.modelFile

/**
  * Naive Bayes classification example (Spark MLlib).
  *
  * Trains a multinomial Naive Bayes model on a libsvm-format data set,
  * measures accuracy on a held-out 40% split, then saves the model to disk
  * and loads it back to verify the round trip.
  */
object NaiveBayesExample {

  val modelFile = "/Users/zhangjuntao/IdeaProjects/myproject/hw-bigdata/scala-demo/target/tmp/myNaiveBayesModel"
  val sourceFile = "/Users/zhangjuntao/IdeaProjects/myproject/hw-bigdata/scala-demo/src/main/resource/sample_libsvm_data.txt"
  val modelType = Array("multinomial", "bernoulli")

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("NaiveBayesExample").setMaster("local[2]")
    val sc = new SparkContext(conf)

    // Load and parse the data file.
    // data: RDD[LabeledPoint] -> (label, sparse feature vector)
    val data: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, sourceFile)

    // Split data into training (60%) and test (40%).
    val Array(training, test) = data.randomSplit(Array(0.6, 0.4))

    // Train a multinomial model with Laplace smoothing (lambda = 1.0).
    val model = NaiveBayes.train(training, lambda = 1.0, modelType = modelType(0))
    val predictionAndLabel = test.map(p => (model.predict(p.features), p.label))

    // Model accuracy: fraction of test points whose prediction matches the label.
    val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count() / test.count()
    println(accuracy)

    // model.save throws if the output path already exists, so remove any model
    // left over from a previous run first. NOTE: a plain File.delete() cannot
    // remove a non-empty directory (and the original code deleted the parent
    // tmp dir, not the model dir), so delete the model path recursively.
    deleteRecursively(new File(modelFile))
    model.save(sc, modelFile)

    // Reload the saved model to verify it round-trips from disk.
    val sameModel: NaiveBayesModel = NaiveBayesModel.load(sc, modelFile)

    sc.stop()
  }

  /** Recursively deletes `file` and everything beneath it; no-op if it does not exist. */
  private def deleteRecursively(file: File): Unit =
    if (file.exists()) {
      // listFiles() returns null for non-directories; Option guards that case.
      Option(file.listFiles()).foreach(_.foreach(deleteRecursively))
      file.delete()
    }
}
