package com.shujia.spark.mllib

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ListBuffer

/**
 * Trains a Naive Bayes text classifier:
 * read labeled text -> jieba segmentation -> stop-word removal ->
 * TF (hashing) -> IDF rescaling -> train/test split -> fit -> report accuracy.
 *
 * Input files (local paths):
 *  - spark/data/mllib/data/bayesTrain.txt : lines of "<label>\t<text>"
 *  - spark/data/mllib/data/stopwords.dic  : one stop word per line
 */
object Demo06BayesModelTrain {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      .master("local[*]") // run locally, using all available cores
      .config("spark.sql.shuffle.partitions", "16")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      // Raw training data: each line is "<label>\t<text>".
      val rdd: RDD[String] = spark
        .sparkContext
        .textFile("spark/data/mllib/data/bayesTrain.txt")

      val stopWordsRDD: RDD[String] = spark
        .sparkContext
        .textFile("spark/data/mllib/data/stopwords.dic")

      // The stop-word set is small: collect it to the driver and broadcast it
      // so each executor holds a single read-only copy instead of shipping it
      // with every task closure.
      val stopWordsSet: Set[String] = stopWordsRDD.collect().toSet
      val stopWordSetBro: Broadcast[Set[String]] = spark.sparkContext.broadcast(stopWordsSet)

      // Parse each line into (label, sentence): segment the text with jieba,
      // trim tokens, drop stop words, and re-join the survivors with spaces.
      val sourceDF: DataFrame = rdd
        .map(line => {
          val splits: Array[String] = line.split("\t")
          val label: Double = splits(0).toDouble
          val words: List[String] = Demo05JiebaUtil
            .fit(splits(1))
            .map(_.trim)
            .filterNot(stopWordSetBro.value.contains)
          (label, words.mkString(" "))
        })
        .toDF("label", "sentence")
        // Drop rows whose sentence became empty after stop-word removal.
        .where(trim($"sentence") =!= "")

      // Whitespace tokenizer: the sentence was pre-segmented above, so this
      // simply splits the space-joined tokens back into a word array.
      val tokenizer: Tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
      val wordsData: DataFrame = tokenizer.transform(sourceDF)

      // Term frequency via feature hashing.
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("words")
        .setOutputCol("rawFeatures")
        // Number of features defaults to 2^18 = 262144.
        // .setNumFeatures(20)

      val featurizedData: DataFrame = hashingTF.transform(wordsData)
      // Alternatively, CountVectorizer can also be used to get term frequency vectors.

      // Rescale term frequencies by inverse document frequency (TF-IDF) to
      // down-weight words that appear in many documents.
      val idf: IDF = new IDF().setInputCol("rawFeatures").setOutputCol("features")
      val idfModel: IDFModel = idf.fit(featurizedData)
      val rescaledData: DataFrame = idfModel.transform(featurizedData)

      // Random 80/20 train/test split.
      val Array(trainingData, testData) = rescaledData.randomSplit(Array(0.8, 0.2))

      // Train a NaiveBayes model.
      val model: NaiveBayesModel = new NaiveBayes()
        .fit(trainingData)

      // Score the held-out test split.
      val predictions: DataFrame = model.transform(testData)

      // Cache: several actions (two counts and a show) reuse `predictions`.
      predictions.cache()
      try {
        val total = predictions.count()
        if (total > 0) {
          // Accuracy = correctly predicted rows / total test rows.
          val correct = predictions.where($"label" === $"prediction").count()
          println(s"模型的准确率为：${correct.toDouble / total}")
        } else {
          // Guard: a random split can leave the test set empty; avoid 0/0.
          println("Test split is empty; accuracy is undefined.")
        }
        predictions.show()
      } finally {
        predictions.unpersist()
      }
    } finally {
      // Always release local Spark resources, even on failure.
      spark.stop()
    }
  }
}
