package com.shujia.spark.mllib

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Text-classification demo: builds a TF-IDF feature pipeline over IK-segmented
 * Chinese text, trains a NaiveBayes model, reports test accuracy, and then
 * applies the trained pipeline to a fresh comment dataset.
 *
 * Input format of bayesTrain.txt: "label<TAB>text" per line, label parseable as Double.
 */
object Demo07BayesModel {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo07BayesModel")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    val bayesTrainRDD: RDD[String] = spark.sparkContext
      .textFile("spark/data/mllib/data/bayesTrain.txt")

    val sentenceData: DataFrame = bayesTrainRDD
      .map(line => {
        val splits: Array[String] = line.split("\t")
        val label: Double = splits(0).toDouble
        // Segment each sentence with the IK tokenizer (project-local helper)
        val wordStr: String = Demo06IK.fit(splits(1))
        (label, wordStr)
      })
      // Drop rows whose segmentation produced nothing.
      // NOTE: the null check must come first so the emptiness check never
      // dereferences a null String.
      .filter(t2 => t2._2 != null && t2._2.nonEmpty)
      .toDF("label", "sentence")

    // Tokenizer splits on whitespace/punctuation; the IK step above already
    // inserted spaces between Chinese tokens, so this turns each sentence
    // into an array of words.
    val tokenizer: Tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
    val wordsData: DataFrame = tokenizer.transform(sentenceData)

    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      // 1 << 18 = 262144, the HashingTF default. Controls the feature-vector
      // size (the size of the hashed "dictionary"); exact integer arithmetic
      // instead of the lossy Math.pow(2, 18).toInt round trip through Double.
      .setNumFeatures(1 << 18)

    val featurizedData: DataFrame = hashingTF.transform(wordsData)

    val idf: IDF = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel: IDFModel = idf.fit(featurizedData)

    // Cache: this DataFrame is consumed twice (model fit + test transform),
    // and without caching the whole text/segmentation/TF-IDF lineage would
    // be recomputed for each action.
    val rescaledData: DataFrame = idfModel.transform(featurizedData).cache()

    // Split the data into training and test sets (20% held out for testing).
    // A fixed seed makes the split — and thus the reported accuracy — reproducible.
    val Array(trainingData, testData) = rescaledData.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Train a NaiveBayes model.
    val model: NaiveBayesModel = new NaiveBayes()
      .fit(trainingData)

    // Select example rows to display.
    val predictions: DataFrame = model.transform(testData)
    predictions.show(100, truncate = false)

    // Accuracy = 1 - (misclassified / total). Guard against an empty test
    // split so we don't print NaN from a 0/0 division.
    val testCount: Long = testData.count()
    if (testCount > 0) {
      val wrongCount: Long = predictions.where($"label" =!= $"prediction").count()
      println(s"贝叶斯模型的准确率为：${1 - wrongCount / testCount.toDouble}")
    }

    // Apply the trained pipeline (same tokenizer / hashingTF / idfModel / model)
    // to new, unlabeled comment data.
    val commentRDD: RDD[String] = spark
      .sparkContext
      .textFile("spark/data/mllib/data/comment.csv")

    val commentDF: DataFrame = commentRDD.map(line => {
      val splits: Array[String] = line.split(",")
      // Column 3 holds the comment text; segment it the same way as training data.
      Demo06IK.fit(splits(3))
    }).toDF("sentence")

    val tokenizerCommentDF: DataFrame = tokenizer.transform(commentDF)
    val featurizedCommentDF: DataFrame = hashingTF.transform(tokenizerCommentDF)
    val rescaledCommentDF: DataFrame = idfModel.transform(featurizedCommentDF)

    val commentPredictDF: DataFrame = model.transform(rescaledCommentDF)
    commentPredictDF.show(100, truncate = false)

    // Release the session (and its local[*] executor threads) cleanly.
    spark.stop()
  }

}
