package com.shujia.mllib

import org.apache.spark.ml.classification.NaiveBayesModel
import org.apache.spark.ml.feature.{HashingTF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo10BayesPredict {

  /**
   * Prediction driver: loads a previously trained IDF model and Naive Bayes model,
   * rebuilds the same TF-IDF feature pipeline used at training time (tokenize ->
   * HashingTF -> IDF), and prints predictions for Weibo comment text.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo10BayesPredict")
      .master("local[*]")
      // Fixed: the correct key is "spark.sql.shuffle.partitions" (plural).
      // The misspelled "spark.sql.shuffle.partition" was silently ignored,
      // leaving the default of 200 shuffle partitions in effect.
      .config("spark.sql.shuffle.partitions", "16")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val commentSourceDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("comment_id String,comment_time String,like_cnt Int,comment_text String,user_id String,weibo_id String")
      // NOTE(review): path casing is inconsistent with the model paths below
      // ("Spark/..." vs "spark/..."); on case-sensitive filesystems only one
      // can be right — confirm against the actual data layout.
      .load("Spark/data/mllib/data/comment.csv")
      .select($"comment_id", $"comment_text")
      .where($"comment_text".isNotNull)
      .repartition(16)

    // Segment each comment with the IK Chinese tokenizer (Demo08IK), keeping
    // only rows that produced a non-empty segmented sentence.
    val sentenceData: DataFrame = commentSourceDF
      .as[(String, String)]
      .map {
        case (comment_id: String, comment_text: String) =>
          (comment_id, comment_text, Demo08IK.fit(comment_text))
      }
      .filter(t3 => t3._3 != "")
      .toDF("comment_id", "comment_text", "sentence")

    // Tokenizer splits the pre-segmented sentence on whitespace/punctuation
    // into a words array column.
    val tokenizer: Tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
    val wordsData: DataFrame = tokenizer.transform(sentenceData)
    wordsData.show()

    // Term-frequency via feature hashing. numFeatures must match the value
    // used when the IDF/Bayes models were trained; 2^18 is the default.
    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      .setNumFeatures(Math.pow(2, 18).toInt)

    val featurizedData: DataFrame = hashingTF.transform(wordsData)

    // Load the IDF model fitted at training time and rescale TF into TF-IDF.
    val idfModel: IDFModel = IDFModel.load("spark/data/mllib/bayes/idf")

    val rescaledData: DataFrame = idfModel.transform(featurizedData)

    // Load the trained Naive Bayes model and predict on the TF-IDF features.
    val bayesModel: NaiveBayesModel = NaiveBayesModel.load("spark/data/mllib/bayes/bModel")

    val transDF: DataFrame = bayesModel.transform(rescaledData)

    transDF.show(50)

    // Release the SparkSession (was missing in the original — the session
    // leaked on every run).
    spark.stop()
  }

}
