package com.shujia.mllib

import org.apache.spark.ml.classification.NaiveBayesModel
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, DataFrameReader, SparkSession}

object Demo12BayesPredict {

  /**
   * Loads raw comment data, reproduces the TF-IDF feature pipeline used at
   * training time, and scores every comment with a pre-trained Naive Bayes
   * model. All paths are relative to the project working directory.
   */
  def main(args: Array[String]): Unit = {

    // Build the SparkSession (local mode; 8 shuffle partitions for small data).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo12BayesPredict")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", "8")
      .getOrCreate()

    import spark.implicits._

    // 1. Load the raw comment CSV with an explicit schema.
    val commentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("comment_id String,comment_time String,like_cnt Int,comment String,blog_id String,user_id String")
      .load("Spark/data/mllib/data/comment.csv")
    commentDF.show()

    // 2. Segment each comment with the IK analyzer and re-join the tokens with
    //    spaces so the whitespace-based Tokenizer below can re-split them.
    //    Null comment cells are dropped first: the previous partial-function
    //    match (`case comment: String`) would throw a MatchError on null rows.
    //    The old `.stripMargin` call was removed — the joined string contains
    //    no newlines, so it was dead code (and could corrupt text containing
    //    a '|' after an embedded newline).
    val wordsDF: DataFrame = commentDF
      .select("comment")
      .as[String]
      .filter(comment => comment != null)
      .map(comment => Tuple1(Demo08IK.fit(comment).mkString(" ")))
      .filter(t1 => t1._1.nonEmpty) // drop comments that segmented to nothing
      .toDF("words_str")

    // 3. Whitespace tokenizer: "words_str" -> array-of-token column "words".
    val tokenizer: Tokenizer = new Tokenizer().setInputCol("words_str").setOutputCol("words")
    val wordsData: DataFrame = tokenizer.transform(wordsDF)

    // 4. Term frequencies via the hashing trick. numFeatures must match the
    //    value used when the IDF and Bayes models were trained (2^18 = 262144).
    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      .setNumFeatures(262144)

    val featurizedData: DataFrame = hashingTF.transform(wordsData)

    // Alternatively, CountVectorizer can also be used to get term frequency vectors.

    // 5. Re-weight term frequencies with the IDF model fitted at training time.
    val idf: IDFModel = IDFModel.load("Spark/data/mllib/idf")
    val preDF: DataFrame = idf.transform(featurizedData)

    // 6. Score with the pre-trained Naive Bayes classifier and show the
    //    prediction/probability columns it appends.
    val bayesModel: NaiveBayesModel = NaiveBayesModel.load("Spark/data/mllib/bayes")
    bayesModel.transform(preDF).show()

    // Release the SparkSession (previously leaked — the app never stopped it).
    spark.stop()
  }

}
