package com.shujia.mllib

import org.apache.spark.ml.classification.NaiveBayesModel
import org.apache.spark.ml.feature.{HashingTF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo09BayesPredict {

  /**
   * Batch-predicts sentiment for Weibo comment data stored in MySQL using a
   * previously trained Naive Bayes model, then writes the results to CSV.
   *
   * The feature pipeline (IK segmentation -> Tokenizer -> HashingTF -> IDF)
   * must mirror the transformations used at training time, otherwise the
   * hashed feature space will not line up with the trained model.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo09BayesPredict")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._
    import org.apache.spark.ml.linalg.Vector

    try {
      // Read the comment table from MySQL.
      // SECURITY NOTE(review): credentials are hard-coded in source; move them
      // to configuration or environment variables before production use.
      val commentDF: DataFrame = spark
        .read
        .format("jdbc")
        .option("url", "jdbc:mysql://rm-bp1h7v927zia3t8iwho.mysql.rds.aliyuncs.com:3306/weibospider?useSSL=false")
        .option("dbtable", "comment")
        .option("user", "shujia016")
        .option("password", "123456")
        .load()

      // Segment each comment with the IK Chinese tokenizer, drop comments that
      // yield no tokens, and re-join tokens with spaces so the whitespace-based
      // English Tokenizer below can split them back apart.
      val ikFitDF: DataFrame = commentDF
        .map(row => {
          val commentId: String = row.getAs[String]("comment_id")
          val commentText: String = row.getAs[String]("comment_text")
          (commentId, commentText, Demo07IK.fit(commentText))
        })
        .filter(_._3.nonEmpty)
        .map { case (id, text, tokens) => (id, text, tokens.mkString(" ")) }
        .toDF("comment_id", "comment_text", "sentence")

      // TF-IDF: whitespace tokenization of the pre-segmented sentence.
      val tokenizer: Tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
      val wordsData: DataFrame = tokenizer.transform(ikFitDF)

      // Term frequency via feature hashing; numFeatures must match the value
      // used when the IDF/Bayes models were trained.
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("words")
        .setOutputCol("rawFeatures")
        .setNumFeatures(262144)

      val featurizedData: DataFrame = hashingTF.transform(wordsData)

      // Load the IDF model fitted during training and apply it to the new data.
      val idfModel: IDFModel = IDFModel.load("Spark/data/mllib/idf")
        .setInputCol("rawFeatures")
        .setOutputCol("features")

      val rescaledData: DataFrame = idfModel.transform(featurizedData)

      // Load the trained Naive Bayes model and run prediction.
      val bayesModel: NaiveBayesModel = NaiveBayesModel.load("Spark/data/mllib/bayes")

      val resDF: DataFrame = bayesModel.transform(rescaledData)
      resDF.printSchema()
      resDF.show(false)

      // Post-process the `probability` column: when the two class probabilities
      // differ by less than 50%, the model is not confident enough, so label the
      // comment as neutral (2.0) instead of the raw prediction.
      // Assumes binary classification, i.e. a probability vector of length 2
      // — TODO(review): confirm the trained model has exactly two classes.
      val withNeutral = udf((probability: Vector, prediction: Double) =>
        if (math.abs(probability(0) - probability(1)) < 0.5) 2.0 else prediction
      )

      resDF
        .select(
          $"comment_id",
          $"comment_text",
          $"sentence",
          withNeutral($"probability", $"prediction").as("prediction")
        )
        .write
        .format("csv")
        .option("sep", ",")
        .mode(SaveMode.Overwrite)
        .save("Spark/data/mllib/data/commentRes")
    } finally {
      // Release the SparkSession even if any stage above fails.
      spark.stop()
    }
  }

}
