package com.shujia.mllib

import org.apache.spark.ml.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

import scala.collection.mutable

/**
 * Trains a Naive Bayes text classifier on TF-IDF features.
 *
 * Pipeline: raw TSV -> IK Chinese segmentation -> Tokenizer -> HashingTF
 * -> IDF -> train/test split -> NaiveBayes -> accuracy -> persist models.
 * Both the fitted IDF model and the Bayes model are saved so that new text
 * can be run through the identical feature engineering at prediction time.
 */
object Demo08Bayes {
  def main(args: Array[String]): Unit = {
    // 1. Read the data and run feature engineering (TF-IDF).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo08Bayes")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Each input line is "label<TAB>text".
    val sourceDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "\t")
      .schema("label Double,text String")
      .load("Spark/data/mllib/data/bayesTrain.txt")
      .repartition(8)

    // Segment the Chinese text with the IK analyzer (Demo07IK), drop rows
    // that produce no tokens, then join the tokens with spaces so the
    // whitespace-based Tokenizer below can re-split them.
    val fitDF: DataFrame = sourceDF
      .as[(Double, String)]
      .map {
        case (label: Double, text: String) =>
          (label, text, Demo07IK.fit(text))
      }
      .filter(_._3.nonEmpty)
      .map(t3 => (t3._1, t3._2, t3._3.mkString(" ")))
      .toDF("label", "text", "sentence")

    // Whitespace tokenizer: splits the space-joined IK tokens back into words.
    val tokenizer: Tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
    val wordsData: DataFrame = tokenizer.transform(fitDF)

    // Term frequency via the hashing trick. 262144 = 2^18 buckets (the Spark
    // default); size it to the vocabulary to limit hash collisions.
    val hashingTF: HashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      .setNumFeatures(262144)

    val featurizedData: DataFrame = hashingTF.transform(wordsData)

    // Alternatively, CountVectorizer can also be used to get term-frequency vectors.

    // Inverse document frequency: down-weights terms that appear in many
    // documents. IDF(t) = log((docCount + 1) / (docFreq(t) + 1)).
    val idf: IDF = new IDF().setInputCol("rawFeatures").setOutputCol("features")
    val idfModel: IDFModel = idf.fit(featurizedData)

    val rescaledData: DataFrame = idfModel.transform(featurizedData)

    // Persist the fitted IDF model so new text gets identical feature
    // engineering at prediction time. Fix: use write.overwrite() — a plain
    // save() throws if the output directory already exists, so the job
    // could never be re-run.
    idfModel.write.overwrite().save("Spark/data/mllib/idf")

    // 2. Split the data set. Fix: seed the split so the reported accuracy
    // is reproducible across runs.
    val splits: Array[Dataset[Row]] = rescaledData.randomSplit(Array(0.7, 0.3), seed = 42L)
    val trainDF: Dataset[Row] = splits(0)
    val testDF: Dataset[Row] = splits(1)

    // 3. Choose a model: Naive Bayes suits high-dimensional sparse
    // TF-IDF feature vectors.
    val bayes: NaiveBayes = new NaiveBayes()

    // 4. Fit the model on the training set.
    val bayesModel: NaiveBayesModel = bayes.fit(trainDF)

    // 5. Evaluate on the held-out test set: accuracy = correct / total.
    val resDF: DataFrame = bayesModel.transform(testDF)

    resDF
      .select((sum(when($"label" === $"prediction", 1).otherwise(0)) / count("*")).as("accuracy"))
      .show()

    // 6. If the model passes evaluation, persist it (overwrite for re-runs).
    bayesModel.write.overwrite().save("Spark/data/mllib/bayes")

    // Release the local Spark resources; the session is not reused.
    spark.stop()
  }

}
