package com.shujia.mllib

import org.apache.spark.ml.feature.{HashingTF, IDF, IDFModel, Tokenizer}
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}

object Demo10BayesPrePro {
  /**
   * Offline preprocessing for the Naive Bayes text classifier:
   * reads labelled sentences, segments them with the IK analyzer,
   * computes TF-IDF features, then persists both the fitted IDF
   * model and the featurized data for the downstream training job.
   */
  def main(args: Array[String]): Unit = {
    // Build the SparkSession (local mode, 8 shuffle partitions).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo10BayesPrePro")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", "8")
      .getOrCreate()

    import spark.implicits._

    // Ensure the session is always stopped, even if the pipeline fails.
    try {
      // 1. Read the raw training file: tab-separated "label<TAB>sentence".
      val bayesTrainDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", "\t")
        .schema("label Double,sentence String")
        .load("Spark/data/mllib/data/bayesTrain.txt")

      // Segment each sentence with the IK analyzer, joining tokens with
      // spaces so the ML Tokenizer can re-split them below. Rows that
      // yield no tokens are dropped.
      // (Removed a pointless `.stripMargin` on the joined string — it is a
      // no-op on a single line and would mangle tokens starting with '|'.)
      val ikDF: DataFrame = bayesTrainDF
        .as[(Double, String)]
        .map {
          case (label: Double, sentence: String) =>
            (label, sentence, Demo08IK.fit(sentence).mkString(" "))
        }
        .filter(t3 => t3._3.nonEmpty)
        .toDF("label", "sentence", "words_str")

      // Re-split the space-joined tokens into an array column ("words").
      val tokenizer: Tokenizer = new Tokenizer().setInputCol("words_str").setOutputCol("words")
      val wordsData: DataFrame = tokenizer.transform(ikDF)

      // TF step: hash each term into a fixed-size sparse vector of term frequencies.
      val hashingTF: HashingTF = new HashingTF()
        .setInputCol("words") // input column (token array)
        .setOutputCol("rawFeatures") // output column (raw TF vector)
        .setNumFeatures(262144) // hash-bucket count (2^18, the Spark default)

      val featurizedData: DataFrame = hashingTF.transform(wordsData)

      // Alternatively, CountVectorizer can also be used to get term frequency vectors.

      // IDF step: down-weight terms that appear in many documents.
      val idf: IDF = new IDF()
        .setInputCol("rawFeatures")
        .setOutputCol("features")
      val idfModel: IDFModel = idf.fit(featurizedData)
      val rescaledData: DataFrame = idfModel.transform(featurizedData)
      //    rescaledData.show(2000, truncate = false)

      // Persist the fitted IDF model so the scoring/prediction job can reuse it.
      idfModel.write.overwrite().save("Spark/data/mllib/idf")

      // Persist the TF-IDF features as Parquet for the Bayes training step.
      rescaledData
        .write
        .format("parquet")
        .mode(SaveMode.Overwrite)
        .save("Spark/data/mllib/data/rescaledData")

      //    rescaledData
      //      .where($"words_str".contains("中国"))
      //      .show()
    } finally {
      spark.stop() // release local Spark resources (original leaked the session)
    }
  }

}
