import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import scala.collection.mutable

/**
 * Computes the smoothed IDF of the word "爱你" ("love you") from a parquet
 * dataset that was produced by an earlier TF-IDF pipeline step.
 *
 * Steps:
 *   1. Load the rescaled (IDF-weighted) data from parquet.
 *   2. Count documents whose tokenized `words` column contains "爱你" (the DF).
 *   3. Count total documents, then print IDF = log((total + 1) / (df + 1)) —
 *      the same smoothing Spark MLlib's IDF estimator applies.
 */
object Test01 {
  def main(args: Array[String]): Unit = {

    // Build the SparkSession (local mode; modest shuffle parallelism for a small dataset).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Test01")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", "8")
      .getOrCreate()

    try {
      // Load the IDF-rescaled data produced upstream.
      val rescaledData: DataFrame = spark
        .read
        .format("parquet")
        .load("Spark/data/mllib/data/rescaledData")
      import spark.implicits._
      rescaledData.show(truncate = false)
      rescaledData.printSchema()

      // Document frequency: number of documents whose token list contains "爱你".
      // `getAs[Seq[String]]` is the stable way to read an ArrayType column
      // (avoids the deprecated mutable.WrappedArray), and `Seq.contains`
      // tests membership directly without building a throwaway Set per row.
      val cnt: Long = rescaledData
        .select($"words")
        .filter(row => {
          val words: Seq[String] = row.getAs[Seq[String]]("words")
          words.contains("爱你")
        })
        .count()
      println(cnt)

      // Total number of documents in the corpus.
      val total_cnt: Long = rescaledData.count()

      //    val containsCnt: Long = rescaledData
      //      .where($"words_str".contains("爱你"))
      //      .count()

      println(s"包含'爱你'这个词语的文档的数量：$cnt")
      println(s"总的文档的数量：$total_cnt")
      // Smoothed IDF; the +1 terms prevent division by zero and log(0).
      println(s"'爱你'这个词语的IDF:${math.log((total_cnt + 1).toDouble / (cnt + 1))}")
    } finally {
      // Release the local Spark context and its resources even if the job fails.
      spark.stop()
    }
  }

}
