package cn.doitedu.ml.tfidf

import org.apache.spark.sql.SparkSession

/**
 * Hand-rolled TF-IDF over a small document corpus.
 *
 * Expects a headered CSV with columns `docid` (document id) and `doc`
 * (the document body as a space-separated string of words). Computes each
 * word's IDF via Spark SQL, broadcasts the (word -> idf) map to the
 * executors, then multiplies per-document term counts by the broadcast IDF.
 */
object TFIDFHand {
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("TFIDFHand")
      .master("local")
      .getOrCreate()

    import spark.implicits._

    // Load the sample corpus: columns are (docid, doc).
    val docs = spark.read.option("header", "true").csv("userprofile/data/tfidf")
    val totalDocs = docs.count()

    docs.createTempView("docs")

    /**
     * IDF per word: log10(N / df), where N is the total document count and
     * df is the number of distinct documents containing the word.
     *
     * The `tmp` CTE explodes each document body into (docid, word) rows:
     * +-----+----+
     * |docid|word|
     * +-----+----+
     * |doc0 |苹果|
     * |doc0 |官网|
     * |doc0 |苹果|
     * |doc0 |宣布|
     * |doc0 |骁龙|
     *
     * Spark SQL's `/` performs floating-point division, so the interpolated
     * Long count needs no explicit cast.
     */
    val idfDF = spark.sql(
      s"""
         |
         |with tmp as (
         |select
         |  docid,
         |  word
         |from docs lateral view explode(split(doc,' ')) o as word
         |)
         |
         |select
         |word,   log10(${totalDocs}/count(distinct docid)) as idf
         |from tmp
         |group by word
         |
         |""".stripMargin)

    /**
     * Resulting IDF table, e.g.:
     * +----+------------------+
     * |word|idf               |
     * +----+------------------+
     * |香蕉|0.6020599913279624|
     * |桔子|0.3010299956639812|
     * |保鲜|0.6020599913279624|
     */

    // Collect the small (word -> idf) table to the driver and broadcast it,
    // so the per-document pass can do cheap in-memory lookups.
    val idfMap = idfDF.rdd.map { row =>
      (row.getAs[String]("word"), row.getAs[Double]("idf"))
    }.collectAsMap()

    val bc = spark.sparkContext.broadcast(idfMap)

    // TF-IDF for every word of every document: raw term count * broadcast IDF.
    val res = docs.rdd.map { row =>
      val wordIdf = bc.value
      val docid = row.getAs[String]("docid")
      val doc = row.getAs[String]("doc")
      // Term frequency: word -> occurrence count within this document.
      val termCounts = doc.split(" ").groupBy(identity).map {
        case (word, occurrences) => (word, occurrences.length)
      }
      // A word absent from the IDF map never appeared in the corpus; its
      // contribution is 0.0 (the previous fallback of 1.0 would silently
      // inflate unseen words). Unreachable here, since the map is built
      // from the same corpus, but the safe default matters if reused.
      val tfidf = termCounts.map {
        case (word, count) => (word, count * wordIdf.getOrElse(word, 0.0))
      }
      (docid, tfidf)
    }

    // BUGFIX: the tuple is (document id, word -> tf-idf map) — the previous
    // column names ("word", "tf-idf") mislabeled both columns.
    res.toDF("docid", "tfidf").show(100, false)

    spark.close()
  }
}
