package org.qnit.dw

import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel, IDF}
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.qnit.util.SparkUtil

import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer

object DwbApp {

  /**
   * Computes TF-IDF features for the given documents.
   *
   * Input column `content_term` (array of tokens) is first turned into exact
   * term-frequency counts with CountVectorizer (output column `raw_feature`),
   * then rescaled by inverse document frequency into the `feature` column.
   *
   * @param documentDF DataFrame with a `content_term` array column
   * @return the input DataFrame with `raw_feature` and `feature` columns appended
   */
  def getRescaledData(documentDF: DataFrame) = {
    // CountVectorizer yields exact TF counts (unlike HashingTF, no collisions).
    // raw_feature rows look like:
    //   (461,[8,93,...],[1.0,1.0,...])
    // where 461 is the actual vocabulary size (capped by setVocabSize), the
    // first array holds vocabulary indices and the second the term counts.
    val countVectorizer = new CountVectorizer()
      .setInputCol("content_term")  // the pre-tokenized words
      .setOutputCol("raw_feature")
      .setVocabSize(512 * 512)      // upper bound on vocabulary size
      .setMinDF(1)                  // a term must appear in >= 1 document

    val termFrequencyDF = countVectorizer.fit(documentDF).transform(documentDF)

    // Fit IDF on the raw term frequencies and produce the final TF-IDF vector.
    new IDF()
      .setInputCol("raw_feature")
      .setOutputCol("feature")
      .fit(termFrequencyDF)
      .transform(termFrequencyDF)
  }

  /**
   * Registers the two SQL UDFs used by the label pipeline.
   *
   * - `get_content_term(content)`: HanLP-segments the text, drops stop words,
   *   keeps distinct noun-like terms longer than one character.
   * - `tfidf(content_term, feature)`: pairs each term with its TF-IDF value
   *   and returns the topK terms as a comma-separated string.
   *
   * @param spark the session to register the UDFs on
   * @param topK  how many highest-TF-IDF terms `tfidf` returns
   */
  def loadUdf(spark: SparkSession, topK: Int) = {
    spark.udf.register("get_content_term", (content: String) => {
      val termList = StandardTokenizer.segment(content)
      // Drop stop words via HanLP's core stop-word dictionary.
      val termListNoStop = CoreStopWordDictionary.apply(termList)
      termListNoStop.filter(term => {
        val nature = term.nature.toString
        // Keep noun-related terms — i.e. any nature starting with "n" — and
        // drop single characters. FIX: the previous equality check
        // (nature == "n" || nature == "nz") contradicted this stated intent
        // and silently excluded noun subtypes such as nr/ns/nt.
        nature.startsWith("n") && term.word.length > 1
      }).map(term => term.word).distinct // keep only the word, not the nature
    })

    spark.udf.register("tfidf", (contentTerm: Seq[String], feature: Vector) => {
      // `feature` is a sparse vector such as:
      //   (461,[8,93,...],[1.01,1.70,...])
      // dimension, active indices, and the TF-IDF value per active index.
      val tfidfValueArray = ArrayBuffer[Double]()
      feature.foreachActive((_: Int, value: Double) => tfidfValueArray += value)
      // NOTE(review): this zip assumes the sparse vector's active values are
      // in the same order as the terms in `content_term`. foreachActive walks
      // values in vocabulary-index order, which need not match document term
      // order — verify alignment, or map through the CountVectorizer vocabulary.
      //
      // FIX: the previous `.take(topK).toMap.keys` threw away the descending
      // TF-IDF ordering (immutable Map does not preserve insertion order for
      // more than 4 entries); keep the sorted order with map(_._1).
      contentTerm.zip(tfidfValueArray).toList
        .sortBy(-_._2)
        .take(topK)
        .map(_._1)
        .mkString(",")
    })
  }

  /**
   * Builds and persists one label table: per `dimension` group, concatenates all
   * contents, extracts noun terms, computes TF-IDF, and stores the topK terms.
   *
   * @param spark     active session (UDFs are (re)registered on it here)
   * @param dimension comma-separated grouping columns, spliced into the SQL
   * @param tableName target table name under the `dwb` database
   * @param topK      number of top TF-IDF terms kept per group
   */
  def writeLabelTable(spark: SparkSession, dimension: String, tableName: String, topK: Int) = {
    println(s"==================== Write To dwb.$tableName ====================")
    loadUdf(spark, topK)

    // Group source rows by the dimension, join their contents into a single
    // document, and segment it into terms with the get_content_term UDF.
    val termExtractionSql =
      s"""
         |select $dimension, get_content_term(content) content_term
         |from (
         |    select $dimension, concat_ws('。', collect_set(content)) content
         |    from dwd.dwd_expert_logbook_info
         |    group by $dimension
         |) t
         |""".stripMargin
    val segmentedDF = spark.sql(termExtractionSql)
    segmentedDF.show()

    // Append raw TF and TF-IDF feature columns, then expose as a temp view
    // so the tfidf UDF can be applied via SQL.
    val tfidfDF = getRescaledData(segmentedDF)
    tfidfDF.show()
    tfidfDF.createOrReplaceTempView("rescaledData")

    val labelDF = spark.sql(
      s"""
         |select $dimension, tfidf(content_term, feature) top_term,
         |       concat_ws(',', content_term) content_term
         |from rescaledData
         |""".stripMargin)
    labelDF.show()

    // Persist to the dwb warehouse layer as a single parquet file per table.
    labelDF
      .repartition(1)
      .write
      .mode(SaveMode.Overwrite)
      .format("parquet")
      .saveAsTable(s"dwb.$tableName")
  }

  /**
   * Entry point: validates the environment argument, opens a Spark session,
   * and writes one label table per aggregation level (user, county, city,
   * province), each with its own topK.
   *
   * @param args args(0) is the environment name, checked by SparkUtil.verifyEnv
   */
  def main(args: Array[String]): Unit = {
    val env = args(0)
    println(s"env=$env")
    if (!SparkUtil.verifyEnv(env))
      System.exit(1)

    val session = SparkUtil.initSparkSession(env, "DwbApp")

    // (grouping columns, target table, topK terms) per aggregation level.
    val labelTargets = Seq(
      ("user_id", "dwb_user_label", 5),
      ("province_name,city_name,county_name", "dwb_county_label", 8),
      ("province_name,city_name", "dwb_city_label", 10),
      ("province_name", "dwb_province_label", 15)
    )
    labelTargets.foreach { case (dimension, table, topK) =>
      writeLabelTable(session, dimension, table, topK)
    }

    session.stop()
  }

}
