package org.qnit.dw

import org.apache.spark.ml.feature.Word2Vec
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.stat.Summarizer
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.qnit.util.SparkUtil

// Batch job: train a Word2Vec model over per-user token lists and materialize
//   (a) a word-vector table for each user's top terms  -> dws.dws_user_label_w2v
//   (b) a per-user embedding (mean of term vectors)    -> dws.dws_user_embedding
// Usage: spark-submit ... org.qnit.dw.Word2Vec <env>
object Word2Vec {

  def main(args: Array[String]): Unit = {
    val env = args(0)

    println(s"env=$env")
    if (!SparkUtil.verifyEnv(env))
      System.exit(1)
    val spark: SparkSession = SparkUtil.initSparkSession(env, "Word2Vec")

    // Data preparation: keep rows with a non-empty content_term and split the
    // comma-separated string into an array of tokens (Word2Vec needs an array column).
    val source = spark.table("dwb.dwb_user_label")
      .where("content_term > ''")
      .withColumn("content_term", split(col("content_term"), ","))
    source.persist() // reused by fit(), transform() and the top_term explode below
    source.printSchema()
    source.show()
    /**
     * +--------------------+----------------------------------+------------------------------+
     * |             user_id|                          top_term|                  content_term|
     * +--------------------+----------------------------------+------------------------------+
     * |0039681c94f84da9a...|        结构,硫磺,草绳,生石灰,主干| [果园, 粪肥, 化肥, 使用量,...|
     */

    // 1. Train word vectors from the token arrays.
    // NOTE: this enclosing object is also named Word2Vec; a definition introduced by the
    // package clause in the same compilation unit shadows the explicit import of
    // org.apache.spark.ml.feature.Word2Vec, so the estimator must be referenced by its
    // fully qualified name here.
    val word2Vec = new org.apache.spark.ml.feature.Word2Vec()
      .setInputCol("content_term") // must be the array column, not top_term
      .setOutputCol("vector")      // transform() emits a per-document vector under this name
      .setVectorSize(64)  // embedding dimension; 128/256 are typical for large corpora
      .setMinCount(5)     // words below this frequency are excluded from training (default 5)
      //.setNumPartitions(2) // more partitions train faster but reduce accuracy
      .setWindowSize(8)   // context window size (default 5)
      .setMaxIter(1)      // more iterations = higher accuracy, slower training
    val word2VecModel = word2Vec.fit(source) // large corpora may need spark.kryoserializer.buffer.max=512m
    word2VecModel.transform(source).show()   // "vector" here is the document-level vector
    /**
     * +--------------------+----------+--------------+--------------------+
     * |             user_id|  top_term|  content_term|              vector|
     * +--------------------+----------+--------------+--------------------+
     * |0039681c94f84da9a...|结构,硫磺…|[果园, 粪肥,…]|[-0.0129333647903...|
     */

    // Per-word vectors come straight from the trained model; no transform() needed.
    val word2vecDF = word2VecModel.getVectors
    word2vecDF.show(false)
    /**
     * +------+--------------------+
     * |  word|              vector|
     * +------+--------------------+
     * |  广义|[0.02121375501155...|
     */

    // 2. Explode top_term so each (user_id, term) pair becomes one row.
    val articleTopTerm = source.select(col("user_id"), explode(split(col("top_term"), ",")).as("top_term"))
    articleTopTerm.show()
    /**
     * +--------------------+--------+
     * |             user_id|top_term|
     * +--------------------+--------+
     * |0039681c94f84da9a...|    结构|
     */

    // UDF: ml Vector -> Array[Double]. Typed parameter replaces the previous
    // Any => asInstanceOf cast; also reused below for the user embedding.
    val vecToArrUdf = udf((v: Vector) => v.toArray)

    // UDF: derive a positive Long key from a string for Milvus. The sign of hashCode
    // is encoded in a leading digit ("1" = non-negative, "2" = negative) so distinct
    // hash values map to distinct keys. Max value "22147483648" fits in a Long.
    val hashUdf = udf((key: String) => {
      val h = key.hashCode
      if (h >= 0) s"1$h".toLong else s"2${Math.abs(h)}".toLong
    })

    // Join each top term with its trained vector; terms the model never learned
    // (frequency below minCount) have no match and are dropped. Rename the UDF
    // columns explicitly so a generated name like "UDF(vector)" cannot break the save.
    val w2v = articleTopTerm.join(word2vecDF, col("top_term") === col("word"), "left")
      .filter("word is not null")
      .select(col("user_id"), col("top_term"), hashUdf(col("top_term")).as("milvus_key"), vecToArrUdf(col("vector")).as("vector"))
    w2v.persist() // consumed twice: the table write and the per-user aggregation
    w2v.printSchema()
    w2v.show()
    /**
     * +--------------------+--------+--------------------+
     * |             user_id|top_term|              vector|
     * +--------------------+--------+--------------------+
     * |0039681c94f84da9a...|    结构|[0.14836685359477...|
     */

    // UDF: Array[Double] -> dense Vector so Summarizer can aggregate it.
    val arrToVecUdf = udf((array: Seq[Double]) => Vectors.dense(array.toArray))

    w2v.repartition(1)
      .write.mode(SaveMode.Overwrite)
      .format("parquet")
      .saveAsTable("dws.dws_user_label_w2v")

    // Per-user embedding = element-wise mean of that user's term vectors.
    val userEmbedding = w2v.withColumn("vector", arrToVecUdf(col("vector")))
      .groupBy("user_id")
      .agg(Summarizer.mean(col("vector")).as("vector")) // relatively expensive aggregation
      .withColumn("milvus_key", hashUdf(col("user_id")))
      .withColumn("vector", vecToArrUdf(col("vector")))
    userEmbedding.show()

    userEmbedding.repartition(1)
      .write.mode(SaveMode.Overwrite)
      .format("parquet")
      .saveAsTable("dws.dws_user_embedding")

    // Release cached datasets before shutting down.
    w2v.unpersist()
    source.unpersist()
    spark.stop()
  }

}
