package cn.itcast.tags.tools

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.clustering.KMeansModel
import org.apache.spark.ml.linalg
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StringType

/**
 * Utility object with helper operations for tag processing.
 */
object TagTools {
  /**
   * Builds a mapping from KMeans cluster index to tag ID.
   *
   * Cluster centers are ranked by the sum of their vector components in
   * descending order; the center with rank k is paired with the tag whose
   * rule string equals k (rules are assumed to be "0", "1", ... — TODO confirm
   * against the tag data).
   *
   * @param clusterCenters cluster center vectors from a fitted KMeans model
   * @param tagDF          attribute tag data (rule -> tagId)
   * @return Map of cluster index -> tag ID
   */
  def convertIndexMap(clusterCenters: Array[linalg.Vector], tagDF: DataFrame): Map[Int, Long] = {
    // rule string -> tagId, collected from the level-5 tags
    val rulesMap: Map[String, Long] = convertMap(tagDF)
    // Rank each cluster center (keeping its original index) by component sum, largest first.
    val ranked: Array[((Double, Int), Int)] =
      clusterCenters.zipWithIndex
        .map { case (center, clusterIdx) => (center.toArray.sum, clusterIdx) }
        .sortBy { case (componentSum, _) => -componentSum }
        .zipWithIndex
    // Pair every original cluster index with the tagId whose rule matches its rank.
    ranked.map { case ((_, clusterIdx), rank) =>
      clusterIdx -> rulesMap(rank.toString)
    }.toMap
  }


  /**
   * 将[属性标签]数据中规则：rule与标签ID：tagId转换为Map集合
   *
   * @param tagDF 属性标签数据
   * @return Map 集合
   */
  /**
   * Converts attribute-tag data into a Map of rule -> tagId.
   *
   * Only leaf tags (level == 5) are considered; the result is collected to
   * the driver as an immutable Map.
   *
   * @param tagDF attribute tag data
   * @return Map of rule string -> tag ID
   */
  def convertMap(tagDF: DataFrame): Map[String, Long] = {
    import tagDF.sparkSession.implicits._
    // Keep level-5 attribute tags, project (rule, tagId) pairs,
    // then collect them into an immutable Map on the driver.
    val rulePairs = tagDF
      .where($"level" === 5)
      .select($"rule", $"id".as("tagId"))
      .as[(String, Long)]
    rulePairs.rdd.collectAsMap().toMap
  }

  /**
   * 依据[标签业务字段的值]与[标签规则]匹配，进行打标签（userId, tagId)
   *
   * @param dataframe 标签业务数据
   * @param field     标签业务字段
   * @param tagDF     标签数据
   * @return 标签模型数据
   */
  /**
   * Tags users by matching a business field value against the tag rules,
   * producing (uid, tagId) rows.
   *
   * @param dataframe business data containing an `id` column and the field to match
   * @param field     name of the business field column to match against tag rules
   * @param tagDF     tag data (level-5 rows supply rule -> tagId)
   * @return DataFrame with columns uid (from `id`) and tagId (as string;
   *         null when the field value matches no rule)
   */
  def ruleMatchTag(dataframe: DataFrame, field: String,
                   tagDF: DataFrame): DataFrame = {
    val spark: SparkSession = dataframe.sparkSession
    import spark.implicits._
    // 1. Build the rule -> tagId map from the tag data
    val ruleTagMap: Map[String, Long] = convertMap(tagDF)
    // 2. Broadcast the map to the executors
    val ruleTagMapBroadcast = spark.sparkContext.broadcast(ruleTagMap)
    // 3. UDF mapping a field value to its tag ID.
    //    BUGFIX: the original used Map.apply, which throws NoSuchElementException
    //    for any value without a matching rule and fails the whole job.
    //    Returning Option lets Spark emit null for unmatched values instead.
    val field_to_tag: UserDefinedFunction = udf(
      (value: String) => ruleTagMapBroadcast.value.get(value)
    )
    // 4. Compute the tag: uid from id, tagId looked up from the business field
    val modelDF: DataFrame = dataframe
      .select(
        $"id".as("uid"),
        field_to_tag(col(field)).cast(StringType).as("tagId")
      )
    //modelDF.printSchema()
    //modelDF.show(50, truncate = false)

    // 5. Return the tagged data
    modelDF
  }

  /**
   * Converts numeric-range tag rules of the form "start-end" into a
   * DataFrame with columns (tagId, start, end).
   *
   * Only leaf tags (level == 5) are considered. A rule that does not split
   * into exactly two integer parts fails at execution time (MatchError),
   * matching the original contract.
   *
   * @param tagDF attribute tag data
   * @return DataFrame with columns tagId, start, end
   */
  def convertTuple(tagDF: DataFrame): DataFrame = {
    import tagDF.sparkSession.implicits._

    // Parse a "start-end" rule string into an (Int, Int) tuple.
    val rule_to_tuple: UserDefinedFunction = udf { (rule: String) =>
      val Array(lower, upper) = rule.split("-").map(_.toInt)
      (lower, upper)
    }

    tagDF
      .where($"level" === 5)
      .select(
        $"id".as("tagId"),
        rule_to_tuple($"rule").as("rules")
      )
      // Flatten the tuple struct into separate start/end columns.
      .select(
        $"tagId",
        $"rules._1".as("start"),
        $"rules._2".as("end")
      )
  }

  /**
   * Tags users from KMeans predictions, producing (uid, tagId) rows.
   *
   * The model's cluster centers are ranked via [[convertIndexMap]] to derive
   * a cluster-index -> tagId map, which is broadcast and applied to the
   * `prediction` column.
   *
   * @param kmeansModel fitted KMeans model supplying the cluster centers
   * @param dataFrame   prediction data with columns uid and prediction
   * @param tagDF       tag data used to resolve tag IDs
   * @return DataFrame with columns uid and tagId
   */
  def kmeansMatchTag(kmeansModel: KMeansModel, dataFrame: DataFrame, tagDF: DataFrame): DataFrame = {
    val spark: SparkSession = dataFrame.sparkSession
    import spark.implicits._

    // cluster index -> tagId, derived from ranked cluster centers
    val indexTagMap: Map[Int, Long] =
      TagTools.convertIndexMap(kmeansModel.clusterCenters, tagDF)
    val indexTagBroadcast: Broadcast[Map[Int, Long]] =
      spark.sparkContext.broadcast(indexTagMap)

    // Translate a predicted cluster index into its tag ID.
    val index_to_tag: UserDefinedFunction =
      udf((prediction: Int) => indexTagBroadcast.value(prediction))

    dataFrame.select(
      $"uid",
      index_to_tag($"prediction").as("tagId")
    )
  }
}
