package cn.itcast.czxy.BD18.tools

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.expressions.UserDefinedFunction

/**
 * @author 红尘丶世界
 * @version v 1.0
 */
class Common {

  /**
   * Merge old and new tag DataFrames, combining their `tagsId` columns per user.
   *
   * NOTE(fix): the original used an inner join, which silently dropped users
   * present on only one side and made the `isNotNull` when-chain below dead
   * code (after an inner join both userId columns are always non-null).
   * A full outer join keeps those users; the missing side's columns come back
   * as `null`, so the merge UDF must also be null-safe.
   *
   * @param oldTags existing tags, schema (userId, tagsId)
   * @param newTags newly computed tags, schema (userId, tagsId)
   * @return DataFrame with columns (userId, tagsId) where tagsId is the
   *         de-duplicated, comma-joined union of both sides' tag ids
   */
  def allTags(oldTags: DataFrame, newTags: DataFrame): DataFrame = {
    import org.apache.spark.sql.functions._

    // Full outer join so users existing on only one side are retained.
    val joined: DataFrame =
      oldTags.join(newTags, oldTags("userId") === newTags("userId"), "full")

    // Merge two comma-separated tag-id strings, treating null and "" as
    // "no tags" and de-duplicating the combined ids.
    val mergeTags: UserDefinedFunction = udf((oldTagsId: String, newTagsId: String) => {
      val oldPart = Option(oldTagsId).getOrElse("")
      val newPart = Option(newTagsId).getOrElse("")
      (oldPart.nonEmpty, newPart.nonEmpty) match {
        case (true, true)   => (oldPart + "," + newPart).split(",").distinct.mkString(",")
        case (true, false)  => oldPart
        case (false, true)  => newPart
        case (false, false) => ""
      }
    })

    joined.select(
      // Pick whichever side actually carries the userId — after a full outer
      // join exactly one side may be null.
      when(oldTags("userId").isNotNull, oldTags("userId"))
        .when(newTags("userId").isNotNull, newTags("userId"))
        .as("userId"),
      mergeTags(oldTags("tagsId"), newTags("tagsId")).as("tagsId")
    )
  }

  /**
   * Parse the rule string of the first four-level tag row into a key/value map.
   *
   * A rule looks like "zkHosts=192.168.10.20##zkPort=2181": entries separated
   * by "##", each entry a key=value pair.
   *
   * NOTE(fix): the original collected ALL rows to the driver just to read the
   * first, threw an opaque IndexOutOfBoundsException on an empty dataset, and
   * truncated values containing '=' (plain split("=") + arr(1)).
   *
   * @param fourDS rows of the four-level tag table; must contain a "rule" column
   * @param spark  SparkSession (kept for interface compatibility)
   * @return key -> value map parsed from the first row's rule
   * @throws IllegalArgumentException if the dataset is empty or an entry lacks '='
   */
  def ruleSplit(fourDS: Dataset[Row], spark: SparkSession): Map[String, String] = {
    // take(1) fetches only the first row instead of collecting everything.
    val firstRows: Array[Row] = fourDS.take(1)
    require(firstRows.nonEmpty, "ruleSplit: four-level tag dataset is empty, no rule to parse")

    val rule: String = firstRows(0).getAs[Any]("rule").toString
    rule.split("##").map { kv =>
      // limit 2 so values that themselves contain '=' are not truncated
      val parts: Array[String] = kv.split("=", 2)
      require(parts.length == 2, s"ruleSplit: malformed rule entry '$kv' (expected key=value)")
      (parts(0), parts(1))
    }.toMap
  }
}
