package cn.itcast.up.model.base

import java.util.Properties

import cn.itcast.up.model.bean.{HBaseMeta, TagRule}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Common base trait for tag-model computations.
  *
  * Template-method pipeline: read tag rules from MySQL, load business data
  * from HBase, compute the new tags (subclass-provided), merge them with the
  * historical tags already stored in HBase, and write the result back.
  * Concrete models only implement [[setAppName]], [[setFourTagID]] and
  * [[computeTagIds]], then call [[executeCompute]].
  */
trait BaseModel {
  /**
    * Spark application name for the concrete model.
    *
    * @return application name shown in the Spark UI
    */
  def setAppName(): String

  /**
    * ID of the level-4 tag this model computes.
    *
    * @return primary key of the level-4 row in tbl_basic_tag
    */
  def setFourTagID(): Int


  // Shared SparkSession for the whole pipeline.
  // NOTE(review): master is hard-coded to local[*] — fine for development,
  // should come from submit-time config for cluster deployment; confirm.
  val spark: SparkSession = SparkSession.builder()
    .appName(setAppName())
    .master("local[*]")
    .getOrCreate()
  // Implicits enable the 'col symbol syntax; functions brings udf/when/etc.
  import spark.implicits._
  import org.apache.spark.sql.functions._

  /**
    * Loads the tag-rule table (tbl_basic_tag) from MySQL.
    *
    * SECURITY NOTE(review): user/password are embedded in the JDBC URL;
    * move them to external configuration or a secrets store before
    * production use.
    *
    * @return DataFrame over the full tbl_basic_tag table
    */
  def getMysqlSource(): DataFrame = {
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val tableName = "tbl_basic_tag"
    val props = new Properties
    spark.read.jdbc(url, tableName, props)
  }

  /**
    * Loads the level-4 tag rule and parses it into a key/value map.
    *
    * Rule format example:
    *   inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,job
    *
    * @param mysqlSource DataFrame over tbl_basic_tag (see [[getMysqlSource]])
    * @return parsed rule entries, e.g. "zkHosts" -> "192.168.10.20"
    * @throws IllegalArgumentException if no rule row exists for the tag ID
    */
  def getFourRule(mysqlSource: DataFrame): Map[String, String] = {
    val rules: Array[Map[String, String]] = mysqlSource.select("id", "rule")
      .where("id = " + setFourTagID())
      .rdd.map { row =>
        val ruleStr: String = row.getAs[String]("rule")
        ruleStr.split("##")
          .map { kv =>
            // Split on the FIRST '=' only, so values may themselves contain '='
            // (e.g. query-string-like rule values).
            val arr: Array[String] = kv.split("=", 2)
            (arr(0), arr(1))
          }
          .toMap
      }.collect()
    // Fail fast with a clear message instead of ArrayIndexOutOfBoundsException.
    require(rules.nonEmpty, s"No level-4 tag rule found for tag id ${setFourTagID()}")
    rules.head
  }

  /**
    * Loads the level-5 tag rules (the children of the level-4 tag).
    * Returned as a raw DataFrame because level-5 rules vary per model.
    *
    * @param mysqlSource DataFrame over tbl_basic_tag
    * @return rows (id, rule) where pid equals the level-4 tag ID
    */
  def getFiveDF(mysqlSource: DataFrame): DataFrame = {
    mysqlSource.select('id, 'rule)
      .where("pid = " + setFourTagID())
  }

  /**
    * Loads the business data from HBase through the custom data source,
    * passing the whole rule map (zkHosts, zkPort, hbaseTable, family,
    * selectFields, ...) as reader options.
    *
    * @param map parsed level-4 rule from [[getFourRule]]
    * @return DataFrame over the configured HBase table
    */
  def loadHBaseData(map: Map[String, String]): DataFrame = {
    spark.read
      .format("cn.itcast.up.model.tools.HBaseDataSource")
      .options(map)
      .load()
  }

  /**
    * Computes the new tag assignments — implemented by each concrete model.
    *
    * @param hbaseDF HBase source data
    * @param fiveDF  level-5 rule data
    * @return final result with columns: userid, tagIds
    */
  def computeTagIds(hbaseDF: DataFrame, fiveDF: DataFrame): DataFrame



  /**
    * Loads the historical tag results (userid, tagIds) from HBase.
    *
    * NOTE(review): the table name "test32" is hard-coded instead of taken
    * from the rule map — presumably a dedicated result/test table, distinct
    * from the source table in the rule; confirm before production use.
    *
    * @param map parsed level-4 rule (supplies zkHosts/zkPort/family)
    * @return DataFrame with columns: userid, tagIds
    */
  def loadOldData(map: Map[String, String]): DataFrame = {
    spark.read
      // Fully-qualified class name of the custom HBase data source.
      .format("cn.itcast.up.model.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, map.getOrElse("zkHosts", ""))
      .option(HBaseMeta.ZKPORT, map.getOrElse("zkPort", ""))
      .option(HBaseMeta.HBASETABLE, "test32")
      .option(HBaseMeta.FAMILY, map.getOrElse("family", ""))
      .option(HBaseMeta.SELECTFIELDS, "userid,tagIds")
      .load()
  }

  /**
    * Merges the freshly computed tags with the historical tags.
    *
    * A full outer join keeps users present in either side; tag ID strings
    * are unioned and de-duplicated.
    *
    * @param newDF result of [[computeTagIds]] (userid, tagIds)
    * @param oldDF historical data from [[loadOldData]] (userid, tagIds)
    * @return merged result with columns: userid, tagIds
    */
  def mergeTotalTag(newDF: DataFrame, oldDF: DataFrame): DataFrame = {
    // Full outer join: retain users that appear only in the new result,
    // only in the history, or in both.
    val joinResult: DataFrame = newDF.join(oldDF, newDF.col("userid") === oldDF.col("userid"), "full")
    // UDF: union of old and new comma-separated tag IDs, de-duplicated.
    val mergeTag = udf((newTag: String, oldTag: String) => {
      if (StringUtils.isBlank(newTag) && StringUtils.isBlank(oldTag)) {
        // Both missing: normalize to empty string rather than null/whitespace.
        ""
      } else if (StringUtils.isBlank(newTag)) {
        oldTag
      } else if (StringUtils.isBlank(oldTag)) {
        newTag
      } else {
        // Both present: concatenate, split on ',', de-duplicate via Set.
        (oldTag + "," + newTag).split(",").toSet.mkString(",")
      }
    })
    // Project the final columns: prefer the new side's userid, fall back to
    // the old side when the user only exists in the history.
    joinResult.select(
      when(newDF.col("userid").isNotNull, newDF.col("userid"))
        .otherwise(oldDF.col("userid"))
        .as("userid"),
      mergeTag(newDF.col("tagIds"), oldDF.col("tagIds")).as("tagIds")
    )
  }

  /**
    * Persists the merged tag result back to HBase via the custom data source.
    *
    * NOTE(review): target table "test32" is hard-coded (matches
    * [[loadOldData]]); confirm before production use.
    *
    * @param result merged result from [[mergeTotalTag]]
    * @param map    parsed level-4 rule (supplies zkHosts/zkPort/family)
    */
  def sinkData(result: DataFrame, map: Map[String, String]): Unit = {
    result.write
      .format("cn.itcast.up.model.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, map.getOrElse("zkHosts", ""))
      .option(HBaseMeta.ZKPORT, map.getOrElse("zkPort", ""))
      .option(HBaseMeta.HBASETABLE, "test32")
      .option(HBaseMeta.FAMILY, map.getOrElse("family", ""))
      .save()
  }

  /**
    * Runs the whole pipeline end to end:
    * rules -> source data -> compute -> merge with history -> sink.
    */
  def executeCompute(): Unit = {
    // Load the MySQL rule source.
    val mysqlSource: DataFrame = getMysqlSource()
    // Parse the level-4 rule into a map.
    val map: Map[String, String] = getFourRule(mysqlSource)
    // Load the level-5 rules as a DataFrame.
    val fiveDF: DataFrame = getFiveDF(mysqlSource)
    // Load the HBase business data described by the level-4 rule.
    val hbaseSource: DataFrame = loadHBaseData(map)
    // Compute the new tags (model-specific).
    val newDF: DataFrame = computeTagIds(hbaseSource, fiveDF)
    // Load the historical tags.
    val oldDF: DataFrame = loadOldData(map)
    // Merge new and historical tags.
    val result: DataFrame = mergeTotalTag(newDF, oldDF)
    // Persist the merged result.
    sinkData(result, map)
  }


}
