package cn.itcast.czxy

import java.util.Properties

import bean.{HBaseMeta, TagRule}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object MarriageTags {

  def main(args: Array[String]): Unit = {
    /*
    Marital-status rule-tag job: read the level-4/level-5 tag rules from MySQL,
    match them against user data in HBase, merge with historical tags, and
    write the de-duplicated result back to HBase.
     */
    //1 Create a SparkSession used to read data from MySQL and HBase
    val spark = SparkSession.builder().master("local[*]").appName("marriage").getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("WARN")

    //2 Connect to MySQL
    // FIX: "userUnicode" was a typo for "useUnicode", so the option was silently ignored
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties()
    val mysqlCoon: DataFrame = spark.read.jdbc(url, table, properties)

    //implicit conversions for Dataset encoders and the 'col symbol syntax
    import spark.implicits._
    //java <-> scala collection converters (collectAsList returns a java.util.List)
    import scala.collection.JavaConverters._
    //udf / when / otherwise
    import org.apache.spark.sql.functions._

    //3 Read the level-4 tag rule from MySQL and parse it into a Map that
    //  describes how to reach the HBase source table.
    val fourRow: Dataset[Row] = mysqlCoon.select('rule).where("id=73")
    val fourMap: Map[String, String] = fourRow.map(row => {
      //raw rule string, e.g.:
      //inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_logs##family=detail##selectFields=id,marriage
      row.getAs("rule").toString
        //split the key=value pairs on "##"
        .split("##")
        .map(line => {
          //each element looks like "inType=HBase"; split on "="
          val arr = line.split("=")
          (arr(0), arr(1))
        })
    }).collectAsList().get(0).toMap

    //wrap the parsed rule map into the HBaseMeta case class
    val hBaseMeta: HBaseMeta = FourRuleToCC(fourMap)

    //4 Read the level-5 tags (tag id + rule value) used to match marriage status
    val fiveRow: Dataset[Row] = mysqlCoon.select('id, 'rule).where("pid=73")
    val fiveList: List[TagRule] = fiveRow.map(row => {
      val id: Int = row.getAs("id").toString.toInt
      val rule: String = row.getAs("rule").toString
      TagRule(id, rule)
    }).collectAsList()
      //collectAsList returns a java.util.List; convert it to a scala List
      .asScala.toList

    //5 Read the source data from HBase via the custom data source
    val HBaseDF: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hBaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hBaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hBaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hBaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hBaseMeta.selectFields)
      .load()

    //6 Match the HBase "marriage" value against the level-5 rules with a UDF.
    //  0 means "no rule matched". The original var + for-loop kept the LAST
    //  matching rule's id; filter + lastOption preserves that semantics.
    val getTags = udf((HMarriage: String) => {
      fiveList.filter(_.rule == HMarriage).lastOption.map(_.id).getOrElse(0)
    })

    val newTagsDF: DataFrame = HBaseDF.select('id as ("userId"), getTags('marriage) as ("tagsId"))

    //UDF merging the historical tag string with the newly computed one,
    //de-duplicating the comma-separated ids.
    //FIX: the original third branch (both empty) was unreachable because the
    //first branch already covered it, and nulls (produced by an outer join
    //when one side is missing) were not handled.
    val unionAndDistinctTags: UserDefinedFunction = udf((historyId: String, newTagsId: String) => {
      val history = Option(historyId).getOrElse("")
      val latest = Option(newTagsId).getOrElse("")
      if (history.isEmpty) latest
      else if (latest.isEmpty) history
      else (history + "," + latest).split(",").distinct.mkString(",")
    })

    //7 Avoid duplicate tag accumulation when the job runs multiple times.
    //a read the historical tags already written to HBase
    val historyDF: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hBaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hBaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()
    //b FIX: use a FULL OUTER join so users present only in the new batch
    //  (or only in history) are kept; the original inner join silently
    //  dropped them, losing tags on first run for new users.
    val TagsJoinDF: DataFrame = historyDF.join(newTagsDF, historyDF("userId") === newTagsDF("userId"), "full")
    //c pick whichever userId side is non-null and merge the tag strings.
    //  FIX: added .otherwise so the userId is not null for new-only users.
    val UDTagsDF: DataFrame = TagsJoinDF.select(
      when(historyDF("userId").isNotNull, historyDF("userId")).otherwise(newTagsDF("userId")) as ("userId"),
      unionAndDistinctTags(historyDF("tagsId"), newTagsDF("tagsId")) as ("tagsId")
    )

    //8 write the final merged tags back to the HBase table
    UDTagsDF.write.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hBaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hBaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()

    sc.stop()

  }

  //封装四级标签规则
  /**
   * Wraps the parsed level-4 rule map into an [[HBaseMeta]] case class.
   * Any key absent from the map defaults to the empty string.
   *
   * @param fourMap key/value pairs parsed from the level-4 rule string
   * @return a populated HBaseMeta describing the HBase source
   */
  def FourRuleToCC(fourMap: Map[String, String]): HBaseMeta = {
    //local helper so each field lookup reads as a single call
    def valueOf(key: String): String = fourMap.getOrElse(key, "")

    HBaseMeta(
      valueOf(HBaseMeta.INTYPE),
      valueOf(HBaseMeta.ZKHOSTS),
      valueOf(HBaseMeta.ZKPORT),
      valueOf(HBaseMeta.HBASETABLE),
      valueOf(HBaseMeta.FAMILY),
      valueOf(HBaseMeta.SELECTFIELDS),
      valueOf(HBaseMeta.ROWKEY)
    )
  }
}
