package cn.itcast.czxy

import java.util.Properties

import bean.{HBaseMeta, TagRule}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object NationalityTag {
  def main(args: Array[String]): Unit = {
    // 1. Create a SparkSession, used to read both the MySQL tag metadata and the HBase user data.
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("nationalityTag").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Connect to the MySQL database that holds the tag definitions.
    //    FIX: "userUnicode" was a typo for the Connector/J property "useUnicode",
    //    so the UTF-8 options were silently ignored.
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties()
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions (Dataset encoders, the ' column syntax).
    import spark.implicits._
    // Spark SQL built-in functions (udf, when, ...).
    import org.apache.spark.sql.functions._

    // 3. Read the level-4 tag (id = 111) from MySQL. Its "rule" field describes how to
    //    read the source data from HBase, e.g.:
    //    inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,gender
    val fourTags: Dataset[Row] = mysqlConn.select("id", "rule").where("id=111")
    // The raw rule string is awkward to work with; parse it into a Map first.
    val kvMap: Map[String, String] = fourTags.map(row => {
      // Read the "rule" field of the row as a String.
      row.getAs("rule").toString
        // Split on "##" into "key=value" fragments:
        //   inType=HBase
        //   zkHosts=192.168.10.20
        //   zkPort=2181
        //   hbaseTable=tbl_users
        //   family=detail
        //   selectFields=id,gender
        .split("##")
        // Then split each fragment on "=" into a (key, value) pair.
        .map(kv => {
          val arr: Array[String] = kv.split("=")
          (arr(0), arr(1))
        })
    }).collectAsList().get(0).toMap
    //println(kvMap)

    // Wrap the parsed rule map in the HBaseMeta case class.
    val hbaseMeta: HBaseMeta = toHbaseMeta(kvMap)
    // println(hbaseMeta.selectFields)

    // 4. Read the level-5 tags (children of tag 111) from MySQL: rule value -> tag id.
    val fiveTags: Dataset[Row] = mysqlConn.select('id, 'rule).where("pid=111")
    val fiveMap: Map[String, String] = fiveTags.map(row => {
      // Each row is one tag; extract its id and rule.
      val id: String = row.getAs("id").toString
      val rule: String = row.getAs("rule").toString
      // Keyed by rule so the UDF below can look tags up by the HBase value.
      (rule, id)
    }).collect().toMap

    // 5. Read the HBase source data described by the level-4 rule.
    //    Using HBase as a Spark data source is much faster than the HBase client API.
    val hbaseDatas: DataFrame = spark.read.format("tools.HBaseDataSource")
      // FIX: use the HBaseMeta.ZKHOSTS constant, consistent with the other options
      // and with the history-tag read below (was the raw string "zkHosts").
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()
    // hbaseDatas.show(100)

    // 6. Tag matching: map each HBase nationality value (1, 2, 3, ...) to its
    //    level-5 tag id (112, 113, ...). "no" marks values with no matching tag.
    //    FIX: was `var GetTagId` — a local that is never reassigned should be a
    //    lowerCamelCase `val`.
    val getTagId = udf((hNationality: String) => {
      fiveMap.getOrElse(hNationality, "no")
    })
    val newNationalityTagsDF: DataFrame = hbaseDatas.select('id as ("userId"), getTagId('nationality).as("tagsId"))
    // newNationalityTagsDF.show()

    // 7. Re-running the job must not duplicate tags: merge the newly computed
    //    tags with the historical ones and de-duplicate.
    //    FIX: in the original, the `newTagId == "" && historyTagId == ""` branch was
    //    unreachable (the single-empty checks matched first). The merge is also made
    //    null-safe, which the full outer join below requires.
    val getAllTags = udf((historyTagId: String, newTagId: String) => {
      val merged = Option(historyTagId).getOrElse("") + "," + Option(newTagId).getOrElse("")
      // Split on ",", drop empties, de-duplicate, and join back into one string.
      merged.split(",").filter(_.nonEmpty).distinct.mkString(",")
    })

    // a) Read the already-computed historical tags from the "test" table.
    val historyTag: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()
    // historyTag.show(20)

    // b) Append the newly computed tags to the historical data.
    //    FIX: the original inner join dropped users present on only one side
    //    (e.g. brand-new users would never be written), which contradicts the
    //    when(isNotNull) coalescing below — use a full outer join instead.
    val joinTagsDF: DataFrame = historyTag.join(newNationalityTagsDF,
      historyTag("userId") === newNationalityTagsDF("userId"), "full")
    val updateTags: DataFrame = joinTagsDF.select(
      // Take the userId from whichever side is present.
      when(historyTag.col("userId").isNotNull, historyTag.col("userId"))
        .when(newNationalityTagsDF.col("userId").isNotNull, newNationalityTagsDF.col("userId"))
        .as("userId"),
      // Merge the two tagsId columns into one de-duplicated list.
      getAllTags(historyTag.col("tagsId"), newNationalityTagsDF.col("tagsId")).as("tagsId")
    )
    // updateTags.show()

    // 8. Write the final tags back to HBase.
    updateTags.write.format("tools.HBaseDataSource")
      // FIX: constant instead of the raw string "zkHosts" (consistency).
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()
  }

  /**
   * Builds an [[HBaseMeta]] from the parsed level-4 rule map.
   * Missing keys default to the empty string.
   *
   * @param KVMap key/value pairs parsed from the tag's rule string
   * @return the populated HBaseMeta case class
   */
  def toHbaseMeta(KVMap: Map[String, String]): HBaseMeta = {
    HBaseMeta(
      KVMap.getOrElse("inType", ""),
      KVMap.getOrElse("zkHosts", ""),
      KVMap.getOrElse("zkPort", ""),
      KVMap.getOrElse("hbaseTable", ""),
      KVMap.getOrElse("family", ""),
      KVMap.getOrElse("selectFields", ""),
      KVMap.getOrElse("rowKey", "")
    )
  }
}
