package cn.itcast.czxy

import java.util.Properties

import bean.HBaseMeta
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Spark job that matches a "job type" four-level tag:
 *   1. reads tag metadata from MySQL (tbl_basic_tag),
 *   2. parses the four-level tag's rule string into an [[HBaseMeta]] descriptor,
 *   3. reads the matching five-level tags (rule -> tag id),
 *   4. loads the user data from HBase through the custom data source.
 */
object JobTypeTags {

  def main(args: Array[String]): Unit = {
    // 1. Create a SparkSession used to read both the MySQL and HBase data.
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("jobTypeTags")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Connect to the MySQL tag database.
    //    Fixed: the Connector/J property is "useUnicode", not "userUnicode" —
    //    with the typo, unicode support was never actually enabled.
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties()
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions for Dataset encoders and the 'symbol column syntax.
    import spark.implicits._
    // Spark SQL built-in functions.
    import org.apache.spark.sql.functions._

    // The four-level tag id this job processes (was hard-coded inline twice).
    val fourTagId = 120

    // 3. Read the four-level tag from MySQL; its "rule" column tells us how to
    //    read HBase, e.g.:
    //    inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,gender
    val fourTags: Dataset[Row] = mysqlConn.select("id", "rule").where(s"id=$fourTagId")

    // The raw rule string is awkward to use directly; parse it into a Map:
    // split on "##" to get "key=value" pairs, then split each pair on the
    // FIRST "=" only, so a value that itself contains "=" is not truncated.
    val kvMap: Map[String, String] = fourTags.map(row => {
      row.getAs("rule").toString
        .split("##")
        .map(kv => {
          val arr: Array[String] = kv.split("=", 2)
          (arr(0), arr(1))
        })
    }).collect()
      .headOption // fail fast with a clear message instead of get(0)'s IndexOutOfBoundsException
      .getOrElse(throw new IllegalStateException(s"No four-level tag found for id=$fourTagId"))
      .toMap

    // Wrap the raw key/value map in the HBaseMeta case class for typed access.
    val hbaseMeta: HBaseMeta = toHbaseMeta(kvMap)

    // 4. Read the five-level tags (children of the four-level tag) used to
    //    match gender values, keyed as rule -> tag id.
    val fiveTags: Dataset[Row] = mysqlConn.select('id, 'rule).where(s"pid=$fourTagId")
    val fiveMap: Map[String, String] = fiveTags.map(row => {
      val id: String = row.getAs("id").toString
      val rule: String = row.getAs("rule").toString
      (rule, id)
    }).collect().toMap
    // NOTE(review): fiveMap is built but not used below — presumably the
    // tag-matching step is still to be implemented; confirm before removing.

    // 5. Read the HBase data described by the four-level tag. Reading HBase as
    //    a Spark data source is much faster than going through the HBase client.
    //    Fixed: use HBaseMeta.ZKHOSTS like the sibling options instead of the
    //    raw string "zkHosts".
    val hbaseDatas: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()
    hbaseDatas.show()

    // Release the SparkSession (was previously leaked).
    spark.stop()
  }

  /**
   * Converts the parsed rule key/value map into an [[HBaseMeta]] case class.
   * Any key missing from the map defaults to the empty string.
   *
   * @param KVMap rule fields parsed from the four-level tag, e.g. "zkHosts" -> "192.168.10.20"
   * @return an HBaseMeta describing how to read the source HBase table
   */
  def toHbaseMeta(KVMap: Map[String, String]): HBaseMeta = {
    HBaseMeta(
      KVMap.getOrElse(HBaseMeta.INTYPE, ""),
      KVMap.getOrElse(HBaseMeta.ZKHOSTS, ""),
      KVMap.getOrElse(HBaseMeta.ZKPORT, ""),
      KVMap.getOrElse(HBaseMeta.HBASETABLE, ""),
      KVMap.getOrElse(HBaseMeta.FAMILY, ""),
      KVMap.getOrElse(HBaseMeta.SELECTFIELDS, ""),
      KVMap.getOrElse(HBaseMeta.ROWKEY, "")
    )
  }

}
