package test

import java.util.Properties

import bean.{HbaseCase, TagRule}
import org.apache.spark.SparkContext
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Gender-tag matching job.
 *
 * Reads the level-4 tag rule (id=63) from MySQL to learn how to reach HBase,
 * reads the level-5 tag rules (pid=63) to learn which gender value maps to
 * which tag id, loads user rows from HBase, matches each user's gender to a
 * tag id via a UDF, and writes (userId, tagsId) back to HBase.
 */
object sexTag2 {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession used to read from MySQL and HBase.
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("sexTag2").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Connect to MySQL.
    // Fixed "userUnicode" -> "useUnicode": the Connector/J property is
    // "useUnicode"; with the typo, characterEncoding=UTF-8 was ineffective.
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties()
    val conn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions: Dataset encoders, Java<->Scala collections, SQL functions.
    import spark.implicits._
    import scala.collection.JavaConverters._
    import org.apache.spark.sql.functions._

    // 3. Read the level-4 tag rule (id=63); it describes how to read HBase.
    // Rule format (## separates key=value pairs), e.g.:
    //   inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,gender
    val fourData: Dataset[Row] = conn.select('id, 'rule).where("id=63")
    val kvMap: Map[String, String] = fourData.map(row => {
      row.getAs("rule").toString
        .split("##")
        .map(line => {
          val kv: Array[String] = line.split("=")
          (kv(0), kv(1))
        })
    }).collect()    // collect() yields a Scala Array; avoids the Java-list round trip
      .headOption   // fail with a clear message instead of IndexOutOfBoundsException
      .getOrElse(throw new IllegalStateException("no level-4 tag rule found for id=63"))
      .toMap

    // Wrap the parsed rule key/values in an HbaseCase.
    val hbaseCase: HbaseCase = toHbaseCase(kvMap)

    // 4. Read the level-5 tag rules (pid=63): the gender values to match against.
    val fiveData: Dataset[Row] = conn.select('id, 'rule).where("pid=63")
    val fiveList: List[TagRule] = fiveData.map(row => {
      val id: Int = row.getAs("id").toString.toInt
      val rule: String = row.getAs("rule").toString
      // Wrap in the TagRule case class.
      TagRule(id, rule)
    }).collectAsList() // collectAsList returns a java.util.List; asScala converts it
      .asScala.toList

    // 5. Read user data from HBase according to the level-4 rule.
    val HBaseData: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HbaseCase.INTYPE, hbaseCase.inType)
      .option(HbaseCase.ZKHOSTS, hbaseCase.zkHosts)
      .option(HbaseCase.ZKPORT, hbaseCase.zkPort)
      .option(HbaseCase.HBASETABLE, hbaseCase.hbaseTable)
      .option(HbaseCase.FAMILY, hbaseCase.family)
      .option(HbaseCase.SELECTFIELDS, hbaseCase.selectFields)
      .load()

    // 6. Match the gender value against the level-5 rules (e.g. "1"->64, "2"->65).
    // find + getOrElse replaces the original var/for-loop accumulator; 0 = no match.
    val getGenderTag: UserDefinedFunction = udf((hGender: String) =>
      fiveList.find(_.rule == hGender).map(_.id).getOrElse(0)
    )
    val genderResultTags: DataFrame = HBaseData.select('id.as("userId"), getGenderTag('gender).as("tagsId"))

    // 7. Write the matched (userId, tagsId) pairs back to HBase.
    genderResultTags.write.format("tools.HBaseDataSource")
      .option(HbaseCase.INTYPE, hbaseCase.inType)
      .option(HbaseCase.ZKHOSTS, hbaseCase.zkHosts)
      .option(HbaseCase.ZKPORT, hbaseCase.zkPort)
      .option(HbaseCase.HBASETABLE, "mytest")
      .option(HbaseCase.FAMILY, "info")
      .option(HbaseCase.SELECTFIELDS, "userId,tagsId")
      .save()

    // Release Spark resources; the original job never stopped the session.
    spark.stop()
  }

  /**
   * Builds an [[HbaseCase]] from the parsed level-4 rule key/value map.
   * Missing keys default to the empty string.
   *
   * @param kvMap key/value pairs parsed from the rule string
   * @return the populated HbaseCase
   */
  def toHbaseCase(kvMap: Map[String, String]): HbaseCase = {
    HbaseCase(
      kvMap.getOrElse("inType", ""),
      kvMap.getOrElse("zkHosts", ""),
      kvMap.getOrElse("zkPort", ""),
      kvMap.getOrElse("hbaseTable", ""),
      kvMap.getOrElse("family", ""),
      kvMap.getOrElse("selectFields", "")
    )
  }
}
