package cn.itcast.czxy

import java.util.Properties

import cn.itcast.czxy.BD18.bean.{HBaseMeta, TagRule}
import cn.itcast.czxy.BD18.tools.{Common, HBaseDataSource}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Blacklist tag: matches users against blacklist rules stored in MySQL
 * and writes the resulting tag ids per user.
 */
object BlackListTag {

  /**
   * Entry point.
   *
   * Pipeline:
   *   1. Read the four-level tag rule (id = 37) from MySQL and parse it
   *      into HBase connection metadata.
   *   2. Read the five-level tags (pid = 37) — one [[TagRule]] per
   *      blacklist value.
   *   3. Load user data from HBase via the custom data source.
   *   4. Map each user's `is_blackList` flag to a tag id with a UDF.
   *   5. Merge the new tags with the historical tags from the `test`
   *      table (write-back currently disabled).
   */
  def main(args: Array[String]): Unit = {
    // Utility helper: rule parsing and tag merging.
    val common: Common = new Common

    // 1. SparkSession used for both the MySQL and the HBase reads.
    //    (appName typo fixed: was "BLackListTag".)
    val spark: SparkSession = SparkSession.builder()
      .appName("BlackListTag")
      .master("local[*]")
      .getOrCreate()

    // Implicit conversions: `udf`, Symbol column syntax and Dataset encoders.
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // 2. MySQL connection settings.
    // NOTE(review): credentials are hard-coded; they were moved out of the
    // URL into Properties, but should come from external configuration
    // before deploying.
    val url: String =
      "jdbc:mysql://bd001:3306/tags_new2?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC"
    val table: String = "tbl_basic_tag"
    val properties: Properties = new Properties
    properties.setProperty("user", "root")
    properties.setProperty("password", "123456")
    val mysqlConnection: DataFrame = spark.read.jdbc(url, table, properties)

    // 3. Four-level tag (id = 37): its `rule` column describes the HBase
    //    source (zk hosts, table, family, fields).
    val fourTags: Dataset[Row] = mysqlConnection.select("id", "rule").where("id = 37")
    val kvMap: Map[String, String] = common.ruleSplit(fourTags, spark)
    val hBaseMeta: HBaseMeta = (new HBaseDataSource).parseMeta(kvMap)
    println(hBaseMeta.toString)

    // 4. Five-level tags (children of 37): each row becomes a TagRule
    //    mapping a rule value ("1"/"0") to a tag id.
    val fiveTags: Dataset[Row] = mysqlConnection.select("id", "rule").where("pid = 37")
    val fiveTagsList: List[TagRule] = fiveTags.map { row =>
      TagRule(
        row.getAs[Any]("id").toString.toInt,
        row.getAs[Any]("rule").toString
      )
    }.collect().toList // collect() returns a Scala Array directly; no Java conversion needed

    // 5. Load the user data from HBase, driven by the four-level tag metadata.
    val hBaseData: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hBaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hBaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hBaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hBaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hBaseMeta.selectFields)
      .load()

    // 6. UDF mapping the "is_blackList" value ("true"/"false") to a tag id
    //    (e.g. 38 / 39). Rule "1" means blacklisted and matches "true";
    //    any other rule value matches "false". The fold preserves the
    //    original semantics exactly: last matching rule wins, 0 when none match.
    val getTagId: UserDefinedFunction = udf((isBlackList: String) => {
      fiveTagsList.foldLeft(0) { (acc, tagRule) =>
        val expected = if (tagRule.rule == "1") "true" else "false"
        if (isBlackList == expected) tagRule.id else acc
      }
    })

    // New tag result: one (userId, tagsId) row per user.
    val newTags: DataFrame = hBaseData.select('id.as("userId"), getTagId('is_blackList).as("tagsId"))

    // 7. Avoid clobbering previously-computed tags:
    //    7.1 read the historical tags from the `test` table.
    // NOTE(review): zk host/port are hard-coded here but available from
    // hBaseMeta above — consider unifying.
    val oldTags: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, "192.168.10.20")
      .option(HBaseMeta.ZKPORT, "2181")
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()
    oldTags.show(5)

    //    7.2 merge old and new tags so existing tag ids are appended, not replaced.
    val allTags: DataFrame = common.allTags(oldTags, newTags)
    allTags.show(5)

    // Write-back is currently disabled; uncomment to persist the merged tags.
    // (Fixed: the commented code referenced `allTagsDF`, which does not exist —
    // the merged DataFrame is named `allTags`.)
//    allTags.write.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
//      .option(HBaseMeta.ZKHOSTS, hBaseMeta.zkHosts)
//      .option(HBaseMeta.ZKPORT, hBaseMeta.zkPort)
//      .option(HBaseMeta.HBASETABLE, "test")
//      .option(HBaseMeta.FAMILY, "detail")
//      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
//      .save()

    // 8. Release the session.
    spark.close()
  }

}
