package cn.itcast.czxy.BD18

import java.util.Properties

import cn.itcast.czxy.BD18.bean.{TagesRule, Tagsfour}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/*
用于开发黑名单的标签 梁耀华
 */
/**
 * Batch job that computes the "blacklist" user tag.
 *
 * Flow:
 *   1. Read tag metadata (4th- and 5th-level rules) from MySQL `tbl_basic_tag`.
 *   2. Read user data from HBase via the project's custom data source.
 *   3. Match each user's `blacklist` column against the 5th-level rules to
 *      produce a tag id per user.
 *   4. Merge the newly computed tag ids with the historical tags stored in
 *      the HBase `test` table, de-duplicate, and write the result back.
 */
object BlackList {

  /** Job entry point. Args are unused; all connection settings are inline. */
  def main(args: Array[String]): Unit = {
    // 1. SparkSession used to read both HBase and MySQL data.
    val spark: SparkSession = SparkSession.builder().appName("BlackList").master("local[*]").getOrCreate()

    // 2. Connect to the MySQL tag-metadata database.
    //    NOTE(review): credentials are hard-coded in the URL — consider
    //    externalizing them to configuration.
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions for Dataset encoders, Java collections, and SQL functions.
    import spark.implicits._
    import scala.collection.JavaConverters._
    import org.apache.spark.sql.functions._

    // Id of the 4th-level "blacklist" tag in tbl_basic_tag.
    val uid = "99"

    // 3. Read the 4th-level tag rule. Example rule value:
    //    inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,blacklist
    //    BUG FIX: the original referenced an undefined variable `ida`
    //    (compile error); the tag id variable is `uid`.
    val fourDS: Dataset[Row] = mysqlConn.select("rule").where(s"id=${uid}")

    // Split the rule string on "##", then each piece on "=", into a key/value map.
    val fourMap: Map[String, String] = fourDS.map(row => {
      row.getAs("rule").toString.split("##")
        .map(kv => {
          val arr: Array[String] = kv.split("=")
          (arr(0), arr(1))
        })
    }).collectAsList().get(0).toMap

    // Convert the rule map into the Tagsfour case class (HBase connection info).
    val tagsfour: Tagsfour = gettagsfour(fourMap)

    // 4. Read the 5th-level tag rules (children of the 4th-level tag):
    //    each row maps a rule value to the tag id to assign.
    val fiveDS: Dataset[Row] = mysqlConn.select("id", "rule").where(s"pid=${uid}")
    val blackRule: List[TagesRule] = fiveDS.map(row => {
      val id: Int = row.getAs("id").toString.toInt
      val rule: String = row.getAs("rule").toString
      TagesRule(id, rule)
    }).collectAsList().asScala.toList

    // 5. Read user data from HBase through the custom data source,
    //    using the connection info parsed from the 4th-level rule.
    val HBaseDatas: DataFrame = spark.read.format("cn.itcast.czxy.BD18.BlackList.tools.HBaseDataSource")
      .option(Tagsfour.ZKHOSTS, tagsfour.zkHosts)
      .option(Tagsfour.ZKPORT, tagsfour.zkPort)
      .option(Tagsfour.HBASETABLE, tagsfour.hbaseTable)
      .option(Tagsfour.FAMILY, tagsfour.family)
      .option(Tagsfour.SELECTFIELDS, tagsfour.selectFields)
      .load()

    // UDF: map a user's `blacklist` value to its 5th-level tag id.
    // Returns 0 when no rule matches (the original default).
    val getBlacks = udf((rule: String) => {
      blackRule.find(_.rule == rule).map(_.id).getOrElse(0)
    })

    // 6. Match the 5th-level rules against the HBase data to obtain the tag.
    //    Result schema: userId, blacklistId
    val JobNewBlacks: DataFrame = HBaseDatas.select('id.as("userId"), getBlacks('blacklist).as("blacklistId"))
    JobNewBlacks.show()

    // UDF: merge the historical blacklistId string with the newly computed one.
    // Both are comma-separated id lists; the merged result is de-duplicated.
    // BUG FIX: in the original, the `both empty` branch was unreachable because
    // the first `oldblacklistId == ""` check already captured it; the rewrite
    // below handles empty (and null) values first, which also covers the
    // both-empty case by returning "".
    val getAllBlacks = udf((oldblacklistId: String, newblacklistId: String) => {
      val oldIds = Option(oldblacklistId).getOrElse("")
      val newIds = Option(newblacklistId).getOrElse("")
      if (oldIds.isEmpty) {
        newIds
      } else if (newIds.isEmpty) {
        oldIds
      } else {
        // Concatenate history and new ids, then de-duplicate, e.g. "83,94".
        (oldIds + "," + newIds).split(",").distinct.mkString(",")
      }
    })

    // 7. Avoid overwriting history: read the existing tags from the `test`
    //    table, append the newly computed tags, then write everything back.
    val oldBlacks: DataFrame = spark.read.format("cn.itcast.czxy.BD18.BlackList.tools.HBaseDataSource")
      .option(Tagsfour.ZKHOSTS, tagsfour.zkHosts)
      .option(Tagsfour.ZKPORT, tagsfour.zkPort)
      .option(Tagsfour.HBASETABLE, "test")
      .option(Tagsfour.FAMILY, "detail")
      .option(Tagsfour.SELECTFIELDS, "userId,blacklistId")
      .load()

    // Join old and new tags on userId (inner join, as in the original).
    val joinTagas: DataFrame = oldBlacks.join(JobNewBlacks, oldBlacks("userId") === JobNewBlacks("userId"))

    val allTags: DataFrame = joinTagas.select(
      // Both sides carry a userId column; pick whichever is non-null.
      when(oldBlacks.col("userId").isNotNull, oldBlacks.col("userId"))
        .when(JobNewBlacks.col("userId").isNotNull, JobNewBlacks.col("userId"))
        .as("userId"),
      // Merge the two blacklistId columns with the de-duplicating UDF.
      getAllBlacks(oldBlacks.col("blacklistId"), JobNewBlacks.col("blacklistId")).as("blacklistId")
    )

    // 8. Write the merged tags back to HBase (overwriting the `test` table).
    //    CONSISTENCY FIX: the original used format "...BD18.Job.tools.HBaseDataSource"
    //    here while both reads used "...BD18.BlackList.tools.HBaseDataSource" —
    //    an apparent copy/paste leftover; unified to the read path's format.
    //    TODO confirm the intended data-source class against the project layout.
    allTags.write.format("cn.itcast.czxy.BD18.BlackList.tools.HBaseDataSource")
      .option(Tagsfour.ZKHOSTS, tagsfour.zkHosts)
      .option(Tagsfour.ZKPORT, tagsfour.zkPort)
      .option(Tagsfour.HBASETABLE, "test")
      .option(Tagsfour.FAMILY, "detail")
      .option(Tagsfour.SELECTFIELDS, "userId,blacklistId")
      .save()

    // Release Spark resources.
    spark.stop()
  }

  /**
   * Convert the parsed 4th-level rule map into a [[Tagsfour]] case class.
   * Missing keys default to the empty string.
   *
   * @param fourMap key/value pairs parsed from the 4th-level rule string
   * @return a populated Tagsfour instance
   */
  def gettagsfour(fourMap: Map[String, String]): Tagsfour = {
    Tagsfour(
      fourMap.getOrElse(Tagsfour.INTYPE, ""),
      fourMap.getOrElse(Tagsfour.ZKHOSTS, ""),
      fourMap.getOrElse(Tagsfour.ZKPORT, ""),
      fourMap.getOrElse(Tagsfour.HBASETABLE, ""),
      fourMap.getOrElse(Tagsfour.FAMILY, ""),
      fourMap.getOrElse(Tagsfour.SELECTFIELDS, ""),
      fourMap.getOrElse(Tagsfour.ROWKEY, "")
    )
  }

}