package cn.itcast.czxy

import java.util.Properties

import cn.itcast.czxy.BD18.bean.HBaseMeta
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/*
 * Political-affiliation ("political face") tag computation job.
 * Teacher (Han) reference version.
 */
/**
 * Computes the political-affiliation ("political face") tag for every user.
 *
 * Pipeline:
 *   1. read the level-4 tag rule (HBase connection metadata) from MySQL;
 *   2. read the level-5 tag rules (rule value -> tag id) from MySQL;
 *   3. load user rows from HBase, map each user's politicalFace value to a tag id;
 *   4. merge the new tag ids into the historical tag table and write back to HBase.
 */
object PoliticalFaceTagTeacher {

  def main(args: Array[String]): Unit = {

    // 1. SparkSession used for both the JDBC (MySQL) and HBase reads.
    val spark: SparkSession = SparkSession.builder()
      .appName("PoliticalFaceTag")
      .master("local[*]")
      .getOrCreate()

    // 2. Connect to the MySQL tag-metadata database.
    // NOTE(review): credentials are hard-coded in the URL — move them to
    // configuration / a secrets store before this goes anywhere near production.
    val url = "jdbc:mysql://bd001:3306/tags_new?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions / SQL functions (udf, when, column syntax).
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // 3. Read the level-4 tag rule. Example value:
    //    inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,job
    val fourDS: Dataset[Row] = mysqlConn.select("rule").where("id=101")

    // Split on "##" to get "key=value" pairs, then on "=" to build a Map.
    // NOTE(review): assumes exactly one row matches id=101 and every pair
    // contains a single '=' — a malformed rule will throw here.
    val fourMap: Map[String, String] = fourDS.map(row => {
      row.getAs("rule").toString.split("##")
        .map(kv => {
          val arr: Array[String] = kv.split("=")
          (arr(0), arr(1))
        })
    }).collectAsList().get(0).toMap

    // Convert the raw rule map into the HBaseMeta case class.
    val hbaseMeta: HBaseMeta = getHBaseMeta(fourMap)

    // 4. Read the level-5 tag rules: children of tag 101, each mapping a
    //    rule value (e.g. "1", "2", "3") to its tag id (e.g. 102, 103, 104).
    val fiveDS: Dataset[Row] = mysqlConn.select("id", "rule").where("pid=101")

    val fiveTagMap: Map[String, Int] = fiveDS.map(row => {
      val id: Int = row.getAs("id").toString.toInt
      val rule: String = row.getAs("rule").toString
      (rule, id)
    }).collect().toMap

    // 5. Load the user data from HBase through the custom data source.
    val hbaseDatas: DataFrame = spark.read.format("cn.itcast.czxy.BD18.Job.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()

    // UDF: map a raw politicalFace value to its five-level tag id.
    // Returning Option[Int] makes Spark emit null when the value is unknown.
    val getTags = udf((rule: String) => fiveTagMap.get(rule))

    // 6. Match the HBase data against the five-level rules to produce the tag.
    val politicalFaceTags: DataFrame = hbaseDatas
      .select('id.as("userId"), getTags('politicalFace).as("tagsId"))
    politicalFaceTags.show()

    // UDF: merge the historical tag-id list with the newly computed one,
    // de-duplicating the comma-separated result.
    // Fix vs. previous version: the old third branch
    // (oldTagsId=="" && newTagsId=="") was unreachable because the first
    // branch already matched; nulls from the join are now also tolerated.
    val getAllTags = udf((oldTagsId: String, newTagsId: String) => {
      val oldEmpty = oldTagsId == null || oldTagsId.isEmpty
      val newEmpty = newTagsId == null || newTagsId.isEmpty
      if (oldEmpty && newEmpty) ""
      else if (oldEmpty) newTagsId
      else if (newEmpty) oldTagsId
      else {
        // Concatenate history + new (may contain duplicates, e.g. "83,94,94"),
        // then de-duplicate and re-join: "83,94".
        (oldTagsId + "," + newTagsId).split(",").distinct.mkString(",")
      }
    })

    // 7. Avoid clobbering previously computed tags: read the existing "test"
    //    table, append the new tag ids to the historical ones, write back.
    // A) historical tags already stored in HBase.
    val oldTags: DataFrame = spark.read.format("cn.itcast.czxy.BD18.Job.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()

    // B) join history with the new tags on userId.
    // NOTE(review): this inner join drops users present in only one side;
    // if new users must be kept, a full outer join is probably intended.
    val joinTags: DataFrame = oldTags
      .join(politicalFaceTags, oldTags("userId") === politicalFaceTags("userId"))

    val allTags: DataFrame = joinTags.select(
      // Both sides carry a userId column; keep whichever is non-null.
      when(oldTags.col("userId").isNotNull, oldTags.col("userId"))
        .when(politicalFaceTags.col("userId").isNotNull, politicalFaceTags.col("userId"))
        .as("userId"),
      // Merge the two tag-id lists with the UDF above.
      getAllTags(oldTags.col("tagsId"), politicalFaceTags.col("tagsId")).as("tagsId")
    )

    // C) 8. Overwrite the merged tags back into HBase.
    allTags.write.format("cn.itcast.czxy.BD18.Job.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()

    // Release the Spark session (previously leaked).
    spark.stop()
  }

  /**
   * Builds an [[HBaseMeta]] from the parsed level-4 rule map.
   * Missing keys default to the empty string.
   *
   * @param fourMap key/value pairs parsed from the "rule" column
   * @return populated HBaseMeta describing the HBase source
   */
  def getHBaseMeta(fourMap: Map[String, String]): HBaseMeta = {
    HBaseMeta(
      fourMap.getOrElse(HBaseMeta.INTYPE, ""),
      fourMap.getOrElse(HBaseMeta.ZKHOSTS, ""),
      fourMap.getOrElse(HBaseMeta.ZKPORT, ""),
      fourMap.getOrElse(HBaseMeta.HBASETABLE, ""),
      fourMap.getOrElse(HBaseMeta.FAMILY, ""),
      fourMap.getOrElse(HBaseMeta.SELECTFIELDS, ""),
      fourMap.getOrElse(HBaseMeta.ROWKEY, "")
    )
  }

}
