package cn.itcast.czxy

import java.util.Properties

import cn.itcast.czxy.BD18.bean.{HBaseMeta, TagRule}
import cn.itcast.czxy.BD18.tools.{Common, HBaseDataSource}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * @author 红尘丶世界
 * @version v 1.0
 */
/*
 Builds the "job" (occupation) tag for users.
 */
object JobTag {

  /**
   * Entry point: computes the "job" (occupation) tag for every user by
   * matching five-level tag rules stored in MySQL against user data stored
   * in HBase, merges the result with previously computed tags, and
   * (when enabled) writes the merged tags back to HBase.
   */
  def main(args: Array[String]): Unit = {
    // Utility helper (rule splitting, tag merging).
    val common: Common = new Common

    // 1. Create a SparkSession used to read both the HBase and MySQL data.
    val spark: SparkSession = SparkSession.builder().appName("JobTag").master("local[*]").getOrCreate()

    // 2. Connect to the MySQL database holding the tag definitions.
    // NOTE(review): credentials are hard-coded in the JDBC URL; move them to
    // external configuration / a secrets store before deploying.
    val url: String = "jdbc:mysql://bd001:3306/tags_new2?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table: String = "tbl_basic_tag"
    val properties: Properties = new Properties
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicits: column syntax ('col), udf(), and Dataset encoders.
    import org.apache.spark.sql.functions._
    import spark.implicits._


    // 3. Read the four-level tag row (id = 40). Its "rule" column encodes the
    // HBase source, e.g.:
    // inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,job
    val fourDS: Dataset[Row] = mysqlConn.select("rule").where("id=40")

    // Split the four-level rule string into key/value pairs.
    val fourMap: Map[String, String] = common.ruleSplit(fourDS, spark)

    // Convert the map into an HBaseMeta case class.
    val hbaseMeta: HBaseMeta = (new HBaseDataSource).parseMeta(fourMap)

    // 4. Read the five-level tag rows (children of tag 40, i.e. pid = 40) and
    // wrap each one in a TagRule case class. collect() already yields a Scala
    // Array, so no Java-collection round trip (collectAsList + asScala) is needed.
    val fiveDS: Dataset[Row] = mysqlConn.select("id", "rule").where("pid=40")
    val fiveTagRule: List[TagRule] = fiveDS.map(row => {
      // Extract the raw column values.
      val id: Int = row.getAs("id").toString.toInt
      val rule: String = row.getAs("rule").toString
      // Wrap them in the TagRule case class.
      TagRule(id, rule)
    }).collect().toList

    // 5. Load the user data from HBase via the custom data source.
    val HBaseDatas: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()

    // UDF mapping a user's "job" value to the matching five-level tag id.
    // Returns 0 when no rule matches; if several rules match, the last one
    // wins (same behaviour as the original mutation loop).
    val getTags: UserDefinedFunction = udf((rule: String) => {
      fiveTagRule.filter(_.rule == rule).lastOption.map(_.id).getOrElse(0)
    })

    // 6. Match the HBase data against the five-level rules to produce new tags.
    val JobNewTags: DataFrame = HBaseDatas.select('id.as("userId"), getTags('job).as("tagsId"))
    JobNewTags.show()

    // 7. Load previously computed tags from the "test" HBase table so they can
    // be merged with the newly computed ones.
    val oldTags: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()

    // Merge the historical tags with the newly computed ones.
    val allTags: DataFrame = common.allTags(oldTags, JobNewTags)
    allTags.show(5)

//    // 8. Write the merged tags back to HBase (currently disabled).
//    allTags.write.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
//      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
//      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
//      .option(HBaseMeta.HBASETABLE, "test")
//      .option(HBaseMeta.FAMILY, "detail")
//      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
//      .save()
  }
}
