package cn.itcast.czxy

import java.util.Properties

import cn.itcast.czxy.BD18.bean.{HBaseMeta, TagRule}
import cn.itcast.czxy.BD18.tools.{Common, HBaseDataSource}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Marital-status tag computation.
 *
 * Reads the level-4 tag rule (HBase connection metadata, id = 47) and the
 * level-5 tag rules (marriage code -> tag id, pid = 47) from MySQL, matches
 * them against user records read from HBase, merges the result with the
 * user's existing tags and writes the combined tags back to HBase.
 *
 * 黄东虎 & 邓爱 & 彭安
 */
object MarriageTag {
  def main(args: Array[String]): Unit = {
    // Shared utility helper (rule parsing, tag merging).
    val common: Common = new Common

    // Initialise the SparkSession (local mode for development).
    val spark: SparkSession = SparkSession.builder().appName("MarriageTag").master("local[*]").getOrCreate()

    // Connect to the MySQL database that stores the tag definitions.
    // SECURITY NOTE(review): credentials are embedded in the JDBC URL —
    // move them to external configuration before deploying.
    val url: String = "jdbc:mysql://bd001:3306/tags_new2?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table: String = "tbl_basic_tag" // MySQL table holding the tag metadata
    val properties: Properties = new Properties
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions for Dataset operations ('col syntax, Encoders).
    import spark.implicits._
    // Java <-> Scala collection converters (needed for collectAsList below).
    import scala.collection.JavaConverters._
    // Spark SQL built-in functions (udf, ...).
    import org.apache.spark.sql.functions._

    // Read the level-4 tag (id = 47) from MySQL. Its rule string describes
    // how to reach the HBase source, e.g.:
    // inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,marriage
    val fourTagsDS: Dataset[Row] = mysqlConn.select("id", "rule").where("id=47")

    // Split the rule string into a key/value map, then wrap it in the
    // HBaseMeta case class for convenient field access.
    val KVMap: Map[String, String] = common.ruleSplit(fourTagsDS, spark)
    println(KVMap)
    val hbaseMeta: HBaseMeta = (new HBaseDataSource).parseMeta(KVMap)

    println(hbaseMeta.hbaseTable + "    " + hbaseMeta.family + "    " + hbaseMeta.selectFields)

    // Read the level-5 tags (children of tag 47): each row maps a marriage
    // code (rule) to its tag id.
    val fiveTags: Dataset[Row] = mysqlConn.select("id", "rule").where("pid = 47")
    val fiveTagsList: List[TagRule] = fiveTags.map(row => {
      // Wrap each row in the TagRule case class so id/rule are typed.
      TagRule(
        row.getAs("id").toString.toInt,
        row.getAs("rule").toString
      )
    }).collectAsList() // Dataset -> java.util.List[TagRule] (untyped access otherwise)
      .asScala.toList // java.util.List -> scala.List, via JavaConverters

    // Read the user data from HBase using the parsed level-4 metadata.
    // The custom data source is used because it is faster than going through
    // the HBase client API row by row.
    val hbaseDatas: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()

    hbaseDatas.show(5)

    // Tag matching: combine the level-5 rules with the HBase user data to
    // produce the final tag per user.

    // Pre-compute the rule -> tag-id lookup once on the driver; the map is
    // captured by the UDF closure, so each row does an O(1) lookup instead
    // of scanning the rule list. With duplicate rules, toMap keeps the last
    // entry — identical to the original "last match wins" loop.
    val ruleToTagId: Map[String, Int] = fiveTagsList.map(tagRule => tagRule.rule -> tagRule.id).toMap

    // UDF mapping a user's marriage code (e.g. "1", "2") to its level-5 tag
    // id; 0 is the default when no rule matches.
    val marriageTagID: UserDefinedFunction = udf((marriage: String) => ruleToTagId.getOrElse(marriage, 0))

    // Apply the UDF to produce (userId, tagsId) pairs.
    val newTags: DataFrame = hbaseDatas.select('id as ("userId"), marriageTagID('marriage).as("tagsId"))
    newTags.show(5)

    // Read the previously stored tags from the HBase "test" table.
    val oldTags: DataFrame = spark.read.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()

    // Merge old and new tags via the shared helper.
    val allTags: DataFrame = common.allTags(oldTags, newTags)
    allTags.show(5)

    // Write the merged tags back to HBase.
    allTags.write.format("cn.itcast.czxy.BD18.tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()

    // Release the SparkSession (the original job never stopped it).
    spark.stop()
  }
}
