package cn.itcast.czxy

import java.util.Properties

import bean.HBaseMeta
import org.apache.spark.SparkContext
import org.apache.spark.sql.expressions.{UserDefinedFunction, Window}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object PayTypeTags {

  /**
   * Computes a "preferred payment type" tag per member and merges it into
   * the existing tag table in HBase.
   *
   * Pipeline:
   *  1. Read tag rules from MySQL (`tbl_basic_tag`).
   *  2. Parse the four-level tag (id = 97) into HBase connection metadata.
   *  3. Load the five-level tags (pid = 97): payment code -> tag id.
   *  4. Load order data from HBase and pick each member's most frequent
   *     payment code (top-1 per member).
   *  5. Translate the payment code into a tag id, merge with the tags
   *     already stored in HBase (deduplicated), and write the result back.
   */
  def main(args: Array[String]): Unit = {
    // 1. Local SparkSession; keep the console quiet.
    val spark: SparkSession = SparkSession.builder
      .appName("consumerTag")
      .master("local[*]")
      .getOrCreate
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Connect to the MySQL rule table.
    // NOTE(review): credentials are embedded in the URL — externalize them
    // before running outside a demo environment.
    val url = "jdbc:mysql://bd001:3306/tags_new?userUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC&user=root&password=123456"
    val table = "tbl_basic_tag"
    val properties = new Properties()
    val mysqlConn: DataFrame = spark.read.jdbc(url, table, properties)

    // Implicit conversions (the 'col symbol syntax) and SQL built-ins.
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 3. Four-level tag (id = 97): its `rule` column encodes the HBase
    //    connection info as "k1=v1##k2=v2##...". Parse it into a Map.
    val fourDS: Dataset[Row] = mysqlConn.select('rule).where("id=97")
    val fourMap: Map[String, String] = fourDS.map { row =>
      row.getAs("rule").toString
        .split("##")
        .map { kv =>
          val arr: Array[String] = kv.split("=")
          (arr(0), arr(1))
        }
    }.collect().head.toMap

    // Materialize the connection info as an HBaseMeta case class.
    // FIX: was `var HbaseMeta` — never reassigned, so `val`, and the
    // UpperCamelCase name shadowed the HBaseMeta companion object.
    val hbaseMeta: HBaseMeta = getHBaseMeta(fourMap)

    // 4. Five-level tags (pid = 97): rule (payment code) -> tag id.
    val fiveRow: Dataset[Row] = mysqlConn.select('id, 'rule).where("pid=97")
    val fiveMap: Map[String, String] = fiveRow.map { row =>
      val id: String = row.getAs("id").toString
      val rule: String = row.getAs("rule").toString
      (rule, id)
    }.collect().toMap

    // 5. Load the order data from HBase through the custom data source.
    val hbaseDatas: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()

    // Top-1 payment code per member: count usages per (member, code),
    // rank within each member's window, keep rank 1.
    val userPaymentMethod: Dataset[Row] = hbaseDatas
      .groupBy('memberId, 'paymentCode)
      .agg(count('paymentCode) as "counts")
      .withColumn("rk",
        row_number().over(Window.partitionBy('memberId).orderBy('counts.desc)))
      .where("rk=1")

    // Map a payment code to its five-level tag id, falling back to the
    // "others" tag when the code is unknown.
    // BUG FIX: `fiveMap.get` returns an Option, never `null`, so the
    // original `if (tagId == null)` fallback could never fire; use
    // orElse for the fallback and orNull to emit a plain String column.
    val getTags: UserDefinedFunction = udf((payType: String) => {
      fiveMap.get(payType).orElse(fiveMap.get("others")).orNull
    })
    val newPayTypeDF: DataFrame = userPaymentMethod
      .select('memberId as "userId", getTags('paymentCode) as "tagsId")

    // Merge freshly computed tags with previously stored ones, splitting on
    // "," and de-duplicating so repeated runs stay idempotent.
    // BUG FIX: an outer join yields NULL (not "") on the unmatched side;
    // the original only tested for "" and its both-empty branch was
    // unreachable after the two single-empty branches.
    val getAllTags = udf((historyTagId: String, newTagId: String) => {
      val history = Option(historyTagId).getOrElse("")
      val fresh = Option(newTagId).getOrElse("")
      if (history.isEmpty) fresh
      else if (fresh.isEmpty) history
      else (history.split(",") ++ fresh.split(",")).distinct.mkString(",")
    })

    // 6. Tag-overwrite handling: read the tag results already in HBase so
    //    this run appends to them instead of clobbering them.
    val historyTag: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()

    // BUG FIX: the original used an inner join, which silently dropped
    // users present on only one side (brand-new users, or users with no
    // fresh tag). A full outer join keeps both sides, which is what the
    // isNotNull coalescing below is written for.
    val joinTags: DataFrame = historyTag
      .join(newPayTypeDF, historyTag("userId") === newPayTypeDF("userId"), "full")
    val updateTags: DataFrame = joinTags.select(
      // Take userId from whichever side of the outer join is present.
      when(historyTag.col("userId").isNotNull, historyTag.col("userId"))
        .when(newPayTypeDF.col("userId").isNotNull, newPayTypeDF.col("userId"))
        .as("userId"),
      // Combine the two tag lists with the dedup UDF above.
      getAllTags(historyTag.col("tagsId"), newPayTypeDF.col("tagsId")).as("tagsId")
    )

    // 7. Write the merged tags back to HBase (overwriting the old rows).
    updateTags.write.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()
  }

  /**
   * Builds an [[HBaseMeta]] from the parsed four-level-tag rule map,
   * defaulting every missing key to the empty string.
   *
   * @param fourMap key/value pairs parsed from the tag's rule string
   * @return populated HBase connection metadata
   */
  def getHBaseMeta(fourMap: Map[String, String]): HBaseMeta = {
    HBaseMeta(
      fourMap.getOrElse(HBaseMeta.INTYPE, ""),
      fourMap.getOrElse(HBaseMeta.ZKHOSTS, ""),
      fourMap.getOrElse(HBaseMeta.ZKPORT, ""),
      fourMap.getOrElse(HBaseMeta.HBASETABLE, ""),
      fourMap.getOrElse(HBaseMeta.FAMILY, ""),
      fourMap.getOrElse(HBaseMeta.SELECTFIELDS, ""),
      fourMap.getOrElse(HBaseMeta.ROWKEY, "")
    )
  }
}
