package cn.itcast.czxy

import java.util.Properties

import bean.HBaseMeta
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object ConstellationTest {

  /**
   * Constellation-tag batch job.
   *
   * Reads tag rules from MySQL (level-4 rule id=125, level-5 rules pid=125),
   * loads user birthdays from HBase, computes a constellation tag per user,
   * merges the result with the historical tags already stored in HBase,
   * and writes the merged tags back.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkSession (local mode) and quiet the logging.
    val spark: SparkSession = SparkSession.builder().appName("myXZTest").master("local[*]").getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Connect to MySQL; JDBC url/table come from the application config
    //    (resources/application.conf loaded by Typesafe Config).
    val config: Config = ConfigFactory.load()
    val url: String = config.getString("jdbc.mysql.url")
    val tableName: String = config.getString("jdbc.mysql.tableName")
    // NOTE(review): no user/password set here — presumably embedded in the
    // JDBC url; confirm against the config file.
    val properties = new Properties()
    val mysqlDF: DataFrame = spark.read.jdbc(url, tableName, properties)

    // Implicit conversions for Dataset/DataFrame operations ('col syntax, .toDF).
    import spark.implicits._

    // 3. Read the level-4 tag rule (id=125), split it into key=value pairs and
    //    collect it into a Map on the driver. Example rule text:
    //    inType=HBase##zkHosts=192.168.10.20##zkPort=2181##hbaseTable=tbl_users##family=detail##selectFields=id,birthday
    val fourRuleDS: Dataset[Row] = mysqlDF.select('rule).where("id=125")
    val fourRuleMap: Map[String, String] = fourRuleDS.map(row => {
      row.getAs("rule")
        .toString
        .split("##")
        // each piece looks like "inType=HBase" / "zkHosts=192.168.10.20"
        .map(pair => {
          val kv: Array[String] = pair.split("=")
          // return a (key, value) tuple
          (kv(0), kv(1))
        })
    }).collectAsList().get(0).toMap

    // Wrap the rule map in the HBaseMeta case class used by the HBase data source.
    val meta: HBaseMeta = HBaseMeta(
      fourRuleMap.getOrElse(HBaseMeta.INTYPE, ""),
      fourRuleMap.getOrElse(HBaseMeta.ZKHOSTS, ""),
      fourRuleMap.getOrElse(HBaseMeta.ZKPORT, ""),
      fourRuleMap.getOrElse(HBaseMeta.HBASETABLE, ""),
      fourRuleMap.getOrElse(HBaseMeta.FAMILY, ""),
      fourRuleMap.getOrElse(HBaseMeta.SELECTFIELDS, ""),
      fourRuleMap.getOrElse(HBaseMeta.ROWKEY, "")
    )

    // 4. Read the level-5 rules (children of tag 125). Each rule is a date
    //    range "MM-dd#MM-dd" giving the start/end of one constellation.
    val fiveRuleDS: Dataset[Row] = mysqlDF.select('id, 'rule).where("pid=125")
    val fiveDF: DataFrame = fiveRuleDS.map(row => {
      val id: String = row.getAs("id").toString
      val rule: String = row.getAs("rule").toString
      val bounds: Array[String] = rule.split("#")
      // return (tagId, rangeStart, rangeEnd)
      (id, bounds(0), bounds(1))
    }).toDF("id", "start", "end")

    // fiveDF.show()
    //+---+-----+-----+
    //| id|start|  end|
    //+---+-----+-----+
    //|126|01-20|02-18|
    //|127|02-19|03-20|
    //|128|03-21|04-19|
    //|129|04-20|05-20|
    //|130|05-21|06-21|

    // 5. Load the source data from HBase as described by the level-4 rule.
    val hbaseDF: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, meta.zkHosts)
      .option(HBaseMeta.ZKPORT, meta.zkPort)
      .option(HBaseMeta.HBASETABLE, meta.hbaseTable)
      .option(HBaseMeta.FAMILY, meta.family)
      .option(HBaseMeta.SELECTFIELDS, meta.selectFields)
      .load()

    // hbaseDF.show()
    //+---+----------+
    //| id|  birthday|
    //+---+----------+
    //|  1|1992-05-31|
    //| 10|1980-10-13|
    //|100|1993-10-28|
    //|101|1996-08-18|
    //|102|1996-07-28|
    //|103|1987-05-13|
    //|104|1976-05-08|
    //|105|1983-10-11|

    // 6. Join birthdays against the constellation date ranges. Both sides are
    //    normalized to a fixed year ("2020-MM-dd") so a plain string BETWEEN
    //    works; users matching no range get the fallback tag '137'.
    // NOTE(review): Capricorn wraps the year boundary (12-22..01-19), so those
    // users fall into the '137' fallback here — confirm that is intended.
    fiveDF.createOrReplaceTempView("fiveDF")
    hbaseDF.createOrReplaceTempView("HbaseDF")
    val newTagsDF: DataFrame = spark.sql(
      """
        |SELECT tmp_h.id as userId,
        |CASE WHEN tmp_f.id IS NULL THEN '137' ELSE tmp_f.id END as tagsId
        |FROM
        |(SELECT id,CONCAT('2020-',SUBSTRING(birthday,6)) as birthday FROM HbaseDF)tmp_h
        |LEFT JOIN
        |(SELECT id,CONCAT('2020-',start) as start,CONCAT('2020-',end) as end FROM fiveDF)tmp_f
        |ON tmp_h.birthday BETWEEN tmp_f.start AND tmp_f.end
        |""".stripMargin)

    // 7. Merge the freshly computed tags with the history stored in HBase.
    // 7.1 Read the historical tag data.
    val historyDF: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, meta.zkHosts)
      .option(HBaseMeta.ZKPORT, meta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()

    // 7.2 UDF that merges the historical and new tag strings without
    //     duplicates.
    //     Fixes two defects in the previous version:
    //       - null handling: n.tagsId is null for users missing from the
    //         LEFT JOIN below; the old code concatenated the literal "null"
    //         into the stored tags. Nulls are now treated as empty.
    //       - the old branch (hTag=="" && fTag=="") was unreachable because
    //         hTag=="" was always matched first; removed.
    spark.udf.register("updateTags", (hTag: String, fTag: String) => {
      val h = Option(hTag).getOrElse("")
      val f = Option(fTag).getOrElse("")
      if (h.isEmpty) f
      else if (f.isEmpty) h
      else (h.split(",") ++ f.split(",")).distinct.mkString(",")
    })

    // Register temp views for the merge query.
    newTagsDF.createOrReplaceTempView("newTagsDF")
    historyDF.createOrReplaceTempView("historyDF")
    val updateTagsDF: DataFrame = spark.sql(
      """
        |SELECT h.userId,
        |       updateTags(h.tagsId,n.tagsId) as tagsId
        |FROM historyDF h
        |LEFT JOIN newTagsDF n
        |ON h.userId=n.userId
        |""".stripMargin)

    // 7.3 Write the merged tags back to HBase.
    updateTagsDF.write.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, meta.zkHosts)
      .option(HBaseMeta.ZKPORT, meta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()

    // Release cluster resources now that the job is done.
    spark.stop()
  }
}
