package com.sunzm.spark.sql.hive.report.dws

import com.google.common.io.{ByteArrayDataInput, ByteArrayDataOutput, ByteStreams}
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders, SparkSession}
import org.roaringbitmap.RoaringBitmap

import scala.collection.mutable

/**
 * DWS-layer (data warehouse summary) aggregation job: computes pv/uv and
 * visit-duration metrics from the detail-level traffic data.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-25 16:34
 */
object DWSAppTFTBaseDay {
  /**
   * Entry point: builds a local SparkSession with Hive support and runs the
   * pv/uv/duration aggregation for the DWS layer.
   */
  def main(args: Array[String]): Unit = {
    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      // NOTE(review): hard-coded local master — should be dropped/overridden for cluster submission
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // If the cluster ships the Hive config files, none of the settings below are needed
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support the metadata lives only in memory and is lost when the program exits
      .enableHiveSupport()
      // Enable dynamic partitioning
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // NOTE(review): metastore JDBC credentials are committed in plaintext — move them to a
      // config file / secret store and out of source control
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    // One-off: create the target table
    //createTable(spark)

    // Compute start page, end page, visit start/end time and pages viewed per session
    //pageCalFunc(spark)

    // Compute uv, pv and visit duration
    pvUVAndDUrationCalFunc(spark)

    spark.stop()
  }

  /**
   * Computes uv (carried as a serialized RoaringBitmap), pv and visit duration
   * per (userId, province, city) group, then folds the per-group bitmaps into
   * an overall distinct-user count with the custom [[BitMapAggrCardinality]]
   * aggregator. Requires Spark 3.x for `udaf(...)` registration.
   */
  def pvUVAndDUrationCalFunc(spark: SparkSession) = {
    import spark.implicits._
    import org.apache.spark.sql.functions._
    // UDF that takes the array of integer user ids collected for one group.
    // A numeric array is required here, so it is best to add a numeric user-id column
    // already in the DWD layer; otherwise a mapping table from each user to a unique
    // numeric id must be maintained and joined against.
    // For simplicity the sample data uses plain numbers as user ids.
    val arrayToBitMap: mutable.WrappedArray[Int] => Array[Byte] = (userIdArray: mutable.WrappedArray[Int]) => {
      val bitmap: RoaringBitmap = RoaringBitmap.bitmapOf(userIdArray: _*)
      // SQL has no RoaringBitmap type, so serialize it into a SQL-supported binary value
      /*val bos: ByteArrayOutputStream = new ByteArrayOutputStream
      val dos: DataOutputStream = new DataOutputStream(bos)*/
      val bds: ByteArrayDataOutput = ByteStreams.newDataOutput()
      bitmap.serialize(bds)
      bds.toByteArray
    }

    // Register the lambda as a Spark SQL function
    spark.udf.register("arrayToBitMap", arrayToBitMap)

    // Test data
    val seq: Seq[(Int, String, String, Int, Int, Int)] = Seq(
      // user id, province, city, visit start ts, visit end ts, pages viewed
      (1, "gd", "gz", 10, 12, 5),
      (2, "gd", "gz", 12, 16, 6),
      (1, "gd", "sz", 15, 20, 8)
    )

    spark.createDataset(seq)
      .toDF("userId", "province", "city", "start_ts", "end_ts", "pv_cnt")
      .createOrReplaceTempView("v_data")

    // Compute uv, pv and visit duration.
    // Note: uv is not produced directly — the deduplicated user ids are stored as a bitmap
    // so that coarse-grained aggregates can later be built from this fine-grained table.
    // COLLECT_SET deduplicates each group's values into a set/array (COLLECT_LIST keeps duplicates).
    // The returned uv_bitmap column is of type binary; a table persisting it needs a binary column.
    val resultDF: DataFrame = spark.sql(
      """
        |SELECT
        | userId,
        | province,
        | city,
        | arrayToBitMap(COLLECT_SET(userId)) AS uv_bitmap,
        | SUM(pv_cnt) AS pv_cnt,
        | SUM(end_ts - start_ts) AS duration
        |FROM v_data
        | GROUP BY
        |  userId,
        |  province,
        |  city
        |""".stripMargin)

    // Print the schema
    //resultDF.printSchema()

    //resultDF.show(10, false)

    // Aggregate the overall UV from the per-group result above
    resultDF.createOrReplaceTempView("v_tfc_dws")

    // The udaf(...) registration below is only supported on Spark 3.x
    spark.udf.register("bitMapAggrCardinality", udaf(BitMapAggrCardinality))

    //BitMapAggrCardinality.toColumn.name("bitMapAggrCardinality")
    spark.sql(
      """
        |SELECT bitMapAggrCardinality(uv_bitmap)
        | FROM v_tfc_dws
        |""".stripMargin)
      .show(10, false)

  }

  /**
   * Aggregator that ORs serialized RoaringBitmaps together and returns the
   * cardinality (number of distinct user ids) of the merged bitmap.
   *
   * The buffer is kept in the bitmap's serialized byte form because Spark SQL
   * has no native RoaringBitmap type; `Encoders.BINARY` handles the transport.
   * The original `reduce`/`merge` duplicated the same deserialize/or/serialize
   * sequence — factored into private helpers here.
   */
  private object BitMapAggrCardinality extends Aggregator[Array[Byte], Array[Byte], Int]{

    /** Deserializes a bitmap from its binary buffer form. */
    private def fromBytes(bytes: Array[Byte]): RoaringBitmap = {
      val bitmap = new RoaringBitmap()
      val in: ByteArrayDataInput = ByteStreams.newDataInput(bytes)
      bitmap.deserialize(in)
      bitmap
    }

    /** Serializes a bitmap back into the binary buffer form. */
    private def toBytes(bitmap: RoaringBitmap): Array[Byte] = {
      val out: ByteArrayDataOutput = ByteStreams.newDataOutput()
      bitmap.serialize(out)
      out.toByteArray
    }

    /** ORs two serialized bitmaps and returns the serialized union. */
    private def orBytes(a: Array[Byte], b: Array[Byte]): Array[Byte] = {
      val union = fromBytes(a)
      union.or(fromBytes(b))
      toBytes(union)
    }

    /** Zero element: a serialized empty bitmap. */
    override def zero: Array[Byte] = toBytes(new RoaringBitmap())

    /** Folds one input bitmap into the buffer (set union). */
    override def reduce(buf: Array[Byte], in: Array[Byte]): Array[Byte] = orBytes(buf, in)

    /** Merges two partial buffers from different partitions (set union). */
    override def merge(buf1: Array[Byte], buf2: Array[Byte]): Array[Byte] = orBytes(buf1, buf2)

    /** Final result: number of set bits, i.e. the distinct-user count. */
    override def finish(mergedBuf: Array[Byte]): Int = fromBytes(mergedBuf).getCardinality

    override def bufferEncoder: Encoder[Array[Byte]] = Encoders.BINARY

    override def outputEncoder: Encoder[Int] = Encoders.scalaInt
  }

  /**
   * Computes, per (userId, sessionId, province, city): the first and last page
   * viewed, the visit start/end timestamps and the number of pages viewed.
   */
  def pageCalFunc(spark: SparkSession) = {

    import spark.implicits._

    // Test data
    val seq: Seq[(String, String, String, String, Int, String)] = Seq(
      // user id, session id, province, city, ts, page viewed
      ("u001", "s001", "bj", "bj", 10, "baidu.com/1"),
      ("u001", "s001", "bj", "bj", 12, "tengxun.com/1"),
      ("u001", "s001", "bj", "bj", 15, "sina.com/3")
    )

    val dataDS: Dataset[(String, String, String, String, Int, String)] = spark.createDataset(seq)
      .toDF("userId", "sessionId", "province", "city", "ts", "pageId")
      .as[(String, String, String, String, Int, String)]

    dataDS.createOrReplaceTempView("v_data")

    // FIX: the previous query built 'ts-pageId' strings and took their string MIN/MAX,
    // which compares timestamps lexicographically (e.g. "9-..." > "10-...") and also
    // breaks the SPLIT when a pageId itself contains '-'. MIN_BY/MAX_BY (Spark 3.0+,
    // which this job already requires) pick the page tied to the numeric min/max ts.
    spark.sql(
      """
        |SELECT
        | userId,
        | sessionId,
        | province,
        | city,
        | MIN_BY(pageId, ts) AS start_page,
        | MAX_BY(pageId, ts) AS end_page,
        | MIN(ts) AS start_ts,
        | MAX(ts) AS end_ts,
        | COUNT(1) AS page_count
        |   FROM v_data
        | GROUP BY
        | userId,
        | sessionId,
        | province,
        | city
        |""".stripMargin)
      .show(10, false)

  }

  /**
   * One-off DDL: (re)creates the partitioned external table holding the
   * day-level traffic aggregates in the applog_dws database.
   */
  def createTable(spark: SparkSession) = {

    // Show the available databases
    spark.sql("SHOW DATABASES").show()

    // Create the database
    //spark.sql("CREATE DATABASE IF NOT EXISTS applog_dws")

    // Switch to the applog_dws database
    spark.sql("USE applog_dws")

    spark.sql("DROP TABLE IF EXISTS app_tfc_aggr_day")

    // FIX: in Hive DDL the table-level COMMENT must come right after the column list,
    // before PARTITIONED BY — the original placed it after TBLPROPERTIES, which Hive
    // rejects. Also fixed the 'provice' column typo (rest of the job uses 'province')
    // and the Parquet property key: 'parquet.compression' is the one Parquet honours.
    spark.sql(
      s"""
         |CREATE EXTERNAL TABLE IF NOT EXISTS app_tfc_aggr_day(
         | account string COMMENT '账号',
         | eventId string COMMENT '事件类型Id',
         | sessionId string COMMENT '会话Id',
         | province string COMMENT '省份',
         | city string COMMENT '城市',
         | region string COMMENT '区县',
         | isNew int COMMENT '是否为新用户',
         | `properties` map<String,String> COMMENT '属性字段',
         | start_page string COMMENT '浏览开始页面',
         | end_page string COMMENT '浏览结束页面',
         | start_ts bigint COMMENT '浏览开始时间',
         | end_ts bigint COMMENT '浏览结束时间',
         | pv_count int COMMENT '浏览页面数量'
         | )
         |  COMMENT 'app流量天粒度统计表'
         |  PARTITIONED BY (dt string)
         |  STORED AS parquet
         |  LOCATION '/data/hive/applog/dws/'
         |  TBLPROPERTIES("parquet.compression"="snappy")
         |""".stripMargin)

    spark.sql("show tables").show(10, false)
    // FIX: show the table just created (was 'app_tfc_base_day', a copy/paste slip)
    spark.sql("show create table app_tfc_aggr_day").show(10, false)
  }
}
