package com.sunzm.spark.sql.hive.report.dws

import com.google.common.io.{ByteArrayDataInput, ByteArrayDataOutput, ByteStreams}
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql._
import org.roaringbitmap.RoaringBitmap

import scala.collection.mutable

/**
 * DWS-layer data aggregation demo: builds a cube over sample browsing data
 * with GROUP BY ... WITH CUBE and queries per-user totals from the result.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-25 16:34
 */
object DWSAppTFTCubeDemo {

  /**
   * Entry point: builds a local SparkSession with Hive support, then runs the
   * CUBE aggregation demo and guarantees the session is stopped afterwards.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    // SECURITY NOTE(review): the Hive-metastore JDBC credentials below are
    // hard-coded in source. Move them into external configuration
    // (hive-site.xml, spark-submit --conf, or environment variables) before
    // this code is shared or deployed.
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // None of the settings below are needed when the cluster already ships
      // the Hive configuration files.
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support the metadata lives only in memory and is lost
      // when the program exits.
      .enableHiveSupport()
      // Enable dynamic-partition support.
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    // FIX: the original called spark.stop() only on the happy path, leaking
    // the session if any query threw. try/finally guarantees cleanup.
    try {
      runCubeDemo(spark)
    } finally {
      spark.stop()
    }
  }

  /**
   * Registers the sample browsing data, aggregates it with
   * GROUP BY ... WITH CUBE, and queries per-user page-view totals
   * from the cube result.
   *
   * @param spark the active SparkSession to run the queries on
   */
  private def runCubeDemo(spark: SparkSession): Unit = {
    // Sample test data.
    val seq: Seq[(Int, String, String, Int, Int, Int)] = Seq(
      // userId, province, city, browse start time, browse end time, pages viewed
      (1, "gd", "gz", 10, 12, 5),
      (2, "gd", "gz", 12, 16, 6),
      (1, "gd", "sz", 15, 20, 8)
    )

    import spark.implicits._

    spark.createDataset(seq)
      .toDF("userId", "province", "city", "start_ts", "end_ts", "pv_cnt")
      .createOrReplaceTempView("v_data")

    // Aggregate across every combination of the grouping dimensions.
    val cubeDF: DataFrame = spark.sql(
      """
        |SELECT
        | userId,
        | province,
        | city,
        | SUM(end_ts-start_ts) AS duration,
        | SUM(pv_cnt) AS pv_cnt
        | FROM v_data
        |GROUP BY userId,province,city
        | WITH CUBE
        |""".stripMargin)
    cubeDF.show(false)

    // From cubeDF, query each user's page-view count.
    cubeDF.createOrReplaceTempView("v_cube")

    /*spark.sql(
      """
        |SELECT userId, SUM(pv_cnt) AS pv_cnt
        | FROM v_cube
        |WHERE userId IS NOT NULL AND province IS NULL AND city IS NULL
        | GROUP BY userId
        |""".stripMargin)
      .show(false)*/

    // Alternative phrasing using coalesce:
    /**
     * coalesce(expr1, expr2, ...)
     * - returns the first non-null argument; null if all arguments are null.
     */
    spark.sql(
      """
        |SELECT userId, SUM(pv_cnt) AS pv_cnt
        | FROM v_cube
        |WHERE userId IS NOT NULL AND coalesce(province,city) IS NULL
        | GROUP BY userId
        |""".stripMargin)
      .show(false)
  }
}
