package com.sunzm.spark.sql.hive.report.dws

import org.apache.spark.sql.SparkSession

/**
 * Computes the user continuous-activity range table (DWS layer).
 *
 * A user's activity history is stored as [range_start, range_end] intervals; an
 * open interval is marked with the sentinel end date 9999-12-31 (still active).
 * Each daily run merges the previous range table with that day's login detail.
 */
object DWSUserActiveRange {

  /**
   * Entry point.
   *
   * @param args optional — args(0) is the calculation date in yyyy-MM-dd form;
   *             defaults to "2021-07-27" (the original hard-coded test date),
   *             so existing invocations behave exactly as before.
   */
  def main(args: Array[String]): Unit = {
    // Calculation date, generalized from the original hard-coded '2021-07-27'.
    val calcDate = args.headOption.getOrElse("2021-07-27")

    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // If the cluster ships Hive config files, the settings below can be omitted.
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support the metastore lives only in memory and is lost on exit.
      .enableHiveSupport()
      // Enable dynamic partition support.
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      // SECURITY: metastore host/credentials are hard-coded in source control.
      // Move them into hive-site.xml or an external secret store.
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      .getOrCreate()

    spark.sparkContext.setLogLevel("WARN")

    // Single implicits import (the original imported it twice).
    import spark.implicits._

    // Test data: historical user active-range table.
    val seq = Seq(
      // (userId, first login date, range start, range end)
      (1, "2021-05-01", "2021-05-01", "2021-05-08"),
      (1, "2021-05-01", "2021-05-20", "2021-05-25"),
      (1, "2021-05-01", "2021-07-01", "9999-12-31"),
      (2, "2021-07-06", "2021-07-06", "2021-07-15"),
      (2, "2021-07-06", "2021-07-20", "9999-12-31"),
      (5, "2021-07-16", "2021-07-16", "9999-12-31"),
      (6, "2021-07-20", "2021-07-20", "2021-07-25")
    )

    spark.createDataset(seq)
      .toDF("userId", "first_dt", "range_start", "range_end")
      .createOrReplaceTempView("v_user_active_range")

    // Test data: user login detail table.
    val logSeq = Seq(
      // (userId, login date)
      (1, "2021-07-26"),
      (1, "2021-07-27"),
      (1, "2021-07-27"),
      (2, "2021-07-27"),
      (2, "2021-07-27"),
      (3, "2021-07-27"),
      (6, "2021-07-27")
    )

    spark.createDataset(logSeq)
      .toDF("userId", "dt")
      .createOrReplaceTempView("v_log_detail")

    // Build the intermediate (DWS-layer) table — not yet the final result.
    /**
     * Merge rules for calculation date `calcDate`:
     *  - An open range (end = 9999-12-31) means the user was continuously active.
     *    If that user did NOT log in on calcDate, close the range at calcDate - 1
     *    (derived via date_sub instead of a second hard-coded literal, so the
     *    two dates can never drift apart).
     *  - A FULL JOIN alone cannot add a brand-new open range for a user whose
     *    last range is already closed but who logged in again on calcDate, so a
     *    second SELECT (UNION ALL) appends that row.
     */
    spark.sql(
      s"""
        |WITH dau AS (SELECT userId FROM v_log_detail WHERE dt = '$calcDate' GROUP BY userId)
        |
        |SELECT
        | IFNULL(t1.userId, t2.userId) AS userId,
        | IFNULL(t1.first_dt, '$calcDate') AS first_dt,
        | IFNULL(t1.range_start, '$calcDate') AS range_start,
        | IF(t1.range_end = '9999-12-31' AND t2.userId IS NULL, CAST(date_sub('$calcDate', 1) AS STRING), IFNULL(t1.range_end, '9999-12-31')) AS range_end
        |FROM
        | v_user_active_range t1 FULL JOIN dau t2
        | ON t1.userId = t2.userId
        |
        | UNION ALL
        |
        |SELECT
        | t1.userId AS userId,
        | t1.first_dt AS first_dt,
        | '$calcDate' AS range_start,
        | '9999-12-31' AS range_end
        |FROM
        | (SELECT userId, MAX(first_dt) AS first_dt
        | FROM v_user_active_range
        | GROUP BY userId
        | HAVING MAX(range_end) <> '9999-12-31') t1 JOIN dau t2
        | ON t1.userId = t2.userId
        |""".stripMargin)
      //.show(20, false)
      .createOrReplaceTempView("v_user_active_range_new")

    // Query users continuously active for 3 days.
    // NOTE(review): datediff(calcDate, range_start) >= 3 actually selects spans of
    // at least 4 calendar days (start <= calcDate - 3); for exactly "3 consecutive
    // days" the threshold would be >= 2 — confirm intended semantics.
    spark.sql(
      s"""
        |SELECT
        |  userId
        | FROM v_user_active_range_new
        | WHERE range_end = '9999-12-31'
        |  AND datediff('$calcDate', range_start) >= 3
        |""".stripMargin)
      .show(10, false)

    spark.stop()
  }
}
