package com.sunzm.spark.sql.hive.report.dws

import org.apache.spark.sql.SparkSession

/**
 * SQL-based event-interval analysis: for each user, computes the time gap
 * between an 'a' event and the directly following 'b' event.
 */
object DWSAppEventInterval {

  /**
   * Entry point: builds a local SparkSession with Hive support, registers a
   * small in-memory event table, and runs a SQL query that computes, per user,
   * the time interval between each 'a' event and the next 'b' event in the
   * filtered a/b event stream.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val warehouseLocation = "/user/hive/warehouse"
    //val warehouseLocation = new File("spark-warehouse").getAbsolutePath

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 8)
      .config("spark.sql.shuffle.partitions", 8)
      // If the cluster already ships the Hive config files, none of the
      // settings below are required.
      //.config("fs.defaultFS", "hdfs://192.168.1.158:8020")
      .config("fs.defaultFS", "file:///")
      .config("spark.sql.warehouse.dir", warehouseLocation)
      // Without Hive support the metastore lives in memory only and is lost
      // when the program exits.
      .enableHiveSupport()
      // Enable dynamic-partition inserts.
      .config("hive.exec.dynamic.partition", "true")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // NOTE(review): metastore connection credentials are hard-coded in
      // source; move them to hive-site.xml / spark-defaults / environment
      // before shipping.
      .config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver")
      .config("javax.jdo.option.ConnectionURL", "jdbc:mysql://82.156.210.70:3306/hive?useSSL=false")
      .config("javax.jdo.option.ConnectionUserName", "root")
      .config("javax.jdo.option.ConnectionPassword", "ABC123abc.123")
      .getOrCreate()

    // Guarantee the session is stopped even if the job below throws;
    // the original code leaked the session on any failure.
    try {
      spark.sparkContext.setLogLevel("WARN")

      import spark.implicits._

      /*
      -- Intermediate result the interval analysis needs: a user performed
      -- event A and then event B (not necessarily consecutively), and we want
      -- the time gap between B and A:
      userId,E1,E2,timelong
      1,a,b,5
      1,a,b,3
      1,a,b,20
      1,a,b,1
      1,a,b,2
      1,a,b,2
       */
      // Test data (the analysis is per-user; this sample has a single user).
      val seq = Seq(
        //userId, eventId, timestamp
        (1, "a", 1),
        (1, "a", 3),
        (1, "c", 5),
        (1, "b", 10),
        (1, "d", 20),
        (1, "a", 50),
        (1, "c", 60)
      )

      spark.createDataset(seq)
        .toDF("userId", "eventId", "ts")
        .createOrReplaceTempView("v_app_event_detail")

      /*
       * How the query works:
       * -- 1. Filter the detail rows to events 'a' and 'b' only:
       *    (1, "a", 1),
       *    (1, "a", 3),
       *    (1, "b", 10),
       *    (1, "a", 50),
       *
       * -- 2. Attach the next event via LEAD ... OVER:
       *    (1, "a", 1, "a", 3),
       *    (1, "a", 3, "b", 10),
       *    (1, "b", 10, "a", 50),
       *    (1, "a", 50, \N, \N),
       *
       * -- 3. Keep only a->b pairs and take the time difference — the result:
       *    (1, "a", 3, "b", 10, 7),
       */

      // lag(field, n): value of `field` n rows BEFORE the current row.
      // lead(field, offset, default): value of `field` `offset` rows AFTER the
      // current row, or `default` when there is no such row.
      spark.sql(
        """
          |WITH tmp AS (
          |SELECT
          |  userId,
          |  `ts` as t1,
          |  eventId,
          |  LEAD(eventId,1,NULL) OVER(PARTITION BY userId ORDER BY `ts`) AS next_event,
          |  LEAD(`ts`,1,NULL) OVER(PARTITION BY userId ORDER BY `ts`) AS t2
          |FROM v_app_event_detail
          |WHERE eventId IN('a', 'b')
          |)
          |
          |SELECT
          |   userId,
          |   eventId as event1,
          |   next_event as event2,
          |   t2-t1 AS interval_time
          |FROM tmp
          |   WHERE next_event = 'b' AND eventId = 'a'
          |""".stripMargin)
        .show(10, truncate = false)
    } finally {
      spark.stop()
    }
  }
}
