package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo08ContinueLogin {

  /**
   * Continuous-login analysis ("gaps and islands" problem).
   *
   * Reads daily deal records from Hive table `db01.deal_tb` and, for each user,
   * collapses runs of consecutive login dates into one row carrying the run's
   * total amount, length in days, start/end dates, and the gap (in days) since
   * the user's previous run.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName(this.getClass.getSimpleName.replace("$", ""))
      // Number of partitions (i.e. tasks) used for SparkSQL shuffle stages.
      // The default is 200; 2 is plenty for a small local run.
      .config("spark.sql.shuffle.partitions", "2")
      // Requires VM option in the run configuration: -Djava.library.path="D:\soft\hadoop-3.2.0"
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val dealDF: DataFrame = spark.table("db01.deal_tb")

    dealDF
      // One row per user per day: a user may have several deals on the same date.
      .groupBy($"id", $"datestr")
      .agg(sum($"amount") as "amount")
      // Gaps-and-islands trick: for consecutive dates, (date - row_number) is a
      // constant, so `grp` uniquely identifies each run of consecutive days.
      // NOTE(review): date_sub with a Column second argument requires Spark 3.0+.
      .withColumn("grp", date_sub($"datestr", row_number() over Window.partitionBy($"id").orderBy($"datestr")))
      // Collapse each run into a single summary row.
      .groupBy($"id", $"grp")
      .agg(
        round(sum($"amount"), 4) as "sum_amount"
        , count("*") as "days"
        , min($"datestr") as "start_date"
        , max($"datestr") as "end_date"
      )
      // End date of the user's previous run (null for the first run).
      .withColumn("last_end_date", lag($"end_date", 1) over Window.partitionBy($"id").orderBy($"start_date"))
      .withColumn("interval_days", datediff($"start_date", $"last_end_date"))
      .select(
        $"id"
        , $"sum_amount"
        , $"days"
        , $"start_date"
        , $"end_date"
        // datediff - 1 = number of missed days between runs; the first run has
        // no predecessor (null), which coalesces to 0.
        , coalesce($"interval_days" - 1, lit(0)) as "interval_days"
      )
      .show()
  }

}
