package cn.doitedu.day08

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window

object T01_DSLFlowCount {

  /**
   * Sessionizes per-user flow records and aggregates traffic per session.
   *
   * Input (pipe-delimited CSV with header, from data/flow2.txt):
   *   uid | start_time | end_time | flow
   *
   * Algorithm (classic gap-based sessionization):
   *   1. lag(end_time) over (partition by uid order by start_time) gives the
   *      previous record's end_time; the first row of each uid falls back to
   *      its own start_time so its gap is guaranteed to be <= 600.
   *   2. flag = 1 when the gap between start_time and the previous end_time
   *      exceeds 600 seconds (i.e. a new session starts), otherwise 0.
   *   3. A running sum of flag per uid yields a session id (sum_flag).
   *   4. Group by (uid, sum_flag) and take min(start_time), max(end_time),
   *      sum(flow) as the per-session summary.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder().appName(this.getClass.getSimpleName)
      .master("local[4]")
      .getOrCreate()

    // Ensure the session is stopped even if the job throws,
    // so local executor threads and UI resources are released.
    try {
      val df = spark.read
        .option("header", "true")
        .option("delimiter", "|")
        .csv("data/flow2.txt")

      import spark.implicits._
      import org.apache.spark.sql.functions._
      df.select(
        'uid,
        'start_time,
        'end_time,
        'flow,
        // NOTE: the pure-DSL form
        //   lag('end_time, 1, "start_time").over(...)
        // does NOT work here: the third argument of the DSL `lag` is a literal
        // default value, so "start_time" would be treated as the string literal,
        // not the column. SQL `expr` accepts a column as the default, so use it.
        expr("lag(end_time, 1, start_time) over(partition by uid order by start_time) as lag_time")
      ).select(
        'uid,
        'start_time,
        'end_time,
        'flow,
        // flag = 1 marks the first row of a new session (gap > 600 seconds)
        expr("if(to_unix_timestamp(start_time) - to_unix_timestamp(lag_time) > 600, 1, 0) flag")
      ).select(
        'uid,
        'start_time,
        'end_time,
        'flow,
        // Running sum of flags per uid = session id within that uid
        sum("flag").over(Window.partitionBy("uid").orderBy("start_time")).as("sum_flag")
      ).groupBy(
        "uid",
        "sum_flag"
      ).agg(
        min("start_time") as "start_time",
        max("end_time") as "end_time",
        sum("flow") as "flow"
      ).drop(
        "sum_flag" // internal session id, not part of the output
      ).show()
    } finally {
      spark.stop()
    }
  }
}
