package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Telecom (DianXin) location-record analysis demo.
 *
 * Reads a CSV of per-user location dwell records and runs three window/aggregation
 * queries:
 *   1. Top-3 counties per city by total dwell duration.
 *   2. Top-3 counties per city by distinct-visitor count.
 *   3. Per-user gap between consecutive location records within the same county.
 */
object Demo06DianXin {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      .master("local")
      // small local demo: keep shuffle parallelism low
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // mdn = phone number (user id); grid/city/county ids locate the record;
    // duration = dwell time; start/end_time are "yyyyMMddHHmmss" strings; pt = partition date
    val dianxinDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("mdn String,grid_id Bigint,city_id Int,county_id Int,duration Int,start_time String,end_time String,pt String")
      .load("spark/data/dianxin.csv")

    // Show the full (untruncated) content for inspection
    dianxinDF.show(truncate = false)

    // Query 1: top-3 counties in each city by total dwell duration
    dianxinDF
      .where($"city_id".isNotNull)
      .groupBy($"city_id", $"county_id")
      .agg(sum($"duration") as "sum_duration")
      .withColumn("rk", row_number() over Window.partitionBy($"city_id").orderBy($"sum_duration".desc))
      .where($"rk" <= 3)
      .show()

    // Query 2: top-3 counties in each city by number of distinct visitors
    dianxinDF
      .where($"city_id".isNotNull)
      .groupBy($"city_id", $"county_id")
      .agg(countDistinct("mdn") as "cnt")
      .withColumn("rk", row_number() over Window.partitionBy($"city_id").orderBy($"cnt".desc))
      // BUG FIX: the rank filter was missing, so every county was emitted
      // instead of only the top 3 per city (mirrors Query 1).
      .where($"rk" <= 3)
      .show()

    // Query 3: for each user, the time gap (seconds) between consecutive
    // location records within the same county
    dianxinDF
      .where($"city_id".isNotNull)
      // end_time of the user's previous record in the same city/county, ordered by start_time
      .withColumn("last_end_time", lag($"end_time", 1) over Window.partitionBy($"mdn", $"city_id", $"county_id").orderBy($"start_time"))
      // gap = current start - previous end, both parsed as "yyyyMMddHHmmss"
      .withColumn("interval", unix_timestamp($"start_time", "yyyyMMddHHmmss") - unix_timestamp($"last_end_time", "yyyyMMddHHmmss"))
      // first record per group has no predecessor -> interval is null -> report 0
      .select($"mdn", $"county_id", $"start_time", $"end_time", when($"interval".isNull, 0).otherwise($"interval") as "interval")
      .show()
  }

}
