package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Telecom (dianxin) demo: for every city, find the top 3 counties by
 * distinct-visitor count, shown twice — once via Spark SQL, once via the
 * DataFrame DSL. In-group Top-N is implemented with `row_number()` over a
 * window partitioned by city and ordered by the count descending.
 */
object Demo04Dianxin {
  def main(args: Array[String]): Unit = {
    // Local session for the demo; in production the master would come from
    // spark-submit rather than being hard-coded.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo04Dianxin")
      .master("local")
      .getOrCreate()

    import spark.implicits._ // enables the $"col" column syntax below
    import org.apache.spark.sql.functions._

    // Read the CSV with an explicit schema (no header inference).
    // Columns: mdn = phone id, grid/city/county ids, visit duration,
    // start/end timestamps, pt = partition date.
    val dianxinDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("mdn String,grid_id Bigint,city_id Int,county_id Int,duration Int,start_time String,end_time String,pt String")
      .load("spark/data/sql/dianxin.csv")

    /**
     * Top 3 counties by number of tourists within each city.
     * In-group Top-N pattern: row_number over a per-city window.
     */

    // --- SQL approach ---
    dianxinDF.createOrReplaceTempView("dianxin")

    spark
      .sql(
        """
          |select  tt1.city_id
          |        ,tt1.county_id
          |        ,tt1.cnt
          |        ,tt1.rn
          |from (
          |    select  t1.city_id
          |            ,t1.county_id
          |            ,t1.cnt
          |            ,row_number() over (partition by t1.city_id order by t1.cnt desc) as rn
          |    from (
          |        select  city_id
          |                ,county_id
          |                ,count(distinct mdn) as cnt
          |        from dianxin
          |        group by city_id,county_id
          |    ) t1
          |) tt1 where tt1.rn <=3
          |""".stripMargin).show()

    // --- DSL approach (same result) ---
    dianxinDF
      .groupBy($"city_id", $"county_id")
      .agg(countDistinct($"mdn") as "cnt")
      .withColumn("rn", row_number() over (Window.partitionBy($"city_id").orderBy($"cnt".desc)))
      .where($"rn" <= 3)
      .show()

    // Release the SparkContext and associated resources; without this the
    // session is leaked and the JVM may keep non-daemon threads alive.
    spark.stop()
  }

}
