package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Reads telecom location records from CSV and computes, for each city,
 * the top-3 counties by distinct-visitor count — once with Spark SQL
 * and once with the DataFrame (DSL) API, printing both results.
 */
object Demo4Dianxin {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo4Dianxin")
      .master("local")
      // Small shuffle-partition count: appropriate for a local demo dataset.
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    // Bring in implicit conversions (e.g. the $"col" syntax).
    import spark.implicits._
    // Bring in all built-in Spark SQL functions (countDistinct, row_number, ...).
    import org.apache.spark.sql.functions._

    // Each row is one visit record; mdn is the visitor's phone number.
    val dianxinDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("mdn String,grid_id String,city_id String,county_id String,t String,start_time String,end_time String,date String")
      .load("spark/data/dianxin_data")

    dianxinDF.show()

    // Goal: per city, the top-3 counties by number of distinct visitors.

    dianxinDF.createOrReplaceTempView("dianxin")

    // SQL approach: aggregate per (city, county), rank within each city,
    // then keep ranks 1..3.
    spark.sql(
      """
        |select  tt1.city_id
        |        ,tt1.county_id
        |        ,tt1.num
        |        ,tt1.rk
        |from(
        |    select  t1.city_id
        |            ,t1.county_id
        |            ,t1.num
        |            ,row_number() over(partition by t1.city_id order by t1.num desc) as rk
        |    from(
        |        select  city_id
        |                ,county_id
        |                ,count(distinct mdn) as num
        |        from dianxin
        |        group by city_id,county_id
        |    ) t1
        |)tt1 where tt1.rk <= 3
        |""".stripMargin).show()

    // DSL approach: same query expressed with the DataFrame API.
    dianxinDF
      .groupBy($"city_id", $"county_id")
      .agg(countDistinct($"mdn") as "num") // distinct-visitor count per county
      // Rank counties within each city by visitor count, descending.
      .select($"city_id", $"county_id", $"num", row_number().over(Window.partitionBy($"city_id").orderBy($"num".desc)) as "rk")
      .where($"rk" <= 3)
      .show(100)

    // Release the session (and the underlying SparkContext) — previously leaked.
    spark.stop()
  }

}
