package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Demo: grouped top-N with Spark SQL.
  *
  * For every city, report the two counties with the largest number of
  * distinct visitors (distinct `mdn` = phone number). The same query is
  * expressed twice — once as raw SQL, once with the DataFrame DSL — so the
  * two styles can be compared side by side.
  */
object Demo5TopN {

  def main(args: Array[String]): Unit = {

    // Local-mode session; shuffle partitions lowered from the default 200
    // so this small demo does not spawn hundreds of needless tasks.
    val session: SparkSession = SparkSession.builder()
      .appName("api")
      .config("spark.sql.shuffle.partitions", 1)
      .master("local")
      .getOrCreate()

    import session.implicits._
    import org.apache.spark.sql.functions._

    // Telecom signalling records: one row per observation of a phone (mdn)
    // in a grid cell; only mdn/city/county matter for this demo.
    val records: DataFrame = session.read
      .format("csv")
      .schema("mdn STRING,grid STRING, city STRING, county STRING, time STRING ,startDate  STRING ,endDate STRING,  day STRING")
      .load("spark/data/dianxin_data")

    // Register the data as a temp view so the SQL variant can query it.
    records.createOrReplaceTempView("dianxin")

    // --- SQL flavour ---------------------------------------------------
    // Inner query: distinct-visitor count per (city, county).
    // Middle query: rank counties within each city by that count.
    // Outer query: keep only rank 1 and 2.
    session.sql(
      """
        |select * from
        |(select  city,county,num,row_number() over(partition by city  order by num desc) rank
        |from(
        |select city,county, count(distinct mdn) as num from dianxin group by city,county
        |) as a
        |) as b
        |where b.rank <= 2
        |
      """.stripMargin).show()

    // --- DSL flavour ---------------------------------------------------
    // Same plan as the SQL above, built with the typed DataFrame API.
    val byCityCountDesc = Window.partitionBy($"city").orderBy($"num".desc)

    records
      .groupBy($"city", $"county")
      .agg(countDistinct($"mdn") as "num")
      .select($"city", $"county", $"num", row_number().over(byCityCountDesc) as "rank")
      .where($"rank" <= 2)
      .show(1000)
  }

}
