package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo4Dianxin13 {

  /**
    * Computes, for each city, the top-3 counties by passenger flow
    * (distinct phone numbers observed), demonstrating the same query
    * written twice: once in Spark SQL and once in the DataFrame DSL.
    *
    * Input: CSV records at Spark/data/dianxin_data with the schema
    * declared below (mdn = mobile device number; grid/city/county ids;
    * timestamps). Rows whose city_id is the literal "\N" (CSV null
    * marker) are excluded.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo4Dianxin13")
      .master("local")
      // keep shuffle parallelism small for a local demo
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    // Import implicit conversions (enables the $"col" syntax)
    import spark.implicits._
    // Import all built-in Spark SQL functions (countDistinct, row_number, ...)
    import org.apache.spark.sql.functions._

    val dianxinDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("mdn String,grid_id String,city_id String,county_id String,t String,start_time String,end_time String,date String")
      .load("Spark/data/dianxin_data")

    //    dianxinDF.show()

    // Top-3 counties by passenger flow within each city
    // --- SQL approach ---
    dianxinDF.createOrReplaceTempView("dianxin")

    spark
      .sql(
        """
          |select  tt1.city_id
          |        ,tt1.county_id
          |        ,tt1.cnt
          |        ,tt1.rn
          |from(
          |    SELECT  t1.city_id
          |            ,t1.county_id
          |            ,t1.cnt
          |            ,row_number() over(partition by city_id order by cnt desc) as rn
          |    from (
          |        select  city_id
          |                ,county_id
          |                ,count(distinct mdn) as cnt
          |        from dianxin
          |        where city_id != '\\N'
          |        group by city_id,county_id
          |    ) t1
          |)tt1 where tt1.rn <=3
          |
          |""".stripMargin).show(30)

    // --- DSL approach (same logic as the SQL above) ---
    dianxinDF
      .where($"city_id" =!= "\\N")                 // drop null-marker city ids
      .groupBy($"city_id", $"county_id")
      .agg(countDistinct($"mdn") as "cnt")          // distinct subscribers per county
      .select($"city_id", $"county_id", $"cnt", row_number() over Window.partitionBy($"city_id").orderBy($"cnt".desc) as "rn")
      .where($"rn" <= 3)                            // keep the top 3 per city
      .select($"city_id", $"county_id", $"cnt", $"rn")
      .show(30)

    // Release the SparkSession (and its underlying SparkContext) — the
    // original leaked it by relying on JVM exit.
    spark.stop()
  }

}
