package com.csw.sql


import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Dataset, Row, SparkSession}

/**
  *
  * 统计每一个城市人流量最多的前两个区县
  */
/**
  * For each city, report the top two counties by distinct-visitor count.
  *
  * Reads telecom grid records from `spark/data/dianxin_data` (CSV), filters
  * out rows whose cityId is the literal "\N" (the export's NULL marker),
  * then computes the result twice — once with Spark SQL, once with the DSL —
  * to demonstrate both APIs. Only the DSL version is shown.
  */
object Demo04DianXin {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dianXin")
      // Small demo data set: one shuffle partition avoids many tiny tasks.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Implicit conversions ($"col" syntax, Dataset encoders).
    import spark.implicits._

    // All Spark SQL functions (countDistinct, row_number, ...).
    import org.apache.spark.sql.functions._

    // Read the raw data.
    // NOTE: the CSV separator option is "sep" — the original code passed the
    // misspelled key "seq", which Spark silently ignored; it only worked
    // because "," is already the default separator.
    val dianxinDF: Dataset[Row] = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("mdn STRING,grid STRING,cityId STRING,countyId STRING,t INT , startTIme STRING, endTIme STRING ,day STRING")
      .load("spark/data/dianxin_data")
      // "\N" is how the source export encodes NULL city ids — drop those rows.
      .where($"cityId" =!= "\\N")

    // Register a temp view so the same data can be queried with SQL.
    dianxinDF.createOrReplaceTempView("dianxin")

    // SQL version: distinct-user count per (city, county), ranked per city,
    // keep rank <= 2. Result intentionally not shown (see DSL version below).
    spark.sql(
      """
        |select * from
        |(
        |select
        |cityId,countyId,num,row_number() over(partition by cityId  order by num desc ) as rank
        |from
        |(select
        |cityId,countyId,count(distinct mdn) as num
        |from dianxin
        |group by cityId,countyId)as a
        |)as b
        |where rank <=2
        |
      """.stripMargin)
    //      .show()

    /**
      * DSL version of the same query.
      */
    dianxinDF
      .groupBy($"cityId", $"countyId") // group by city and county
      .agg(countDistinct($"mdn") as "num") // distinct visitor count
      .select($"cityId", $"countyId", $"num", row_number().over(Window.partitionBy($"cityId").orderBy($"num".desc)) as "rank")
      .where($"rank" <= 2) // keep the top two counties per city
      .select($"cityId", $"countyId", $"num", $"rank") // final column order
      .show(1000)

    // Release the session's resources.
    spark.stop()
  }
}
