package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo4Dianxin {

  /**
    * For each city, find the top two counties by footfall
    * (number of distinct mobile users seen there).
    *
    * The same query is expressed twice — once in Spark SQL,
    * once with the DataFrame DSL — and both results are printed.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("dianxin")
      // keep shuffle parallelism small for a local run
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    // implicit conversions enabling the $"col" column syntax
    import spark.implicits._

    // all built-in SQL functions (countDistinct, row_number, ...)
    import org.apache.spark.sql.functions._

    // Read the raw CSV.
    // Schema DDL keywords are case-insensitive in Spark, but use consistent
    // casing; column names normalized (startTime/endTime, was startTIme/endTIme).
    val dianxin: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("mdn STRING, grid STRING, cityId STRING, countyId STRING, t INT, startTime STRING, endTime STRING, day STRING")
      .load("spark/data/dianxin_data")
      .where($"cityId" =!= "\\N") // drop dirty rows: cityId holding the literal "\N" null marker

    dianxin.createOrReplaceTempView("dianxin")

    /**
      * SQL version: count distinct users per (city, county), rank counties
      * within each city by that count descending, keep the top two.
      * Note: .show() is the action that actually triggers execution —
      * without it the spark.sql(...) call builds a plan that never runs.
      */
    spark.sql(
      """
        |select * from
        |(
        |select
        |cityId,countyId,num , row_number() over(partition by cityId  order by num desc ) as rank
        |from
        |(select
        | cityId,countyId,count(distinct mdn) as num
        |from
        |dianxin
        |group by cityId,countyId) as a
        |) as b
        |where rank <=2
      """.stripMargin)
      .show()

    /**
      * DSL version of the same query.
      */
    dianxin
      .groupBy($"cityId", $"countyId") // group by city and county
      .agg(countDistinct($"mdn") as "num") // distinct user count per group
      .select($"cityId", $"countyId", $"num",
        row_number().over(Window.partitionBy($"cityId").orderBy($"num".desc)) as "rank")
      .where($"rank" <= 2) // keep the top two counties per city
      .select($"cityId", $"countyId", $"num", $"rank")
      .show(1000)

    // release the SparkSession and its underlying SparkContext
    spark.stop()
  }

}
