package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo5TopN {

  /**
    * For each city, compute the top 10 counties ranked by distinct visitor count.
    *
    * Input: CSV telecom stay-point records, comma separated, e.g.
    *   D55433A437AEC8D8D3DB2BCA56E9E64392A9D93C,117210031795040,83401,8340104,301,20180503190539,20180503233517,20180503
    *
    * Column layout:
    *   mdn              user mobile number
    *   grid_id          internal telecom grid id of the stay point
    *   city_id          id of the city where the activity occurred
    *   county_id        county of the stay point
    *   duration         minutes spent at the stay point (grid_last_time - grid_first_time)
    *   grid_first_time  first recorded timestamp in the grid (second precision)
    *   grid_last_time   last recorded timestamp in the grid (second precision)
    *   day_id           day partition
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local[4]")
      // Small shuffle-partition count suits a local[4] run on modest data.
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    try {
      // Load raw records with an explicit schema; all fields read as STRING.
      val dianxingDF: DataFrame = spark
        .read
        .option("sep", ",")
        .schema("mdn STRING , grid_id STRING,city_id STRING,county_id STRING,duration STRING,grid_first_time STRING, grid_last_time STRING,  day_id STRING")
        .csv("spark/data/dianxin_data")

      dianxingDF.createOrReplaceTempView("dianxin_data")

      // Inner query: distinct-visitor count per (city, county).
      // Middle query: rank counties within each city by that count, descending.
      // Outer query: keep only the top 10 counties per city.
      val resultDF: DataFrame = spark.sql(
        """
          |
          |select * from
          |(
          |select city_id,county_id,c , row_number() over(partition by city_id order by c desc) rank
          |from
          |(
          | select city_id,county_id,count(distinct mdn) as c
          | from dianxin_data
          | group by city_id,county_id
          |) as a
          |) as b
          |where b.rank <= 10
          |
          |
        """.stripMargin)

      // resultDF.show(10000)
    } finally {
      // Release the SparkSession (and its local executors) even if the job fails.
      spark.stop()
    }
  }
}
