package com.shujia.sql

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}

object Demo5TopN {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local") // run locally
      .setAppName("map")

    // default parallelism for Spark SQL shuffles
    conf.set("spark.sql.shuffle.partitions", "4")

    val sc = new SparkContext(conf)

    // create the Spark SQL (Hive) context
    val hiveContext = new HiveContext(sc)

    // implicit conversions, e.g. RDD -> DataFrame via toDF()
    import hiveContext.implicits._

    /**
      * Sample record:
      * D55433A437AEC8D8D3DB2BCA56E9E64392A9D93C,117210031795040,83401,8340104,301,20180503190539,20180503233517,20180503
      *
      * Schema (all fields arrive as strings):
      * mdn              user mobile number
      * grid_id          telecom-internal grid id of the stay point
      * city_id          id of the city where the event happened
      * county_id        county of the stay point
      * duration         stay duration in minutes (lTime - eTime)
      * grid_first_time  first recorded timestamp in the grid (second precision)
      * grid_last_time   last recorded timestamp in the grid (second precision)
      * day_id           day partition
      */
    // drop records whose duration field is the null marker "\N"
    // (so the later .toInt on field 4 cannot fail on that marker)
    val rdd = sc.textFile("data/dianxin")
      .filter(line => !line.split(",")(4).equals("\\N"))

    /**
      * Task: for each city, find the top-10 users by total stay time.
      *   1. sum each user's stay time per city
      *   2. group by city and take the top 10
      */

    rdd.map(line => {
      val split = line.split(",")
      val mdn = split(0)
      val cityId = split(2)
      val time = split(4).toInt
      (mdn + "_" + cityId, time)
    })
      // total stay time per (user, city)
      .reduceByKey(_ + _)
      .map(kv => {
        // split the composite key once instead of twice
        val parts = kv._1.split("_")
        val mdn = parts(0)
        val cityId = parts(1)
        val sumTime = kv._2

        (cityId, (mdn, sumTime))
      })
      .groupByKey()
      .map(kv => {
        val cityId = kv._1
        val top10 = kv._2.toList
          .sortBy(-_._2) // sort by stay time, descending
          .take(10) // keep the top 10

        // FIX: was (kv, top10), which paired the whole (cityId, users) entry
        // with the result; the intended key is the city id alone
        (cityId, top10)
      }) //.foreach(println)


    /**
      * Same computation expressed in SQL.
      */

    val caseRDD = rdd.map(line => {
      val split = line.split(",")
      DianXin(split(0), split(1), split(2), split(3), split(4).toInt, split(5), split(6), split(7))
    })

    // convert the RDD to a DataFrame
    val dianXinDF = caseRDD.toDF()

    // register the DataFrame as a temporary table
    // NOTE(review): registerTempTable is deprecated in Spark 2.x in favor of
    // createOrReplaceTempView — keep as-is while the project targets HiveContext
    dianXinDF.registerTempTable("dianxin")

    /**
      * SQL logical execution order:
      * where ---> join --> on --> group by ---> having ---> select ---> order by ---> limit
      */

    hiveContext
      .sql(
        """
          |
          |select * from
          |(select
          |   city_id,mdn,sumTIme,row_number() over(partition by city_id order by sumTIme desc) as rank
          |from
          |(
          |select
          |   mdn,city_id,sum(duration) as sumTIme
          |from
          |   dianxin
          |group by
          |   mdn,city_id
          |)as a
          |)as b
          |where rank<=10
          |
          |
        """.stripMargin)
      .show()


  }

  // one telecom stay-point record; duration is parsed to Integer, every other field kept as a string
  case class DianXin(mdn: String, grid_id: String, city_id: String, county_id: String, duration: Integer, grid_first_time: String, grid_last_time: String, day_id: String)

}
