package com.shujia.spark.sql

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext

/**
 * Per-group Top-N example: for each course, keep the 10 highest scores
 * using the `row_number()` window function via Spark SQL (HiveContext).
 *
 * Input: `spark/data/score.txt`, CSV lines of `student_id,course_id,score`.
 * NOTE(review): malformed lines (fewer than 3 fields or non-numeric score)
 * will make the job fail at action time — presumably the input is clean;
 * confirm against the data file.
 */
object Demo4TopN {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local").setAppName("app")

    // Default number of partitions Spark SQL uses after a shuffle
    // (kept at 1 here so the local demo produces a single output partition).
    conf.set("spark.sql.shuffle.partitions", "1")

    val sc = new SparkContext(conf)

    try {
      // Entry point for Spark SQL. A plain SQLContext does not support the
      // row_number() window function in this Spark version — HiveContext does.
      //    val sqlContext = new SQLContext(sc)
      val hiveContext = new HiveContext(sc)

      val scoreRDD = sc.textFile("spark/data/score.txt")

      // Parse each CSV line into (student_id, course_id, score).
      val scoRDD = scoreRDD.map(line => {
        val split = line.split(",")
        val s_id = split(0)
        val c_id = split(1)
        val sco = split(2).toInt
        (s_id, c_id, sco)
      })

      import hiveContext.implicits._
      val scoreDF = scoRDD.toDF("student_id", "cource_id", "sco")

      // Register as a temporary table so it can be queried with SQL.
      scoreDF.registerTempTable("score")

      // Top-N per group: rank rows within each course by score (descending)
      // and keep ranks 1..10.
      val resultDF = hiveContext.sql("select * from (select *, row_number() over(partition by cource_id order by sco desc) as rank from score) as a where a.rank<=10")

      resultDF.show(1000)
    } finally {
      // Always release the SparkContext (executors, UI port, scratch dirs),
      // even if the job above throws.
      sc.stop()
    }
  }
}
