package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: running Spark SQL against Hive tables ("Spark on Hive").
 *
 * Computes, for each course, the top-10 students by score — once with
 * plain SQL and once with the DataFrame/DSL API — to show both styles
 * produce the same result.
 */
object Demo06SparkOnHive {
  def main(args: Array[String]): Unit = {
    /**
     * Hive support is enabled via enableHiveSupport().
     * Requires the spark-hive dependency in the pom file:
     * <dependency>
     * <groupId>org.apache.spark</groupId>
     * <artifactId>spark-hive_2.11</artifactId>
     * <version>2.4.5</version>
     * </dependency>
     * The Hive metastore service must be running:
     * hive --service metastore
     */
    // NOTE(review): appName says "HiveOnSpark" while the object is named
    // "SparkOnHive" — kept as-is to preserve behavior, but worth aligning.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo06HiveOnSpark")
      .master("local")
      .enableHiveSupport() // enable Hive support
      .config("spark.sql.shuffle.partitions", 2) // default is 200
      .getOrCreate()

    try {
      import org.apache.spark.sql.functions._
      import spark.implicits._

      spark.sql("show databases").show()
      spark.sql("show tables").show()

      // Switch to the target database, then list its tables.
      spark.sql("use stu_spark").show()
      spark.sql("show tables").show()

      // Top-10 students per course (grouped top-N) via SQL.
      // NOTE(review): "cource_id" is a misspelling of "course_id", but it
      // must match the actual Hive column name — do not "fix" it here.
      spark.sql(
        """
          |select  t1.student_id
          |        ,t1.cource_id
          |        ,t1.sco
          |        ,t1.rn
          |from (
          |        select  student_id
          |                ,cource_id
          |                ,sco
          |                ,row_number() over(partition by cource_id order by sco desc) as rn
          |        from score
          |) t1 where rn<=10
          |""".stripMargin).show()

      // Same top-10-per-course query expressed with the DataFrame DSL.
      val scoDF: DataFrame = spark.table("score")
      scoDF
        .select($"student_id", $"cource_id", $"sco",
          row_number() over Window.partitionBy($"cource_id").orderBy($"sco".desc) as "rn")
        .where($"rn" <= 10)
        .show()
    } finally {
      // Always release the SparkSession (and its SparkContext) on exit,
      // even if one of the queries above throws.
      spark.stop()
    }
  }

}
