package com.zhaosc.spark.sql.winfun

import org.apache.spark.sql.SparkSession

/**
 * Demonstrates the `row_number()` window (analytic) function over a Hive table.
 *
 * Flow: (re)create a `sales` table in Hive, bulk-load a tab-delimited local
 * file into it, rank rows within each `category` by `revenue` descending,
 * keep the top 3 per category, and persist the result as a Hive table.
 */
object RowNumberWindowFunction {

  def main(args: Array[String]): Unit = {
    // Hive DDL/DML below (CREATE TABLE ... ROW FORMAT, LOAD DATA, USE,
    // saveAsTable into the metastore) requires enableHiveSupport(); without
    // it the in-memory catalog rejects these statements.
    val spark = SparkSession
      .builder()
      .appName("RowNumberWindowFunction")
      .config("spark.master", "local")
      .enableHiveSupport()
      .getOrCreate()

    // Drop any stale copy, recreate the table, and load the sample data.
    spark.sql("DROP TABLE IF EXISTS sales")
    // NOTE: inside a triple-quoted string the two characters \t are passed
    // verbatim to Hive, whose DDL parser interprets '\t' as the tab delimiter.
    spark.sql(
      """CREATE TABLE IF NOT EXISTS sales (
        |  product STRING,
        |  category STRING,
        |  revenue BIGINT
        |) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'""".stripMargin)
    // assumes /root/resource/sales.txt exists on the driver host — TODO confirm
    spark.sql(
      """LOAD DATA LOCAL INPATH '/root/resource/sales.txt'
        |INTO TABLE sales""".stripMargin)

    /**
     * row_number() assigns a sequential rank to each row within its partition,
     * following the ORDER BY clause. Example:
     *   id=2016 [111, 112, 113]          -> [111 1, 112 2, 113 3]
     *   cell phone [6000, 5000, 4000, 3000] -> [6000 1, 5000 2, 4000 3, 3000 4]
     */
    // Top 3 products by revenue within each category.
    val top3SalesDF = spark.sql(
      """SELECT product, category, revenue
        |FROM (
        |  SELECT
        |    product,
        |    category,
        |    revenue,
        |    row_number() OVER (PARTITION BY category ORDER BY revenue DESC) rank
        |  FROM sales
        |) tmp_sales
        |WHERE rank <= 3""".stripMargin)

    // Persist the per-category top-3 rows as a table in the `result` database.
    spark.sql("USE result")
    spark.sql("DROP TABLE IF EXISTS top3Sales")
    top3SalesDF.write.saveAsTable("top3Sales")

    // stop() shuts down the session and its underlying SparkContext.
    spark.stop()
  }
}