package com.study.spark.scala.sql

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window

/**
 * Demonstrates Spark SQL window functions: frame specifications
 * (rowsBetween / rangeBetween and the implicit default frame),
 * lag/lead, the ranking family (rank, dense_rank, row_number),
 * ntile, percent_rank and cume_dist.
 *
 * @author stephen
 * @date 2019-09-27 18:50
 */
object WindowFunctionDemo {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("WindowFunctionDemo")
      .master("local[*]")
      .getOrCreate()

    // Implicit conversions: functions._ for first/collect_list/etc.,
    // spark.implicits._ for toDF and the $"col" column syntax.
    import org.apache.spark.sql.functions._
    import spark.implicits._

    val df = Seq((0, 1, 100), (1, 2, 50), (2, 3, 40),
      (3, 1, 70), (4, 2, 51), (5, 3, 40),
      (6, 1, 30), (7, 2, 50), (8, 3, 100),
      (9, 1, 51), (10, 2, 52), (11, 3, 35),
      (12, 1, 45), (13, 2, 25)
    ).toDF("id", "cate", "key")

    // Partition by cate, order by key, and restrict the frame to
    // [previous row, next row]. Equivalent SQL:
    //   select *, first(id) over w_rows as first_value,
    //             collect_list(id) over w_rows as list_value
    //   from tbl
    //   window w_rows as (partition by cate
    //                     order by key
    //                     rows between 1 preceding and 1 following)
    // NOTE: rowsBetween takes inclusive physical-row offsets relative to the
    // current row, so "1 preceding and 1 following" is rowsBetween(-1, 1).
    val w_rows = Window.partitionBy($"cate").orderBy($"key").rowsBetween(-1, 1)
    df.withColumn("first_value", first($"id").over(w_rows))
      .withColumn("list_value", collect_list($"id").over(w_rows))
      .show()

    // Equivalent SQL:
    //   select *, first(id) over w_range as first_value,
    //             collect_list(id) over w_range as list_value
    //   from tbl
    //   window w_range as (partition by cate
    //                      order by key
    //                      range between 1 preceding and 1 following)
    // rangeBetween defines the frame by the VALUE range of the orderBy column,
    // not by row position: rangeBetween(-1, 1) means rows whose key falls
    // within [currentKey - 1, currentKey + 1]. E.g. the row with id=5 has
    // key=40, so its frame is every row in the partition with key in [39, 41].
    val w_range = Window.partitionBy("cate").orderBy("key").rangeBetween(-1, 1)
    df.withColumn("first_value", first($"id").over(w_range))
      .withColumn("list_value", collect_list($"id").over(w_range))
      .show()

    // Window.unboundedPreceding denotes the first row of the partition,
    // Window.unboundedFollowing denotes the last row of the partition,
    // and Window.currentRow is the preferred way to denote the current row.

    // rowsBetween/rangeBetween are optional when defining a window.
    // Without orderBy, the default frame is
    //   rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing);
    // with orderBy, the default frame is
    //   rangeBetween(Window.unboundedPreceding, Window.currentRow).
    // Equivalent SQL:
    //   select *, first(id) over w as first_value,
    //             collect_list(id) over w as list_value
    //   from tbl
    //   window w as (partition by cate
    //                order by key)
    val w = Window.partitionBy("cate").orderBy("key")
    df.withColumn("first_value", first($"id").over(w))
      .withColumn("list_value", collect_list($"id").over(w))
      .show()

    // lag() and lead() pull a column value from the preceding/following row
    // into the current row (third argument is the default when out of range).
    // Equivalent SQL:
    //   select *, lag(key,1,0) over w as lag_row_key,
    //             lead(key,1,0) over w as lead_row_key
    //   from tbl
    //   window w as (partition by cate order by key)
    df.withColumn("lag_row_key", lag($"key", 1, 0).over(w)
    ).withColumn("lead_row_key", lead($"key", 1, 0).over(w)
    ).show

    // Numbering differences between dense_rank(), rank() and row_number():
    // rank leaves gaps after ties, dense_rank does not, row_number is unique.
    df.withColumn("dense_rank", dense_rank().over(w))
      .withColumn("rank", rank().over(w))
      .withColumn("row_number", row_number().over(w))
      .show

    // ntile(k) splits each partition into k roughly equal buckets.
    // Equivalent SQL:
    //   select *, ntile(3) over (partition by cate order by key) as ntile_3
    //   from tbl
    df.withColumn("ntile_3", ntile(3).over(w)).show

    // percent_rank() gives the relative rank as a percentage.
    // Equivalent SQL:
    //   select *, rank() over (partition by cate order by key) as rank,
    //     round(percent_rank() over (partition by cate order by key),2)
    //       as percent_rank
    //   from tbl
    df.withColumn("rank", rank().over(w))
      .withColumn("percent_rank", round(percent_rank().over(w), 3))
      .show

    // cume_dist() reports the cumulative distribution of a value:
    // the number of rows in the partition with rank <= current rank
    // (>= for descending order), divided by the partition's row count.
    // Single sort column. Equivalent SQL:
    //   select *, rank() over (partition by cate order by key) as rank,
    //             cume_dist() over (partition by cate order by key) as cume_dist
    //   from tbl
    df.withColumn("rank", rank().over(w))
      .withColumn("cume_dist", cume_dist().over(w))
      .show

    // Multiple sort columns. Equivalent SQL:
    //   select *, rank() over (partition by cate order by key, id desc) as rank,
    //             cume_dist() over (partition by cate order by key, id desc)
    //               as cume_dist
    //   from tbl
    val mw = Window.partitionBy("cate").orderBy($"key", $"id".desc)
    df.withColumn("rank", rank().over(mw))
      .withColumn("cume_dist", cume_dist().over(mw))
      .show

    // Release the local Spark context (the original leaked the session).
    spark.stop()
  }
}
