package com.study.sql

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.UnboundedPreceding
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{count, cume_dist, dense_rank, lit, max, mean, min, ntile, percent_rank, rank, row_number, sum}


object WindowFuncTest {

  /**
   * Demonstrates Spark SQL window (analytic) functions — aggregates
   * (`max`/`min`/`mean`/`count`/`sum`) and ranking/distribution functions
   * (`ntile`, `row_number`, `dense_rank`, `rank`, `cume_dist`, `percent_rank`)
   * — over a small in-memory DataFrame partitioned by `job`.
   *
   * Expects master/deploy configuration to come from `spark-submit`
   * (no `.master(...)` is set here).
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("Spark SQL basic example")
      .getOrCreate()

    import spark.implicits._

    // Sample data: four developers, all in the same "job" partition.
    val df = Seq(
      ("Michael", 20, 3500, "Developer"),
      ("Andy", 30, 4500, "Developer"),
      ("Justin", 32, 3500, "Developer"),
      ("Berta", 25, 4000, "Developer")
    ).toDF("name", "age", "salary", "job")
    df.show()
/*    +-------+---+------+---------+
    |   name|age|salary|      job|
    +-------+---+------+---------+
    |Michael| 20|  3500|Developer|
    |   Andy| 30|  4500|Developer|
    | Justin| 32|  3500|Developer|
    |  Berta| 25|  4000|Developer|
    +-------+---+------+---------+*/

    // Hoist the shared window specifications so each is defined exactly once.
    val byJob = Window.partitionBy("job")   // unordered: aggregate over the whole partition
    val byJobSalary = byJob.orderBy("salary") // ranking functions require an ordering
    val byJobAge = byJob.orderBy("age")

    df.select(
      $"name", $"age", $"salary", $"job",
      max("salary").over(byJob).as("max"),
      min("salary").over(byJob).as("min"),
      mean("salary").over(byJob).as("mean"),
      count("salary").over(byJob).as("count"),
      sum("salary").over(byJob).as("sum"),
      ntile(2).over(byJobSalary).as("ntile"),
      row_number().over(byJobSalary).as("rn"),
      dense_rank().over(byJobSalary).as("dense_rank"),
      rank().over(byJobSalary).as("rank"),
      cume_dist().over(byJobAge).as("cume_dist"),
      percent_rank().over(byJobAge).as("percent_rank")
    ).show()
    spark.stop()
  }
}
