package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo8Window {

  /**
    * Demonstrates Spark SQL window (analytic) functions over student/score
    * CSV data: COUNT/AVG/MAX over partitions, a median computation via
    * ROW_NUMBER + COUNT, DENSE_RANK vs RANK, and LAG for row-to-row deltas.
    *
    * Expects `data/students.txt` and `data/score.txt` (comma-separated)
    * relative to the working directory.
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("window")
      // single shuffle partition keeps local demo output ordered and fast
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") // column separator
      // schema must be declared in the same order as the file's columns
      .schema("id STRING , name STRING, age INT , gender STRING , clazz STRING")
      .load("data/students.txt") // input path


    studentDF.createOrReplaceTempView("student")

    val scoreDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING , cid STRING ,sco INT")
      .load("data/score.txt")

    scoreDF.createOrReplaceTempView("score")

    /**
      * Window functions: ordered vs unordered windows.
      * Without ORDER BY: the aggregate is computed over the whole partition,
      * so every row in the group sees the same value.
      * With ORDER BY: the aggregate is cumulative (running total).
      */

    /**
      * count: tally rows within the window.
      */
    spark.sql(
      """
        |
        |select
        |id,name,age,gender,clazz,
        |count(1) over(partition by clazz) AS C
        |from student
        |
        |
      """.stripMargin)
    //.show(1000)

    /**
      * Hive SQL logical execution order:
      * from --> join --> on --> where --> group by --> having --> select --> order by --> limit
      */


    /**
      * avg: average within the window.
      */
    // Students whose total score exceeds their grade's average total.
    // NOTE(review): substr(clazz,0,2) — Spark treats position 0 like 1, so this
    // yields the first two characters of clazz (the grade prefix).
    spark.sql(
      """
        |select * from (
        |select
        |a.id,b.name,b.age,b.clazz,b.gender,sumSco,
        | avg(sumSco) over(partition by subStr(clazz,0,2)) avgSco
        |from (
        |select id,sum(sco) as sumSco from score
        |group by id
        |) as a
        |join student  as b
        |on a.id=b.id
        |) as c
        |where sumSco > avgSco
        |
        """.stripMargin)
    //.show()


    /**
      * max: maximum within the window.
      * min: minimum within the window.
      */

    // Gap between each student's total score and the class maximum.
    spark.sql(
      """
        |select * , maxSco-sumSco from (
        |select
        | a.id,a.sumSco,b.name,b.age,b.clazz,b.gender,
        | max(sumSco) over(partition by clazz) as maxSco
        | from (
        |select id,sum(sco) as sumSco from score
        |group by id
        |) as a
        |join student as b
        |on a.id=b.id
        |) as c
        |
      """.stripMargin)
    //.show()


    /**
      * Median within the window.
      *
      * sum(case when round(s/2)=r then sco else 0 end)
      * only contributes a row's score when its rank equals the middle
      * position, i.e. conditional aggregation selects the median row.
      */


    spark.sql(
      """
        |
        |select * ,
        |sum(case when round(s/2)=r then sco else 0 end ) over(partition by cid) as med
        |from (
        |select
        |* ,
        |row_number() over (partition by cid order by sco) as r,
        |count(1) over(partition by cid) as s
        |from score
        |) as a
        |
      """.stripMargin)
    //.show(100000)


    /**
      * DENSE_RANK: consecutive ranking (no gaps after ties).
      * RANK: gapped ranking (skips positions after ties).
      */


    // BUGFIX: both columns were aliased `r1`, producing a duplicate column
    // name; the RANK column is now aliased `r2`.
    spark.sql(
      """
        |select *,
        | DENSE_RANK() over (partition by cid order by sco) as r1,
        | RANK() over (partition by cid order by sco) as r2
        | from score
        |
        |
        """.stripMargin)
    //.show(1000)


    /**
      * lag: value from N rows before the current row within the window.
      * lead: value from N rows after the current row within the window.
      */

    // Compute each student's total score and compare it with the previous
    // student's total (previous in ascending sumSco order).

    spark.sql(
      """
        |
        |select
        | *,
        | lag(sumSco,1) over(order by sumSco) lagSco
        | from (
        |select id,sum(sco) as sumSco from score
        |group by id
        |) as b
        |
        |
      """.stripMargin)
      .show()


  }

}
