package com.xiaohu.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Window functions: `over`
 *  Aggregate window functions: sum, count, lag (previous row), lead (next row)
 *  Ranking window functions: row_number, rank, dense_rank
 *
 *  Practice exercises, solved with the DSL API:
 *    - per-subject scores of the students ranked top 10 in their grade by total score
 *    - students who passed every subject
 *    - students whose total score exceeds the grade average
 *    - the score gap between consecutive ranks within each class
 */
object Demo6WindowFun {

  /**
   * Demonstrates Spark SQL window functions via the DataFrame DSL:
   * aggregate windows (sum / count / lag) and ranking windows
   * (row_number / dense_rank) over students / scores / subjects data.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("rdd与df之间的转换")
      // Tiny demo datasets: a single shuffle partition avoids many empty tasks.
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    /**
     * Import implicit conversions ($"col" syntax) and the SQL function library.
     */
    import org.apache.spark.sql.functions._
    import sparkSession.implicits._

    /**
     * Read the three input files: students, scores, subjects.
     */
    val studentsDF: DataFrame = sparkSession.read
      .format("csv")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("spark/data/students.txt")
//    studentsDF.show()
    val scoresDF: DataFrame = sparkSession.read
      .format("csv")
      .schema("id STRING,subject_id STRING,score INT")
      .load("spark/data/score.txt")
//    scoresDF.show()
    val subjectsDF: DataFrame = sparkSession.read
      .format("csv")
      .schema("subject_id STRING,subject_name STRING,subject_score INT")
      .load("spark/data/subject.txt")
//    subjectsDF.show()

    // 1. Per-subject scores of the students ranked top 10 in their grade by total score.
    //    The grade is the first two characters of the class name (Spark substring is 1-based;
    //    the original used position 0, which Spark silently treats as 1).
    //    dense_rank keeps ties, so more than 10 students may appear when totals tie.
    //    FIX: removed the original `.limit(120)` — limit without an ordering is
    //    nondeterministic and could silently drop valid rows already selected by `rn <= 10`.
    val resDS1: Dataset[Row] = scoresDF.join(studentsDF, "id")
      .withColumn("sumScore", sum("score") over Window.partitionBy("id"))
      .withColumn("rn", dense_rank() over Window.partitionBy(substring($"clazz", 1, 2)).orderBy($"sumScore".desc))
      .where($"rn" <= 10)

    // 2. Students who passed every subject (pass = at least 60% of the subject's full score).
    //    Only passing rows survive the filter, so a student passed everything iff all
    //    6 of their subject rows remain (this dataset has 6 subjects).
    val resDS2: Dataset[Row] = scoresDF.join(subjectsDF, "subject_id")
      .where($"score" >= $"subject_score" * 0.6)
      .withColumn("jigeCount", count(lit(1)) over Window.partitionBy($"id"))
      .where($"jigeCount" === 6)

    // 3. Students whose total score is greater than the grade average.
    //    NOTE(review): avg here runs over score ROWS, so each student is weighted by
    //    their number of score rows; this equals the true per-student average only when
    //    every student has the same number of subjects — confirm against the data.
    val resDS3: Dataset[Row] = scoresDF
      .join(studentsDF, "id")
      .withColumn("sumScore", sum($"score") over Window.partitionBy($"id"))
      .withColumn("avgScore", avg($"sumScore") over Window.partitionBy(substring($"clazz", 1, 2)))
      .where($"sumScore" > $"avgScore")

    // 4. Score gap between consecutive ranks inside each class.
    //    lag(sumScore, 1, 750) reads the previous (higher-ranked) row's total; the
    //    default 750 (full marks) makes the top-ranked row's gap the distance to a
    //    perfect score instead of null.
    val resDF4: DataFrame = scoresDF
      .join(studentsDF, "id")
      .groupBy("id", "clazz")
      .agg(sum("score") as "sumScore")
      .withColumn("rn", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc))
      .withColumn("beforeSumScore", lag($"sumScore", 1, 750) over Window.partitionBy($"clazz").orderBy($"sumScore".desc))
      .withColumn("cha", $"beforeSumScore" - $"sumScore")

    // Release the session's resources (the original leaked the SparkSession).
    sparkSession.stop()
  }
}
