package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.expressions.Window

/**
 * Spark SQL DSL demo intended for submission to a YARN cluster.
 *
 * Reads student and score CSV files from HDFS, computes each student's total
 * score and row count per student, joins with student info, ranks students
 * within each class by total score, keeps the top 3 per class, and writes the
 * result back to HDFS as tab-separated CSV.
 */
object DSLDemoOnYarn {
  def main(args: Array[String]): Unit = {
    val ss: SparkSession = SparkSession.builder()
      // Config priority: code > command-line arguments > environment configuration
      .config("spark.sql.shuffle.partitions", "1")
      // .master("local") // not needed when submitting to the cluster
      .appName("DSL语法补充")
      .getOrCreate()

    try {
      val studentDF: DataFrame = ss.read
        .format("csv")
        .option("sep", ",")
        .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
        .load("/bigdata33/data/student.txt") // HDFS path

      val scoreDF: DataFrame = ss.read
        .format("csv")
        .option("sep", ",")
        .schema("id STRING,subject_id STRING,score INT")
        .load("/bigdata33/data/score.txt")

      import ss.implicits._
      import org.apache.spark.sql.functions._

      // String concatenation example:
      //    studentDF.select($"id",$"name",concat(expr("'数加: '"),$"name"))
      //      .show()

      // Window function: top 3 students per class by total score.
      val resDF: DataFrame = scoreDF.groupBy("id")
        .agg( // agg allows multiple aggregate functions after a groupBy
          sum("score") as "sumScore",
          count(lit(1)) as "num" // lit(1) is the idiomatic row counter
        ).join(studentDF, "id")
        // Use withColumn to add a new column (here: rank within the class)
        .withColumn("rn", row_number() over Window.partitionBy("clazz").orderBy($"sumScore".desc))
        .where($"rn" <= 3)

      resDF.write
        .format("csv")
        .option("sep", "\t")
        .save("/bigdata33/sparkoutput/out1")
    } finally {
      // Release the session so the YARN application terminates cleanly.
      ss.stop()
    }
  }
}
