package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo4DSL {
  /**
   * Demonstrates Spark SQL's DataFrame DSL (select/where/groupBy/join/sort/
   * window functions) side by side with the equivalent SQL query.
   *
   * Expects `data/students.json` and `data/score.txt` to exist relative to
   * the working directory.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", 1) // number of partitions after a Spark SQL shuffle (small for local demo)
      .getOrCreate()

    /**
     * SQL logical execution order:
     * from -> join -> on -> where -> group by -> having -> select -> order by -> limit
     */

    // Read the student data
    val studentDF: DataFrame = spark
      .read
      .json("data/students.json")
    // Import implicit conversions (enables $"col" syntax, ===, etc.)
    import spark.implicits._
    // Import built-in SQL functions (avg, count, sum, row_number, ...)
    import org.apache.spark.sql.functions._

    // Print the inferred schema
    studentDF.printSchema()

    /**
     * 1. show: inspect the data
     */
    studentDF.show(100) // number of rows to print
    studentDF.show(false) // print without truncating column values

    /**
     * 2. select
     * Note: aggregate functions cannot be used in the DSL's select; use agg instead.
     */
    // Select columns by name
    studentDF.select("id", "name", "age").show()

    // Select columns via column objects
    studentDF.select($"id", $"name", $"clazz").show()

    // Column objects allow transforming fields while selecting
    studentDF.select($"id", substring($"clazz", 1, 2) as "sub", $"age" + 1 as "age").show()

    /**
     * 3. where
     */
    // SQL expression string
    studentDF.where("gender = '男'").show()
    // Column-object predicates (=== equality, =!= inequality)
    studentDF.where($"gender" === "女").show()
    studentDF.where($"gender" =!= "女").show()

    /**
     * 4. group by --> agg
     * A grouped aggregation returns only the grouping keys and aggregated
     * columns; all other columns are dropped.
     *
     * DSL has no `having`; just apply `where` after the aggregation.
     */
    val aggDF: DataFrame = studentDF
      .groupBy($"clazz")
      .agg(avg($"age") as "avgAge", count($"clazz") as "num")

    aggDF.printSchema()
    aggDF.show()


    /**
     * 5. join
     */
    val scoreDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cid STRING ,sco DOUBLE")
      .load("data/score.txt")

    // When the join columns have different names, use an explicit condition:
    //studentDF.join(scoreDF, $"sid" === $"id", "inner").show()

    // When the join column names match, pass the column name directly
    studentDF.join(scoreDF, "id").show()

    /**
     * 6. sort
     */

    scoreDF.sort($"sco").show(10000)
    scoreDF.sort($"sco".desc).show(10000)

    /**
     * 7. distinct
     */

    studentDF.select("clazz").distinct().show()

    /**
     * 8. limit
     */
    studentDF.limit(2).show()

    /**
     * 9. row_number (window function)
     *
     * withColumn: appends a column to the preceding DataFrame.
     */

    // Top 2 students by total score within each class
    scoreDF
      .groupBy($"id")
      .agg(sum($"sco") as "sumSco") // total score per student
      .join(studentDF, "id") // join to obtain the class
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumSco".desc)) // rank within class
      .where($"r" <= 2)
      .show()

    /**
     * Equivalent plain SQL version
     */
    studentDF.createOrReplaceTempView("student")
    scoreDF.createOrReplaceTempView("score")

    spark.sql(
      """
        |
        |select * from (
        |select
        |b.id,b.name,b.age,b.gender,b.clazz,a.sumSco,
        |row_number() over (partition by clazz order by  a.sumSco desc) as r
        |from
        |(select
        |id,sum(sco) as sumSco
        |from
        |score
        |group by id
        |) as a
        |join
        |student as b
        |on a.id=b.id
        |) as c
        |where r <= 2
        |
        |
        |""".stripMargin)
      .show()

    // Release the SparkSession (and its SparkContext) — the original leaked it.
    spark.stop()
  }

}
