package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo2DSL {

  /**
   * Demonstrates the Spark SQL DataFrame DSL: select, where, groupBy/agg
   * (including HAVING-style post-aggregation filtering), orderBy, limit,
   * show, join, and the row_number window function.
   *
   * Reads two local CSV files (students, scores) with explicit schemas and
   * prints the result of each operation to stdout via show().
   */
  def main(args: Array[String]): Unit = {
    // Create the Spark SQL session. Local mode with a single shuffle
    // partition keeps output deterministic and fast for demo-sized data.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Read the student data into a DataFrame with an explicit schema.
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    // Read the score data into a DataFrame with an explicit schema.
    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cid STRING,score DOUBLE")
      .load("data/score.txt")

    // Enables the $"col" interpolator and the built-in SQL functions.
    import spark.implicits._
    import org.apache.spark.sql.functions._

    //1. select
    studentDF.select("name", "age").show()
    //$"age": a Column reference built via the implicit string interpolator
    studentDF.select($"name", $"age" + 1 as "age").show()
    //Using a built-in function inside select
    studentDF.select(substring($"clazz", 1, 2) as "type").show()
    studentDF.selectExpr("age+1 as age").show()

    //2. where
    studentDF.where($"sex" =!= "男" and $"age" === 23).show()
    studentDF.where(substring($"clazz", 1, 2) === "文科").show()
    studentDF.where($"name" isin("葛德曜", "符半双", "羿彦昌")).show()

    //3. groupBy followed by aggregation in agg
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .show()

    //4. filtering after aggregation (HAVING)
    /**
     * select clazz,count(1) as count from
     * students
     * group by clazz
     * having count > 80
     */
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .where($"count" > 80)
      .show()

    //5. order by
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .orderBy($"count".desc)
      .show()

    //6. limit
    studentDF
      .groupBy($"clazz")
      .agg(count($"id") as "count", round(avg($"age"), 2) as "avgAge")
      .orderBy($"count".desc)
      .limit(10)
      .show()

    //7. show: behaves like an action operator
    studentDF.show()
    studentDF.show(10)
    studentDF.show(10, truncate = false)


    //8. join
    studentDF.as("a").join(scoreDF.as("b"), $"a.id" === $"b.id", "inner").show()

    studentDF
      .as("a") //alias
      .join(scoreDF.as("b"), $"a.id" === $"b.id", "inner")
      .groupBy($"name")
      .agg(sum($"score") as "sumScore")
      .show()

    //9. row_number
    /**
     * select * from (
     *    select *,row_number() over(partition by clazz order by sumScore desc) as r from(
     *       select a.id,name,clazz,sum(score) as sumScore from
     *       student as a
     *       join
     *       score as b
     *       on a.id=b.id
     *       group by a.id,name,clazz
     *    ) as c
     * ) as d
     * where r<=10
     */
    studentDF
      .as("a") //alias
      .join(scoreDF.as("b"), $"a.id" === $"b.id", "inner")
      .groupBy($"a.id", $"name", $"clazz")
      .agg(sum($"score") as "sumScore")
      //.select($"id", $"name", $"clazz", $"sumScore", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc) as "r")
      //withColumn: adds a new column on top of the DataFrame above
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumScore".desc))
      .where($"r" <= 10)
      .show()

    // Release the session's resources; the original never stopped it.
    spark.stop()
  }
}
