package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo5DSL {

  /**
    * Demonstrates Spark SQL's DataFrame DSL — select, where, groupBy/agg,
    * sort, join and window functions — alongside the equivalent SQL forms.
    *
    * Reads `data/students.json` and `data/score.txt` relative to the
    * working directory and prints results to stdout via `show`.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Student data; the schema is inferred from the JSON file.
    val df: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")

    df.show(100)

    /**
      * select
      */

    // Select columns by name.
    df.select("id", "name")
      .show()

    // Select via column expressions, which allow transforming the column.
    df.select($"id", $"age".+(1).as("addAge"))
    // Shorthand operator syntax for the same expression.
    df.select($"id", $"age" + 1 as "addAge")
      .show()


    /**
      * where: filtering
      *
      * =!= not equal
      * === equal
      **/

    // Column-expression form.
    df.where($"age" > 22 and $"gender" =!= "男")
      .show()


    // String-expression form (SQL syntax).
    df.where("age > 22 and gender = '男'")
      .show()


    /**
      * groupBy
      */

    df.groupBy($"clazz")
      .count()
      .show()

    /**
      * After grouping, use agg for aggregate computations.
      * Both the grouping columns and the aggregated columns are kept.
      * Every SQL aggregate function is available here.
      *
      * Equivalent SQL:
      * select clazz, count(1), avg(age) from t group by clazz
      */

    df
      .groupBy($"clazz")
      .agg(count($"clazz") as "num", avg($"age") as "avgAge")
      .show()

    /**
      * sort
      */

    df.sort($"age".desc)
      .show()


    /**
      * join
      */

    // Score data; CSV has no header, so the schema is declared explicitly.
    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("sId STRING,cId STRING,sco DOUBLE")
      .load("data/score.txt")

    // When the join columns share a name, the default join type is inner.
    //df.join(scoreDF, "id").show()

    // Explicitly specifying the join type.
    //df.join(scoreDF, List("id"), "inner").show()

    // When the join columns have different names, supply a join condition.
    df.join(scoreDF, $"id" === $"sId", "inner").show()

    /**
      * Take the 10 oldest students of each class.
      */

    df.createOrReplaceTempView("student")

    // SQL form: rank within each class by age, then keep the top 10.
    spark.sql(
      """
        |
        |select * from (
        |select
        |*,row_number() over(partition by clazz order by age desc) as r
        |from
        |student
        |) as a
        |where a.r<=10
        |
      """.stripMargin)
    //.show(1000)

    /**
      * DSL equivalent.
      *
      * withColumn: add a new column to the DataFrame from an expression.
      */

    df.withColumn("r", row_number().over(Window.partitionBy($"clazz").orderBy($"age".desc)))
      .where($"r" <= 10)
      .show(1000)


    /**
      * SQL execution order
      * select * from
      * t1 as a
      * join
      * t2 as b
      * on a.id=b.id
      * where a.age>22
      * group by id
      * having avg(age) > 23
      *
      * from --> join --> on --> where --> group by --> having --> select --> order by --> limit
      */

    // Fix: release the local Spark session's resources; it was never stopped.
    spark.stop()
  }

}
