package com.shujia.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo3DFApi {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("api")
      // Number of partitions a DataFrame has after a shuffle in Spark SQL; the default is 200.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()


    // 1. Read the data (CSV with an explicit schema; files have no header row).
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING ,name STRING , age INT ,gender  STRING ,clazz STRING")
      .load("data/students.txt")

    /**
      * show: an action — triggers job execution and prints rows to stdout.
      *
      */

    // Print the leading rows (show() defaults to 20 rows, truncating long cells).
    studentDF.show()
    // Specify how many rows to print.
    studentDF.show(100)
    // show(truncate = false): print full cell contents without truncation.
    studentDF.show(false)

    /**
      * select: choose columns, same as SELECT in SQL.
      *
      */

    // Select columns by name.
    studentDF.select("id", "age").show()
    // selectExpr: apply SQL expressions to columns, same syntax as SQL.
    studentDF.selectExpr("id", "age +1 as age").show()

    // Import implicit conversions (enables the $"col" column syntax below).
    import spark.implicits._

    // Reference columns as Column objects via the $ interpolator.
    studentDF.select($"id", $"age" + 2 as "age").show()

    /**
      * where: filter rows.
      *
      */


    // Filter with a SQL string expression ('男' = male).
    studentDF.where("gender = '男'").show()

    // Filter with Column objects ('女' = female); === is Column equality.
    studentDF.where($"gender" === "女").show()


    /**
      *
      * group by
      *
      */

    studentDF
      .groupBy($"clazz")
      .count()
      .show()

    /**
      *
      * agg: aggregate after grouping.
      */

    // Import all DSL functions (count, avg, sum, row_number, ...).
    import org.apache.spark.sql.functions._

    studentDF
      .groupBy($"clazz")
      // Compute aggregations per group.
      .agg(count($"clazz") as "c", avg($"age") as "avgAge")
      .show()


    /**
      * join
      */

    val scoreDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING , cid STRING ,sco INT")
      .load("data/score.txt")


    // When the join column names differ, pass an explicit join condition.
    // NOTE(review): as written, $"id" === $"id" is ambiguous because both DataFrames
    // have an "id" column; disambiguate with studentDF("id") === scoreDF("id").
    //val joinDF: DataFrame = studentDF.join(scoreDF, $"id" === $"id", "inner")

    // When the join column names match, a using-column join keeps a single "id" column.
    val joinDF: DataFrame = studentDF.join(scoreDF, "id")

    joinDF.show()

    /**
      * Top-2 students by total score within each class.
      *
      * withColumn: add a new column to the DataFrame.
      *
      */

    joinDF
      // Group by student id and class.
      .groupBy($"id", $"clazz")
      // Sum the scores per student.
      .agg(sum($"sco") as "sumSco")

      // Equivalent window-function form via select:
      // .select($"id", $"clazz", $"sumSco", row_number() over Window.partitionBy($"clazz").orderBy($"sumSco".desc) as "r")

      // Add a per-class rank column on top of the previous DataFrame.
      .withColumn("r", row_number() over Window.partitionBy($"clazz").orderBy($"sumSco".desc))

      // Keep the top 2 per class.
      .where($"r" <= 2)
      .show()

    /**
      *
      * The same top-2 query expressed in SQL.
      */

    joinDF.createOrReplaceTempView("stu_sco")

    spark.sql(
      """
        |select * from (
        |select
        | id,
        | clazz,
        | sumSco,
        | row_number() over(partition by clazz order by sumSco desc) as r
        | from (
        |   select id,clazz,sum(sco) as sumSco
        |   from stu_sco
        |   group by id,clazz
        |) as a
        |) as b
        |where r <= 2
        |
        |
      """.stripMargin).show()


    /**
      * orderBy: sort the result (here: total score per student, descending).
      *
      */

    scoreDF
      .groupBy($"id")
      .agg(sum($"sco") as "sumSco")
      .orderBy($"sumSco".desc)
      .show()


  }

}
