package com.zyh.day05.operator

import com.zyh.day04.User
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.{DataFrame, Dataset, RelationalGroupedDataset, Row, SparkSession}

/** Demo of DataFrame aggregation/sorting operators: builds a small in-memory
  * DataFrame and orders it by salary (descending) then uid (ascending). */
object GroupByTest {
  def main(args: Array[String]): Unit = {
    // Local Spark session for this demo; "local[*]" uses all available cores.
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("lt")
      .getOrCreate()

    import spark.implicits._

    // Sample rows: (uid, name, age, sex, salary).
    val people = List(
      (1, "xiao1hei", 18, "F", 2000.0),
      (2, "xiao2hei", 18, "F", 4000.0),
      (3, "xiao3hei", 20, "M", 2000.0),
      (4, "xiao4hei", 22, "M", 3000.0)
    )
    val df: DataFrame = people.toDF("uid", "name", "age", "sex", "salary")

//    val map = Map(("*", "count"), ("salary", "max"), ("salary", "min"))
    // agg can run several aggregate functions at once, taking
    // (column name -> function name) pairs.
    // NOTE(review): a Scala Map cannot hold two "salary" keys — the ("salary", "max")
    // entry above would be silently dropped; likely why this path is commented out.
//    val result: DataFrame = df.groupBy(col("sex"))
//      .agg(map)

    // Sort by salary descending, then break ties by uid ascending.
    val result: Dataset[Row] = df.orderBy(col("salary").desc, col("uid"))

    result.show()
    spark.close()
  }
}
