package com.fwmagic.spark.core

import org.apache.spark.sql.SparkSession

/**
  * Demonstrates Spark SQL aggregate functions (avg / sum / max / min / count /
  * countDistinct) and the collect_list / collect_set collectors on a small
  * employee/department JSON dataset.
  *
  * NOTE(review): the object name contains a typo ("Funcation"); it is kept as-is
  * so existing references (e.g. `spark-submit --class`) continue to work.
  */
object AggregateFuncation {

  /**
    * Entry point.
    *
    * @param args optional overrides: args(0) = employee JSON path,
    *             args(1) = department JSON path. When absent, the original
    *             hard-coded paths are used so existing invocations still work.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("AggregateFunction")
      .master("local[*]")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // The input paths were hard-coded to one developer's machine; allow
    // overriding them via program arguments, keeping the old defaults for
    // backward compatibility.
    val employeePath =
      if (args.length > 0) args(0)
      else "/Users/fangwei/learn/mycode/workspace/fwmagic-spark/src/main/resources/employee.json"
    val departmentPath =
      if (args.length > 1) args(1)
      else "/Users/fangwei/learn/mycode/workspace/fwmagic-spark/src/main/resources/department.json"

    val employee = spark.read.json(employeePath)
    val department = spark.read.json(departmentPath)

    /**
      * Per-department aggregates: avg/sum/max/min over salary, plus count and
      * countDistinct over employee names. The join key is employee.depId ==
      * department.id (distinct column names, so the unqualified $-columns are
      * unambiguous).
      */
    employee.join(department, $"depId" === $"id")
      .groupBy(employee("depId"))
      .agg(
        avg(employee("salary")),
        sum(employee("salary")),
        max(employee("salary")),
        min(employee("salary")),
        count(employee("name")),
        countDistinct(employee("name")))
      .orderBy(employee("depId"))
      .show()

    /**
      * collect_list / collect_set gather a column's values within each group
      * into an array; collect_set additionally de-duplicates. Commonly used
      * for row-to-column transformations, e.g.
      *   depId=1, name=aaa
      *   depId=1, name=bbb
      *   => depId=1, names=[aaa, bbb]
      */
    employee.groupBy(employee("depId"))
      .agg(collect_list(employee("name")), collect_set(employee("name")))
      .orderBy(employee("depId"))
      .collect()
      .foreach(println) // method value is idiomatic; println(_) is redundant

    spark.stop()
  }
}
