package com.fwmagic.spark.other.core

import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Computes the average salary and average age per department.
  *
  * Requirements:
  *   1. Only employees older than 20 are counted.
  *   2. Statistics are computed at (department name, employee gender) granularity.
  *   3. For each such group, report the average salary and average age.
  */
object DepartmentAvgSalaryAndAgeStat {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("AggregateFunction")
      .master("local[*]")
      .getOrCreate()

    // Input paths may be overridden on the command line:
    //   args(0) = employee JSON, args(1) = department JSON.
    // Defaults preserve the original hard-coded locations.
    val employeePath =
      if (args.length > 0) args(0)
      else "/Users/fangwei/learn/mycode/workspace/fwmagic-spark/src/main/resources/employee.json"
    val departmentPath =
      if (args.length > 1) args(1)
      else "/Users/fangwei/learn/mycode/workspace/fwmagic-spark/src/main/resources/department.json"

    val employee: DataFrame = spark.read.json(employeePath)
    val department: DataFrame = spark.read.json(departmentPath)

    /**
      * Equivalent SQL — note this is an INNER join (departments without
      * matching employees are dropped), not a left join:
      *
      *   select d.name, e.gender, avg(e.salary), avg(e.age)
      *   from employee e join department d on e.depId = d.id
      *   where e.age > 20
      *   group by d.name, e.gender
      */
    employee
      .filter("age > 20")                                  // requirement 1: employees older than 20 only
      .join(department, employee("depId") === department("id"))
      .groupBy(department("name"), employee("gender"))     // requirement 2: (department, gender) granularity
      .agg(
        avg(employee("salary")).as("avg_salary"),          // requirement 3: average salary per group
        avg(employee("age")).as("avg_age")                 //                and average age per group
      )
      // show() is an action: it triggers execution of the lazy pipeline above
      .show()

    /**
      * Background notes:
      *  - DataFrame = Dataset[Row]; its element type is Row, so it is untyped (weakly typed).
      *  - A Dataset is usually parameterized with a user-defined case class, so it is strongly typed.
      *  - Dataset development has much in common with RDD development: its API is split into
      *    transformations (lazy) and actions (which trigger actual execution), and Datasets
      *    also support persistence/caching.
      */

    // Release the SparkSession's resources; the original code leaked it.
    spark.stop()
  }
}
