package com.niit.spark.sql.test

import org.apache.spark.sql.functions.avg
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Date: 2025/5/14
 * Author: Ys
 * Description: Groups employees by department and reports departments
 *              whose average salary exceeds a threshold.
 */
object GroupFilterDepartment {

  /**
   * Reads the employee CSV, computes each department's average salary,
   * and prints the departments whose average salary is greater than 8000.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("GroupFilterDepartment").master("local[*]").getOrCreate()
    // Suppress Spark's INFO/WARN noise so the show() output stays readable.
    spark.sparkContext.setLogLevel("ERROR")

    // inferSchema makes "salary" a numeric column instead of a string,
    // so avg() aggregates real numbers rather than relying on implicit casts.
    val df: DataFrame = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("input/sql/employees.csv")

    // Step 1: group by department and compute each department's average salary.
    val avgSalaryDF: DataFrame = df.groupBy("department").agg(avg("salary").as("avg_salary"))
    // Step 2: keep only departments with an average salary above 8000.
    // NOTE(review): a previous comment said 6000, but the code has always
    // filtered at 8000 — the comment was wrong, the threshold is kept as-is.
    avgSalaryDF.filter("avg_salary>8000").show()

    spark.stop()
  }

}
