package chapter04

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession

object Test09_salary {
  /** Reads two half-year employee salary CSV files, prints their schemas and a
    * sample, shows the top-10 employees by average deduction, and writes the
    * per-department maximum deduction to disk partitioned by the first letter
    * of the department name.
    */
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val spark = SparkSession.builder()
      .appName("score")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    // Read the first salary file (explicit format/options style).
    val dfFirst = spark.read.format("csv")
      .option("sep", ",")
      .option("inferSchema", "true")
      .option("header", "true")
      .load("input/Employee_salary_first_half.csv")
    dfFirst.printSchema()
    dfFirst.show(5)

    // Read the second salary file (shorthand csv() style).
    val dfSecond = spark.read
      .option("inferSchema", "true")
      .option("header", "true")
      .csv("input/Employee_salary_second_half.csv")
    dfSecond.printSchema()
    dfSecond.show(5)

    // Combine the two halves ONCE and cache: the original built the union twice,
    // re-reading both files for each job below. unionByName matches columns by
    // name rather than position, guarding against the two CSVs declaring their
    // columns in different orders (identical result when schemas already align).
    val combined = dfFirst.unionByName(dfSecond).cache()

    // Top-10 employees by average deduction across both halves.
    combined
      .groupBy("EmpID", "Name")
      .avg("Deduction")
      .sort($"avg(Deduction)".desc)
      .show(10)

    // Maximum deduction per department, written out partitioned by the first
    // letter of the department name.
    combined
      .groupBy("Department")
      .max("Deduction")
      // withColumn adds the column (or replaces it if the name already exists).
      // Column.substr is 1-based; the original's substr(0, 1) only worked
      // because Spark clamps position 0 to 1 — say 1 explicitly.
      .withColumn("newDeduction", $"Department".substr(1, 1))
      .repartition(1) // collapse to a single output file per partition directory
      .write
      .partitionBy("newDeduction")
      // NOTE(review): writing results under "input/" is surprising — an
      // "output/" directory would be clearer; path kept to preserve behavior.
      .csv("input/ss")

    combined.unpersist()
    spark.stop() // release local Spark resources on exit
  }
}
