package org
import org.apache.spark.sql.SparkSession

object data3 {

  /** Entry point: reads the first-half employee salary CSV, prints the
    * distinct names of employees earning over 200,000, then writes each
    * employee's monthly average pay (half-year total / 12) as a single
    * headered CSV under the report directory.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("data3") // explicit name instead of Spark's auto-generated one
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    val sc = spark.sparkContext

    // NOTE(review): second_half is read but never used below — presumably it
    // should be unioned with first_half before aggregating; confirm intent.
    val first_half = sc.textFile("C:\\Employee_salary_first_half.csv")
    val second_half = sc.textFile("C:\\Employee_salary_second_half.csv")

    // Drop the header row (it lives in partition 0 of a single-file read),
    // then project (name, salary) from columns 1 and 6 of each CSV line.
    // Assumes column 6 always parses as an Int — TODO confirm data is clean.
    val split_first = first_half.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it
    ).map { line =>
      val data = line.split(',')
      (data(1), data(6).toInt)
    }

    // Distinct names of employees whose half-year salary exceeds 200,000.
    val high_earners = split_first.filter(_._2 > 200000).keys.distinct()
    high_earners.collect().foreach(println)

    // Sum per employee with reduceByKey (map-side combine) instead of
    // groupByKey, which would shuffle every individual record; the resulting
    // sums — and therefore the averages — are identical.
    split_first
      .reduceByKey(_ + _)
      .mapValues(_.toDouble / 12)
      .toDF("员工名", "月平均工资")
      .repartition(1) // collapse to a single output file
      .write
      .option("header", "true")
      .csv("C:\\Users\\Administrator\\Desktop\\salary_report")

    spark.stop()
  }
}
//    val data1 = sc.makeRDD(List(("张三",2000),("李四",2500),("王五",5000),("张三",3600)))
//    val data2 = sc.makeRDD(List(("张三",3000),("李四",3500),("王五",6000),("张三",4600)))
//
//    data1.leftOuterJoin(data2).foreach(println)
//    data1.rightOuterJoin(data2).foreach(println)
//
//    data1.combineByKey(
//      v => (v,1),
//      (t:(Int,Int), v) => (t._1 + v, t._2 + 1),
//      (t1:(Int,Int), t2:(Int,Int)) => (t1._1 + t2._1, t1._2 + t2._2)
//    ).foreach(println)
//    data1.aggregateByKey(0)(math.max(_,_),_+_).foreach(println)
//    sc.stop()
//
//  }
//}