import org.apache.spark.{SparkConf, SparkContext}


/**
 * Spark batch job: sums each employee's Net_Pay across two half-year CSV
 * files and prints the totals in descending order of total pay.
 *
 * Expected CSV layout: column 0 = EmpID, column 6 = Net_Pay, with a header
 * row whose first field is "EmpID".
 */
object ReduceByKeyRDDTest {
  def main(args: Array[String]): Unit = {
    // Initialize Spark configuration and context (single-node local mode).
    val conf = new SparkConf().setAppName("EmployeeSalaryAnalysis").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // Load the first- and second-half salary data files.
      // NOTE(review): "D/..." looks like it may be a truncated Windows path
      // ("D:/...") — confirm the intended location before deploying.
      val firstHalfData = sc.textFile("D/Employee_salary_first_half.csv")
      val secondHalfData = sc.textFile("D/Employee_salary_second_half.csv")

      // Parse one data row into (EmpID, Net_Pay). Assumes no quoted commas
      // inside fields — TODO confirm against the real CSV.
      def parseLine(line: String): (String, Double) = {
        val fields = line.split(",")
        (fields(0), fields(6).toDouble)
      }

      // Drop the header row BEFORE parsing, then convert to key/value pairs.
      // BUG FIX: the original filtered on the key AFTER map(parseLine), so
      // fields(6).toDouble ran on the header text ("Net_Pay") and threw
      // NumberFormatException as soon as an action was executed.
      def toPairs(lines: org.apache.spark.rdd.RDD[String]) =
        lines.filter(line => line.split(",", 2)(0) != "EmpID").map(parseLine)

      val firstHalfRDD = toPairs(firstHalfData)
      val secondHalfRDD = toPairs(secondHalfData)

      // Union both halves and accumulate Net_Pay per EmpID.
      val combinedRDD = firstHalfRDD.union(secondHalfRDD)
      val totalSalaryRDD = combinedRDD.reduceByKey(_ + _)

      // Sort by total pay, highest first, and print the results.
      val sortedResults = totalSalaryRDD.sortBy(_._2, ascending = false)

      // BUG FIX: the original called printf(s"..."), handing an
      // already-interpolated string to printf as a FORMAT string — any '%'
      // in the data would be misread as a format specifier. The f-string
      // interpolator also replaces the deprecated .formatted("%.2f").
      sortedResults.collect().foreach { case (empID, total) =>
        println(f"Employee ID: $empID, total Net Pay: $total%.2f")
      }
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }

}
