import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.util.Try

object scala4252 {

  /** Entry point: sums each employee's net pay across the two half-year
    * salary CSV files and prints one line per employee.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("Employee Salary Analysis")
      .master("local[*]")
      .getOrCreate()

    try {
      val sc = spark.sparkContext

      // Load both half-year files as (EmpID, Net_Pay) key-value pairs.
      val firstHalf  = loadNetPay(sc, "Employee_salary_first_half.csv")
      val secondHalf = loadNetPay(sc, "Employee_salary_second_half.csv")

      // Combine the halves and total the net pay per employee ID.
      val totalByEmployee: RDD[(Int, Double)] =
        firstHalf.union(secondHalf).reduceByKey(_ + _)

      // Collect to the driver and print the results.
      totalByEmployee.collect().foreach { case (empId, totalNetPay) =>
        println(s"Employee ID: $empId, Total Net Pay: $totalNetPay")
      }
    } finally {
      // Stop the session even if the job fails; this also stops the
      // underlying SparkContext, so no separate sc.stop() is needed.
      spark.stop()
    }
  }

  /** Reads a salary CSV and extracts (EmpID, Net_Pay) from columns 0 and 6.
    *
    * Rows that cannot be parsed — e.g. a header line, short rows, or
    * malformed numbers — are silently skipped rather than crashing the job.
    *
    * @param sc   active SparkContext used to read the file
    * @param path path to the CSV file
    * @return RDD of (employee ID, net pay) pairs
    */
  private def loadNetPay(sc: SparkContext, path: String): RDD[(Int, Double)] =
    sc.textFile(path).flatMap { line =>
      val fields = line.split(",")
      // Try covers NumberFormatException and ArrayIndexOutOfBoundsException;
      // toOption drops unparseable rows from the RDD.
      Try((fields(0).trim.toInt, fields(6).trim.toDouble)).toOption
    }
}