import org.apache.spark.{SparkConf, SparkContext}

import scala.util.Try

/**
 * Computes each employee's average monthly net pay from two half-year CSV
 * salary extracts and prints the results sorted by employee name.
 */
object EmployeeSalaryAnalysis {
  def main(args: Array[String]): Unit = {
    // Run locally on all available cores; suitable for a standalone analysis job.
    val conf = new SparkConf().setAppName("EmployeeMonthlyAverageSalary").setMaster("local[*]")
    val sc = new SparkContext(conf)

    try {
      // Load the first-half and second-half salary CSV files.
      // NOTE(review): "D/..." looks like a typo for a Windows "D:/..." path — confirm.
      val firstHalfData = sc.textFile("D/Employee_salary_first_half(1).csv")
      val secondHalfData = sc.textFile("D/Employee_salary_second_half(1).csv")

      // Parse one CSV row into (employeeName, netPay).
      // Column 1 is assumed to hold the name and column 6 the monthly net pay —
      // TODO confirm against the file header.
      // Malformed rows (too few columns or a non-numeric pay field) yield None
      // instead of aborting the whole job with an ArrayIndexOutOfBoundsException
      // or NumberFormatException, as the previous total parser did.
      def parseLine(line: String): Option[(String, Double)] = {
        val fields = line.split(",", -1) // -1 keeps trailing empty columns
        if (fields.length > 6)
          Try(fields(6).trim.toDouble).toOption.map(pay => (fields(1).trim, pay))
        else
          None
      }

      // Drop the header (the file's global first line), then parse each row,
      // silently skipping unparseable ones via flatMap.
      val firstHalfRDD =
        firstHalfData.zipWithIndex().filter(_._2 != 0).map(_._1).flatMap(parseLine)
      val secondHalfRDD =
        secondHalfData.zipWithIndex().filter(_._2 != 0).map(_._1).flatMap(parseLine)

      // Union both halves, accumulate per-employee (totalPay, monthCount) in a
      // single shuffle, then reduce to the average monthly pay.
      val resultRDD = firstHalfRDD
        .union(secondHalfRDD)
        .combineByKey(
          (pay: Double) => (pay, 1),
          (acc: (Double, Int), pay: Double) => (acc._1 + pay, acc._2 + 1),
          (acc1: (Double, Int), acc2: (Double, Int)) => (acc1._1 + acc2._1, acc1._2 + acc2._2)
        )
        .map { case (name, (totalPay, months)) => (name, totalPay / months) }

      // Sort by employee name for deterministic, readable output.
      val sortedResults = resultRDD.sortBy(_._1)

      // Print each average rounded to two decimal places.
      // The f interpolator replaces the deprecated StringOps.formatted("%.2f").
      sortedResults.collect().foreach { case (name, averagePay) =>
        println(f"Employee Name: $name, Average Monthly Net Pay: $averagePay%.2f")
      }
    } finally {
      // Always release the SparkContext, even if an action above throws.
      sc.stop()
    }
  }
}
