import org.apache.spark.{SparkConf, SparkContext}

object EmployeeMonthlySalary {

  /** Entry point: computes each employee's average monthly salary for the
    * year from two half-year data sets and prints the result, formatted to
    * two decimal places, using a local Spark context.
    */
  def main(args: Array[String]): Unit = {
    // Run locally on all available cores; keep the console quiet.
    val sparkConf = new SparkConf()
      .setAppName("EmployeeMonthlySalary")
      .setMaster("local[*]")
    val sparkCtx = new SparkContext(sparkConf)
    sparkCtx.setLogLevel("WARN")

    // Monthly salary records (employee name, salary) for January-June.
    val janToJun = Seq(
      ("张三", 8500.0), ("李四", 9200.0), ("王五", 7800.0),
      ("张三", 8700.0), ("李四", 9300.0), ("王五", 7900.0),
      ("张三", 8600.0), ("李四", 9100.0), ("王五", 8000.0),
      ("张三", 8800.0), ("李四", 9400.0), ("王五", 8100.0),
      ("张三", 8900.0), ("李四", 9500.0), ("王五", 8200.0),
      ("张三", 9000.0), ("李四", 9600.0), ("王五", 8300.0)
    )

    // Monthly salary records (employee name, salary) for July-December.
    val julToDec = Seq(
      ("张三", 9100.0), ("李四", 9700.0), ("王五", 8400.0),
      ("张三", 9200.0), ("李四", 9800.0), ("王五", 8500.0),
      ("张三", 9300.0), ("李四", 9900.0), ("王五", 8600.0),
      ("张三", 9400.0), ("李四", 10000.0), ("王五", 8700.0),
      ("张三", 9500.0), ("李四", 10100.0), ("王五", 8800.0),
      ("张三", 9600.0), ("李四", 10200.0), ("王五", 8900.0)
    )

    // One pair RDD covering the full year (`++` is an alias for union).
    val yearRDD = sparkCtx.parallelize(janToJun) ++ sparkCtx.parallelize(julToDec)

    // Per-employee (salary total, month count), built by pairing each salary
    // with a count of 1 and summing both components in the reduce.
    val totalsByName = yearRDD
      .mapValues(salary => (salary, 1))
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))

    // Mean salary per employee, rendered with two decimal places.
    val averages = totalsByName.map { case (who, (sum, months)) =>
      (who, f"${sum / months}%1.2f")
    }

    println("2020年员工月均实际薪资:")
    averages.collect().foreach { case (who, mean) =>
      println(s"$who: ￥$mean")
    }

    sparkCtx.stop()
  }
}