import org.apache.spark.sql.SparkSession
object am {
  /**
   * Reads two half-year employee-salary CSVs, drops their header rows,
   * and prints each employee's average monthly salary as "name, average".
   */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (local mode, all available cores).
    val spark = SparkSession.builder()
      .appName("EmployeeSalary")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    try {
      // Loads one salary CSV and parses each row into (name, monthlySalary).
      // Assumed column layout: name at index 1, salary at index 6 — TODO confirm against the CSVs.
      def loadSalaries(path: String) =
        sc.textFile(path)
          // The header is the first line of the first partition. The original code
          // called `it.drop(1)` for its side effect and then returned `it`, which
          // leaves the iterator in an undefined state per the Iterator contract;
          // the if/else expression below is the correct form.
          .mapPartitionsWithIndex((idx, it) => if (idx == 0) it.drop(1) else it)
          .map { line =>
            val fields = line.split(",")
            (fields(1), fields(6).toInt)
          }

      val firstHalf = loadSalaries("file:///D:/Employee_salary_first_half.csv")
      val secondHalf = loadSalaries("file:///D:/Employee_salary_second_half.csv")
      val salaries = firstHalf.union(secondHalf)

      // Per-employee (totalSalary, recordCount). The original combiner never
      // incremented the count (it stayed 0) and hard-coded a divisor of 12;
      // tracking the actual number of salary rows makes the average correct
      // even when an employee does not have exactly 12 records (and gives the
      // same result when they do).
      val totals = salaries.combineByKey(
        (salary: Int) => (salary, 1),
        (acc: (Int, Int), salary: Int) => (acc._1 + salary, acc._2 + 1),
        (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)
      )

      // Average monthly salary per employee, one "name, average" line each.
      totals
        .map { case (name, (total, count)) => (name, total.toDouble / count) }
        .collect()
        .foreach { case (name, avgMonthly) => println(s"$name, $avgMonthly") }
    } finally {
      // Stop the session exactly once; the original also stopped it inside
      // the try block, which made the finally-stop redundant.
      spark.stop()
    }
  }
}

