import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object scala4232 {

  /** Salary threshold in yuan; employees earning strictly more are reported. */
  private val SalaryThreshold = 200000

  /** Removes the CSV header line.
   *
   *  `sc.textFile` places the file's first line at the start of partition 0,
   *  so dropping one element from that partition only removes the header.
   */
  private def dropHeader(lines: RDD[String]): RDD[String] =
    lines.mapPartitionsWithIndex { (partitionIndex, rows) =>
      if (partitionIndex == 0) rows.drop(1) else rows
    }

  /** Parses one CSV row into (employeeName, actualSalary).
   *
   *  Column 2 (index 1) is the employee name; column 7 (index 6) is the
   *  actual salary.
   *  NOTE(review): `toInt` throws on blank or non-integer salary cells —
   *  confirm the input files are clean, or add validation upstream.
   */
  private def parseNameSalary(line: String): (String, Int) = {
    val cols = line.split(",")
    (cols(1), cols(6).toInt)
  }

  /**
   * Reads the first-half and second-half salary CSV files, keeps employees
   * whose actual salary exceeds [[SalaryThreshold]] in either half, and
   * prints the distinct employee names.
   *
   * @param args optional overrides: args(0) = first-half CSV path,
   *             args(1) = second-half CSV path. Defaults preserve the
   *             original hard-coded paths.
   */
  def main(args: Array[String]): Unit = {
    val firstHalfPath =
      if (args.length > 0) args(0)
      else "C:\\Users\\PC-608\\Desktop\\Employee_salary_first_half.csv"
    val secondHalfPath =
      if (args.length > 1) args(1)
      else "C:\\Users\\PC-608\\Desktop\\Employee_salary_second_half.csv"

    // local[*] is suitable for standalone runs; adjust master for a real cluster.
    val conf = new SparkConf().setAppName("SalaryFilter").setMaster("local[*]")
    val sc = new SparkContext(conf)
    try {
      val firstHalf = dropHeader(sc.textFile(firstHalfPath)).map(parseNameSalary)
      val secondHalf = dropHeader(sc.textFile(secondHalfPath)).map(parseNameSalary)

      // An employee qualifies if EITHER half exceeds the threshold, so the
      // filter can run after the union; distinct() removes duplicate names.
      val highEarners = firstHalf
        .union(secondHalf)
        .filter { case (_, salary) => salary > SalaryThreshold }
        .map { case (name, _) => name }
        .distinct()

      highEarners.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if a stage fails mid-job.
      sc.stop()
    }
  }
}