import org.apache.spark.{SparkConf, SparkContext}

object add {

  /** Employees earning strictly more than this (in either half) are reported. */
  private val SalaryThreshold = 200000

  /**
   * Parses one CSV record into (employeeName, salary).
   *
   * Column layout is assumed from the original hard-coded indices:
   * name in column 1, salary in column 6 — TODO confirm against the CSV header.
   * Returns None for rows that are too short or carry a non-numeric salary,
   * so a single malformed line no longer aborts the whole job with
   * NumberFormatException / ArrayIndexOutOfBoundsException.
   */
  private def parseRecord(line: String): Option[(String, Int)] = {
    val fields = line.split(",", -1) // -1 keeps trailing empty columns intact
    if (fields.length > 6)
      scala.util.Try(fields(6).trim.toInt).toOption.map(salary => (fields(1), salary))
    else None
  }

  /**
   * Loads one CSV, drops the header line (first line of partition 0, as in the
   * original job), and keeps the names of employees above [[SalaryThreshold]].
   */
  private def highEarners(sc: SparkContext, path: String): org.apache.spark.rdd.RDD[String] =
    sc.textFile(path)
      .mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)
      .flatMap(parseRecord)
      .collect { case (name, salary) if salary > SalaryThreshold => name }

  /**
   * Prints the distinct names of employees whose salary exceeds 200,000
   * in either half-year file.
   *
   * @param args optional overrides: args(0) = first-half CSV path,
   *             args(1) = second-half CSV path. Defaults preserve the
   *             original hard-coded Windows paths.
   */
  def main(args: Array[String]): Unit = {
    val firstPath =
      if (args.length > 0) args(0)
      else "C:\\Users\\PC-608\\Downloads\\Employee_salary_first_half.csv"
    val secondPath =
      if (args.length > 1) args(1)
      else "C:\\Users\\PC-608\\Downloads\\Employee_salary_second_half.csv"

    // Local mode; override the master via spark-submit for cluster runs.
    val conf = new SparkConf().setAppName("SalaryFilter").setMaster("local[*]")
    val sc = new SparkContext(conf)
    try {
      val names = highEarners(sc, firstPath)
        .union(highEarners(sc, secondPath))
        .distinct()
        .collect()
      names.foreach(println)
    } finally {
      sc.stop() // always release the context, even if an action fails
    }
  }
}