package org.example
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.Try

object zy {

  /** Salary threshold (exclusive): employees earning more than this are reported. */
  private val SalaryThreshold = 200000

  /**
   * Loads a CSV of employee records from `path`, drops the header row, and
   * returns the names (column index 1) of employees whose salary
   * (column index 6) exceeds `threshold`.
   *
   * Malformed rows — fewer than 7 columns, or a salary that does not parse as
   * an integer — are skipped instead of failing the whole job.
   *
   * NOTE(review): assumes name lives in column 1 and salary in column 6 of the
   * CSV — confirm against the actual file schema.
   */
  private def highEarners(sc: SparkContext, path: String, threshold: Int): RDD[String] = {
    // Drop the header line, which lives in the first partition only.
    val withoutHeader = sc.textFile(path).mapPartitionsWithIndex { (ix, it) =>
      if (ix == 0) it.drop(1) else it
    }
    withoutHeader.flatMap { line =>
      val cols = line.split(",")
      if (cols.length > 6)
        Try(cols(6).trim.toInt).toOption.collect { case salary if salary > threshold => cols(1) }
      else
        None // short row: skip rather than throw ArrayIndexOutOfBoundsException
    }
  }

  def main(args: Array[String]): Unit = {
    // Input paths may be overridden on the command line; fall back to the
    // original hard-coded locations for backward compatibility.
    val firstPath =
      if (args.length > 0) args(0)
      else "C:/Users/Administrator/Desktop/Employee_salary_first_half.csv"
    val secondPath =
      if (args.length > 1) args(1)
      else "C:/Users/Administrator/Desktop/Employee_salary_second_half.csv"

    val conf = new SparkConf().setAppName("SalaryFilter").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // Distinct names of high earners across both halves of the year.
      val names = highEarners(sc, firstPath, SalaryThreshold)
        .union(highEarners(sc, secondPath, SalaryThreshold))
        .distinct()
      names.collect().foreach(println)
    } finally {
      sc.stop() // always release the SparkContext, even if the job fails
    }
  }

}