import org.apache.spark.sql.SparkSession

/**
 * Reads two half-year employee salary CSVs, and prints the distinct names of
 * employees whose salary (column 6) exceeds 200,000 in either file.
 *
 * CSV layout assumption: column 1 is the employee name, column 6 is the
 * integer salary, first line of each file is a header — TODO confirm against
 * the actual files; a malformed row will throw NumberFormatException /
 * ArrayIndexOutOfBoundsException, same as the original code.
 */
object data1_core3 {
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    val salaryThreshold = 200000

    // Load a CSV as (name, salary) pairs, skipping the header line.
    // The header is only in partition 0, so we drop the first record there.
    // Extracted as a local helper: the original duplicated this pipeline
    // verbatim for both input files.
    def loadNameSalary(path: String) =
      sc.textFile(path)
        .mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)
        .map { line =>
          val fields = line.split(",")
          (fields(1), fields(6).toInt)
        }

    val firstHalf  = loadNameSalary("D:\\pp\\Employee_salary_first_half.csv")
    val secondHalf = loadNameSalary("D:\\pp\\Employee_salary_second_half.csv")

    // Union first, then filter/project/distinct once — equivalent to
    // filtering each half separately and unioning the results.
    val highEarners = firstHalf
      .union(secondHalf)
      .filter(_._2 > salaryThreshold)
      .map(_._1)
      .distinct()

    highEarners.collect().foreach(println)

    // Release Spark resources before exit (the original leaked the session).
    spark.stop()
  }
}
