import org.apache.spark.{SparkConf, SparkContext}
object TestRDD2 {

  /** Salary threshold (exclusive) above which an employee is reported. */
  private val SalaryThreshold = 200000

  // NOTE(review): these paths look like they may be missing a drive colon
  // (e.g. "D:/Employee_salary_first_half.csv" on Windows) — confirm.
  private val DefaultFirstPath  = "D/Employee_salary_first_half.csv"
  private val DefaultSecondPath = "D/Employee_salary_second_half.csv"

  /**
   * Loads one salary CSV and returns (name, salary) pairs.
   *
   * Drops the header row (assumed to be the first line of the file, which
   * lands in partition 0 of a single-file textFile read). Assumes the name
   * is column 1 and the salary is column 6 — TODO confirm against the CSV
   * schema. Rows with too few columns or a non-numeric salary are skipped
   * instead of failing the whole job.
   */
  private def loadSalaries(sc: SparkContext, path: String) =
    sc.textFile(path)
      .mapPartitionsWithIndex { (partitionIndex, rows) =>
        // The header line is only ever in the first partition.
        if (partitionIndex == 0) rows.drop(1) else rows
      }
      .flatMap { line =>
        val cols = line.split(",")
        if (cols.length > 6)
          // Try guards against non-numeric salary cells (e.g. blanks).
          scala.util.Try(cols(6).trim.toInt).toOption.map(salary => (cols(1), salary))
        else
          None // malformed row: not enough columns
      }

  /**
   * Entry point: prints the distinct names of employees earning more than
   * [[SalaryThreshold]] across both half-year salary files.
   *
   * Optional args: args(0) = first-half CSV path, args(1) = second-half CSV
   * path; defaults preserve the original hard-coded locations.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("SalaryAnalysis")
      .setMaster("local[*]")

    val sc = new SparkContext(conf)
    try {
      val firstPath  = if (args.length > 0) args(0) else DefaultFirstPath
      val secondPath = if (args.length > 1) args(1) else DefaultSecondPath

      val highEarnersFirst =
        loadSalaries(sc, firstPath).filter(_._2 > SalaryThreshold).map(_._1)
      val highEarnersSecond =
        loadSalaries(sc, secondPath).filter(_._2 > SalaryThreshold).map(_._1)

      val names = highEarnersFirst.union(highEarnersSecond).distinct()

      // collect() brings the (small) result to the driver so the names are
      // printed on the driver console even in non-local deployments;
      // RDD.foreach(println) would print on the executors instead.
      names.collect().foreach(println)
    } finally {
      // Ensure the context is released even if the job fails.
      sc.stop()
    }
  }

}
