package hfy

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Finds the distinct names of employees earning more than 200,000 across
 * two half-year salary CSV extracts and prints them to stdout.
 *
 * Expected CSV layout (after the header row): column 1 = employee name,
 * column 6 = salary — NOTE(review): inferred from the indices used; confirm
 * against the actual files.
 */
object employee {

  /** Salary cutoff used to select high earners. */
  private val SalaryThreshold = 200000

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("EmployeeSalaryAnalysis")
      .setMaster("local[*]")
    val sc = new SparkContext(conf)
    try {
      // Drop the CSV header: it is the first line of partition 0.
      // BUG FIX: the original wrote `if (ix == 0) it.drop(1); it`, discarding
      // the iterator returned by `drop` and handing back the untouched `it`.
      // With the lazy Iterator.drop of Scala 2.13+ the header is NOT removed,
      // and `.toInt` later throws on the header text. `else it` is required.
      val skipHeader = (ix: Int, it: Iterator[String]) =>
        if (ix == 0) it.drop(1) else it

      // Parse one row into (name, salary). Malformed rows (too few columns
      // or a non-numeric salary) yield None and are skipped via flatMap,
      // instead of a single bad line crashing the whole job.
      val parseLine = (line: String) => {
        val fields = line.split(",")
        if (fields.length > 6)
          scala.util.Try(fields(6).trim.toInt).toOption.map(salary => (fields(1), salary))
        else
          None
      }

      val firstHalf =
        sc.textFile("D:\\Employee_salary_first_half.csv").mapPartitionsWithIndex(skipHeader)
      val secondHalf =
        sc.textFile("D:\\Employee_salary_second_half.csv").mapPartitionsWithIndex(skipHeader)

      // Union first, then filter/project/distinct — equivalent to filtering
      // each half separately, but without the duplicated pipeline.
      val highEarners = firstHalf
        .union(secondHalf)
        .flatMap(parseLine)
        .filter { case (_, salary) => salary > SalaryThreshold }
        .map { case (name, _) => name }
        .distinct()

      highEarners.collect().foreach(println)
    } finally {
      sc.stop() // release the local Spark context even if the job fails
    }
  }
}
