package org.example
import org.apache.spark.sql.SparkSession

/**
 * Reads two half-year employee-salary CSVs, keeps employees whose salary
 * (column 6) exceeds [[speakData4.SalaryThreshold]], and prints the distinct
 * employee names (column 1) to stdout.
 *
 * Input paths may be overridden via `args(0)` / `args(1)`; they default to
 * the original hard-coded locations, so existing invocations are unchanged.
 */
object speakData4 {

  /** Minimum salary (exclusive) for an employee to be reported. */
  private val SalaryThreshold = 200000

  def main(args: Array[String]): Unit = {
    // Backward-compatible generalization: optional CLI overrides for the paths.
    val firstPath  = if (args.length > 0) args(0) else "E:\\yhm\\Employee_salary_first_half.csv"
    val secondPath = if (args.length > 1) args(1) else "E:\\yhm\\Employee_salary_second_half.csv"

    // appName is mandatory when the session is built outside spark-submit;
    // without it SparkContext creation throws at startup.
    val spark = SparkSession
      .builder()
      .appName("speakData4")
      .master("local[*]")
      .getOrCreate()

    try {
      val sc = spark.sparkContext

      // Drop the CSV header: only the first line of partition 0 is the header.
      val stripHeader = (ix: Int, it: Iterator[String]) =>
        if (ix == 0) it.drop(1) else it

      // Parse one CSV row into (name, salary): column 1 = name, column 6 = salary.
      // NOTE(review): assumes no quoted/embedded commas in the CSV — TODO confirm.
      val parseRow = (line: String) => {
        val fields = line.split(",")
        (fields(1), fields(6).toInt)
      }

      // Names of employees in one file earning above the threshold.
      // (Shared pipeline — previously duplicated verbatim per file.)
      def highEarnerNames(path: String) =
        sc.textFile(path)
          .mapPartitionsWithIndex(stripHeader)
          .map(parseRow)
          .filter(_._2 > SalaryThreshold)
          .map(_._1)

      val names = highEarnerNames(firstPath)
        .union(highEarnerNames(secondPath))
        .distinct()

      names.collect().foreach(println)
    } finally {
      // Always release the session, even if the job fails; this also stops
      // the underlying SparkContext.
      spark.stop()
    }
  }

}

