import org.apache.spark.rdd
import org.apache.spark.sql.SparkSession

object price {

  /** Salary report job: reads the two half-year employee-salary CSV files,
    * drops the header row of each, and prints the distinct names (column 1)
    * of employees whose salary (column 6) exceeds 200,000 in either half.
    */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (local mode, all available cores).
    val spark = SparkSession.builder()
      .appName("Test")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    try {
      // Drop the CSV header, which textFile places at the start of partition 0.
      // BUG FIX: the original computed `it.drop(1)` but discarded the result
      // and returned the already-touched `it`; reusing an Iterator after
      // calling `drop` on it is undefined, so the header might not be skipped.
      // Return the result of `drop` explicitly instead.
      def dropHeader(lines: rdd.RDD[String]): rdd.RDD[String] =
        lines.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

      // Parse a CSV line into (name, salary): column 1 = name, column 6 = salary.
      // NOTE(review): assumes no quoted commas and a numeric column 6 — confirm
      // against the input files; `split(",")` + `toInt` will throw otherwise.
      def nameAndSalary(lines: rdd.RDD[String]): rdd.RDD[(String, Int)] =
        lines.map { line =>
          val fields = line.split(",")
          (fields(1), fields(6).toInt)
        }

      val firstHalf  = dropHeader(sc.textFile("D:\\Employee_salary_first_half.csv"))
      val secondHalf = dropHeader(sc.textFile("D:\\Employee_salary_second_half.csv"))

      // Names with salary strictly above 200,000 in each half.
      val highFirst  = nameAndSalary(firstHalf).filter(_._2 > 200000).map(_._1)
      val highSecond = nameAndSalary(secondHalf).filter(_._2 > 200000).map(_._1)

      // Union the two halves and de-duplicate the names.
      val names = highFirst.union(highSecond).distinct()

      println("=" * 40)
      println("符合条件员工列表（薪资>200,000）：")
      // Single action; the original's redundant second `collect` (which
      // recomputed the whole lineage and discarded the result) was removed.
      names.collect().foreach(println)
      println("=" * 40)
    }
    finally {
      // Always stop the SparkSession, even if the job fails.
      spark.stop()
    }
  }
}
