package org.example

import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

class yun248 {

  /** Entry point: reads two half-year salary CSV exports, drops each file's
    * header row, keeps the names of employees whose salary exceeds 200,000 in
    * either half, and prints the distinct set of those names.
    *
    * NOTE(review): for spark-submit this should be an `object`, not a `class`
    * — a `main` defined on a class is not a runnable application entry point.
    * Left as a class so existing references keep compiling; confirm and
    * convert to an `object` if nothing instantiates it.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("spark").getOrCreate()
    val sc = spark.sparkContext
    try {
      val firstHalf  = sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_first_half.csv")
      val secondHalf = sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_second_half.csv")

      // Remove the CSV header, which lives on the first line of partition 0.
      // BUG FIX: the original did `if (ix == 0) it.drop(1); it` — the iterator
      // returned by `drop` was discarded and the original `it` returned, which
      // is undefined once `drop` has been invoked on it, so the header row
      // could leak through (and later crash `.toInt`). Return the result of
      // `drop` via an explicit else branch instead.
      def dropHeader(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[String] =
        rdd.mapPartitionsWithIndex((ix, it) => if (ix == 0) it.drop(1) else it)

      // Parse a row into (name, salary). Assumes column 1 is the employee name
      // and column 6 the salary — TODO confirm against the CSV schema.
      // NOTE(review): naive split(",") + toInt will throw on quoted or
      // malformed rows; kept as-is to preserve the original behavior.
      def parse(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[(String, Int)] =
        dropHeader(rdd).map { line =>
          val cols = line.split(",")
          (cols(1), cols(6).toInt)
        }

      val threshold = 200000 // salary cutoff; was a magic number repeated twice
      val highEarners = parse(firstHalf)
        .union(parse(secondHalf))
        .filter { case (_, salary) => salary > threshold }
        .map { case (name, _) => name }
        .distinct()

      highEarners.collect().foreach(println)
    } finally {
      // FIX: the original never stopped the session, leaking the local cluster.
      spark.stop()
    }
  }

}