package org.example

import org.apache.spark.sql.SparkSession

object data2_core {

  /** Reads two half-year employee salary CSV exports, drops each file's
    * header row, and prints the distinct names (column 1) of employees whose
    * salary (column 6) exceeds the threshold in either half of the year.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Salary cutoff for a "high earner".
    val threshold = 200000

    // The CSV header lives only in partition 0; drop its first row there.
    // BUG FIX: the original called `it.drop(1)` and then returned `it` —
    // using an Iterator after calling drop on it is undefined behavior.
    // The dropped iterator itself must be returned.
    def withoutHeader(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[String] =
      rdd.mapPartitionsWithIndex((idx, rows) => if (idx == 0) rows.drop(1) else rows)

    // Parse one CSV row into (name, salary); a malformed salary field maps
    // to 0 so the row is kept but can never pass the threshold filter.
    def parse(line: String): (String, Int) = {
      val fields = line.split(",")
      val salary =
        try fields(6).toInt
        catch { case _: NumberFormatException => 0 }
      (fields(1), salary)
    }

    val firstHalf =
      withoutHeader(sc.textFile("F:\\Employee_salary_first_half.csv")).map(parse)
    val secondHalf =
      withoutHeader(sc.textFile("F:\\Employee_salary_second_half.csv")).map(parse)

    // Union-then-filter is equivalent to filtering each half separately;
    // distinct() removes names that qualify in both halves.
    val highEarners = firstHalf
      .union(secondHalf)
      .filter { case (_, salary) => salary > threshold }
      .map { case (name, _) => name }
      .distinct()

    highEarners.collect().foreach(println)
    sc.stop()
  }
}
