package org.example
import org.apache.spark.sql.SparkSession
object data1 {

  /** Minimum actual salary (in yuan) used to select high earners. */
  private val SalaryThreshold = 200000

  /** Drops the header row (first line of partition 0) from a text-file RDD. */
  private def dropHeader(rdd: org.apache.spark.rdd.RDD[String]): org.apache.spark.rdd.RDD[String] =
    rdd.mapPartitionsWithIndex { (idx, iter) =>
      if (idx == 0) iter.drop(1) else iter
    }

  /** Parses one CSV line into (employee name, actual salary).
   *  Assumes column 1 is the name and column 6 is an integer salary —
   *  NOTE(review): confirm against the CSV schema; `.toInt` throws on malformed rows.
   */
  private def parseNameSalary(line: String): (String, Int) = {
    val fields = line.split(",")
    (fields(1), fields(6).toInt)
  }

  /** Entry point: prints the distinct names of employees whose actual salary
   *  exceeded 200,000 yuan in either the first or the second half of the year.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    try {
      val firstHalf  = dropHeader(sc.textFile("C:\\Employee_salary_first_half.csv"))
      val secondHalf = dropHeader(sc.textFile("C:\\Employee_salary_second_half.csv"))

      val firstEarners  = firstHalf.map(parseNameSalary).filter(_._2 > SalaryThreshold).map(_._1)
      val secondEarners = secondHalf.map(parseNameSalary).filter(_._2 > SalaryThreshold).map(_._1)

      // Union + distinct: employees above the threshold in either half, each listed once.
      val highEarners = firstEarners.union(secondEarners).distinct()

      println("上半年或下半年实际薪资大于20万元的员工姓名")
      highEarners.collect().foreach(println)
    } finally {
      // Fix: the session was never stopped (old `sc.stop()` was commented out),
      // leaking the local Spark cluster on every run.
      spark.stop()
    }
  }
}
