import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object a {

  /** Exclusive salary threshold used to select high earners from the CSVs. */
  private val SalaryThreshold = 200000

  /**
   * Removes the CSV header row from a line-oriented RDD.
   *
   * Only partition 0 can contain the header (textFile preserves file order),
   * so we drop the first line there and pass every other partition through.
   */
  private def removeHeader(rdd: RDD[String]): RDD[String] =
    rdd.mapPartitionsWithIndex { (ix, it) =>
      if (ix == 0) it.drop(1) else it
    }

  /**
   * Parses one CSV data line into a (name, salary) pair.
   *
   * Column layout assumed from the input files: index 1 = employee name,
   * index 6 = salary (integer). NOTE(review): a malformed row (too few
   * columns or non-numeric salary) will throw at action time — confirm the
   * input files are clean or add validation if they are not.
   */
  private def parseNameSalary(line: String): (String, Int) = {
    val fields = line.split(",")
    (fields(1), fields(6).toInt)
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    // Ensure the SparkSession is always released, even if a job fails.
    try {
      val sc = spark.sparkContext

      // Example RDD pipeline: add 2, sort ascending, keep evens, take first.
      val rdd: RDD[Int] = sc.makeRDD(List(1, 4, 3, 2, 5), 1)
      val mapRDD = rdd.map(_ + 2).sortBy(identity).filter(_ % 2 == 0).take(1)
      mapRDD.foreach(println)

      // Read the two half-year salary CSV files.
      val firstHalf = sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_first_half.csv")
      val secondHalf = sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_second_half.csv")

      // Strip headers and parse each line into (name, salary).
      val firstPairs = removeHeader(firstHalf).map(parseNameSalary)
      val secondPairs = removeHeader(secondHalf).map(parseNameSalary)

      // Names earning above the threshold in either half, de-duplicated.
      val highFirst = firstPairs.filter(_._2 > SalaryThreshold).map(_._1)
      val highSecond = secondPairs.filter(_._2 > SalaryThreshold).map(_._1)
      val names = highFirst.union(highSecond).distinct()
      names.foreach(println)
    } finally {
      spark.stop() // release local executor threads and the UI port
    }
  }
}
