package org.example
import org.apache.spark.sql.SparkSession
object ogallbi {
  /**
   * Reads two half-year employee-salary CSVs, skips each file's header row,
   * and prints the distinct names of employees earning more than 200,000
   * in either half.
   *
   * Assumed CSV layout (0-based, inferred from the indices used below):
   * field(1) = employee name, field(6) = integer salary — TODO confirm
   * against the actual files; malformed rows will throw at `toInt`.
   *
   * NOTE(review): input paths are hard-coded Windows paths; parameterize
   * via `args` if this is reused outside the original exercise.
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session using all available cores.
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // Demo RDDs kept from the original exercise; the commented-out calls
    // below illustrate common RDD transformations.
    val data1 = sc.parallelize(List(60, 90, 76, 89, 76), 1)
    val data2 = sc.makeRDD(List(69, 99, 70, 90, 77), 1)
    // Intersection:
    //   data1.intersection(data2).foreach(println)
    // Union:
    //   data1.union(data2).foreach(println)
    // Subtract (values present in data1 but not in data2):
    //   data1.subtract(data2).foreach(println)
    // Zip into (key, value) tuples — both RDDs must have the same partition count:
    //   data1.zip(data2).foreach(println)
    // Cartesian product — 5 * 5 = 25 pairs, each element of data1 with each of data2:
    //   data1.cartesian(data2).foreach(println)
    // Filter:
    //   data1.filter(_ >= 90).foreach(println)
    // Distinct:
    //   data1.distinct().foreach(println)

    // Load each CSV and drop its header (the first line of partition 0).
    // BUG FIX: the original additionally piped these through a second
    // mapPartitionsWithIndex whose `if (ix == 0) it.drop(1)` discarded the
    // dropped iterator (missing `else`), making that stage a no-op only by
    // accident of Iterator.drop being non-mutating. The redundant, broken
    // stage is removed; the header is dropped exactly once, here.
    val firstHalf = sc.textFile("E:\\gfl08/Employee_salary_first_half.csv")
      .mapPartitionsWithIndex { (idx, iter) =>
        if (idx == 0) iter.drop(1) else iter
      }
    val secondHalf = sc.textFile("E:\\gfl08/Employee_salary_second_half.csv")
      .mapPartitionsWithIndex { (idx, iter) =>
        if (idx == 0) iter.drop(1) else iter
      }

    // Parse each row into a (name, salary) pair.
    val splitFirst = firstHalf.map { line =>
      val fields = line.split(",")
      (fields(1), fields(6).toInt)
    }
    val splitSecond = secondHalf.map { line =>
      val fields = line.split(",")
      (fields(1), fields(6).toInt)
    }

    // Names with salary above 200,000 in either half, de-duplicated.
    val highEarners = splitFirst.filter(_._2 > 200000).map(_._1)
      .union(splitSecond.filter(_._2 > 200000).map(_._1))
      .distinct()
    highEarners.collect().foreach(println)

    sc.stop()
  }
}
