package org.example
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object data1_core {

  /**
   * Entry point. Demonstrates core RDD operations in local mode:
   *   1. loads an employee-salary CSV, drops the header, and extracts the
   *      top 3 rows by salary;
   *   2. runs multi-RDD set operations (intersection / union / subtract /
   *      zip / cartesian / filter / distinct) on two small sample RDDs;
   *   3. prints the top-3 salary result and stops the context.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()
    val sc = spark.sparkContext

    // 1. Load the salary CSV.
    val first_half = sc.textFile("F:\\Employee_salary_first_half.csv")

    // Skip the header row, which lives in partition 0.
    // BUG FIX: the original wrote `if (ix == 0) it.drop(1); it` — it discarded
    // the iterator returned by drop(1) and handed back the ORIGINAL iterator.
    // Scala's Iterator contract forbids reusing an iterator after calling a
    // transformer like drop on it, so the header was not reliably removed.
    // Returning the dropped iterator (else the untouched one) is the correct form.
    val drop_first = first_half.mapPartitionsWithIndex((ix, it) =>
      if (ix == 0) it.drop(1) else it
    )

    // 2. Parse each CSV line: keep (column 1, column 6).
    // toLongOption (Scala 2.13+) yields None for a malformed salary field
    // instead of throwing NumberFormatException.
    // NOTE(review): column meanings assumed from the file name — name at index 1,
    // salary at index 6; confirm against the CSV header.
    val split_first = drop_first.map { line =>
      val fields = line.split(",")
      (fields(1), fields(6).toLongOption)
    }

    // Top 3 by salary, descending. Ordering[Option[Long]] ranks None lowest,
    // so unparseable rows sink to the bottom rather than crashing the sort.
    val sort_first = split_first.sortBy(_._2, ascending = false).take(3)

    // Multi-RDD operations on two sample RDDs. Both use exactly 1 partition,
    // which satisfies zip's requirement of identical partitioning.
    val data1 = sc.parallelize(List(60, 90, 75, 80, 72), 1)
    val data2 = sc.makeRDD(List(72, 80, 75, 66, 95), 1)

    // Intersection: elements present in both RDDs.
    data1.intersection(data2).foreach(println)
    // Union: all elements of both RDDs (duplicates kept).
    data1.union(data2).foreach(println)
    // Subtract: elements in data1 that are absent from data2.
    data1.subtract(data2).foreach(println)
    // Zip: pairwise tuples; requires equal partition and element counts.
    data1.zip(data2).foreach(println)
    // Cartesian product: every pairing, 5 * 5 = 25 tuples.
    data1.cartesian(data2).foreach(println)
    // Filter: keep values >= 90.
    data1.filter(_ >= 90).foreach(println)
    // Distinct: deduplicate.
    data1.distinct().foreach(println)

    // 3. Print the top-3 salary result on the driver.
    sort_first.foreach(println)

    sc.stop()
  }
}
