package org.example
import org.apache.spark.SparkContext.jarOfObject
import org.apache.spark.sql.SparkSession
import org.apache.spark.rdd.RDD

object sparkYun_148 {
  /** Entry point for local Spark RDD experiments.
    *
    * Fix: the original builder chain was never completed with `.getOrCreate()`,
    * so `spark` was a `SparkSession.Builder`, not a session — no Spark
    * application was ever started. The session is now created and stopped.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // Local-mode session using all available cores.
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate() // was missing: actually create the session

    // SparkContext referenced (as `sc`) by the commented experiments below.
    val sc = spark.sparkContext

//    val rdd1 = sc.parallelize(List(1,2,3,4),2)
//    val rdd2 = sc.parallelize((List(3,4,5,6),2)
//    val interRDD = rdd1.intersection(rdd2)
//      interRDD.foreach(System.out.println)
//
//    val unRDD = rdd1.union(rdd2)
//      unRDD.collect().foreach(System.out.println)
//    val suRDD = rdd2.subtract(rdd1)
//      suRDD.foreach(System.out.println)
//    val score = sc.parallelize(List(('a',60),('b',70),('c',80),('a',60)))
//    score.foreach(_._2 >= 80).collect().foreach(println())
//
//    score.distinct().collect().
//
//    sc.stop()

//    val first_half = sc.textFile("C:\\Users\\Administrator\\Desktop\\Employee_salary_first_half.csv")
//      val drop_first = first_half.mapPartitionsWithIndex((ix,it) =>{
//        if (ix == 0) it.drop(1)
//        it
//      })
//      val split_first = drop_first.map(line => {val data = line.split(",");(data(1),data(6).toInt)})
//      val sort_first = split_first.sortBy(x => x._2,false)
//      sort_first.take(3)

    // Release local executor threads and shut the application down cleanly.
    spark.stop()
  }
}
