package chapter03

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo of RDD `sortBy` plus two-RDD set operations (intersection, union,
 * subtract, zip), and a small CSV exercise: top-5 earners overall and the
 * highest-paid employee per department.
 *
 * Expects `input/Employee_salary_first_half.csv` with a header row, where
 * column 1 is the employee name, column 5 the total salary, and column 9
 * the department name.
 */
object Test16_SortBy {
  def main(args: Array[String]): Unit = {
    // Silence Spark's verbose INFO logging for readable console output.
    val logger = Logger.getLogger("org.apache.spark")
    logger.setLevel(Level.WARN)
    val conf = new SparkConf().setMaster("local[*]").setAppName("sort")
    val sc = new SparkContext(conf)

    // sortBy with ascending = false yields a descending order.
    val value = sc.makeRDD(List(1, 4, 2, 1, 4, 5, 6))
    println(value.sortBy(e => e, ascending = false).collect().toList)

    // Read employee salary info from CSV.
    val value1 = sc.textFile("input/Employee_salary_first_half.csv")

    // Drop the header row: it lives on the first line of partition 0 only.
    // Shared by both queries below (was duplicated in the original).
    val rows = value1
      .mapPartitionsWithIndex((index, it) => if (index == 0) it.drop(1) else it)
      .map(_.split(","))

    // Names of the 5 employees with the highest total salary.
    println(rows
      .map(e => (e(1), e(5).toInt)) // (name, totalSalary)
      .sortBy(_._2, ascending = false)
      .take(5)
      .mkString(","))

    // Highest-paid employee per department: (dept, (salary, name)).
    // BUG FIX: the original kept each department's ENTIRE employee list
    // sorted descending instead of selecting the single maximum; maxBy
    // now keeps only the top earner, matching the stated intent.
    println(rows
      .map(e => (e(5), e(9), e(1))) // (salary, dept, name)
      .groupBy(_._2)
      .map { case (dept, emps) =>
        (dept, emps.map(f => (f._1.toInt, f._3)).maxBy(_._1))
      }
      .take(2)
      .toList)

    // Two-RDD set operations. Note the elements are List[Any]
    // (mixed Int/String), kept as-is to demonstrate element-wise equality.
    val dataRDD1 = sc.makeRDD(List(1, "2", 3, 4), 2)
    val dataRDD2 = sc.makeRDD(List(3, "4", 5, 6), 2)
    val dataRDD = dataRDD1.intersection(dataRDD2)
    println(dataRDD.collect().toList)
    println(dataRDD1.union(dataRDD2).collect().toList)
    println(dataRDD1.subtract(dataRDD2).collect().toList)
    // zip requires both RDDs to have the same partitioning and element count.
    println(dataRDD1.zip(dataRDD2).collect().toList)

    // Release cluster resources (the original leaked the SparkContext).
    sc.stop()
  }
}
