package cn.aijson.demo.rdd

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDDSort {

  /**
   * Demonstrates three ways of ordering a word-count RDD:
   * `sortByKey`, `sortBy`, and `top` with an explicit `Ordering`.
   *
   * Reads `data/input/words.txt`, computes per-word counts, then prints the
   * first three entries under each sorting strategy.
   */
  def main(args: Array[String]): Unit = {
    // 1. Build a local Spark environment (all available cores).
    val conf: SparkConf = new SparkConf().setAppName("spark").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // 2. Read the file into a distributed dataset and compute word counts.
    //    Fix: use the single-argument isNotBlank instead of the varargs
    //    isNoneBlank, which is meant for checking several CharSequences at once.
    val lines: RDD[String] = sc.textFile("data/input/words.txt")
    val result: RDD[(String, Int)] = lines.filter(StringUtils.isNotBlank(_))
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)

    // sortByKey(false): order by the word (the key) in descending order.
    println("%%%%%%%%%%按照姓名倒序取前3sortByKey")
    result.sortByKey(false).take(3).foreach(println)

    // sortBy(_._2, ascending = false): order by count, DESCENDING.
    // Fix: the message previously said "升序" (ascending), contradicting the code.
    println("%%%%%%%%%%按照次数降序取前3sortBy")
    result.sortBy(_._2, ascending = false).take(3).foreach(println)

    // top(3) with an Ordering on the count: returns the 3 largest elements.
    println("%%%%%%%%%%top取前3,默认降序")
    result.top(3)(Ordering.by(_._2)).foreach(println)

    // Fix: release Spark resources; the original never stopped the context.
    sc.stop()
  }
}
