package cn.hnu.spark

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

object RddDemo05 {
  /**
    * Demonstrates three equivalent ways of computing the 3 most frequent
    * words in `data/words.txt`, plus RDD caching and checkpointing.
    */
  def main(args: Array[String]): Unit = {
    // Build a local SparkContext (2 worker threads).
    val conf: SparkConf = new SparkConf().setAppName("Rdd-demo").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    try {
      // Count word occurrences: read lines, drop blank ones, split on
      // whitespace, then reduce (word, 1) pairs by key.
      val lines: RDD[String] = sc.textFile("data/words.txt")
      // isNotBlank is the single-argument check; the original isNoneBlank is
      // the varargs variant and obscures intent (same result for one arg).
      val newLines: RDD[String] = lines.filter(StringUtils.isNotBlank(_))
      val result: RDD[(String, Int)] =
        newLines.flatMap(_.split("\\s+")).map((_, 1)).reduceByKey(_ + _)

      // Cache in memory: `result` is reused by three separate jobs below,
      // so caching avoids recomputing the whole lineage each time.
      result.cache()
      // result.persist()                          // equivalent to cache()
      // result.persist(StorageLevel.MEMORY_ONLY)  // explicit storage level

      // Checkpoint for fault tolerance; materialized at the first action.
      sc.setCheckpointDir("data/chk")
      result.checkpoint()

      // Approach 1: sort by count descending, take the first 3.
      val sortedResult1: Array[(String, Int)] = result.sortBy(_._2, false).take(3)
      // e.g. ("hadoop",3) ("spark",2)

      // Approach 2: swap to (count, word), sort by key descending, swap back.
      val result2: RDD[(Int, String)] = result.map(_.swap)
      val sortedResult2 = result2.sortByKey(false).map(_.swap).take(3)

      // Approach 3: top(n) returns the n largest elements under the given
      // Ordering — descending by default, no separate sort needed.
      val sortedResult3: Array[(String, Int)] = result.top(3)(Ordering.by(_._2))

      println(sortedResult1.toBuffer)
      println(sortedResult2.toBuffer)
      println(sortedResult3.toBuffer)
    } finally {
      // Release the SparkContext even if a job above fails.
      sc.stop()
    }
  }
}
