package com.edu

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Classic Spark word-count driver: reads a text file, counts word
 * occurrences, and prints the top-3 words computed three equivalent ways
 * (sortBy, sortByKey on swapped pairs, and top with a custom Ordering).
 *
 * Runs locally (`local[*]`) against `data/input/wc.txt`.
 */
object WordCount {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setAppName("wc").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    sc.setLogLevel("WARN")

    // One element per line of the input file.
    val lines: RDD[String] = sc.textFile("data/input/wc.txt")

    // Split each line on single spaces into individual words.
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // Pair every word with an initial count of 1.
    val wordAndOnes: RDD[(String, Int)] = words.map((_, 1))

    // Sum the counts per word.
    val result: RDD[(String, Int)] = wordAndOnes.reduceByKey(_ + _)

    // Approach 1: sortBy on the count, descending, then take the first 3.
    val sortResult1 = result
      .sortBy(_._2, ascending = false)
      .take(3)

    // Approach 2: swap to (count, word), sort by key descending, take 3.
    val sortResult2 = result.map(_.swap).sortByKey(ascending = false).take(3)

    // Approach 3: top-k with an Ordering on the count — avoids a full sort.
    val sortResult3 = result.top(3)(Ordering.by(_._2))

    sortResult1.foreach(println)
    sortResult2.foreach(println)
    sortResult3.foreach(println)

    // NOTE: the original code ran `result.collect()` here and discarded the
    // returned array, triggering a full extra Spark job for nothing; removed.

    sc.stop()
  }

}
