package cn.doitedu.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
 * Word-count demo that sorts results by frequency using RDD.sortBy.
 *
 * Reads whitespace-delimited text from HDFS, counts word occurrences,
 * sorts them in descending order of count, and writes the result out
 * as text files.
 *
 * Usage: T07_SortByDemo [inputPath] [outputPath]
 * Both arguments are optional; the original hard-coded paths are used
 * as defaults, so existing invocations keep working.
 */
object T07_SortByDemo {

  def main(args: Array[String]): Unit = {

    // Allow paths to be overridden from the command line (backward compatible).
    val inputPath  = if (args.length > 0) args(0) else "hdfs://node-1.51doit.cn:8020/data/wc"
    val outputPath = if (args.length > 1) args(1) else "out/out11"

    // 1. Build the SparkConf and SparkContext (local mode with 4 threads).
    val conf = new SparkConf().setAppName("SortBy")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    try {
      val lines: RDD[String] = sc.textFile(inputPath)
      // Split each line into words and flatten into a single RDD of words.
      val words: RDD[String] = lines.flatMap(_.split(" "))
      // Pair each word with the count 1.
      val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
      // Aggregate counts per word.
      val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
      // Sort by occurrence count, highest first (ascending = false).
      val sorted: RDD[(String, Int)] = reduced.sortBy(_._2, false)

      sorted.saveAsTextFile(outputPath)
    } finally {
      // Release cluster resources; the original code never stopped the context.
      sc.stop()
    }
  }
}
