package cn.doitedu.day03

import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/*
 * 分区的同时，并且在分区内进行排序(不是全局排序)
 */
/**
 * Demonstrates partitioning data while sorting within each partition
 * (not a global sort), using the low-level `ShuffledRDD` directly instead
 * of `repartitionAndSortWithinPartitions`.
 */
object T03_RepartitionAndSortWithinPartitionDemo2 {

  def main(args: Array[String]): Unit = {

    // 1. Create SparkConf / SparkContext (local mode, 4 threads).
    val conf = new SparkConf().setAppName("WordCount")
      .setMaster("local[4]")
    val sc = new SparkContext(conf)

    try {
      // Sample (word, count) pairs used to demonstrate the shuffle-with-sort.
      val lst: Seq[(String, Int)] = List(
        ("spark", 3), ("hadoop", 1), ("hive", 3), ("spark", 2),
        ("spark", 9), ("flink", 2), ("hbase", 1), ("spark", 4),
        ("kafka", 8), ("kafka", 5), ("kafka", 7), ("kafka", 1),
        ("hadoop", 5), ("flink", 4), ("hive", 6), ("flink", 3)
      )
      // Create the RDD by parallelizing the local collection into 4 partitions.
      val wordAndOne: RDD[(String, Int)] = sc.parallelize(lst, 4)

      // Partition by key hash, keeping the same number of partitions.
      val partitioner = new HashPartitioner(wordAndOne.partitions.length)

      // Sort keys in reverse (descending) order within each partition.
      val ord = Ordering[String].reverse

      // Use the underlying ShuffledRDD instead of
      // repartitionAndSortWithinPartitions. The third type parameter is the
      // combiner type C and determines the value type of the resulting
      // RDD[(K, C)] — it must be Int here (the original `Null` mistyped the
      // result as RDD[(String, Null)]).
      val shuffledRDD = new ShuffledRDD[String, Int, Int](wordAndOne, partitioner)
        .setKeyOrdering(ord)

      shuffledRDD.saveAsTextFile("out/out10")
    } finally {
      // Always release the SparkContext, even if the job above fails.
      sc.stop()
    }
  }
}
