package rddSummary.transition.key_value_type

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
 * Demonstrates `RDD.partitionBy`: redistributes a key-value RDD across
 * partitions according to a supplied Partitioner. Spark's default
 * partitioner for shuffles is HashPartitioner.
 */
object test_partitionBy {
  def main(args: Array[String]): Unit = {
    // Local single-threaded context — this is a demo, not a cluster job.
    val sparkConf = new SparkConf().setAppName("test").setMaster("local")
    val sc        = new SparkContext(sparkConf)

    // Pair RDD spread over 3 initial partitions.
    val pairs: RDD[(Int, String)] = sc.makeRDD(
      Seq(1 -> "aaa", 2 -> "bbb", 3 -> "ccc"),
      numSlices = 3
    )

    // Reshuffle the data into 2 partitions, keyed by hashCode of the key.
    val repartitioned: RDD[(Int, String)] = pairs.partitionBy(new HashPartitioner(2))

    // Materialize on the driver and print each (key, value) pair.
    repartitioned.collect().foreach(println)

    sc.stop()
  }

}
