package com.yanggu.spark.core.rdd.transform.keyvalue

import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

//Key-Value类型-PartitionBy算子
//Key-Value type - partitionBy operator
object RDD16_PartitionBy {

  def main(args: Array[String]): Unit = {

    //1. Create the SparkConf configuration object
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("spark")

    //2. Create the Spark context object
    val sparkContext = new SparkContext(sparkConf)

    //3. Create an RDD from an in-memory collection, with 3 initial partitions
    val rdd = sparkContext.makeRDD(Array((1, "aaa"), (2, "bbb"), (3, "ccc")), 3)

    //4. partitionBy
    //Repartitions the data according to the given Partitioner.
    //Spark's default partitioner is HashPartitioner (here reducing 3 partitions to 2).
    val value = rdd.partitionBy(new HashPartitioner(2))

    //5. Tag each element with the index of the partition it landed in
    val value1 = value.mapPartitionsWithIndex {
      (partitionIndex, datas) => {
        datas.map {
          data => (partitionIndex, data)
        }
      }
    }

    //6. Print each (partitionIndex, (key, value)) pair.
    //NOTE: previously the foreach was wrapped in an outer println, which printed
    //a spurious "()" (foreach returns Unit); the wrapper has been removed.
    value1.collect().foreach(println)

    //7. Release resources
    sparkContext.stop()
  }

}
