package com.mjf.spark.day04

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, Partitioner, SparkConf, SparkContext}

/**
 * Transformation operator: partitionBy
 *   Repartitions a key-value RDD according to its keys, using a
 *   supplied Partitioner (built-in HashPartitioner or a custom one).
 */
object Spark01_Transformation_partitionBy {
  def main(args: Array[String]): Unit = {

    // Spark configuration: run locally using all available cores.
    val conf = new SparkConf().setMaster("local[*]").setAppName("Spark01_Transformation_partitionBy")
    // Driver-side entry point to the Spark runtime.
    val sc = new SparkContext(conf)

//    val rdd: RDD[(Int, String)] = sc.makeRDD(Array((1,"aaa"),(2,"bbb"),(3,"ccc")),3)
    // Key-value RDD distributed over 3 initial partitions.
    val rdd: RDD[(Int, String)] = sc.makeRDD(Array((135,"aaa"),(124,"bbb"),(136,"ccc")),3)

    print("-------------分区之前---------------")  // "before repartitioning"
    rdd.mapPartitionsWithIndex(
      (index, datas) => {
        // FIX: mkString consumes the iterator, so the original code returned an
        // already-exhausted iterator (downstream stages would see empty
        // partitions). Materialize the partition first, then return a fresh
        // iterator over the same elements.
        val items = datas.toList
        println(index + "--->" + items.mkString(","))
        items.iterator
      }
    ).collect()

    // NOTE: plain RDDs do not define partitionBy; it is added to key-value
    // RDDs through the PairRDDFunctions implicit conversion.
//    val newRDD: RDD[(Int, String)] = rdd.partitionBy(new HashPartitioner(2))  // built-in HashPartitioner
    val newRDD: RDD[(Int, String)] = rdd.partitionBy(new MyPartitioner(2))  // custom partitioner

    print("-------------分区之后---------------")  // "after repartitioning"
    newRDD.mapPartitionsWithIndex(
      (index, datas) => {
        // Same materialize-then-print pattern to avoid exhausting the iterator.
        val items = datas.toList
        println(index + "--->" + items.mkString(","))
        items.iterator
      }
    ).collect()

    // Release the driver's cluster resources.
    sc.stop()

  }
}

// Custom partitioner.
/**
 * Custom partitioner: keys whose string form starts with "135" are routed to
 * partition 0; every other key goes to the last partition.
 *
 * @param partitions total number of partitions; must be at least 1
 */
class MyPartitioner (partitions: Int) extends Partitioner{

  // Fail fast on an invalid partition count instead of crashing at shuffle time.
  require(partitions >= 1, s"partitions must be positive, got $partitions")

  // Total number of partitions managed by this partitioner.
  override def numPartitions: Int = partitions

  // Maps a key to a 0-based partition id.
  // FIX: the original unconditionally returned 1 for non-"135" keys, which is
  // out of range whenever partitions < 2 (Spark rejects partition ids >=
  // numPartitions). Routing those keys to the last partition is safe for any
  // partition count and identical to the old behavior for the
  // `new MyPartitioner(2)` usage in this file.
  override def getPartition(key: Any): Int = {
    val str: String = key.toString
    if(str.startsWith("135")) {
      0
    } else {
      partitions - 1
    }
  }
}