package com.ada.spark.rddoperator

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
  * Purpose: repartition a pair RDD. If the existing partitioner of the RDD equals the requested
  * partitioner, no repartitioning happens; otherwise a ShuffledRDD is produced, i.e. a shuffle occurs.
  */
object Spark19_partitionBy {

    /**
      * Entry point: builds a 4-partition pair RDD, repartitions it to 3 partitions
      * with the custom [[MyPartitioner]], and writes the result to "output".
      */
    def main(args: Array[String]): Unit = {
        // Run locally using all available cores.
        val conf = new SparkConf().setAppName("Spark19_partitionBy").setMaster("local[*]")
        // Create the Spark context.
        val sc = new SparkContext(conf)

        // Ensure the context is stopped even if the job fails (the original
        // never called sc.stop(), leaking the local Spark runtime).
        try {
            // Pair RDD spread over 4 initial partitions.
            val rdd = sc.parallelize(Array((1, "aaa"), (2, "bbb"), (3, "ccc"), (4, "ddd")), 4)

            println("调整分区前：" + rdd.partitions.size)

            // Built-in alternative:
            //val rdd2 = rdd.partitionBy(new org.apache.spark.HashPartitioner(2))

            // Repartition with the custom partitioner (3 partitions).
            // val, not var: the reference is never reassigned.
            val rdd2 = rdd.partitionBy(new MyPartitioner(3))

            println("调整分区后：" + rdd2.partitions.size)

            //println(rdd2.collect().mkString(","))
            //(2,bbb),(4,ddd),(1,aaa),(3,ccc)

            rdd2.saveAsTextFile("output")
        } finally {
            // Always release Spark resources.
            sc.stop()
        }
    }

    /**
      * Custom partitioner distributing keys across `partitions` buckets by hash code.
      *
      * The original `getPartition` always returned the constant 1, which put every
      * record into a single partition (defeating the 3-partition setup) and is out
      * of the valid range [0, numPartitions) whenever partitions <= 1. This version
      * uses the same non-negative-modulo scheme as Spark's HashPartitioner.
      *
      * @param partitions number of target partitions (must be non-negative)
      */
    class MyPartitioner(partitions: Int) extends Partitioner {
        require(partitions >= 0, s"Number of partitions ($partitions) cannot be negative.")

        override def numPartitions: Int = partitions

        /** Map a key to a partition index in [0, numPartitions); null keys go to 0. */
        override def getPartition(key: Any): Int = key match {
            case null => 0
            case _ =>
                // % can yield a negative value for negative hash codes; shift into range.
                val mod = key.hashCode % partitions
                if (mod < 0) mod + partitions else mod
        }

        // Spark compares partitioners with equals() to decide whether a shuffle is
        // needed; partitioners with the same partition count behave identically.
        override def equals(other: Any): Boolean = other match {
            case that: MyPartitioner => that.numPartitions == numPartitions
            case _                   => false
        }

        override def hashCode: Int = numPartitions
    }

}
