package com.offcn.bigdata.spark.p4

import java.util.concurrent.atomic.AtomicInteger

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
  * 自定义分区之轮询分区
  */
/**
  * Demo of a custom round-robin partitioner: prints each partition's
  * contents before and after repartitioning 'a'..'z' into 4 partitions.
  */
object _01RoundRobinFashionPartitionerOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setAppName(s"${_01RoundRobinFashionPartitionerOps.getClass.getSimpleName}")
            .setMaster("local[1]")
        val sc = new SparkContext(conf)

        // 'a' to 'z' is range-partitioned by parallelize; log what each
        // initial partition holds (println fires when the action runs).
        val letters = sc.parallelize('a' to 'z').mapPartitionsWithIndex { (index, it) =>
            val list = it.toList
            println(s"分区编号为<${index}>中的数据为：${list}")
            list.iterator
        }

        // partitionBy needs a key/value RDD, so pair each letter with itself.
        val l2letter = letters.map(ch => (ch, ch))

        // Redistribute across 4 partitions with the custom round-robin partitioner.
        val ret = l2letter.partitionBy(new RoundRobinFashionPartitioner(4))

        // Log the post-shuffle placement; count() triggers the whole pipeline.
        ret.mapPartitionsWithIndex { (index, it) =>
            val list = it.toMap.keys
            println(s"-----分区编号为<${index}>中的数据为：${list}")
            list.iterator
        }.count()

        sc.stop()
    }
}
/**
  * Assigns records to partitions 0 until numPartitions in rotation,
  * ignoring the key entirely.
  *
  * NOTE(review): Spark expects Partitioner.getPartition to be a
  * deterministic function of the key. This stateful counter violates
  * that contract, so recomputed stages may place a key differently and
  * key-based operations (lookup, joins relying on co-partitioning) will
  * not behave reliably — fine for this demo, not for production use.
  *
  * @param partitions number of target partitions; must be positive
  */
class RoundRobinFashionPartitioner(partitions: Int) extends Partitioner {
    require(partitions > 0, s"Number of partitions must be positive, got: $partitions")

    override def numPartitions: Int = partitions

    // val, not var: the reference never changes — the AtomicInteger itself
    // carries the mutable round-robin state (thread-safe increments).
    val counter = new AtomicInteger()

    override def getPartition(key: Any): Int = {
        // floorMod keeps the result in [0, numPartitions) even after the
        // counter wraps past Int.MaxValue and getAndIncrement goes negative;
        // the original `%` would then yield a negative partition id and
        // crash the shuffle with an out-of-bounds partition.
        Math.floorMod(counter.getAndIncrement(), numPartitions)
    }
}
