package com.bigdata

import org.apache.spark.Partitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

/**
 * Demonstrates a custom [[org.apache.spark.Partitioner]]: repartitions a
 * key-value RDD into 6 partitions by `key % 6`, then prints each element
 * together with the index of the partition it landed in.
 */
object DefinedPartitioner {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("test").master("local").getOrCreate()
    val sc = spark.sparkContext

    sc.setLogLevel("error")

    val rdd1: RDD[(Int, String)] = sc.parallelize(Array[(Int, String)](
      (1, "zhangsan"),
      (2, "lisi"),
      (3, "wangwu"),
      (4, "zhaoliu"),
      (5, "tianqi"),
      (6, "sunba")
    ), 3)

    val resultRDD = rdd1.partitionBy(new Partitioner {
      override def numPartitions: Int = 6

      // Non-negative modulo: a plain `%` yields a negative result for
      // negative keys, which would make Spark fail with an out-of-range
      // partition id during the shuffle. This mirrors the logic of
      // Spark's built-in HashPartitioner (Utils.nonNegativeMod).
      override def getPartition(key: Any): Int = {
        val rawMod = key.toString.toInt % numPartitions
        rawMod + (if (rawMod < 0) numPartitions else 0)
      }
    })

    // Print every element alongside its partition index. The map is lazy,
    // but count() fully consumes each partition's iterator, so every
    // element is printed exactly once — no intermediate buffer needed.
    resultRDD.mapPartitionsWithIndex((index, iter) =>
      iter.map { tuple =>
        println(s"index = $index ,value = $tuple")
        tuple
      }
    ).count()

    // Release the driver's resources (local cluster, UI, event loops).
    spark.stop()
  }
}
