package com.doit.day03

import org.apache.spark.rdd.RDD
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
 * @Author:
 * @WX: 17710299606
 * @Tips: Learn big data at Duoyi Education
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description: Demonstrates how Spark RDD partitioning changes across
 *               groupBy (default HashPartitioner), an explicit
 *               HashPartitioner, and a custom Partitioner.
 */
object Demo04Partitioner {

  /**
   * Entry point: prints the partition index of every element at three stages —
   * (1) the initial round-robin layout of the source RDD,
   * (2) after a `groupBy` shuffle (default HashPartitioner),
   * (3) after an explicit `partitionBy` with HashPartitioner and with a
   *     custom `MyPartitioner` — so the repartitioning effect is visible.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
    val sc = SparkContext.getOrCreate(conf)

    // 3 partitions with duplicate keys so shuffles visibly regroup elements.
    val rdd1 = sc.makeRDD(List("a", "b", "c", "c", "d", "e", "f", "e", "f"), 3)

    // Initial layout: tag each element with the index of the partition holding it.
    // NOTE: foreach(println) prints on executor threads; fine in local[*] mode.
    rdd1.mapPartitionsWithIndex((p, iter) => {
      iter.map(e => p + "-------" + e)
    }).foreach(println)

    println("------------------------")
    // groupBy shuffles with the default HashPartitioner: equal keys co-locate.
    val grouped: RDD[(String, Iterable[String])] = rdd1.groupBy(e => e)

    grouped.map(_._1).mapPartitionsWithIndex((p, iter) => {
      iter.map(e => p + "-------" + e)
    }).foreach(println)
    println("------------------------")

    // Explicitly repartition a (key, value) RDD with HashPartitioner,
    // keeping the original partition count.
    val res: RDD[(String, String)] = rdd1.zip(rdd1).partitionBy(new HashPartitioner(rdd1.getNumPartitions))
    res.map(_._1).mapPartitionsWithIndex((p, iter) => {
      iter.map(e => p + "-------" + e)
    }).foreach(println)

    // Same repartitioning, but routed through the user-defined MyPartitioner.
    val res2 = rdd1.zip(rdd1).partitionBy(new MyPartitioner(rdd1.getNumPartitions))
    println("==================================")
    res2.map(_._1).mapPartitionsWithIndex((p, iter) => {
      iter.map(e => p + "-------" + e)
    }).foreach(println)

    // Release the local SparkContext (previously leaked on exit).
    sc.stop()
  }

}
