package com.yjjxt

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demonstrates `RDD.coalesce`, which is normally used to REDUCE the number of partitions.
 *
 * With shuffle = false, no shuffle is performed: partitions are simply merged
 * (a narrow dependency). In that mode, requesting MORE partitions than the RDD
 * currently has is ignored — the partition count stays the same.
 *
 * With shuffle = true, a full shuffle is performed, which behaves like
 * `repartition` and can therefore also increase the number of partitions.
 */
object Hello18Coalesce {
  def main(args: Array[String]): Unit = {
    // Local-mode context; app name gets a timestamp suffix so repeated runs are distinguishable.
    val conf = new SparkConf().setMaster("local").setAppName("Join" + System.currentTimeMillis())
    val sc   = new SparkContext(conf)

    val users = Array[String]("user1", "user2", "user3", "user4", "user5", "user6", "user7", "user8")
    // Start with 4 partitions so the coalesce-to-5 effect is observable.
    val base = sc.parallelize(users, 4)

    // shuffle = true behaves like repartition, so growing 4 -> 5 partitions actually works here.
    val reshuffled: RDD[String] = base.coalesce(5, true)
    println(s"${base.getNumPartitions}--${reshuffled.getNumPartitions}")

    // Print each element tagged with its partition index; the returned (now exhausted)
    // iterator is only there to satisfy mapPartitionsWithIndex, and count() forces evaluation.
    base.mapPartitionsWithIndex { (pid, it) =>
      it.foreach(e => println(s"$pid-lines-$e"))
      it
    }.count()

    // Same inspection for the coalesced RDD, with a banner per partition.
    reshuffled.mapPartitionsWithIndex { (pid, it) =>
      println(s"linesRP-----------------------------$pid")
      it.foreach(e => println(s"$pid-linesRP-$e"))
      it
    }.count()

    sc.stop()
  }
}
