package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates how RDD partition counts change across transformations.
 *
 * Key points illustrated:
 *   - repartition(n): can either increase or decrease the partition count
 *     (always performs a shuffle).
 *   - coalesce(n): shuffle defaults to false, in which case the partition
 *     count can only be decreased; pass shuffle = true to allow increasing it.
 *   - reduceByKey(func, numPartitions): explicitly sets the partition count
 *     of the shuffled result.
 *
 * Note: uses an explicit `main` instead of `extends App` to avoid the
 * DelayedInit initialization-order pitfalls of the App trait.
 */
object Code27Partition3 {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setAppName("WordCount")
    conf.setMaster("local")
    //  conf.set("spark.default.parallelism","3") // sets Spark's default shuffle parallelism to 3

    val sc = new SparkContext(conf)

    // Source RDD created with 4 partitions.
    val value1RDD: RDD[(String, Int)] = sc.parallelize(List(("k1", 11), ("k1", 1), ("k2", 2), ("k3", 33), ("k5", 45), ("k4", 4), ("k3", 3), ("k3", 33)), 4)
    println("value1RDD:" + value1RDD.getNumPartitions)

    //  sc.parallelize(List(1,2,3,4),2).coalesce(1)

    /**
     * Setting partitions:
     *      repartition: takes a target partition count; the result may have
     *                   more or fewer partitions than the parent RDD.
     *      coalesce:    shuffle defaults to false; can only reduce the parent
     *                   RDD's partition count, not increase it (unless
     *                   shuffle = true, as below).
     */
    //    val repartitionRDD: RDD[(String, Int)] = value1RDD.repartition(2)
    //  val repartitionRDD: RDD[(String, Int)] = value1RDD.coalesce(2)
    // shuffle = true allows coalesce to grow the partition count (4 -> 8).
    val repartitionRDD: RDD[(String, Int)] = value1RDD.coalesce(8, shuffle = true)

    // Narrow transformation: map preserves the partition count (stays at 8).
    val mapRDD: RDD[(String, Int)] = repartitionRDD
      .map(x => (x._1, x._2 * x._2))
    println("mapRDD:" + mapRDD.getNumPartitions)

    // Wide transformation: reduceByKey shuffles into the requested 3 partitions.
    val reduceRDD: RDD[(String, Int)] = mapRDD
      .reduceByKey(_ + _, numPartitions = 3)
    println("reduceRDD:" + reduceRDD.getNumPartitions)

    reduceRDD.foreach(println)

    // Keep the driver alive so the Spark UI (http://localhost:4040) can be
    // inspected. Sleep instead of busy-waiting to avoid pinning a CPU core.
    while (true) {
      Thread.sleep(10000)
    }
  }
}
