package com.dxf.bigdata.D05_spark_again

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Repartitioning demo.
 *
 *  - `coalesce`: lets you decide whether to shuffle. With the default
 *    `shuffle = false` it can only REDUCE the partition count; pass
 *    `shuffle = true` to increase it.
 *  - `repartition`: always shuffles — under the hood it is
 *    `coalesce(numPartitions, shuffle = true)`.
 *  - `partitionBy`: applies a custom partitioner (key-value RDDs).
 */
object Coalesce_Repartition {

  def main(args: Array[String]): Unit = {
    // Local-mode context for the demo.
    val conf = new SparkConf().setMaster("local[*]").setAppName("app")
    val context = new SparkContext(conf)

    // Four elements spread across four partitions.
    val source: RDD[Int] = context.makeRDD(List(1, 2, 3, 4), 4)

    // Shrink to two partitions: coalesce avoids a shuffle here,
    // repartition forces one.
    val coalesced: RDD[Int] = source.coalesce(2)
    val repartitioned: RDD[Int] = source.repartition(2)

    // Each output directory gets one part-file per partition.
    coalesced.saveAsTextFile("output")
    repartitioned.saveAsTextFile("output3")

    // Don't forget to release the context.
    context.stop()
  }
}