package SparkRDD.RDD分区和Shuffle

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.junit.After
import org.junit.Test

/*
 * repartition --- re-partitions an RDD; the partition count may grow or shrink (shuffle defaults to true)
 * coalesce    --- re-partitions an RDD; the count cannot grow unless shuffle (default false) is set to true
 */

/**
 * Demonstrates how the partition count of an RDD behaves:
 *  - `repartition` always shuffles, so it can both grow and shrink the count;
 *  - `coalesce` only shrinks unless `shuffle = true` is passed;
 *  - the numPartitions argument of `textFile` is a *minimum*, not an exact count.
 */
class partitionTest {

  // One local SparkContext per test-class instance (JUnit 4 creates a fresh
  // instance for every test method).
  val conf = new SparkConf().setMaster("local[6]").setAppName("sortBy")
  val sc   = new SparkContext(conf)

  // Stop the context after each test. Without this, every test method leaks a
  // SparkContext, and Spark by default allows only one active context per JVM,
  // so later tests in the same run could fail to construct theirs.
  @After
  def tearDown(): Unit = sc.stop()

  @Test
  def repartitionTest: Unit = {
    // The partition count can be fixed when the RDD is created (here: 2).
    val rdd = sc.parallelize(Seq(1, 2, 3, 4, 5, 6, 7, 8), 2)
    // repartition shuffles unconditionally, so it can increase or decrease the count.
    println(rdd.repartition(4).partitions.size) // 4
    println(rdd.repartition(1).partitions.size) // 1
  }

  @Test
  def coalesceTest: Unit = {
    val rdd = sc.parallelize(Seq(1, 2, 3, 4, 5, 6, 7, 8), 2)
    // Without a shuffle, coalesce cannot grow the partition count.
    println(rdd.coalesce(4).partitions.size)                 // still 2, not 4
    println(rdd.coalesce(4, shuffle = true).partitions.size) // 4
    println(rdd.coalesce(1).partitions.size)                 // 1
  }

  @Test
  def textFileTest: Unit = {
    // When reading external data the partition count can be requested too, but
    // it is only a minimum; the actual count depends on the input splits.
    val data: RDD[String] = sc.textFile("src/main/scala/Rdd算子/测验/fix_1.csv", 2)
    println(data.partitions.size) // 3
  }

}