package com.shujia.spark.optimize

import org.apache.spark.{SparkConf, SparkContext}

object Demo1Coalesce {

  /**
    * Demonstrates `coalesce` vs `repartition` for changing an RDD's
    * partition count.
    *
    * - `repartition(n)` always performs a shuffle and can either increase
    *   or decrease the number of partitions.
    * - `coalesce(n, shuffle = false)` can only decrease the partition
    *   count, but avoids a shuffle by merging existing partitions.
    * - Increasing the partition count with `coalesce` requires
    *   `shuffle = true`.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("app")
      .setMaster("local[1]")
    val sc = new SparkContext(conf)

    // Produce many small files: enable the save line once to generate the
    // input directory used by the coalesce demo below.
    sc
      .textFile("spark/data/students.txt")
      .repartition(3)
    //.saveAsTextFile("spark/data/coalesce")

    // The directory contains many small files, so the resulting RDD has
    // many partitions, each holding very little data.
    val rdd = sc.textFile("spark/data/coalesce")
    println("rdd:" + rdd.getNumPartitions)

    // repartition always triggers a shuffle.
    // NOTE: the original code discarded this result, so the transformation
    // was never evaluated; capture it and show the new partition count.
    val repartitioned = rdd.repartition(2)
    println("repartitioned:" + repartitioned.getNumPartitions)

    // Reducing the partition count can be done without a shuffle.
    val cRDD = rdd.coalesce(2, shuffle = false)

    println(cRDD.getNumPartitions)

    cRDD.foreach(println)

    // Increasing the partition count requires a shuffle; without it,
    // coalesce cannot grow the number of partitions.
    val rdd3 = rdd.coalesce(200, shuffle = true)

    println(rdd3.getNumPartitions)

    // Release resources held by the SparkContext (missing in the original).
    sc.stop()
  }
}
