package com.shujia.spark.opt

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
  * Demo of the two RDD repartitioning operators, `repartition` and `coalesce`,
  * printing the partition count at each stage.
  *
  * Run locally; the process is kept alive at the end so the Spark web UI
  * (http://localhost:4040) can be inspected.
  */
object Demo0RePartition {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("agg")

    val sc = new SparkContext(conf)

    // NOTE(review): despite the "DS" suffix this is an RDD, not a Dataset;
    // name kept so the log labels below stay consistent.
    val linesDS: RDD[String] = sc.textFile("data/students.txt")

    println(s"linesDS:${linesDS.getNumPartitions}")

    // Identity map: a narrow transformation, so it inherits the parent's
    // partition count unchanged.
    val mapRDD: RDD[String] = linesDS.map(stu => stu)

    println(s"mapRDD:${mapRDD.getNumPartitions}")

    /**
      * repartition: reshuffles the data into the requested number of
      * partitions; always incurs a shuffle.
      */
    val repartitionRDD: RDD[String] = mapRDD.repartition(10)

    println(s"repartitionRDD:${repartitionRDD.getNumPartitions}")

    /**
      * coalesce: repartitions with an explicit choice of whether to shuffle.
      * With shuffle = false it can only DECREASE the partition count, never
      * increase it.
      *
      * Useful for merging small files when writing out to HDFS.
      */
    val coalesceRDD: RDD[String] = repartitionRDD.coalesce(5, false)

    println(s"coalesceRDD:${coalesceRDD.getNumPartitions}")

    coalesceRDD.foreach(println)

    // Keep the JVM alive so the Spark UI stays reachable. Sleep instead of
    // the original busy-spin `while (true) ()`, which pinned a core at 100%.
    while (true) Thread.sleep(60000L)

  }

}
