package com.doit.day03

import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author:
 * @WX: 17710299606
 * @Tips: 学大数据 ,到多易教育
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description:
 */
object Demo03NumberP {

  /**
   * Demonstrates how RDD partition counts behave across narrow
   * transformations (map, filter), shuffle transformations (sortBy),
   * and explicit repartitioning (repartition / coalesce).
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local[*]")
      // getSimpleName on a Scala `object` yields a trailing '$'; strip it for a clean app name
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
    val sc = SparkContext.getOrCreate(conf)

    try {
      // Source RDD with an explicit 3 partitions
      val rdd = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8), 3)

      // Narrow transformations (map, filter) preserve the parent's partition count
      val rdd1 = rdd.map(_ * 10)
      println(rdd1.getNumPartitions) // 3
      val rdd2 = rdd1.map(_ * 2)

      println(rdd2.getNumPartitions) // 3

      // sortBy shuffles the data but keeps the same number of partitions by default
      val rdd3 = rdd2.sortBy(e => e)

      println(rdd3.getNumPartitions) // 3

      val rdd4 = rdd3.filter(_ > 1)

      println(rdd4.getNumPartitions) // 3

      println("-----------------------")
      /**
       * repartition always performs a shuffle, whether the partition count
       * is being decreased or increased. When only DECREASING partitions,
       * prefer coalesce(n, shuffle = false) to avoid the shuffle cost.
       */
      val rdd5 = rdd4.repartition(2) // decrease partitions — still shuffles
      println(rdd5.getNumPartitions) // 2

      val rdd6 = rdd4.repartition(4) // increase partitions — shuffles
      println(rdd6.getNumPartitions) // 4
      println("-----------------------")

      // coalesce with shuffle = true behaves like repartition:
      // it can both decrease and increase the partition count
      val rdd7 = rdd4.coalesce(2, shuffle = true)

      val rdd8 = rdd4.coalesce(4, shuffle = true)

      println(rdd7.getNumPartitions) // 2
      println(rdd8.getNumPartitions) // 4
    } finally {
      // Original code never stopped the context; release local cluster resources
      sc.stop()
    }
  }

}
