package org.huangrui.spark.scala.core.rdd.instance

import org.apache.spark.{SparkConf, SparkContext}

/**
 * How in-memory collection data is partitioned by Spark.
 * @Author hr
 * @Create 2024-10-16 11:15
 */
object Spark02_RDD_Memory_Partition_Data {

  /**
   * Demonstrates how `SparkContext.parallelize` distributes an in-memory
   * collection across partitions: elements are split as evenly as possible.
   *
   * For length = 6 and numSlices = 4, each slice i covers the half-open
   * index range ((i * length) / numSlices, ((i + 1) * length) / numSlices):
   *
   *   i = 0 -> ((0*6)/4, (1*6)/4) = (0, 1) -> 1 element:  [1]
   *   i = 1 -> ((1*6)/4, (2*6)/4) = (1, 3) -> 2 elements: [2, 3]
   *   i = 2 -> ((2*6)/4, (3*6)/4) = (3, 4) -> 1 element:  [4]
   *   i = 3 -> ((3*6)/4, (4*6)/4) = (4, 6) -> 2 elements: [5, 6]
   *
   * Resulting partition files: [1] [2, 3] [4] [5, 6]
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("spark")
    val context   = new SparkContext(sparkConf)

    // Basic rule for storing partitioned data in Spark: split evenly.
    val numbers   = List(1, 2, 3, 4, 5, 6)
    val numberRdd = context.parallelize(numbers, 4)

    // Writes one part-file per partition so the split can be inspected on disk.
    // NOTE(review): fails if the "output" directory already exists — delete it
    // between runs.
    numberRdd.saveAsTextFile("output")

    context.stop()
  }
}
