package com.atguigu1.core.operator

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 *
 * @description: flatMap 算子测试(扁平化映射) — flatMap operator demo (flattening nested collections)
 * @time: 2021-03-12 11:45
 * @author: baojinlong
 **/
object Spark09FlatMapDemo {

  /**
   * Demonstrates the `flatMap` operator on an RDD whose elements mix
   * nested lists and bare scalars: nested lists are flattened in place,
   * scalars are wrapped so they survive the flattening unchanged.
   */
  def main(args: Array[String]): Unit = {
    // Local Spark context for this demo.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("rdd")
    val sc = new SparkContext(sparkConf)

    // Source RDD mixing List(...) elements with a plain Int.
    val sourceRdd: RDD[Any] = sc.makeRDD(List(List(1, 2), 3, List(5, 4)))

    // Flatten: a nested list contributes all of its elements; any other
    // value is wrapped in a single-element list so it contributes itself.
    val flattenedRdd: RDD[Any] = sourceRdd.flatMap {
      case nested: List[_] => nested
      case single          => List(single)
    }

    flattenedRdd.collect().foreach(println)

    // Shut down the Spark context.
    sc.stop()

    /**
     * How Hadoop-style file partition counts are computed (reference note):
     * totalSize = 7 bytes
     * goalSize  = 7 / 2 = 3 bytes per partition
     * 7 / 3 = 2 remainder 1; 1 / 3 = 0.333 > 0.1, so one extra partition is added.
     */
  }

}
