package action.RDD创建操作

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Spark Parallelize
  *
  * @author wdmcode@aliyun.com
  * @version 1.0.0
  * @date 2018/11/7
  */
object SparkParallelize {
  /**
    * Concept: what is an RDD (Resilient Distributed Dataset)?
    *
    * Spark revolves around the concept of a resilient distributed dataset (RDD),
    * which is a fault-tolerant collection of elements that can be operated on in parallel.
    * There are two ways to create RDDs:
    *   1. parallelizing an existing collection in your driver program, or
    *   2. referencing a dataset in an external storage system, such as a shared
    *      filesystem, HDFS, HBase, or any data source offering a Hadoop InputFormat.
    *
    * Signature of the method demonstrated here:
    * {{{
    * def parallelize[T: ClassTag](seq: Seq[T], numSlices: Int = defaultParallelism): RDD[T]
    * }}}
    *   - `seq`:       the local collection to distribute
    *   - `numSlices`: number of partitions; defaults to the parallelism of the
    *                  resources allocated to this application (CPU cores here)
    */

  /**
    * Entry point: builds a local SparkContext, creates two RDDs via
    * `parallelize` (with default and explicit partition counts), and prints
    * their partition counts and contents to the driver's stdout.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // local[2] runs the driver and two executor threads in this JVM.
    val conf = new SparkConf()
      .setAppName("SparkParallelize")
      .setMaster("local[2]")

    val spark = new SparkContext(conf)

    // Default numSlices: the parallelism of the allocated resources (2 here).
    val data = spark.parallelize(1 to 10)
    println(data.partitions.length)
    // NOTE: rdd.foreach(println) would print on the executors, not the driver,
    // on a real cluster (and interleaves nondeterministically even in local
    // mode). Collect to the driver first so the output is driver-side and
    // ordered. Safe here because the dataset is tiny.
    data.collect().foreach(println)

    // Explicit numSlices: force 3 partitions regardless of core count.
    val data1 = spark.parallelize(1 to 10, 3)
    println(data1.partitions.length)

    spark.stop()
  }
}
