package com.gome.han.bigdata.spark.core.rdd.createrdd.fromcollection

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author Hanpeng
 * @date 2021/1/10 20:40
 * @description: Create an RDD from an in-memory collection.
 */
object CreateRddFromMem {

  /**
   * Entry point. Demonstrates the two ways of creating an RDD from an
   * in-memory collection: `SparkContext.parallelize` and `SparkContext.makeRDD`.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // TODO Prepare the environment: local mode using all available cores.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("CreateRddFromMem")
    val sparkContext: SparkContext = new SparkContext(sparkConf)

    // TODO Create an RDD from memory: the in-memory collection is the data source.
    val seq: Seq[Int] = Seq[Int](1, 2, 3, 4)

    /* parallelize acts lazily: if `seq` were a mutable collection and were altered
     * after the call to parallelize but before the first action on the RDD, the
     * resulting RDD would reflect the modified collection. Pass a copy of the
     * argument to avoid this. In normal code this situation should be avoided.
     * Also avoid creating empty RDDs: an empty RDD has no partitions (or only
     * empty partitions).
     */
    // makeRDD is a thin wrapper whose implementation simply delegates to parallelize.
    // `rdd1` is intentionally kept (though unused) to illustrate the equivalent API.
    val rdd1: RDD[Int] = sparkContext.parallelize(seq)
    val rdd: RDD[Int] = sparkContext.makeRDD(seq)

    // collect() is an action: it returns an Array containing all elements of the RDD.
    // Equivalent forms: rdd.collect().foreach(i => println(i)) or foreach(println(_)).
    rdd.collect().foreach(println)

    // TODO Shut down the environment.
    sparkContext.stop()
  }

}





















