package com.sub.spark.core.rdd.instance;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;

/**
 * Demonstrates the three ways to create a Spark RDD (Resilient Distributed Dataset).
 * An RDD is an abstract class in Spark's code: a resilient, immutable, partitionable
 * collection whose elements can be computed in parallel. It is essentially a data model
 * (a Java object) that encapsulates processing logic but holds no data itself, which
 * makes it suitable for distributed computation across multiple partitions.
 *
 * <p>Creation paths shown: (1) from an in-memory collection via {@code parallelize},
 * (2) from a text file via {@code textFile}, (3) by transforming an existing RDD
 * via {@code map}.
 */
public class SparkRDDInstance {

    public static void main(String[] args) {

        // Spark configuration: local mode using all available cores
        SparkConf conf = new SparkConf();
        conf.setMaster("local[*]");
        conf.setAppName("sparkCore");

        // JavaSparkContext implements Closeable (close() delegates to stop()), so
        // try-with-resources guarantees the context is stopped even if any job fails —
        // the original only called stop() on the happy path.
        try (JavaSparkContext javaSparkContext = new JavaSparkContext(conf)) {

            // 1. Create an RDD from an in-memory collection
            System.out.println("create RDD by parallelize(like)");
            List<String> list = Arrays.asList("hadoop", "hdfs", "yarn", "mapreduce", "spark");
            JavaRDD<String> parallelize = javaSparkContext.parallelize(list);
            parallelize.collect().forEach(System.out::println);

            // 2. Create an RDD from a file; the default parallelism is
            // min(2, number of cores in the environment), usually 2
            System.out.println("create RDD by textFile(path)");
            JavaRDD<String> stringJavaRDD = javaSparkContext.textFile("data/demo/spark/num.txt", 2);
            stringJavaRDD.collect().forEach(System.out::println);

            // Save as text file. NOTE(review): saveAsTextFile fails if the "output"
            // directory already exists — delete it between runs.
            JavaRDD<String> stringRDD = javaSparkContext.parallelize(
                    Arrays.asList("hello", "spark", "hello", "spark", "hello", "sub", "demo", "big-data", "hadoop"));
            stringRDD.saveAsTextFile("output");

            // 3. Create an RDD by transforming an existing RDD
            System.out.println("create RDD by map");
            JavaRDD<String> mapRDD = stringJavaRDD.map(s -> s + " world");
            mapRDD.collect().forEach(System.out::println);
        }
    }
}
