package org.example;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;


/**
 * Minimal Spark demo: builds RDDs from a local collection and a text file, then
 * exercises {@code parallelize}, {@code textFile}, {@code map}, {@code collect},
 * partition counting, and {@code saveAsTextFile}.
 */
public class spake {
    public static void main(String[] args) {
        // try-with-resources guarantees the context is closed even when a Spark
        // action throws (the original sc.close() was skipped on any exception).
        try (final JavaSparkContext sc = new JavaSparkContext("local", "spark")) {
            final List<Integer> nums = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
            final JavaRDD<Integer> rdd = sc.parallelize(nums);
            System.out.println(rdd.collect());
            System.out.println(rdd.partitions().size());

            // 1. Read a text file into an RDD with (at least) 5 partitions.
            // NOTE(review): hard-coded Windows path — this fails on any other
            // machine; consider taking the path from args[0] instead.
            final JavaRDD<String> rddText =
                    sc.textFile("C:\\Users\\Administrator\\Desktop\\text.txt", 5);
            System.out.println(rddText.collect());

            // 2. Write the RDD back out as partitioned text files.
            // NOTE(review): saveAsTextFile throws if the "output" directory
            // already exists — delete it between runs.
            rddText.saveAsTextFile("output");

            // map: double every element. Declared as JavaRDD<Integer> — the
            // original JavaRDD<Object> needlessly erased the element type.
            final JavaRDD<Integer> mapRDD = rdd.map(num -> num * 2);
            System.out.println(mapRDD.collect());

            // Square each element of a 2-partition RDD and print the results.
            sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2)
                    .map(tp -> tp * tp)
                    .collect()
                    .forEach(System.out::println);
        }
    }
}


