package org.example;


import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

import java.util.Arrays;
import java.util.List;

public class sparkYun2_base {
    /**
     * Entry point demonstrating basic Spark RDD operations against a local
     * master: parallelize, map, textFile I/O, saveAsTextFile, and a chained
     * map/sortBy/collect pipeline.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // Create the Spark runtime context. try-with-resources guarantees the
        // context is released even if an exception is thrown mid-job (the
        // original only called close() on the happy path).
        try (final JavaSparkContext sc = new JavaSparkContext("local", "spark")) {
            final List<Integer> nums = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
            final JavaRDD<Integer> rdd = sc.parallelize(nums);
            System.out.println(rdd.collect());
            System.out.println(rdd.partitions().size());

            // Exercise: use map to double every element of the RDD.
            final JavaRDD<Integer> doubledRDD = rdd.map(num -> num * 2);
            System.out.println(doubledRDD.collect());

            // Read a text file into an RDD (one element per line).
            // NOTE(review): hard-coded Windows path — consider taking it from args.
            final JavaRDD<String> rddText = sc.textFile("C:\\Users\\Administrator\\Desktop\\text.txt");
            System.out.println(rddText.collect());

            // Save the RDD as partitioned text files.
            // NOTE(review): saveAsTextFile throws if the "output" directory already exists.
            rddText.saveAsTextFile("output");

            // Lambda shorthand: a single-abstract-method interface such as
            // Function<Integer, Object> can be written as an arrow function,
            // dropping the anonymous-class boilerplate entirely.
            final JavaRDD<Object> mapRDD = rdd.map(num -> num * 2);
            System.out.println(mapRDD.collect());

            // Chained style: square each element, sort descending into a
            // single partition, collect to the driver, and print each value.
            sc.parallelize(Arrays.asList(1, 2, 3, 4, 5), 2)
                    .map(tp -> tp * tp)
                    .sortBy(cp -> cp, false, 1)
                    .collect()
                    .forEach(System.out::println);
        }
        // Context is auto-closed here, releasing Spark resources.
    }
}
