package org.huangrui.spark.java.core.rdd.serial;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;

/**
 * @Author hr
 * @Create 2024-10-18 11:56
 */
/**
 * Demonstrates Spark closure serialization: RDD operator (method) logic runs on
 * the Executor side, while all other code runs on the Driver side, so any object
 * captured by an operator's closure must be serializable.
 */
public class Spark01_RDD_Serial_1 {
    public static void main(String[] args) {
        final SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("spark");

        // try-with-resources guarantees the context is closed even if match() throws;
        // the original called jsc.close() manually, which leaked on exception.
        try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
            final List<String> nums = Arrays.asList("Hadoop", "Hive", "Spark", "Flink");
            final JavaRDD<String> rdd = jsc.parallelize(nums, 2);

            // Search itself is not Serializable; match() is written so the filter
            // closure only captures a String, which is serializable.
            Search search = new Search("H");
            search.match(rdd);
        }
    }
}
/**
 * Holds a keyword and filters an RDD of strings down to those starting with it.
 *
 * <p>Note: this class is deliberately NOT {@code Serializable}. The lambda in
 * {@link #match(JavaRDD)} must therefore avoid capturing {@code this}; it copies
 * the field into a local {@code String} (already serializable) first, so only
 * that String is shipped to the Executors.
 */
class Search {
    private final String word;

    public Search(String word) {
        this.word = word;
    }

    /**
     * Prints every element of {@code rdd} that starts with {@link #word}.
     *
     * @param rdd the RDD of strings to search
     */
    public void match(JavaRDD<String> rdd) {
        // Copy the field to a local so the filter closure captures a String,
        // not the non-serializable enclosing Search instance.
        final String prefix = this.word;
        final JavaRDD<String> matched = rdd.filter(line -> line.startsWith(prefix));
        final List<String> results = matched.collect();
        for (String result : results) {
            System.out.println(result);
        }
    }
}