package javademo;

import org.apache.commons.lang3.StringUtils;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

/**
 * Demonstrates basic RDD operations ({@code textFile}, {@code filter},
 * {@code union}, {@code cache}, {@code count}, {@code collect}, {@code take})
 * by extracting heading and numbered lines from README.md.
 *
 * <p>Project: spark-learn · Created/updated: 2019-03-15
 *
 * @author lds
 * @version v1.0
 * @since jdk1.8
 */
public class TextFileDemo {
    /**
     * Matches lines that start with a single digit followed by a dot, e.g. "1. item".
     * Compiled once — the original called Pattern.matches(...) inside the filter
     * lambda, recompiling the regex for every record.
     */
    private static final Pattern NUM_LINE = Pattern.compile("^\\d\\..*$");

    public static void main(String[] args) {
        // JavaSparkContext implements Closeable; try-with-resources guarantees the
        // context is stopped even on failure (the original never closed it).
        try (JavaSparkContext sc = SparkContextFactory.getContext("textFile")) {
            final JavaRDD<String> linesRDD = sc.textFile("README.md");

            // Keep only markdown heading lines.
            final JavaRDD<String> titleRDD = linesRDD.filter(line -> line.contains("####"));
            // Keep only numbered list lines, e.g. "1. something".
            final JavaRDD<String> numLinesRDD = linesRDD.filter(line -> NUM_LINE.matcher(line).matches());
            // Merge both datasets; cache because the result is reused three times
            // below (count, collect, take).
            final JavaRDD<String> unionRDD = titleRDD.union(numLinesRDD).cache();

            // Print the number of combined lines.
            final long lineCount = unionRDD.count();
            System.out.println("union count:"+lineCount);

            // NOTE: unionRDD.foreach(System.out::println) would run on the
            // executors (output not visible on the driver, and the method
            // reference may fail serialization) — collect to the driver instead.
            final List<String> titleAndNumLineList = unionRDD.collect();
            titleAndNumLineList.forEach(System.out::println);

            // Java equivalent of Python's ','.join([1,2,2]).
            final String join = StringUtils.join(Arrays.asList(1, 2, 2), ",");
            System.out.println("join:"+join);

            // Sample output: at most the first 10 elements.
            unionRDD.take(10).forEach(System.out::println);
        }
    }
}
