package rdd;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.storage.StorageLevel;

/**
 * Minimal Spark RDD walkthrough: builds a line-length RDD three ways
 * (lambda, anonymous class, named function classes) and reduces it to a
 * total character count. Runs with a local master.
 */
public class SimpleRDD {

	@SuppressWarnings("serial")
	public static void main(String[] args) {
		// Windows needs winutils from a Hadoop install; point Spark at it.
		System.setProperty("hadoop.home.dir", "D:\\Program Files\\hadoop-2.7.6");
		// A Spark program must first create a JavaSparkContext, which tells
		// Spark how to access the cluster.
		SparkConf conf = new SparkConf().setAppName("RDD-yt").setMaster("local");
		// try-with-resources guarantees the context is closed even if a job fails
		// (the original called sc.close() only on the happy path).
		try (JavaSparkContext sc = new JavaSparkContext(conf)) {
			JavaRDD<String> distFile = sc.textFile("D:\\Program Files\\spark-2.3.1-bin-hadoop2.7\\examples\\src\\main\\resources\\data.txt");
			JavaRDD<Integer> lineLengths = distFile.map(s -> s.length());
			// Persist to memory BEFORE the first action so the cache is
			// populated when reduce() runs; the original persisted after the
			// action, which caches nothing useful for it.
			lineLengths.persist(StorageLevel.MEMORY_ONLY());
			Integer totalLength = lineLengths.reduce((a, b) -> a + b);
			System.out.println(totalLength);

			// The lambda style above; the same pipeline with anonymous classes:
			JavaRDD<String> lines = sc.textFile("data.txt");
			JavaRDD<Integer> linesLength = lines.map(new Function<String, Integer>() {
				@Override
				public Integer call(String v1) throws Exception {
					return v1.length();
				}
			});
			linesLength.reduce(new Function2<Integer, Integer, Integer>() {
				@Override
				public Integer call(Integer v1, Integer v2) {
					return v1 + v2;
				}
			});

			// Or implement the function interfaces in named classes:
			JavaRDD<String> textFile = sc.textFile("data.txt");
			JavaRDD<Integer> javaRDD = textFile.map(new GetLength());
			// Sum is a static nested class, so no enclosing instance is needed.
			Integer length = javaRDD.reduce(new Sum());
			System.out.println(length);
		}
	}

	/**
	 * Adds two integers; used as the reduce function above.
	 *
	 * <p>Declared {@code static}: a non-static inner class carries a hidden
	 * reference to the enclosing {@code SimpleRDD} instance, and since
	 * {@code SimpleRDD} is not {@code Serializable}, Spark would fail with
	 * "Task not serializable" when shipping this function to executors.
	 */
	@SuppressWarnings("serial")
	static class Sum implements Function2<Integer, Integer, Integer> {
		@Override
		public Integer call(Integer v1, Integer v2) throws Exception {
			return v1 + v2;
		}
	}

}
