package org.shj.spark.operator;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;

/**
 * Demonstrates the {@code mapPartitions} operator: maps each name in an RDD
 * to a score via a driver-side lookup table, then prints the scores.
 *
 * Run with a local master; no cluster required.
 */
public class MapPartitionsOperator {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setMaster("local").setAppName("MapPartitionsOperator");
		// try-with-resources closes the SparkContext even when a job throws;
		// the original trailing ctx.close() leaked the context on any exception.
		try (JavaSparkContext ctx = new JavaSparkContext(conf)) {

			List<String> names = Arrays.asList("zhuyin", "wangfei", "linzhilin");
			JavaRDD<String> nameRDD = ctx.parallelize(names);

			// Lookup table captured by the mapPartitions closure below; it must be
			// (effectively) final and serializable so Spark can ship it to executors.
			final Map<String, Integer> scoreMap = new HashMap<String, Integer>();
			scoreMap.put("zhuyin", 150);
			scoreMap.put("wangfei", 350);
			scoreMap.put("linzhilin", 450);

			// map: processes one element of a partition per call.
			// mapPartitions: processes ALL elements of a partition in one call.
			//
			// Recommended usage: when the RDD is not overly large, mapPartitions
			// can be faster than map because per-call overhead is paid once per
			// partition instead of once per element.

			JavaRDD<Integer> mapResults = nameRDD.mapPartitions(new FlatMapFunction<Iterator<String>, Integer>(){
				private static final long serialVersionUID = -7683890113299050533L;

				/**
				 * Maps every name in this partition to its score.
				 * getOrDefault guards against a null entry (and a downstream
				 * NullPointerException) if a name is missing from the table.
				 */
				@Override
				public Iterator<Integer> call(Iterator<String> it) throws Exception {
					// ArrayList over LinkedList: append-then-iterate needs no
					// per-node allocation and has better cache locality.
					List<Integer> scores = new ArrayList<Integer>();
					while (it.hasNext()) {
						scores.add(scoreMap.getOrDefault(it.next(), 0));
					}
					return scores.iterator();
				}
			});

			// Print each score on the executor side (local mode: same JVM).
			mapResults.foreach(new VoidFunction<Integer>(){
				private static final long serialVersionUID = 1L;

				@Override
				public void call(Integer t) throws Exception {
					System.out.println(t);
				}
			});
		}

	}

}
