package org.shj.spark.operator;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;

/**
 * Demonstrates the {@code mapPartitionsWithIndex} transformation: each element
 * is tagged with the index of the partition it resides in, then printed.
 */
public class MapPartitionsWithIndexOperator {

	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setMaster("local").setAppName("MapPartitionsWithIndexOperator");
		// try-with-resources ensures the SparkContext is stopped even if the
		// job throws; the original code leaked it.
		try (JavaSparkContext sc = new JavaSparkContext(conf)) {

			List<String> names = Arrays.asList("zhuyin", "wangfei", "linzhilin");

			// parallelize with an explicit parallelism of 2: the RDD gets 2
			// partitions, so the list elements are split across 2 partitions.
			JavaRDD<String> nameRDD = sc.parallelize(names, 2);

			// Second argument (preservesPartitioning = true) keeps the
			// partitioner of the parent RDD.
			JavaRDD<String> mapResult = nameRDD.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>(){
				private static final long serialVersionUID = -1747435593374271212L;

				/**
				 * Prefixes every element of this partition with its partition index.
				 *
				 * @param partitionInd index of the partition being processed
				 * @param it           iterator over the partition's elements
				 * @return iterator over the tagged elements
				 */
				@Override
				public Iterator<String> call(Integer partitionInd, Iterator<String> it) throws Exception {
					// ArrayList is the idiomatic choice for append-then-iterate;
					// LinkedList offers no benefit here.
					List<String> list = new ArrayList<String>();
					while(it.hasNext()){
						String name = it.next();
						list.add("Partition " + partitionInd + " : " + name);
					}
					return list.iterator();
				}

			}, true);

			// foreach is an action: it triggers the job and prints each
			// tagged element on the executors (locally here, so visible).
			mapResult.foreach(new VoidFunction<String>(){
				private static final long serialVersionUID = -6925032523880930946L;

				@Override
				public void call(String t) throws Exception {
					System.out.println(t);
				}

			});
		}
	}

}
