/**
 * Illustrates reading and writing a Hadoop SequenceFile from Spark in Java.
 */
package com.hsj;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;

/**
 * Example of reading and saving a Hadoop SequenceFile via {@code hadoopFile}.
 *
 * <p>Reads {@code Text}/{@code IntWritable} pairs with the old (mapred)
 * {@link SequenceFileInputFormat}, converts them to plain Java types for use,
 * then converts back to Writables and saves with
 * {@link SequenceFileOutputFormat}.
 */
public class BasicHadoopFile {
    public static void main(String[] args) throws Exception {
        // Spark master URL: first CLI argument, or "local" for in-process runs.
        String master = args.length > 0 ? args[0] : "local";
        JavaSparkContext sc = new JavaSparkContext(
                master, "BasicHadoopFile", System.getenv("SPARK_HOME"), System.getenv("JARS"));
        try {
            // Load the SequenceFile (presumably written by a companion
            // BasicSaveSequenceFile example — confirm the path exists).
            JavaPairRDD<Text, IntWritable> rdd = sc.hadoopFile(
                    "BasicSaveSequenceFile/part-00000",
                    SequenceFileInputFormat.class, Text.class, IntWritable.class);

            // Copy Writable contents into plain Java objects. Hadoop record
            // readers reuse Writable instances, so extracting String/Integer
            // values up front avoids aliasing surprises on collect/cache.
            JavaPairRDD<String, Integer> result = rdd.mapToPair(
                    new PairFunction<Tuple2<Text, IntWritable>, String, Integer>() {
                        @Override
                        public Tuple2<String, Integer> call(Tuple2<Text, IntWritable> pair) {
                            return new Tuple2<String, Integer>(pair._1.toString(), pair._2.get());
                        }
                    });

            System.out.println(result.collect());

            // Convert back to Writables before saving. saveAsHadoopFile with
            // SequenceFileOutputFormat requires the RDD's runtime key/value
            // types to match the declared Text/IntWritable classes; the
            // original code passed the <String, Integer> RDD here, which
            // fails at runtime.
            JavaPairRDD<Text, IntWritable> writables = result.mapToPair(
                    new PairFunction<Tuple2<String, Integer>, Text, IntWritable>() {
                        @Override
                        public Tuple2<Text, IntWritable> call(Tuple2<String, Integer> pair) {
                            return new Tuple2<Text, IntWritable>(
                                    new Text(pair._1), new IntWritable(pair._2));
                        }
                    });
            // Another way of saving a SequenceFile (vs. saveAsSequenceFile).
            writables.saveAsHadoopFile(
                    "filePath", Text.class, IntWritable.class, SequenceFileOutputFormat.class);
        } finally {
            // Release the SparkContext even if the job throws.
            sc.stop();
        }
    }
}
