package com.andnnl;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;


import java.util.Arrays;
import java.util.List;

/**
 * Spark word-count example: reads a text file from HDFS, counts word
 * frequencies, and prints the ten most frequent words to stdout.
 *
 * Created by chenss on 2017/12/6.
 */
public class JavaWordCount {

    /**
     * Entry point. Runs the word count against a hard-coded HDFS path using a
     * local Spark master and prints each of the top-ten words as "word -> count".
     *
     * @param args unused; the input path and master URL are currently hard-coded
     */
    public static void main(String[] args) {
        // Spark on Windows needs winutils via hadoop.home.dir; only set the
        // fallback path when the environment does not already provide one.
        if (System.getenv("HADOOP_HOME") == null && System.getProperty("hadoop.home.dir") == null) {
            System.setProperty("hadoop.home.dir", "D:\\hadoop-common-2.2.0-bin-master");
        }

        // Configure master/app name on SparkConf rather than through the
        // deprecated JavaSparkContext(master, appName, conf) constructor.
        SparkConf conf = new SparkConf()
                .setMaster("local")
                .setAppName("word count local");

        // JavaSparkContext is Closeable: try-with-resources guarantees the
        // context is stopped even if the job throws, unlike a bare sc.stop().
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            JavaRDD<String> lines = sc.textFile("hdfs://master/test.txt");

            // Split each line on single spaces and drop the empty tokens that
            // consecutive separators (or leading spaces) produce.
            JavaRDD<String> words = lines
                    .flatMap(line -> Arrays.asList(line.split(" ")).iterator())
                    .filter(word -> !word.isEmpty());

            // (word, 1) -> sum per word -> swap to (count, word) so sortByKey
            // can order by frequency descending -> swap back to (word, count).
            JavaPairRDD<String, Integer> counts = words
                    .mapToPair(w -> new Tuple2<>(w, 1))
                    .reduceByKey(Integer::sum)
                    .mapToPair(Tuple2::swap)
                    .sortByKey(false)
                    .mapToPair(Tuple2::swap);

            // Materialize only the top ten and print them on the driver.
            counts.take(10).forEach(t -> System.out.println(t._1() + " -> " + t._2()));
        }
    }
}
