package com.bingo.demo;

import java.io.File;
import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import com.bingo.util.FileTools;

import scala.Tuple2;

/**
 * Standalone Spark word-count driver.
 *
 * <p>Reads a text file, counts word occurrences (splitting lines on single spaces),
 * and writes the 10 most frequent words — one {@code word<TAB>count} line each —
 * to the given result file.
 *
 * <p>Usage: {@code WordCountStandalone <master> <dataFilePath> <resultFilePath>}
 */
public class WordCountStandalone {
	public static void main(String[] args) {
		// Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
		if (args.length < 3) {
			System.err.println("Usage: WordCountStandalone <master> <dataFilePath> <resultFilePath>");
			System.exit(1);
		}
		String master = args[0];
		String dataFilePath = args[1];
		String resultFilePath = args[2];

		// Configure Spark: application name plus master URL (e.g. "local[*]" or "spark://host:port").
		SparkConf conf = new SparkConf().setMaster(master).setAppName("WordCountStandalone");

		// JavaSparkContext implements Closeable; try-with-resources guarantees the
		// context is shut down even when a Spark action throws (the original leaked it).
		try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
			// Load the input file; each RDD element is one line of text.
			JavaRDD<String> lines = jsc.textFile(dataFilePath);

			// Split every line on single spaces into individual words.
			JavaRDD<String> words = lines.flatMap(line -> Arrays.asList(line.split(" ")).iterator());

			// Pair each word with 1, then sum the 1s per word -> (word, totalCount).
			JavaPairRDD<String, Integer> counts =
					words.mapToPair(word -> new Tuple2<>(word, 1)).reduceByKey((a, b) -> a + b);

			// Swap to (count, word) so sortByKey(false) orders by frequency, descending.
			JavaPairRDD<Integer, String> byFrequency =
					counts.mapToPair(t -> new Tuple2<>(t._2(), t._1())).sortByKey(false);

			// Collect only the 10 most frequent entries to the driver.
			List<Tuple2<Integer, String>> top10 = byFrequency.take(10);

			// Format one "word<TAB>count" line per entry.
			StringBuilder resultSb = new StringBuilder();
			for (Tuple2<Integer, String> entry : top10) {
				resultSb.append(entry._2()).append('\t').append(entry._1()).append('\n');
			}

			try {
				FileTools.writeInFile(new File(resultFilePath), resultSb.toString(), false);
			} catch (Exception e) {
				// NOTE(review): FileTools.writeInFile's contract isn't visible from this file;
				// keep the original best-effort behavior but say which path failed.
				System.err.println("Failed to write result to " + resultFilePath);
				e.printStackTrace();
			}
		}
	}
}
