package com.jscloud.bigdata;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

/**
 * Classic Spark word count written against the Java API.
 *
 * <p>Reads a text file (HDFS path by default, overridable via the first CLI
 * argument), splits lines into words, counts occurrences per word with
 * {@code reduceByKey}, and prints each word and its total to stdout.
 */
public class JavaWordCount {
        public static void main(String[] args) {
                // SparkContext is the core entry point of Spark Core;
                // JavaSparkContext is its Java-facing wrapper.
                SparkConf sparkConf = new SparkConf().setAppName("sparkCount")
                        // local[4] would simulate Spark's distributed computation with 4 threads;
                        // local[*] uses one thread per local CPU core.
                        .setMaster("local[*]");

                // Input path is configurable via the first CLI argument; falls back to
                // the original hard-coded HDFS location for backward compatibility.
                String inputPath = args.length > 0 ? args[0] : "hdfs://bigdata01:8020/hello.txt";

                // NOTE(review): the original comment warned this runs best on JDK 8;
                // newer JDKs may need extra --add-opens flags for Spark's reflection.
                // try-with-resources guarantees the context is stopped even if the job throws
                // (JavaSparkContext implements Closeable; close() delegates to stop()).
                try (JavaSparkContext sparkContext = new JavaSparkContext(sparkConf)) {
                        // Reduce log noise to warnings and above.
                        sparkContext.setLogLevel("WARN");

                        // Read the input file as an RDD of lines.
                        JavaRDD<String> lines = sparkContext.textFile(inputPath);

                        // Split each line into words. Splitting on runs of whitespace
                        // (rather than a single space) avoids the empty tokens that
                        // consecutive spaces would produce; the filter drops the one
                        // empty leading token a line starting with whitespace yields.
                        JavaRDD<String> allWords = lines
                                .flatMap((String s) -> Arrays.asList(s.split("\\s+")).iterator())
                                .filter(word -> !word.isEmpty());

                        // Map each word to a (word, 1) pair.
                        JavaPairRDD<String, Integer> wordAndOne =
                                allWords.mapToPair(word -> new Tuple2<>(word, 1));

                        // Sum the per-word counts across the cluster.
                        JavaPairRDD<String, Integer> wordAndTotal =
                                wordAndOne.reduceByKey(Integer::sum);

                        // Print each word and its total — one value per line,
                        // matching the original output format.
                        for (Tuple2<String, Integer> entry : wordAndTotal.collect()) {
                                System.out.println(entry._1);
                                System.out.println(entry._2);
                        }
                }
        }
}
