package com.atguigu.bigdata.spark.core.wc;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.codehaus.janino.Java;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

/**
 * Demonstrates four equivalent ways to compute a word count with the Spark
 * Java API: groupBy, groupByKey, reduceByKey, and aggregateByKey.
 * All four variants run on the same tiny in-memory data set and print
 * their result to stdout.
 */
public class Spark03_WordCount_JAVA {

    /** Integer addition as a Spark {@code Function2}; shared by reduceByKey and aggregateByKey. */
    private static final Function2<Integer, Integer, Integer> SUM =
            new Function2<Integer, Integer, Integer>() {
                @Override
                public Integer call(Integer a, Integer b) throws Exception {
                    return a + b;
                }
            };

    public static void main(String[] args) {
        // 1. Build the Spark configuration: local mode using all available cores.
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("sparkCore");

        // 2. Create the JavaSparkContext, the entry point for all RDD operations.
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            // Run the same word count four ways to compare the operators.
            wordcount1(sc);
            wordcount2(sc);
            wordcount3(sc);
            wordcount4(sc);
        } finally {
            // Release cluster resources even if one of the jobs throws.
            sc.stop();
        }
    }

    /**
     * Builds the shared input for every example: two text lines parallelized
     * across 2 partitions and split into individual words
     * ("Hello", "Scala", "Hello", "Spark").
     */
    private static JavaRDD<String> words(JavaSparkContext sc) {
        JavaRDD<String> lines = sc.parallelize(Arrays.asList("Hello Scala", "Hello Spark"), 2);
        return lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String line) throws Exception {
                return Arrays.asList(line.split(" ")).iterator();
            }
        });
    }

    /** Maps every word to a (word, 1) pair — the standard word-count preprocessing step. */
    private static JavaPairRDD<String, Integer> wordOnes(JavaSparkContext sc) {
        return words(sc).mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String word) throws Exception {
                return new Tuple2<>(word, 1);
            }
        });
    }

    /** Counts the elements of an iterable; used by the group-then-count examples. */
    private static <T> int size(Iterable<T> values) {
        int n = 0;
        for (T ignored : values) {
            n++;
        }
        return n;
    }

    // groupBy: groups records by a key function. The partition count stays the same by
    // default, but the data is shuffled and recombined; in the extreme case everything
    // may end up in a single partition.
    private static void wordcount1(JavaSparkContext sc) {
        JavaPairRDD<String, Iterable<String>> grouped = words(sc).groupBy(
                new Function<String, String>() {
                    @Override
                    public String call(String word) throws Exception {
                        return word; // the word itself is the grouping key
                    }
                });
        // The count of each word is simply the size of its group.
        JavaPairRDD<String, Integer> counts = grouped.mapValues(
                new Function<Iterable<String>, Integer>() {
                    @Override
                    public Integer call(Iterable<String> group) throws Exception {
                        return size(group);
                    }
                });
        System.out.println(counts.collect());
    }

    // groupByKey: groups the values of the source data by key, then counts each group.
    private static void wordcount2(JavaSparkContext sc) {
        JavaPairRDD<String, Iterable<Integer>> grouped = wordOnes(sc).groupByKey();
        JavaPairRDD<String, Integer> counts = grouped.mapValues(
                new Function<Iterable<Integer>, Integer>() {
                    @Override
                    public Integer call(Iterable<Integer> ones) throws Exception {
                        return size(ones);
                    }
                });
        System.out.println(counts.collect());
    }

    // reduceByKey: aggregates the values sharing the same key.
    private static void wordcount3(JavaSparkContext sc) {
        System.out.println(wordOnes(sc).reduceByKey(SUM).collect());
    }

    // aggregateByKey: applies separate within-partition and between-partition functions,
    // starting from a zero value; here both functions are plain addition, so the result
    // is the same as reduceByKey.
    private static void wordcount4(JavaSparkContext sc) {
        System.out.println(wordOnes(sc).aggregateByKey(0, SUM, SUM).collect());
    }
}
