package rdd.operate;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Iterator;

// groupByKey operates on tuples, i.e. on (k, v) key-value pairs:
// it aggregates the values v by key k, producing (k, [v1, v2, ...]).
/**
 * Demonstrates {@code groupByKey} on a Spark pair RDD.
 *
 * <p>Builds a small (String, Integer) pair RDD, groups the values by key into
 * (key, Iterable&lt;Integer&gt;) pairs, then sums each group with
 * {@code mapValues}, printing each intermediate result.
 */
public class Spark5758_Operate_groupByKey {
    public static void main(String[] args) {
        final SparkConf conf = new SparkConf();
        conf.setMaster("local");
        conf.setAppName("spark");
        final JavaSparkContext jsc = new JavaSparkContext(conf);

        // Build a pair RDD directly from (key, value) tuples.
        final JavaPairRDD<String, Integer> pairRDD = jsc.parallelizePairs(
                Arrays.asList(
                        new Tuple2<>("a", 1),
                        new Tuple2<>("b", 2),
                        new Tuple2<>("a", 3),
                        new Tuple2<>("b", 4)
                )
        );
        pairRDD.collect().forEach(System.out::println);

        // Group values by key: ("a",1),("a",3) -> ("a",[1,3]), etc.
        final JavaPairRDD<String, Iterable<Integer>> groupByKeyRDD = pairRDD.groupByKey();
        groupByKeyRDD.collect().forEach(System.out::println);

        // Sum the grouped values per key, e.g. ("a",[1,3]) -> ("a",4).
        final JavaPairRDD<String, Integer> count = groupByKeyRDD.mapValues(
                values -> {
                    int sum = 0;
                    // Enhanced for-loop over the Iterable instead of a manual Iterator.
                    for (final int v : values) {
                        sum += v;
                    }
                    return sum;
                }
        );

        count.collect().forEach(System.out::println);
        jsc.close(); // release the SparkContext and its resources
    }
}
