package com.atguigu.bigdata.spark.core.rdd.operator.transform;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

public class Spark15_RDD_Operator_Transform_JAVA {
    /**
     * Demonstrates the {@code reduceByKey} transformation on a Key-Value RDD
     * using the Java API: sums the values of all tuples sharing the same key.
     */
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("sparkCore");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // TODO Operators - (Key-Value type)
        List<Tuple2<String, Integer>> list = Arrays.asList(
                new Tuple2<>("1", 1),
                new Tuple2<>("1", 2),
                new Tuple2<>("2", 2),
                new Tuple2<>("1", 3),
                new Tuple2<>("2", 4));

        // parallelizePairs creates a JavaPairRDD directly from the tuples,
        // avoiding the redundant identity mapToPair(t -> t) step that a plain
        // parallelize + mapToPair would require.
        JavaPairRDD<String, Integer> pairRDD = sc.parallelizePairs(list, 2);

        // reduceByKey: aggregates the values of entries that share the same key.
        // Scala (which Spark is built on) generally aggregates pairwise, so
        // Spark's aggregation is also two-at-a-time:
        //   [1, 2, 3] -> [3, 3] -> [6]
        // If a key has only a single value, the reduce function is never invoked
        // for it. reduceByKey also pre-aggregates within each partition
        // (map-side combine), reducing the amount of data spilled/shuffled.
        JavaPairRDD<String, Integer> res = pairRDD.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });

        // println invokes toString() itself; the explicit call was redundant.
        System.out.println(res.collect());

        sc.stop();
    }
}
