package spark_core.operate_transform.keyvalue;

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;


/**
 * @author shihb
 * @date 2020/1/8 11:43
 * Aggregates values by key: all values sharing the same key are combined
 * with the supplied reduce function. A map-side pre-aggregation (combine)
 * runs before the shuffle, which improves performance.
 */
public class ReduceByKeyDemo {

  public static void main(String[] args) {
    // Local mode: configure the Spark deployment environment via SparkConf.
    SparkConf sparkConf = new SparkConf().setMaster("local[*]").setAppName("mark rdd");
    // JavaSparkContext implements Closeable (close() delegates to stop()), so
    // try-with-resources guarantees the context is stopped even if the job throws.
    // The original only called sc.stop() on the normal path.
    try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {

      // Key-value operations require a JavaPairRDD, so build one directly.
      // Fixed: diamond <> on every Tuple2 — the original mixed raw Tuple2
      // instances into the list, defeating the <Integer, String> type check.
      JavaPairRDD<Integer, String> inputPairRDD = sc.parallelizePairs(Arrays
          .asList(new Tuple2<>(1, "aa"), new Tuple2<>(2, "bb"), new Tuple2<>(3, "cc"),
              new Tuple2<>(1, "dd")));

      // Optional numPartitions argument (here 2) sets the output partition count;
      // values with equal keys are concatenated by the lambda.
      JavaPairRDD<Integer, String> reduceByRdd = inputPairRDD
          .reduceByKey((o1, o2) -> o1 + o2, 2);
      // glom() groups each partition into a list so the partitioning is visible.
      reduceByRdd.glom().collect().forEach(System.out::println);
      // Expected output:
      // [(2,bb)]
      // [(1,aadd), (3,cc)]
    }
  }
}
