package com.lb.bigdata.spark.core.p2

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object _02CombineByKeyOps {
    def main(args: Array[String]): Unit = {
        // getClass.getSimpleName on a Scala object carries a trailing '$'
        // (e.g. "_02CombineByKeyOps$"); strip it for a clean Spark app name.
        val conf = new SparkConf()
                .setMaster("local[*]")
                .setAppName(_02CombineByKeyOps.getClass.getSimpleName.stripSuffix("$"))
        val sc = new SparkContext(conf)

        val stu = sc.parallelize(List(
            Student("郭雪磊", 18, "山东"),
            Student("单 松", 20, "山东"),
            Student("刘宇航", 18, "河北"),
            Student("王健", 18, "河南"),
            Student("许迎港", 18, "河北"),
            Student("元永劫", 18, "黑龙江"),
            Student("林博", 18, "黑龙江"),
            Student("李佳奥", 18, "河南"),
            Student("冯世明", 18, "黑龙江"),
            Student("肖楚轩", 18, "山东"),
            Student("张皓", 18, "河南"),
            Student("冯岩", 18, "黑龙江")
        ), 2)

        // Count the number of students per province.
        val pro2stu: RDD[(String, Student)] = stu.map(stu => (stu.province, stu))

        /*
            createCombiner: builds the per-partition accumulator from the first
            value seen for a key within a partition. Only the count is needed,
            so the accumulator is a plain Int — the previous Array[Student]
            accumulator copied the whole array on every append (O(n^2) per key)
            and shipped all Student objects across the shuffle for no benefit.
         */
        def createCombiner(stu: Student): Int = 1

        /**
         * mergeValue: within one partition, folds each additional value for
         * the key into the accumulator created by createCombiner.
         */
        def mergeValue(count: Int, stu: Student): Int = count + 1

        /**
         * mergeCombiners: merges the per-partition accumulators for the same
         * key across partitions (runs on the shuffle-read side).
         */
        def mergeCombiners(c1: Int, c2: Int): Int = c1 + c2

        pro2stu.combineByKey(createCombiner, mergeValue, mergeCombiners)
                .foreach { case (province, count) =>
                    // Executes on the executors; ordering across provinces is
                    // nondeterministic, same as the original foreach.
                    println(s"省份: ${province}, 人数: ${count}")
                }

        sc.stop()
    }

    /** Immutable student record; `province` is the grouping key for the count. */
    case class Student(name: String, age: Int, province: String)
}
