package com.lb.bigdata.spark.core.p2

import com.lb.bigdata.spark.core.p2._02CombineByKeyOps.Student
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo of `aggregateByKey`: groups students by province and prints the
 * head-count per province.
 *
 * `aggregateByKey(zeroValue)(seqOp, combOp)` differs from `reduceByKey` in
 * that the accumulator type (here `List[Student]`) may differ from the value
 * type, and a per-partition zero value is supplied explicitly.
 */
object _03AggregateBykeyOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
                .setMaster("local[*]")
                .setAppName(s"${_03AggregateBykeyOps.getClass.getSimpleName}")
        val sc = new SparkContext(conf)

        // Ensure the context is released even if the job throws.
        try {
            val stu = sc.parallelize(List(
                Student("郭雪磊", 18, "山东"),
                Student("单 松", 20, "山东"),
                Student("刘宇航", 18, "河北"),
                Student("王健", 18, "河南"),
                Student("许迎港", 18, "河北"),
                Student("元永劫", 18, "黑龙江"),
                Student("林博", 18, "黑龙江"),
                Student("李佳奥", 18, "河南"),
                Student("冯世明", 18, "黑龙江"),
                Student("肖楚轩", 18, "山东"),
                Student("张皓", 18, "河南"),
                Student("冯岩", 18, "黑龙江")
            ), 2)

            // Key each student by province for the aggregation below.
            val pro2stu: RDD[(String, Student)] = stu.map(s => (s.province, s))

            // seqOp: within a partition, fold one value into the accumulator.
            // List prepend is O(1), unlike Array :+ which copies on every append;
            // only the element count is consumed downstream, so order is irrelevant.
            def seqOp(acc: List[Student], s: Student): List[Student] = s :: acc

            // combOp: merge the per-partition accumulators for the same key.
            def combOp(acc1: List[Student], acc2: List[Student]): List[Student] =
                acc1 ::: acc2

            pro2stu.aggregateByKey(List.empty[Student])(seqOp, combOp)
                    .foreach { case (province, students) =>
                        println(s"省份: ${province}, 人数: ${students.size}")
                    }
        } finally {
            // Release executors and driver-side resources.
            sc.stop()
        }
    }
}
