package com.offcn.spark.p3

import com.offcn.spark.p3.AggregateByKey.reduceBy
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo of Spark's `aggregateByKey`: groups students by province and collects
 * the students of each province into an array, printing the partition layout
 * and the per-province results along the way.
 *
 * @author BigData-LGW
 * @since 2020/12/7
 * @version 1.0
 */
object AggregateByKey2 {

    /**
     * Entry point: runs the `aggregateByKey` demo on a local SparkContext.
     *
     * @param args unused command-line arguments
     */
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[*]")
            .setAppName("AggregateByKey2")
        val sc = new SparkContext(conf)
        try {
            groupByKey(sc)
        } finally {
            // Always release the context, even if the demo job throws.
            sc.stop()
        }
    }

    /**
     * Demonstrates `aggregateByKey`: keys students by province and collects
     * all students of each province into one Array, then prints the result.
     *
     * NOTE(review): the method is named `groupByKey` but actually demonstrates
     * `aggregateByKey`; the name is kept unchanged for caller compatibility.
     *
     * @param sc active SparkContext used to build the demo RDD
     */
    def groupByKey(sc: SparkContext): Unit = {
        // Local case class: only used inside this job's closures.
        case class Student(id: Int, name: String, province: String)

        val stuRDD = sc.parallelize(List(
            Student(1, "唐玉峰", "安徽"),
            Student(11, "王世伟", "安徽"),
            Student(3, "胡国权", "甘肃"),
            Student(44, "old李", "甘肃"),
            Student(47, "董卓", "甘肃"),
            Student(5, "马惠", "黑吉辽"),
            Student(55, "刘龙沛", "黑吉辽"),
            Student(2, "李梦", "安徽"),
            Student(4, "陈延年", "甘肃"),
            Student(10086, "刘炳文", "黑吉辽")
        ), 2).mapPartitionsWithIndex((index, partition) => {
            // Materialize the partition so it can be both printed and re-emitted.
            val list = partition.toList
            println(s"-->stuRDD的分区编号为<${index}>中的数据为：${list.mkString("[", ", ", "]")}")
            list.toIterator
        })

        val pairs = stuRDD.map(stu => (stu.province, stu))

        // Within-partition step: prepend to a List (O(1)) instead of the
        // original Array `+:`, which copied the whole array per element
        // (accidentally quadratic per key).
        def seq(stus: List[Student], stu: Student): List[Student] =
            stu :: stus

        // Cross-partition step: concatenate the two partial lists.
        def comb(stus: List[Student], other: List[Student]): List[Student] =
            stus ::: other

        // Convert once per key at the end so the published result type stays
        // RDD[(String, Array[Student])], exactly as before.
        val province2Info: RDD[(String, Array[Student])] =
            pairs.aggregateByKey(List.empty[Student])(seq, comb).mapValues(_.toArray)

        province2Info.foreach {
            case (province, stus) =>
                println(s"province2Infos>>> 省份：${province}, 学生信息：${stus.mkString(", ")}, 人数：${stus.size}")
        }
    }
}
