package com.offcn.bigdata.spark.p2

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates combineByKey, which is a simplified form of the
  * combineByKeyWithClassTag operator (it omits the explicit ClassTag).
  */
object _02CombineByKeyOps {
    def main(args: Array[String]): Unit = {
        // Local-mode Spark context; app name derived from this object's class name.
        val sparkConf = new SparkConf()
            .setAppName(s"${_02CombineByKeyOps.getClass.getSimpleName}")
            .setMaster("local[*]")
        val sc = new SparkContext(sparkConf)

        // Sample student records, distributed over two partitions so the
        // per-partition vs. cross-partition combiner calls are observable.
        val students = List(
            Student("郭雪磊", 18, "山东"),
            Student("单 松", 20, "山东"),
            Student("刘宇航", 18, "河北"),
            Student("王健", 18, "河南"),
            Student("许迎港", 18, "河北"),
            Student("元永劫", 18, "黑龙江"),
            Student("林博", 18, "黑龙江"),
            Student("李佳奥", 18, "河南"),
            Student("冯世明", 18, "黑龙江"),
            Student("肖楚轩", 18, "山东"),
            Student("张皓", 18, "河南"),
            Student("冯岩", 18, "黑龙江")
        )
        val infos = sc.parallelize(students, 2)

        cbk2gbk(infos)
//        cbk2rbk(infos)

        sc.stop()
    }

    /**
      * Counts the number of students per province, reproducing reduceByKey
      * semantics with combineByKey.
      *
      * @param infos RDD of student records to count by province
      */
    def cbk2rbk(infos: RDD[Student]): Unit = {
        // Log each partition's raw content, then key every record as (province, 1).
        val p2Info = infos.mapPartitionsWithIndex((index, it) => {
            val list = it.toList
            println(s"分区编号{${index}}中的数据为： ${list}")
            list.map(stu => (stu.province, 1)).toIterator
        })
        // Seeds the per-partition accumulator from the first value seen for a key.
        def createCombiner(num: Int): Int = num
        // Folds a further in-partition value into the accumulator.
        def mergeValue(c: Int, num: Int): Int = c + num
        // Combines accumulators across partitions.
        // Explicit return type added for consistency with the two functions above.
        def mergeCombiners(c1: Int, c2: Int): Int = c1 + c2
        p2Info.combineByKey(createCombiner, mergeValue, mergeCombiners)
            .foreach { case (province, num) =>
                println(s"省份：${province}对应的人数为：${num}")
            }
    }

    case class Student(name: String, age: Int, province: String)
    /**
      * Groups student records by province, first with groupByKey and then with
      * an equivalent combineByKey, to illustrate the three combiner functions.
      *
      * @param infos RDD of student records to group by province
      */
    def cbk2gbk(infos: RDD[Student]): Unit = {
        // Log what each partition holds, then key every record by its province.
        val keyedByProvince = infos.mapPartitionsWithIndex((index, it) => {
            val partitionData = it.toList
            println(s"分区编号{${index}}中的数据为： ${partitionData}")
            partitionData.map(stu => (stu.province, stu)).iterator
        })

        println("------使用groupByKey来进行分组")
        keyedByProvince.groupByKey().foreach { case (province, infos) =>
            println(s"省份：$province, 信息：${infos.toList}")
        }

        println("---------------combineByKey-----------------------")

        // Builds the per-partition accumulator (an Array) from the first value
        // seen for a key within that partition.
        def createCombiner(stu: Student): Array[Student] = {
            println(s"----createCombiner---${stu}")
            Array(stu)
        }

        // Folds a further in-partition value into an existing accumulator
        // (prepends, matching the original's element order).
        def mergeValue(array: Array[Student], stu: Student): Array[Student] = {
            println(s"==mergeValue===聚合类型：${array.toList}, 被聚合的值：${stu}")
            stu +: array
        }

        // Concatenates two per-partition accumulators for the same key.
        def mergeCombiners(array1: Array[Student], array2: Array[Student]): Array[Student] = {
            println(s"---mergeCombiners: array1: ${array1.toList}, array2: ${array2.toList}")
            array1 ++ array2
        }

        keyedByProvince.combineByKey(createCombiner, mergeValue, mergeCombiners)
            .foreach { case (province, infos) =>
                println(s"省份：$province, 信息：${infos.toList}")
            }
    }
}
