package com.atbeijing.bigdata.spark.core.rdd.operator.transform

import org.apache.spark.{SparkConf, SparkContext}

object Spark18_Oper_Transform {

    def main(args: Array[String]): Unit = {

        // Local-mode Spark context for the transformation demo.
        val sparkConf = new SparkConf().setMaster("local[*]").setAppName("TransformOperator")
        val sc = new SparkContext(sparkConf)

        // TODO operator - transform - key/value - aggregateByKey (6 / 10)
        // Pair RDD spread over 2 partitions.
        val pairRdd = sc.makeRDD(
            List(
                ("a", 1), ("a", 2), ("b", 3),
                ("a", 4), ("b", 5), ("b", 6)
            ),
            2
        )

        // Expected per-key accumulator (sum, count):
        // (a, (7, 3))
        // (b, (14, 3))

        // Related operators for comparison:
        //pairRdd.reduceByKey(_+_)
        //pairRdd.aggregateByKey(0)(_+_, _+_)
        //pairRdd.foldByKey()

        // combineByKey takes three arguments:
        //   1. createCombiner  - converts the first value seen for a key into the accumulator structure
        //   2. mergeValue      - intra-partition combine rule
        //   3. mergeCombiners  - inter-partition combine rule
        val sumCountRdd = pairRdd.combineByKey(
            // first value for a key -> (runningSum, runningCount)
            (v: Int) => (v, 1),
            // fold another value from the same partition into the accumulator
            (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),
            // merge accumulators produced by different partitions
            (acc1: (Int, Int), acc2: (Int, Int)) => (acc1._1 + acc2._1, acc1._2 + acc2._2)
        )
        sumCountRdd.collect().foreach(println)
//        sumCountRdd.map{
//            case (key, ( total, count )) => {
//                (key, total/count)
//            }
//        }.collect().foreach(println)
//
//        println("=======================================")
//        pairRdd.combineByKey(
//            num => num,
//            (x:Int,y:Int ) => {
//                x + y
//            },
//            (x:Int, y:Int) => {
//                x + y
//            }
//        ).collect().foreach(println)

        sc.stop()

    }
    class User {

    }
}
