package com.gin.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates pair-RDD aggregation operators: groupByKey, flatMap / flatMapValues,
 * reduceByKey, join, and combineByKey, computing per-key top-N, sum, max, count and mean.
 */
object L04_Aggregator {

  def main(args: Array[String]): Unit = {
    // Load configuration and obtain the Spark context (single-threaded local mode).
    val conf = new SparkConf().setMaster("local").setAppName("L01")
    val sc = new SparkContext(conf)
    // Print only error logs so the demo output stays readable.
    sc.setLogLevel("ERROR")

    try {
      // (name, score) pairs; several records per key.
      val data = sc.parallelize(List(
        ("zhangsan", 234),
        ("zhangsan", 5667),
        ("zhangsan", 343),
        ("lisi", 212),
        ("lisi", 44),
        ("lisi", 33),
        ("wangwu", 535),
        ("wangwu", 22)
      ))

      // Group all values under each key: key -> CompactBuffer(v1, v2, ...).
      val groupByKey = data.groupByKey()
      groupByKey.foreach(println)

      println("-------- groupByKey --------")
      println()

      // Invert the grouping: (zhangsan, CompactBuffer(234, 5667, 343)) back to
      // (zhangsan, 234), (zhangsan, 5667), (zhangsan, 343).
      // flatMap is one-in, many-out: each grouped record expands to one pair per value.
      val rollbackGroupByKey = groupByKey.flatMap {
        // k is the key (e.g. "zhangsan"); vs is the iterable of grouped values.
        case (k, vs) =>
          // flatMap expects a TraversableOnce, so expose the mapped values as an iterator.
          vs.map(v => (k, v)).iterator
      }
      rollbackGroupByKey.foreach(println)

      println("-------- rollbackGroupByKey --------")
      println()

      // Shorthand for the rollback above: flatMapValues keeps the key automatically.
      //val rollbackGroupByKey2 = groupByKey.flatMapValues(e => e.iterator)
      //rollbackGroupByKey2.foreach(println)

      println("-------- rollbackGroupByKey2 --------")
      println()

      // Per key: sort scores ascending and keep the two smallest; result stays grouped
      // as (key, List(a, b)).
      groupByKey.mapValues(vs => vs.toList.sorted.take(2)).foreach(println)

      println("-------- map sort take --------")
      println()

      // Same top-2 selection, but flattened back to individual (key, score) pairs.
      groupByKey.flatMapValues(vs => vs.toList.sorted.take(2)).foreach(println)

      println("-------- flatmap sort take --------")
      println()

      // Total score per key.
      val sum = data.reduceByKey((x, y) => x + y)
      sum.foreach(println)

      println("-------- sum --------")
      println()

      // Maximum score per key.
      val max = data.reduceByKey((acc, v) => if (acc > v) acc else v)
      max.foreach(println)

      println("-------- max --------")
      println()

      // Record count per key: map every value to 1, then add them up.
      val count = data.mapValues(_ => 1).reduceByKey((acc, v) => acc + v)
      count.foreach(println)

      println("-------- count --------")
      println()

      // Mean = sum / count. The join produces (key, (sum, count)).
      // FIX: convert to Double before dividing — integer division truncated the mean
      // (e.g. zhangsan 6244/3 printed 2081 instead of 2081.33...).
      val avg = sum.join(count).mapValues { case (s, c) => s.toDouble / c }
      avg.foreach(println)

      println("-------- avg --------")
      println()

      // Same mean via combineByKey: a single shuffle with map-side combining,
      // which performs far better than the groupByKey/join approach above.
      data.combineByKey(
        //createCombiner: V => C
        // How the first value seen for a key is stored: (runningSum, runningCount).
        (value: Int) => (value, 1),

        //mergeValue: (C, V) => C
        // How each subsequent value within a partition folds into the accumulator.
        (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),

        //mergeCombiners: (C, C) => C
        // How per-partition (and spilled) accumulators are merged across partitions.
        (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)

        // FIX: Double division here as well, to avoid a truncated mean.
      ).mapValues { case (s, c) => s.toDouble / c }.foreach(println)

      println("-------- avg by combineByKey --------")
      println()
    } finally {
      // FIX: always release the SparkContext so the local cluster shuts down cleanly,
      // even if one of the actions above throws.
      sc.stop()
    }
  }

}
