package day02.operator.transformation

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author wsl
 * @Description Key/value (pair-RDD) aggregation and join examples.
 */
/**
 * Demonstrates Spark pair-RDD (key/value) transformations: grouping,
 * sorting, joining, and the *ByKey aggregation family.
 */
object KeyValue_ByKey {
  def main(args: Array[String]): Unit = {
    // Local master using all cores — fine for a demo, never hard-code in production.
    val conf: SparkConf = new SparkConf().setAppName("rdd").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // rdd has a single partition so within-partition vs. cross-partition
    // aggregation logic (aggregateByKey etc.) is easy to trace; rdd2 has 3.
    val rdd: RDD[(String, Int)] = sc.makeRDD(Array(("a", 1), ("a", 3), ("a", 2), ("a", 4), ("a", 1)), 1)
    val rdd2: RDD[(String, Int)] = sc.makeRDD(Array(("a", 11), ("a", 33), ("b", 22), ("b", 44), ("b", 11)), 3)

    // groupByKey: regroup values under each key, e.g. (a,CompactBuffer(1, 3, 2, 4, 1))
    rdd.groupByKey().collect().foreach(println)

    // groupBy: groups the whole tuples (no value extraction), e.g.
    // (a,CompactBuffer((a,1), (a,3), ...)) — so a follow-up map sums the values per key.
    rdd.groupBy(_._1)
      .map {
        case (k, v) => (k, v.map(_._2).sum)
      }
      .collect().foreach(println)
    println("---------------------")

    // sortByKey: sort (k, v) pairs by key, descending (ascending = false).
    rdd.sortByKey(false).collect().foreach(println)
    println("---------by------------")

    // sortBy value, descending.
    // FIX: collect() before printing — foreach directly on an RDD executes the
    // closure on the executors (the driver sees no output in cluster mode) and
    // does not preserve the sorted order when printing partitions concurrently.
    rdd.sortBy(_._2, false).collect().foreach(println)
    println("---------------------")

    // mapValues: transform only the value side of each pair, key untouched.
    rdd.mapValues(_ + "||").collect().foreach(println)

    // inner join on key: only keys present in both RDDs survive.
    val joinRdd: RDD[(String, (Int, Int))] = rdd.join(rdd2)
    joinRdd.collect().foreach(println)

    // cogroup: full-outer-style grouping — for every key from either RDD, the
    // values from each side are gathered into separate Iterables.
    val coRdd: RDD[(String, (Iterable[Int], Iterable[Int]))] = rdd.cogroup(rdd2)
    coRdd.collect().foreach(println)

    // reduceByKey: aggregate values per key with map-side combining.
    // For this data (all keys "a") the result is (a,11).
    rdd
      //.reduceByKey((v1, v2) => v1 + v2)
      .reduceByKey(_ + _).collect().foreach(println)

    // aggregateByKey(zero)(seqOp, combOp): zero value, then a within-partition
    // function, then a cross-partition merge function.
    rdd.aggregateByKey(0)(_ + _, _ + _).collect().foreach(println)

    // foldByKey: shorthand for aggregateByKey where seqOp == combOp.
    rdd.foldByKey(0)(_ + _).collect().foreach(println)

    // combineByKey: like aggregateByKey, but the initial accumulator is derived
    // from the first value seen, allowing a change of value type — here
    // (sum, count) per key, then mapped to the per-key mean.
    rdd.combineByKey(
      v => (v, 1),
      (t: (Int, Int), v) => (t._1 + v, t._2 + 1),
      (t1: (Int, Int), t2: (Int, Int)) => (t1._1 + t2._1, t1._2 + t2._2)
    )
      .map(t => (t._1, t._2._1 / t._2._2.toDouble))
      .collect().foreach(println)

    sc.stop()
  }
}
