package com.xahj.bd2104.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Created with IntelliJ IDEA.
 * Author: Amos
 * E-mail: amos@amoscloud.com
 * Date: 2021/9/14
 * Time: 8:30
 * Description: 
 */
/**
 * Demo of the transformations that are specific to pair RDDs (RDD[(K, V)]).
 *
 * Operations on RDD[(K, V)] are declared in PairRDDFunctions and become
 * available through an implicit conversion.
 */
object SparkCoreDemo6_transformations {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("demo6").setMaster("local[2]"))

    // Ensure the SparkContext is always stopped, even if a demo job throws;
    // the original version leaked the local cluster by never calling stop().
    try {
      val rdd: RDD[(String, Int)] = sc.makeRDD(List("a" -> 1, "a" -> 2, "a" -> 1, "b" -> 1, "b" -> 1))

      // 1. groupByKey
      //   Groups a key-value RDD by key WITHOUT aggregating (wide dependency).
      //   Result element type: (K, Iterable[V])
      //   def groupByKey(): RDD[(K, Iterable[V])]
      val rdd1 = rdd.groupByKey()
        .map(t => {
          // average value per key: sum of the group / size of the group
          (t._1, t._2.sum.toDouble / t._2.size)
        })
      //      .foreach(println)


      // 2. reduceByKey
      //   Aggregates values per key; the value type cannot change during the fold.
      //   def reduceByKey(func: (V, V) => V): RDD[(K, V)]
      // countByKey is an ACTION: it collects the per-key record counts to the driver.
      val k_count = rdd.countByKey()
      val rdd2 = rdd.reduceByKey(_ + _)
        .map(t => {
          // per-key average = per-key sum / per-key count
          (t._1, t._2.toDouble / k_count(t._1))
        })
      //      .foreach(println)


      // 3. Nearly every aggregation operator in Spark is built on
      //    combineByKeyWithClassTag(); combineByKey is a simplified wrapper for it.
      //   def combineByKey[C](
      //        Builds the C-typed combiner from the FIRST value seen for a key
      //        within each partition.
      //      createCombiner: V => C,
      //        Folds each remaining value of that key into the combiner.
      //      mergeValue: (C, V) => C,
      //        Merges the combiners of the same key across partitions into the
      //        final result.
      //      mergeCombiners: (C, C) => C
      //      ): RDD[(K, C)]

      // combineByKey vs. reduceByKey:
      //   combineByKey lets you choose the result value type C;
      //   reduceByKey is combineByKey specialized to C == V.
      rdd.combineByKey[CountSum](
        (v: Int) => CountSum(1, v),
        (c: CountSum, v: Int) => CountSum(c.count + 1, c.sum + v),
        (c1: CountSum, c2: CountSum) => c1.add(c2)
      )
        .map(t => {
          (t._1, t._2.avg())
        })
      //      .foreach(println)

      // 4. aggregateByKey
      //   A variant of combineByKey: the curried parameter lists separate the
      //   declaration of the result type from the aggregation logic.
      //   The first list supplies the zero value (and hence the result type U);
      //   the second list mirrors combineByKey's last two parameters.
      //def aggregateByKey[U: ClassTag]
      // (zeroValue: U)
      // (seqOp: (U, V) => U,
      //  combOp: (U, U) => U): RDD[(K, U)]
      rdd.aggregateByKey(CountSum(0, 0))(
        (u: CountSum, v: Int) => CountSum(u.count + 1, u.sum + v),
        (u1: CountSum, u2: CountSum) => u1.add(u2)
      )


      // 5. Sorting

      val rdd6: RDD[(String, Int, Double, Int)] = sc.makeRDD(List(
        ("amos", 18, 1.25, 175),
        ("tom", 28, 1222.25, 15),
        ("jerry", 16, 323.25, 85),
        ("史派克", 38, 1464.45, 250)
      ))

      // 5.1 sortByKey: key the RDD by the sort field, sort descending into a
      //     single partition, then drop the temporary key with .values.
      rdd6.map(t => {
        (t._3, t)
      })
        .sortByKey(ascending = false, numPartitions = 1)
        .values

      // 5.2 sortBy: same result without building the temporary (key, record) pair.
      //  def sortBy[K](
      //      f: (T) => K,
      //      ascending: Boolean = true,
      //      numPartitions: Int = this.partitions.length): RDD[T]
      rdd6.sortBy(_._3, ascending = false, numPartitions = 1)
      //      .foreach(println)


      // 6. join
      //   join           inner join: only keys present on both sides
      //   leftOuterJoin  all keys of the left side; missing right values are None
      //   rightOuterJoin all keys of the right side; missing left values are None
      //   fullOuterJoin  union of both key sets; missing values on either side are None


      val rdd7: RDD[(String, String)] = sc.makeRDD(List("k1" -> "v1", "k2" -> "v1"))
      val rdd8: RDD[(String, Int)] = sc.makeRDD(List("k2" -> 2, "k3" -> 3))

      //def join[W](other: RDD[(K, W)]): RDD[(K, (V, W))]
      val rdd9: RDD[(String, (String, Int))] = rdd7.join(rdd8)

      //def leftOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (V, Option[W]))]
      val rdd10 = rdd7.leftOuterJoin(rdd8)

      //def rightOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (Option[V], W))]
      val rdd11 = rdd7.rightOuterJoin(rdd8)

      //  def fullOuterJoin[W](other: RDD[(K, W)]): RDD[(K, (Option[V], Option[W]))]
      val rdd12 = rdd7.fullOuterJoin(rdd8)


      // 7. cogroup: groups both RDDs by key and joins the groups in one operator —
      //    equivalent in effect to the groupByKey + fullOuterJoin combination below.
      val rdd13: RDD[(String, Int)] = sc.makeRDD(List(
        "xiaomi" -> 6000,
        "xiaomi" -> 500,
        "huawei" -> 5000,
        "oppo" -> 60000,
        "huawei" -> 10000,
        "smartsien" -> 5000,
        "oneplus" -> 10000,
        "samsung" -> 10
      ))
      val rdd14: RDD[(String, Int)] = sc.makeRDD(List(
        "vivo" -> 2000,
        "oppo" -> 500,
        "huawei" -> 5000,
        "huawei" -> 10000,
        "smartsien" -> 5000,
        "oneplus" -> 10000,
        "oneplus" -> 200,
        "redmi" -> 10
      ))

      val rdd21 = rdd13.groupByKey()
      val rdd22 = rdd14.groupByKey()

      val rdd23 = rdd21.fullOuterJoin(rdd22)
      rdd23.foreach(println)
      // element shape: (k, (Option[Iterable[Int]], Option[Iterable[Int]]))


      println("--------------------------")
      rdd13.cogroup(rdd14)
        .foreach(println)

      //    println(rdd21.collect().toList)
      //    println(rdd22.collect().toList)
    } finally {
      sc.stop() // release the local cluster's resources in every exit path
    }
  }
}

/**
 * Running (count, sum) accumulator used by the combineByKey / aggregateByKey
 * demos to compute a per-key average.
 *
 * @param count number of values folded in so far
 * @param sum   running total of those values
 */
case class CountSum(count: Long, sum: Long) extends Serializable {

  /** Merges two partial accumulators field by field (associative and commutative). */
  def add(o: CountSum): CountSum = {
    CountSum(this.count + o.count, this.sum + o.sum)
  }

  /**
   * Average of the accumulated values.
   *
   * Note: when `count == 0` (e.g. the zero accumulator `CountSum(0, 0)`)
   * this returns `Double.NaN` — Double division by zero does not throw.
   */
  def avg(): Double = {
    this.sum.toDouble / this.count
  }
}
