package rdd

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDD_PairRDDOperation {

  /**
   * Demonstrates the common transformations and actions on pair (key-value)
   * RDDs: key/value projection, per-key aggregation (reduceByKey, foldByKey,
   * aggregateByKey, combineByKey), per-key sampling, grouping, and the join
   * family. Runs locally and prints each result to stdout.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("RDD_PairRDDOperation")

    val sc: SparkContext = new SparkContext(conf)
    val pairRDD: RDD[(Int, Int)] = sc.parallelize(Seq((1, 2), (3, 4), (3, 6)))

    // keys: project out only the keys.
    val keysRDD: RDD[Int] = pairRDD.keys
    println(keysRDD.collect().mkString(","))

    // values: project out only the values.
    val valuesRDD: RDD[Int] = pairRDD.values
    println(valuesRDD.collect().mkString(","))

    // mapValues(func): apply the function to each value, leaving keys untouched.
    val mapValuesRDD: RDD[(Int, Int)] = pairRDD.mapValues(x => x * x)
    println(mapValuesRDD.collect().mkString(","))

    // flatMapValues(func): map each value to a TraversableOnce[U] and flatten,
    // pairing every produced element with the original key.
    val flatMapValuesRDD: RDD[(Int, Int)] = pairRDD.flatMapValues(x => 1 to x)
    println(flatMapValuesRDD.collect().mkString(","))

    // sortByKey(): sort by key (K must have an Ordering); ascending by default,
    // descending when ascending = false.
    val sortByKeyRDD: RDD[(Int, Int)] = pairRDD.sortByKey(ascending = false)
    println(sortByKeyRDD.collect().mkString(","))

    // groupByKey(): group all values of each key into an Iterable, turning an
    // RDD[(K, V)] into an RDD[(K, Iterable[V])].
    val groupByKeyRDD: RDD[(Int, Iterable[Int])] = pairRDD.groupByKey()
    println(groupByKeyRDD.collect().mkString(","))

    // reduceByKey(func): merge the values of each key using an associative
    // (V, V) => V function; combines map-side before shuffling.
    val reduceByKeyRDD: RDD[(Int, Int)] = pairRDD.reduceByKey((x, y) => x + y)
    println(reduceByKeyRDD.collect().mkString(","))

    // foldByKey(zero)(func): like reduceByKey, but seeded with a zero value.
    // The zero may be applied once per partition, so it must be the neutral
    // element of func (Nil for list concatenation, 0 for addition, 1 for
    // multiplication). Fixed: was 1, which inflated each per-partition sum.
    val foldByKeyRDD: RDD[(Int, Int)] = pairRDD.foldByKey(0)((x, y) => x + y)
    println(foldByKeyRDD.collect().mkString(","))

    // aggregateByKey(zero)(seqOp, combOp): aggregate the values of each key
    // into a possibly different result type U. The zero value is used once per
    // partition, so it must be neutral for seqOp; 0 is neutral for summation.
    // Fixed: was 1, which inflated each per-partition sum.
    val aggregateByKeyRDD: RDD[(Int, Int)] = pairRDD.aggregateByKey(0)((x, y) => x + y, (a, b) => a + b)
    println(aggregateByKeyRDD.collect().mkString(","))

    // combineByKey: compute (sum, count) per key in one pass to derive the mean.
    val combineByKeyResultRDD: RDD[(Null, (Int, Int))] = sc
      .parallelize(List(1, 2, 3, 4, 5, 6), 2)
      .map(num => (null, num))
      .combineByKey(
        num => (num, 1),                                  // createCombiner: first value seen for a key
        (c: (Int, Int), n: Int) => (c._1 + n, c._2 + 1),  // mergeValue: fold another value into (sum, count)
        // mergeCombiners: add both sums and both counts.
        // Fixed: counts were merged as c2._2 + c2._2 (doubling c2's count and
        // dropping c1's), which only looked correct because both partitions
        // here happen to hold the same number of elements.
        (c1: (Int, Int), c2: (Int, Int)) => (c1._1 + c2._1, c1._2 + c2._2))
    val resultTuple: (Null, (Int, Int)) = combineByKeyResultRDD.collect()(0)
    println(resultTuple)
    val avgResult: Double = resultTuple._2._1.toDouble / resultTuple._2._2
    println("平均值为：" + avgResult)

    // sampleByKey: sample without replacement, with a per-key sampling fraction.
    val sampleByKeyRDD: RDD[(Int, Null)] = sc
      .parallelize(List(1, 2, 3, 4, 5, 6))
      .map(num => (num, null))
      .sampleByKey(false, Map[Int, Double](1 -> 0.1, 2 -> 0.2, 3 -> 0.3, 4 -> 0.1, 5 -> 0.1, 6 -> 0.2))
    println(sampleByKeyRDD.collect().mkString(","))

    // cogroup: for RDD[(K, V)] and RDD[(K, W)], returns an
    // RDD[(K, (Iterable[V], Iterable[W]))] keyed by the union of keys.
    val pairRDD1: RDD[(String, Int)] = sc.parallelize(
      List(
        ("a", 1), ("b", 2), ("a", 3)
      ))
    val pairRDD2: RDD[(String, String)] = sc.parallelize(
      Array(
        ("b", "2"), ("a", "1")
      )
    )
    println(pairRDD1
      .cogroup(pairRDD2)
      .collect().mkString(","))

    // groupWith[W]: alias for cogroup (demonstrated on pairRDD10/pairRDD20 below).
    // pairRDD1.groupWith(pairRDD2)

    // partitionBy: returns a copy of the RDD partitioned by the given Partitioner.

    // Actions on pair RDDs.
    val students = List(("计算机系","张三"),("数学系","李四"),("计算机系","王老五"),("数学系","赵老六"))
    val studentsPairRDD: RDD[(String, String)] = sc.parallelize(students)
    // countByKey: count elements per key, returned to the driver as a local Map.
    val countByKeyMapResult: collection.Map[String, Long] = studentsPairRDD.countByKey()
    println(countByKeyMapResult)
    // println(studentsPairRDD.countByValue())
    // collectAsMap: bring the pair RDD back to the driver as a Map
    // (if a key occurs more than once, one of its values wins).
    println(studentsPairRDD.collectAsMap())

    // Set-like and join operations on pair RDDs.
    val pairRDD10 = sc.parallelize(Seq((1, 2), (3, 4), (3, 6)))
    val pairRDD20 = sc.parallelize(Seq((3, 9)))

    // join: inner join — keep only keys present in both RDDs.
    println(pairRDD10
      .join(pairRDD20)
      .collect()
      .mkString(","))

    // leftOuterJoin: keep every key of the left RDD; right values are Options.
    println(pairRDD10
      .leftOuterJoin(pairRDD20)
      .collect()
      .mkString(","))

    // rightOuterJoin: keep every key of the right RDD; left values are Options.
    println(pairRDD10
      .rightOuterJoin(pairRDD20)
      .collect()
      .mkString(","))

    // fullOuterJoin: keep every key of either RDD; both sides are Options.
    println(pairRDD10
      .fullOuterJoin(pairRDD20)
      .collect()
      .mkString(","))

    // cogroup: group values of both RDDs by key.
    println(pairRDD10
      .cogroup(pairRDD20)
      .collect()
      .mkString(","))

    // groupWith: alias for cogroup — same result as above.
    println(pairRDD10
      .groupWith(pairRDD20)
      .collect()
      .mkString(","))

    // combineByKey: compute the overall sum and element count in one pass.
    val combineByKeyRDD = sc
      .parallelize(List(1, 2, 3, 4, 5, 6), 2)
      .map(num => (null, num))
      .combineByKey(
        value => (value, 1),
        (c: (Int, Int), value: Int) => (c._1 + value, c._2 + 1),
        (c1: (Int, Int), c2: (Int, Int)) => (c1._1 + c2._1, c1._2 + c2._2)
      )
    val tupleResult = combineByKeyRDD.collect()(0)
    println("所有元素的和：" + tupleResult._2._1)
    println("所有元素的个数：" + tupleResult._2._2)

    sc.stop()
  }
}
