package com.atguigu0.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description: Walk-through of the common Spark RDD transformation operators
 *               (value-type, two-RDD, and key-value), printing each result.
 * @time: 2020/6/11 14:41
 * @author: baojinlong
 **/
object RddTransformDemos {

  /**
   * Demonstrates the common Spark RDD transformation operators and prints the
   * materialised result of each one.
   *
   * NOTE: transformations are lazy — nothing runs until an action such as
   * `collect` is invoked, so every example below collects before printing.
   * Printing a raw `RDD` or `Array` only shows its `toString`
   * (e.g. `[I@1a2b3c`), hence the `mkString` calls throughout.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("myWordCount").setMaster("local[*]")
    // Driver-side entry point to the Spark cluster (local[*] = all local cores)
    val sc: SparkContext = new SparkContext(sparkConf)

    // Three equivalent ways to build an RDD from a local collection
    val value: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4, 5))
    println(value.collect().mkString(", "))
    val value1: RDD[Int] = sc.makeRDD(Array(1, 2, 3, 4, 5))
    println(value1.collect().mkString(", "))
    val value2: RDD[Int] = sc.parallelize(1 to 11)
    println(value2.collect().mkString(", "))

    println("1: map")
    // map applies the function element by element
    println(value2.map(_ * 2).collect().mkString(", "))

    println("2: mapPartitions")
    // mapPartitions invokes the function once per partition (fewer calls,
    // better throughput) but works on a whole partition's iterator, so it
    // trades memory for speed.
    println(value2.mapPartitions(_.map(_ * 2)).collect().mkString(", "))

    println("3: mapPartitionsWithIndex")
    // Like mapPartitions, but the function also receives the partition index.
    // Slicing rule: slice i of numSlices over length L covers
    // [i*L/numSlices, (i+1)*L/numSlices) — e.g. length 6 over 2 slices gives
    // [0,3) and [3,6). In local mode the default partition count is the core
    // count; on a cluster it is math.max(totalCoreCount, 2).
    val indexed: Array[(Int, Int)] =
      value2.mapPartitionsWithIndex((index, items) => items.map((index, _))).collect()
    println(indexed.mkString(", "))

    println("4: flatMap")
    // Each element n expands to the range 1..n, then the results are flattened
    println(value2.flatMap(1 to _).collect().mkString(", "))

    println("5: glom")
    // glom turns each partition into an Array, yielding RDD[Array[Int]]
    val glomRdd: RDD[Int] = sc.parallelize(1 to 16, 4)
    println(glomRdd.glom().collect().map(_.mkString("[", ",", "]")).mkString(", "))

    println("6: groupBy")
    // Group the elements by parity
    println(value2.groupBy(_ % 2).collect().mkString(", "))

    println("7: filter")
    val sourceFilter: RDD[String] = sc.parallelize(Array("xiaoming", "xiaojiang", "xiaohe", "dazhi"))
    println(sourceFilter.filter(_.contains("xiao")).collect().mkString(", "))

    println("8: sample")
    // Sampling with replacement; a fixed seed makes the draw reproducible
    println(value2.sample(withReplacement = true, fraction = 0.4, seed = 2).collect().mkString(", "))

    println("9: distinct")
    println(value2.distinct().collect().mkString(", "))
    println(value2.distinct(numPartitions = 2).collect().mkString(", "))

    println("10: coalesce")
    val value5: RDD[Int] = sc.parallelize(1 to 16, 4)
    // coalesce only shrinks the partition count unless shuffle = true,
    // in which case it can also grow it
    val value6: RDD[Int] = value5.coalesce(numPartitions = 2)
    println("new partition count: " + value6.partitions.length)

    println("11: repartition")
    // repartition always shuffles (it is coalesce with shuffle = true)
    val value7: RDD[Int] = value5.repartition(numPartitions = 2)
    println("new partition count: " + value7.partitions.length)

    println("12: sortBy")
    println(value5.sortBy(_ % 3).collect().mkString(", "))

    println("-- two-RDD (double-value) operators --")
    println("13: union")
    val value8: RDD[Int] = sc.parallelize(1 to 5)
    println(value8.union(value5).collect().mkString(", "))

    println("14: subtract")
    println(value8.subtract(value5).collect().mkString(", "))

    println("15: intersection")
    println(value8.intersection(value5).collect().mkString(", "))

    println("16: cartesian")
    println(value8.cartesian(value5).collect().mkString(", "))

    println("17: zip")
    // zip requires both RDDs to have the same partition count AND the same
    // number of elements per partition, so 6 to 10 (5 elements) pairs with
    // value8 (1 to 5); an unequal zip throws when collected.
    val value13: RDD[Int] = sc.parallelize(6 to 10)
    println(value13.zip(value8).collect().mkString(", "))

    println("-- key-value operators --")

    println("18: partitionBy")
    // partitionBy re-partitions a pair RDD; if the existing partitioner already
    // equals the requested one no shuffle happens, otherwise a ShuffledRDD (and
    // thus a shuffle) is produced. Only key-value RDDs carry a partitioner;
    // plain RDDs have partitions but no partitioner.
    val value3: RDD[Int] = sc.parallelize(1 to 8, 4)
    val pairRdd: RDD[(Int, Int)] = value3.map((_, 1))
    val value4: RDD[(Int, Int)] = pairRdd.partitionBy(new org.apache.spark.HashPartitioner(2))
    println(value4.mapPartitionsWithIndex((index, items) => items.map((index, _))).collect().mkString(", "))

    println("19: reduceByKey")
    // Sums the values of each key; expected (female,6), (male,7).
    // reduceByKey pre-aggregates (map-side combine) before the shuffle.
    val value15: RDD[(String, Int)] = sc.parallelize(List(("female", 1), ("male", 5), ("female", 5), ("male", 2)))
    println(value15.reduceByKey(_ + _).collect().mkString(", "))

    println("20: groupByKey")
    // groupByKey shuffles every record with no map-side combine —
    // prefer reduceByKey when an aggregation follows the grouping.
    val words: Array[String] = Array("one", "two", "two", "three", "three", "three")
    val wordPairsRDD: RDD[(String, Int)] = sc.parallelize(words).map((_, 1))
    println(wordPairsRDD.groupByKey().collect().mkString(", "))

    println("21: aggregateByKey")
    // zeroValue seeds each partition; math.max combines within a partition,
    // _ + _ merges the per-partition maxima across partitions
    val value17: RDD[(String, Int)] = sc.parallelize(List(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)), 2)
    println(value17.aggregateByKey(zeroValue = 0)(math.max, _ + _).collect().mkString(", "))

    println("22: foldByKey")
    // foldByKey is aggregateByKey with the same function within and across partitions
    val value19: RDD[(Int, Int)] = sc.parallelize(List((1, 3), (1, 2), (1, 4), (2, 3), (3, 6), (3, 8)), 3)
    println(value19.foldByKey(zeroValue = 0)(_ + _).collect().mkString(", "))

    println("23: combineByKey")
    // Builds a per-key (sum, count) pair, then derives the average score
    val value21: RDD[(String, Int)] = sc.parallelize(Array(("a", 88), ("b", 95), ("a", 91), ("b", 93), ("a", 95), ("b", 98)), 2)
    val sumCount: RDD[(String, (Int, Int))] = value21.combineByKey(
      (_, 1),                                                       // createCombiner: first value -> (value, 1)
      (acc: (Int, Int), v) => (acc._1 + v, acc._2 + 1),             // mergeValue: fold a value into a partition-local combiner
      (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)  // mergeCombiners: merge combiners across partitions
    )
    println(sumCount.collect().mkString(", "))
    // toDouble BEFORE dividing — integer division would truncate the average
    println(sumCount.map { case (key, (sum, count)) => (key, sum.toDouble / count) }.collect().mkString(", "))

    println("24: sortByKey")
    val value24: RDD[(Int, String)] = sc.parallelize(Array((3, "aa"), (6, "cc"), (2, "bb"), (1, "dd")))
    println(value24.sortByKey(ascending = true).collect().mkString(", "))

    println("25: mapValues")
    // mapValues transforms only the value, leaving key and partitioning intact
    val value25: RDD[(Int, String)] = sc.parallelize(Array((1, "a"), (1, "d"), (2, "b"), (3, "c")))
    println(value25.mapValues(_ + "|||").collect().mkString(", "))

    println("26: join")
    // Inner join on key: only keys present on both sides survive
    val value26: RDD[(Int, String)] = sc.parallelize(Array((1, "a"), (2, "b"), (3, "c")))
    val value27: RDD[(Int, Int)] = sc.parallelize(Array((1, 4), (1, 44), (2, 5), (4, 6)))
    println(value26.join(value27).collect().mkString(", "))

    println("27: cogroup")
    // cogroup pairs, per key, the Iterable of values from each RDD
    val value28: RDD[(Int, String)] = sc.parallelize(Array((1, "a"), (2, "b"), (3, "c")))
    val value29: RDD[(Int, Int)] = sc.parallelize(Array((1, 4), (2, 5), (3, 6)))
    println(value28.cogroup(value29).collect().mkString(", "))

    println("--- exercise: render each partition's data as one String ---")
    val value30: RDD[String] = sc.parallelize(Array("a", "b", "c", "d"), 2)
    println(value30.mapPartitionsWithIndex((index, items) => items.map((index, _))).collect().mkString(", "))
    // Approach 1: glom each partition to an Array, then mkString it
    println("approach 1: " + value30.glom().map(_.mkString).collect().mkString(", "))
    // Approach 2: mapPartitions emitting a single joined String per partition
    println("approach 2: " + value30.mapPartitions(x => Iterator(x.mkString("|"))).collect().mkString(", "))

    println("ok-end")
    // Release the driver's resources
    sc.stop()
  }
}
