package rdd

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDD_Transformation {
  /**
   * Demonstrates the common RDD transformations (map, mapPartitions,
   * mapPartitionsWithIndex, flatMap, filter, sample, distinct, keyBy,
   * groupBy, sortBy, glom, repartition, coalesce, randomSplit) on a
   * local Spark cluster. Each section prints its result to stdout.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("RDD_Transformation")

    val sc: SparkContext = new SparkContext(conf)
    val intsList = List(1, 2, 3, 3)
    val intsRDD: RDD[Int] = sc.parallelize(intsList)

    // map(func): apply a named function to every element.
    val rdd1 = intsRDD.map(addOne)
    rdd1.collect().foreach(println)
    // Same transformation expressed with a lambda.
    val rdd2 = intsRDD.map(x => x * 2)
    rdd2.collect().foreach(println)
    println("*" * 50)

    // mapPartitions(func): func runs once per partition over that
    // partition's whole iterator. Iterator(...) keeps the element type
    // Int — the previous Tuple1(...).productIterator widened the result
    // to RDD[Any] for no benefit.
    val numRDD: RDD[Int] = sc.parallelize(List.range(1, 11, 1), 2)
    val rdd3: RDD[Int] = numRDD.mapPartitions(iter => Iterator(iter.sum))
    rdd3.collect().foreach(println)
    println("*" * 100)

    // mapPartitionsWithIndex(func): like mapPartitions, but func also
    // receives the partition index. Emit (index, sum) pairs so the two
    // values stay associated instead of being flattened to untyped
    // elements via productIterator.
    sc
      .parallelize(Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 2)
      .mapPartitionsWithIndex(
        (partitionIndex: Int, iter: Iterator[Int]) =>
          Iterator((partitionIndex, iter.sum))
      )
      .collect().foreach(println)
    println()
    println("*" * 100)

    // flatMap(func): func returns a sequence per element; results are flattened.
    val charRDD: RDD[Char] = sc
      .parallelize(Array("hello", "world"), 2)
      .flatMap(str => str.toCharArray)
    charRDD.collect().foreach(c => print(c + " "))
    println()
    println("*" * 100)

    // filter(func): keep only elements for which func is true.
    sc
      .parallelize(List.range(1, 11, 1), 2)
      .filter(_ % 2 == 0)
      .collect().foreach(num => print(num + " "))
    println()
    println("*" * 100)

    // sample(withReplacement, fraction, seed): random sample; without an
    // explicit seed the output differs between runs.
    sc
      .parallelize(List.range(1, 11, 1), 2)
      .sample(withReplacement = false, fraction = 0.6)
      .collect().foreach(num => print(num + " "))
    println()
    println("*" * 100)

    // distinct([numPartitions]): remove duplicate elements.
    sc
      .parallelize(Array(1, 2, 3, 3, 5, 5, 7, 7, 9), 2)
      .distinct()
      .collect().foreach(num => print(num + " "))


    // mapPartitionsWithIndex can also branch on the partition index to
    // run a different computation per partition.
    sc.parallelize(List.range(1, 11, 1), 2)
      .mapPartitionsWithIndex(
        (partitionIndex: Int, iter: Iterator[Int]) =>
          partitionIndex match {
            case 0 => Iterator((partitionIndex, iter.product)) // partition 0: product
            case 1 => Iterator((partitionIndex, iter.sum))     // partition 1: sum
            case _ => Iterator.empty                           // any other partition: emit nothing
          }
      )
      .collect().foreach { case (index, value) =>
        // Typed tuples let us destructure directly — no erased
        // isInstanceOf[(Int, Int)] check needed.
        println("分区号：" + index + ",数值：" + value)
      }
    /*
    分区号：0,数值：120
    分区号：1,数值：40
     */

    // keyBy(func): on an RDD[T], returns RDD[(K, T)] where K = func(element).
    val nameRDD: RDD[String] = sc.parallelize(Array("John", "Fred", "Anna", "James"), 2)
    val namePairRDD: RDD[(Char, String)] = nameRDD.keyBy(name => name.charAt(0))
    val keyByResultString: String = namePairRDD.collect().mkString(",")
    println(keyByResultString)
    println("*" * 100)

    // groupBy(func): on an RDD[T], returns RDD[(K, Iterable[T])].
    val namesRDD: RDD[String] = sc.parallelize(Array("Joseph", "Jimmy", "Tina", "Thomas", "James", "Cory",
      "Christine", "Jackeline", "Juan"), 4)
    val namesPairRDD: RDD[(Char, Iterable[String])] = namesRDD.groupBy(name => name.charAt(0))
    val groupByResultString = namesPairRDD.collect().mkString(",")
    println(groupByResultString)
    // Example: word count via groupBy. The group's size IS the count —
    // no need for count(word => true).
    val wordRDD = sc.parallelize(List("hello", "world", "hello", "scala", "hello", "world"))
    val groupByRDD = wordRDD.groupBy(word => word)
    val wordCountResult: RDD[(String, Int)] = groupByRDD.map(t => (t._1, t._2.size))
    println(wordCountResult.collect().mkString(","))

    // sortBy(keyFunc, ascending): sort the RDD by the derived key.
    val studentRDD = sc.parallelize(List(("小明", 20), ("小红", 19), ("小黑", 21)))
    val sortByRDD = studentRDD.sortBy(student => student._2, ascending = false)
    println(sortByRDD.collect().mkString(","))

    // glom(): merge all elements of each partition into one array —
    // RDD[T] becomes RDD[Array[T]], one array per partition.
    val numbersRDD: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 4)
    val glomRDD: RDD[Array[Int]] = numbersRDD.glom()
    glomRDD.collect().foreach(arr => println(arr.mkString(",")))
    println("*" * 100)

    // repartition(n): reshuffle the data into exactly n partitions
    // (always incurs a full shuffle).
    val rangeRDD = sc.parallelize(List.range(1, 101, 1), 4)
    println(rangeRDD.getNumPartitions)
    val repartitionRDD = rangeRDD.repartition(1)
    println(repartitionRDD.getNumPartitions)

    // coalesce(n, shuffle = false): decrease the number of partitions
    // without a shuffle. NOTE: without shuffle it can only DECREASE the
    // count — asking a 4-partition RDD for 6 still yields 4.
    val coalesceRDD = rangeRDD.coalesce(6, shuffle = false)
    println(coalesceRDD.getNumPartitions)
    println("*" * 100)

    // randomSplit(weights): randomly split the RDD by the given weights,
    // returning the resulting RDDs as an array.
    val splitArrayRDD: Array[RDD[Int]] = sc
      .parallelize(Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
      .randomSplit(Array(0.8, 0.2))
    println(splitArrayRDD(0).collect().mkString(","))
    println(splitArrayRDD(1).collect().mkString(","))



    sc.stop()
  }

  /** Returns its argument incremented by one; used as a named map function. */
  def addOne(number: Int): Int = {
    number + 1
  }
}
