package com.alison.scala

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RddTest {
  def main(args: Array[String]): Unit = {
    // Each test method builds (and now stops) its own local SparkContext.
    // Uncomment exactly one call at a time — only one active SparkContext
    // is allowed per JVM by default.
    //    testMapPartition()
    //    testMap()
    //    testMapPartitionsWithIndex()
    //    testFlatMap()
    //    testgroup()
    testfilter() // side-effecting 0-arity method: call with ()
    //    testsample()
    //    testpipe()
    //    testgroupby()
    //    testreduceby()
    //    testaggregate()
    //    testcombine()
    //    testjoin()
    //    testReduce()
    //    testSaveAsXXX()
  }


  /** Demonstrates writing an RDD out with saveAsTextFile (saveAsObjectFile shown commented). */
  def testSaveAsXXX() = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val rdd1 = spark.parallelize(1 to 10, 2)
      rdd1.saveAsTextFile("D:/111") // the path names a directory; one part-file per partition
      //    rdd1.saveAsObjectFile("D:/111")
    } finally {
      spark.stop() // release the SparkContext even if the save fails
    }
  }

  /** Aggregates all elements of 1..10: reduce / aggregate / fold variants, all printing 55. */
  def testReduce(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val rdd1 = spark.parallelize(1 to 10, 2)
      //    val res = rdd1.reduce(_ + _)
      //    val res = rdd1.aggregate(0)(_+_, _+_)
      val res = rdd1.fold(0)(_ + _) // fold = aggregate with identical seq/comb ops
      println(res)
    } finally {
      spark.stop()
    }
  }

  // join: called on RDDs of type (K, V) and (K, W), returns an RDD of (K, (V, W))
  // pairing up all elements that share the same key.

  /** Demonstrates join vs cogroup on two pair RDDs with matching keys. */
  def testjoin() = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val rdd1 = spark.parallelize(Array((1, "a"), (2, "b"), (3, "c")))
      val rdd2 = spark.parallelize(Array((1, 4), (2, 5), (3, 6)))
      // join keeps one tuple per matching key pair:
      //   (1,(a,4))
      //   (2,(b,5))
      //   (3,(c,6))
      rdd1.join(rdd2).collect().foreach(println)
      println()
      // cogroup gathers ALL values per key from both sides:
      //   (1,(CompactBuffer(a),CompactBuffer(4)))
      //   (2,(CompactBuffer(b),CompactBuffer(5)))
      //   (3,(CompactBuffer(c),CompactBuffer(6)))
      rdd1.cogroup(rdd2).collect().foreach(println)
    } finally {
      spark.stop()
    }
  }


  /**
   * combineByKey: accumulates a per-key (sum, count) pair, then derives a per-key average.
   *   createCombiner: v => (v, 1)                first value seen for a key in a partition
   *   mergeValue:     add value, bump the count  (within one partition)
   *   mergeCombiners: add sums and counts        (across partitions)
   */
  def testcombine() = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(Array(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)), 2)
      val combineRdd = input.combineByKey(
        (_, 1),
        (acc: (Int, Int), v) => (acc._1 + v, acc._2 + 1),
        (acc1: (Int, Int), acc2: (Int, Int)) => (acc1._1 + acc2._1, acc1._2 + acc2._2))
      // Expected: (b,(3,1)) (a,(5,2)) (c,(18,3))
      combineRdd.collect().foreach(println)
      println()
      // Average per key via mapValues (key left untouched)
      combineRdd.mapValues(v => v._1 / v._2.toDouble).collect().foreach(println)
      println()
      // Same averages via map + pattern match: (b,3.0) (a,2.5) (c,6.0)
      val resultrdd = combineRdd.map { case (key, value) => (key, value._1 / value._2.toDouble) }
      resultrdd.collect().foreach(println)
    } finally {
      spark.stop()
    }
  }

  // aggregateByKey(zeroValue: U, [partitioner: Partitioner])(seqOp: (U, V) => U, combOp: (U, U) => U)
  // Groups the values of a pair RDD by key. Within each partition, seqOp folds
  // every value into an accumulator starting from zeroValue; combOp then merges
  // the per-partition accumulators for the same key into the final (key, result).
  //   zeroValue: initial accumulator value
  //   seqOp:     folds each value within one partition
  //   combOp:    merges accumulators across partitions

  /** Contrasts aggregateByKey and foldByKey on the same 2-partition pair RDD. */
  def testaggregate(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)), 2)
      // Take the per-partition maximum for each key, then sum those maxima.
      // Expected: (b,3) (a,3) (c,12)
      val aggrdd = input.aggregateByKey(0)(math.max(_, _), _ + _)
      aggrdd.collect().foreach(println)
      println()
      // foldByKey is aggregateByKey with seqOp == combOp: plain per-key sum.
      // Expected: (b,3) (a,5) (c,18)
      // NOTE: foreach runs on the executors, so output order is not deterministic.
      val foldrdd = input.foldByKey(0)(_ + _)
      foldrdd.foreach(println)
    } finally {
      spark.stop()
    }
  }
  // reduceByKey is more efficient than groupByKey: it pre-reduces values inside
  // each worker before grouping by key, whereas groupByKey shuffles everything
  // first and only reduces afterwards.

  /** reduceByKey: sums the values that share a key. */
  def testreduceby(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(("female", 1), ("male", 5), ("female", 5), ("male", 2)))
      // Expected: (female,6) (male,7)
      val reducerdd = input.reduceByKey((x, y) => x + y)
      reducerdd.collect().foreach(println)
      //    val tuple = input.reduce((x, y) => Tuple2.apply(x._1, x._2 + y._2))
      //    println(tuple) // (male,13) — keeps an arbitrary key, so the result is meaningless
    } finally {
      spark.stop()
    }
  }

  // groupByKey() and groupBy() both return CompactBuffer values (an append-only
  // buffer similar to ArrayBuffer, but more memory-efficient for small buffers).
  // groupByKey yields (key, CompactBuffer(value1, value2, ...)), while groupBy
  // yields (key, CompactBuffer((key, value1), (key, value2), ...)).
  // groupByKey always groups by the key, so it requires a pair RDD; groupBy takes
  // a user-supplied function whose return value becomes the grouping key.

  /**
   * Builds a pair RDD, groups the values of each key into a sequence, sums the
   * grouped values, and contrasts groupByKey with groupBy.
   */
  def testgroupby(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(Array("one", "two", "two", "three", "three", "three"))
      val maprdd: RDD[(String, Int)] = input.map((_, 1))
      // Expected: (two,CompactBuffer(1, 1)) (one,CompactBuffer(1)) (three,CompactBuffer(1, 1, 1))
      val grouprdd: RDD[(String, Iterable[Int])] = maprdd.groupByKey()
      grouprdd.collect().foreach(println)
      println()
      // Word counts from the grouped values: (two,2) (one,1) (three,3)
      grouprdd.map(t => (t._1, t._2.sum)).collect().foreach(println)
      println()
      // groupBy re-keys the data by the supplied function and then groups, so the
      // whole (key, value) tuples end up inside the buffers:
      //   (two,CompactBuffer((two,1), (two,1)))
      //   (one,CompactBuffer((one,1)))
      //   (three,CompactBuffer((three,1), (three,1), (three,1)))
      maprdd.groupBy(_._1).collect().foreach(println)
      println()
    } finally {
      spark.stop()
    }
  }

  /** partitionBy: re-partitions a 4-partition pair RDD down to 2 with a HashPartitioner. */
  def testpartitionBy(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val rdd1 = spark.parallelize(Array((1, "aaa"), (2, "bbb"), (3, "ccc"), (4, "ddd")), 4)
      val rdd2 = rdd1.partitionBy(new org.apache.spark.HashPartitioner(2))
      println(rdd2.partitions.size) // 2 — previously computed and silently discarded
    } finally {
      spark.stop()
    }
  }

  /**
   * zip: pairs two RDDs element-by-element into a key/value RDD. Both RDDs must
   * have the same number of partitions AND the same number of elements per
   * partition, otherwise Spark throws at runtime (demonstrated by the last call).
   */
  def testzip() = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val rdd1 = spark.parallelize(Array(1, 2, 3), 3)
      val rdd2 = spark.parallelize(Array("a", "b", "c"), 3)
      rdd1.zip(rdd2).collect // Array((1,a), (2,b), (3,c))
      rdd2.zip(rdd1).collect // Array((a,1), (b,2), (c,3))
      // Third RDD with a different partition count than rdd1/rdd2
      val rdd3 = spark.parallelize(Array("a", "b", "c"), 2)
      rdd1.zip(rdd3).collect // throws: Can't zip RDDs with unequal numbers of partitions: List(3, 2)
    } finally {
      spark.stop() // ensure the context is released even after the expected failure
    }
  }

  /** Set-like transformations between two RDDs: union, subtract, intersection, cartesian. */
  def testValue() = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val rdd1 = spark.parallelize(1 to 7)
      val rdd2 = spark.parallelize(5 to 10)
      rdd1.union(rdd2).collect() // everything from both RDDs (duplicates kept)
      rdd1.subtract(rdd2).collect() // elements of rdd1 that are not in rdd2
      rdd1.intersection(rdd2).collect() // elements present in both
      rdd1.cartesian(rdd2) // cartesian product — expensive, avoid; lazy here (no action called)
    } finally {
      spark.stop()
    }
  }


  /** pipe: streams each partition's elements through an external script. */
  def testpipe(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(2, 1, 33, 7, 3), 2) // RDD with two partitions
      // pipe.sh runs once per partition; observed output:
      // Array(AA, >>>2, >>>1, AA, >>>33, >>>7, >>>3)
      input.pipe("pipe.sh").collect().foreach(println)
    } finally {
      spark.stop()
    }
  }

  /** sortBy: orders the RDD by the element itself and then by a derived key (mod 3). */
  def testsorted(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(2, 1, 33, 7, 3))
      input.sortBy(x => x).collect().foreach(println) // ascending by natural order
      input.sortBy(x => x % 3).collect().foreach(println) // ascending by remainder mod 3
    } finally {
      spark.stop()
    }
  }

  /** repartition: shrinks 4 partitions to 2 (equivalent to coalesce(2, shuffle = true)). */
  def testrepartition(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(1 to 16, 4)
      val rerdd = input.repartition(2) // same as input.coalesce(2, shuffle = true)
      println(rerdd.partitions.size) // 2 — previously the result was never used
    } finally {
      spark.stop()
    }
  }

  /** coalesce: reduces the partition count (no shuffle by default). */
  def testcoalesce(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[*]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(1 to 10, 4)
      val coalesceRdd = input.coalesce(3) // re-partition down to 3 partitions
      println(coalesceRdd.partitions.size) // 3
    } finally {
      spark.stop()
    }
  }

  /** sample: draws a random sample of 1..10 and prints it next to the full data. */
  def testsample(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[1]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(1 to 10)
      // with replacement, expected fraction 0.5, fixed seed 2 => reproducible sample
      val sampleRdd = input.sample(withReplacement = true, 0.5, 2)
      sampleRdd.collect().foreach(print)
      println()
      input.collect().foreach(print) // original data for comparison
    } finally {
      spark.stop()
    }
  }

  /** filter: keeps only the strings containing the letter "a" (prints aa, ab). */
  def testfilter(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[1]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(Array("aa", "ab", "bb", "cc"))
      val filterrdd = input.filter(_.contains("a"))
      filterrdd.collect().foreach(println)
    } finally {
      spark.stop()
    }
  }

  /** groupBy: partitions 1..5 by parity, printing one (remainder, values) pair per group. */
  def testgroup(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[1]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(1 to 5) // 5 elements (original comment wrongly said 6)
      val grouprdd = input.groupBy(_ % 2) // the remainder becomes the grouping key
      grouprdd.collect().foreach(println)
    } finally {
      spark.stop()
    }
  }

  /** flatMap: expands each element n of 1..5 into the whole range 1..n. */
  def testFlatMap(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[1]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(1 to 5) // 5 elements (original comment wrongly said 6)
      // 1 -> 1; 2 -> 1,2; 3 -> 1,2,3; ... 5 -> 1,2,3,4,5
      val flatmap = input.flatMap(1 to _)
      flatmap.collect().foreach(x => print(x + ",")) // 1,1,2,1,2,3,1,2,3,4,1,2,3,4,5,
    } finally {
      spark.stop()
    }
  }

  /** mapPartitionsWithIndex: tags every element with the index of its partition. */
  def testMapPartitionsWithIndex(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[1]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(1, 2, 3, 4, 5, 6))
      val result = input.mapPartitionsWithIndex((index, items) => items.map((index, _)))
      println("\n")
      result.foreach(println) // runs on the executors; prints (partitionIndex, element)
    } finally {
      spark.stop()
    }
  }

  /** NOTE(review): despite the name, this demonstrates reduce (prints 21 = sum of 1..6), not map. */
  def testMap(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[2]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(1, 2, 3, 4, 5, 6), 2) // 6 elements in 2 partitions
      val result = input.reduce((a, b) => a + b)
      println(result) // 21
    } finally {
      spark.stop()
    }
  }

  /** mapPartitions: collapses each partition to the sum of its elements (one value per partition). */
  def testMapPartition(): Unit = {
    val conf = new SparkConf().setAppName("Spark Rdd Test").setMaster("local[2]")
    val spark = new SparkContext(conf)
    try {
      val input = spark.parallelize(List(1, 2, 3, 4, 5, 6), 2) // 6 elements in 2 partitions
      //    print(input.partitions.size)
      //    input.collect().foreach(i => print(i + ","))
      val result = input.mapPartitions { partition =>
        Iterator(sumOfEveryPartition(partition)) // one sum per partition
      }
      result.collect().foreach(println) // previously never materialized, so the demo did nothing
    } finally {
      spark.stop()
    }
  }

  /**
   * Sums all elements of the iterator (the iterator is consumed).
   *
   * @param input iterator of Int values, typically one Spark partition
   * @return the sum of all elements; 0 for an empty iterator
   */
  def sumOfEveryPartition(input: Iterator[Int]): Int = input.sum
}
