package rdd

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDD_Action {

  /**
   * Demonstrates the most common Spark RDD actions — count, first, collect,
   * countByValue, countByKey, take, takeOrdered, top, takeSample, reduce and
   * aggregate — against small in-memory datasets, printing each result.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("RDD_Action")

    val sc = new SparkContext(conf)
    try {
      val rdd: RDD[Int] = sc.parallelize(Array(4, 5, 1, 2, 8, 9, 3, 6, 7, 10))
      println(rdd.count())
      println(rdd.first())
      println(rdd.collect().mkString(","))

      // countByValue: count occurrences of each distinct element.
      println(sc
        .parallelize(Array("hello", "world", "hello", "spark"))
        .countByValue())

      // countByKey: same counts, expressed via (key, _) pairs.
      println(sc
        .parallelize(Array("hello", "world", "hello", "spark"))
        .map(word => (word, null))
        .countByKey())

      println(rdd.take(3).mkString(","))

      println(rdd.takeOrdered(3).mkString(","))

      println(rdd.top(3).mkString(","))

      // Named arguments make the sampling mode explicit at the call site.
      println(rdd.takeSample(withReplacement = false, num = 6).mkString(","))

      // Key action function: reduce.
      // Sum every element of List(1,2,3,3) using reduce.
      val sumResult = sc
        .parallelize(List(1, 2, 3, 3))
        .reduce(sumTotal)
      println("RDD中所有数据通过reduce计算后结果是："+sumResult)

      // Sum every element of List(1,2,3,3) using aggregate; with a zero
      // value of 0, the same binary op serves as both seqOp and combOp.
      val sumResultByAggregate = sc
        .parallelize(List(1, 2, 3, 3))
        .aggregate(0)(sumTotal, sumTotal)
      println("RDD中所有数据通过aggregate计算后结果是："+sumResultByAggregate)

      // Average via aggregate: each partition folds into a (sum, count)
      // pair, then the partial pairs are merged across partitions.
      val accResult: (Int, Int) = sc
        .parallelize(List(1, 2, 3, 3), 2)
        .aggregate((0, 0))(seqOp, combOp)
      // Guard against an empty RDD so we never divide by zero.
      val avg: Double =
        if (accResult._2 == 0) 0.0 else accResult._1 / accResult._2.toDouble
      println("RDD中数据集的平均值是："+avg)
    } finally {
      // Always release the SparkContext, even if an action above throws.
      sc.stop()
    }
  }

  /** Binary integer sum; used as the op for both reduce and aggregate. */
  def sumTotal(x: Int, y: Int): Int = x + y

  /** Per-partition step: folds one element into a (sum, count) accumulator. */
  def seqOp(acc: (Int, Int), num: Int): (Int, Int) =
    (acc._1 + num, acc._2 + 1)

  /** Merge step: combines two (sum, count) accumulators across partitions. */
  def combOp(acc1: (Int, Int), acc2: (Int, Int)): (Int, Int) =
    (acc1._1 + acc2._1, acc1._2 + acc2._2)
}
