package chapter03

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates the common RDD *action* operators: reduce, collect, count,
 * first, take, takeOrdered, aggregate, fold and countByKey.
 *
 * Actions trigger actual job execution on the cluster (unlike lazy
 * transformations) and return results to the driver.
 */
object Test31_Action {
  def main(args: Array[String]): Unit = {
    // Keep the console readable: silence Spark's INFO logging.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val conf = new SparkConf().setMaster("local[*]").setAppName("action")
    val sc = new SparkContext(conf)
    try {
      // reduce: aggregate all elements with a binary function (here: sum)
      val value = sc.makeRDD(List(1, 3, 2, 4, 6, 5))
      val i: Int = value.reduce((x, y) => x + y)
      println(i)
      // collect: return every element of the dataset to the driver as an Array
      val arr: Array[Int] = value.collect()
      println(arr.mkString(","))
      // count: number of elements in the RDD
      val l = value.count()
      println(l)
      // first: return the first element
      val i1: Int = value.first()
      println(i1)
      // take: return an array of the first n elements
      val arr1 = value.take(3)
      println(arr1.mkString(","))
      // takeOrdered: first n elements under the given Ordering (here: descending)
      val arr2 = value.takeOrdered(3)(Ordering.Int.reverse)
      println(arr2.mkString(","))
      // aggregate: zero value, intra-partition combiner, inter-partition combiner.
      // With 2 partitions: max of each partition, then the maxima are summed.
      val value1 = sc.makeRDD(List(1, 2, 3, 4, 5, 6), 2)
      val i2 = value1.aggregate(0)((x, y) => math.max(x, y), (x, y) => x + y)
      println(i2)
      // fold: simplified aggregate — same function within and across partitions
      val i3 = value1.fold(0)(_ + _)
      println(i3)
      // countByKey: count occurrences per key, returned as a local scala Map[K, Long]
      val value2 = sc.makeRDD(List((1, 'a'), (2, 'b'),
        (3, 'c'), (3, 'c'), (2, 'b'), (2, 'b'), (2, 'b'), (3, 'c')))
      val intToLong = value2.countByKey()
      println(intToLong)
    } finally {
      // Release the SparkContext even if an action fails (was missing before).
      sc.stop()
    }
  }
}
