package com.atguigu0.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description: xxx
 * @time: 2020/6/11 14:41
 * @author: baojinlong
 **/
/**
 * Demonstrates common Spark RDD actions: reduce, collect, count, take,
 * aggregate, fold, countByKey, foreach and foreachPartition.
 */
object RddActionDemos {
  def main(args: Array[String]): Unit = {
    // Run locally with all available cores.
    val sparkConf: SparkConf = new SparkConf().setAppName("myWordCount").setMaster("local[*]")
    // Create the SparkContext.
    val sc: SparkContext = new SparkContext(sparkConf)

    println("01:reduce案例")
    // Create an RDD of ints.
    val value: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4, 5))
    println(value.reduce(_ + _))
    val value2: RDD[String] = sc.parallelize(Array("a", "b", "c", "d", "e"))
    // reduce aggregates within each partition first, then across partitions;
    // whichever partition finishes first is combined first, so for Strings
    // the concatenation order can vary between runs.
    println(value2.reduce(_ + _))
    // FIX: mapPartitionsWithIndex is a lazy transformation; the original code
    // discarded its result, so it never executed. Collect and print the
    // (partitionIndex, element) pairs so the demo actually shows them.
    println(value2.mapPartitionsWithIndex((index, items) => items.map((index, _))).collect.mkString(", "))

    println("02:collect案例,测试环境中使用,生产过程中慎用,会将executor中数据全部放入到driver端")
    // Create an RDD of ints.
    val value3: RDD[Int] = sc.parallelize(Array(1, 2, 3, 4, 5))
    // FIX: println on a raw Array prints its identity hash (e.g. "[I@1b2c3d"),
    // not its contents; mkString renders the elements.
    println(value3.collect.mkString(", "))

    println("03:count案例")
    println(value3.count)

    println("04:take案例只能按照升序排序")
    // FIX: same Array-toString issue as collect above — render elements via mkString.
    println(value3.take(2).mkString(", "))

    println("05:aggregate案例")
    // zeroValue is applied inside every partition AND once more when merging
    // partitions, so the initial value is counted (numPartitions + 1) times.
    println(value3.aggregate(1)(_ + _, _ + _))

    println("06:fold案例")
    // fold is aggregate with the same function for intra- and inter-partition
    // merging; the zero value is likewise applied per partition and once globally.
    println(value3.fold(1)(_ + _))

    println("07:countByKey案例")
    val value1: RDD[(Int, Int)] = sc.parallelize(List((1, 3), (1, 2), (1, 4), (2, 3), (3, 6), (3, 8)), 3)
    println(value1.countByKey)

    println("08:foreach案例")
    // In cluster mode foreach runs on the executors, so output may not appear
    // on the driver console; the driver only splits and dispatches tasks.
    value1.foreach(println)
    println("09:foreachPartition案例")
    value1.foreachPartition(x => x.foreach(println))
    println("ok-end")
    // FIX: release the SparkContext; the original never stopped it.
    sc.stop()
  }
}
