package com.atbeijing.bigdata.spark.core.rdd.operator.action

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

object Spark03_Oper_Action {

    /**
     * Demonstrates common RDD *action* operators: `collect`, `count`, `first`,
     * `take`, `takeOrdered` — plus, as a side note, the difference between
     * `mutable.Map.update` (in-place) and `mutable.Map.updated` (copy-on-write).
     *
     * Actions trigger actual job execution and return results to the driver,
     * unlike transformations, which are lazy.
     */
    def main(args: Array[String]): Unit = {

        val conf = new SparkConf().setMaster("local[*]").setAppName("ActionOperator")
        val sc = new SparkContext(conf)

        // TODO operators - actions

        // TODO collect
        // Source data List(4,2,3,1) split into 2 partitions:
        // partition 0 = [4, 2], partition 1 = [3, 1]
        val rdd : RDD[Int] = sc.makeRDD(List(4,2,3,1),2)

        // After mapping *2: partition 0 = [8, 4], partition 1 = [6, 2]
        val rdd1 = rdd.map(_*2)

        // collect gathers data in partition-number order
        // and brings the distributed data back to the driver as a local Array.
        val ints: Array[Int] = rdd1.collect()
        ints.foreach(println)//8 4 6 2
        println("================")
        // count: total number of elements across all partitions
        val cnt: Long = rdd1.count()
        println(cnt)//4
        println("================")
        // first: the first element (from the first non-empty partition)
        val i: Int = rdd1.first()
        println(i)//8
        println("================")
        // take(n): first n elements in partition order, without full collect
        val ints1: Array[Int] = rdd1.take(3)
        println(ints1.mkString(","))//8,4,6
        println("================")
        // takeOrdered(n): smallest n elements according to the implicit Ordering
        val ints2: Array[Int] = rdd1.takeOrdered(3)
        println(ints2.mkString(","))//2,4,6


        val map = mutable.Map(
            (1,1),(2,1)
        )
        // update mutates the map in place: sets the value for key 1;
        // if the key did not exist, the (key, value) pair would be added.
        // (Called as a statement — it returns Unit, so binding it is pointless.)
        map.update(1, 10)
        println(map)//Map(2 -> 1, 1 -> 10)
        // updated leaves the original map untouched and returns a NEW map
        // with the value for key 1 replaced (or the pair added if absent).
        val map1: mutable.Map[Int, Int] = map.updated(1, 20)
        println(map)//Map(2 -> 1, 1 -> 10)
        println(map1)//Map(2 -> 1, 1 -> 20)

        // Key 5 is absent, so the new map gains the pair 5 -> 20.
        val map2: mutable.Map[Int, Int] = map.updated(5, 20)
        println(map2)
        sc.stop()

    }
}
