package cn.jly.bigdata.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author lanyangji
 * @date 2019/11/24 10:47
 */
/**
 * Demo of `aggregateByKey`: per-partition max per key (intra-partition seqOp),
 * then sum of those maxima across partitions (inter-partition combOp).
 */
object Spark02_expr16 {

  def main(args: Array[String]): Unit = {

    // Local mode: use all available CPU cores ("local[*]") and set an app name.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("wordCount")

    // Create the Spark context (driver entry point), SparkContext -> sc.
    val sc = new SparkContext(sparkConf)

    try {
      // Source data: (key, value) pairs distributed over 2 partitions.
      val listTupleRDD: RDD[(String, Int)] = sc.makeRDD(List(("a", 3), ("a", 2), ("c", 4), ("b", 3), ("c", 6), ("c", 8)), 2)

      //listTupleRDD.saveAsTextFile("out1")
      // glom turns each partition into an Array so we can inspect the partition layout.
      val collect: Array[Array[(String, Int)]] = listTupleRDD.glom.collect
      for (elem <- collect) {
        println(elem.mkString(","))
      }

      // Requirement: take the max value of each key WITHIN a partition (seqOp),
      // then ADD the per-partition maxima of the same key ACROSS partitions (combOp).
      // aggregateByKey(zeroValue)(seqOp, combOp) expresses exactly this split.
      //listTupleRDD.aggregateByKey(0)(math.max(_, _), _ + _)
      val aggregateByKeyRDD: RDD[(String, Int)] = listTupleRDD.aggregateByKey(0)(math.max, _ + _)

      //aggregateByKeyRDD.saveAsTextFile("out2")
      val res: Array[Array[(String, Int)]] = aggregateByKeyRDD.glom().collect
      res.foreach(
        elem => println(elem.mkString(","))
      )
    } finally {
      // Fix: the original never stopped the context; always release it so the
      // local Spark runtime (UI, scheduler threads) shuts down cleanly.
      sc.stop()
    }
  }
}
