package com.shujia.core.transformations

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Row-wise (vertical) union of two RDDs: requirements and characteristics.
 *
 * Requirement: the two RDDs being unioned must hold elements of exactly the same type.
 * Characteristic: by default, the resulting RDD's partition count is the sum of the
 * partition counts of the RDDs being unioned.
 */
object UnionOpt {
  /**
   * Demonstrates `RDD.union`: builds two RDDs of `(Int, String)` pairs, unions them,
   * and prints the partition counts before and after.
   *
   * Fix: the original never called `sc.stop()`, leaking the SparkContext; the job is
   * now wrapped in try/finally so the context is released even if the job fails.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("union合并")

    val sc = new SparkContext(conf)

    try {
      // Wrap a local Scala collection into an RDD.
      val rdd1: RDD[(Int, String)] = sc.parallelize(List(
        (1001, "张三"),
        (1002, "张三2"),
        (1003, "张三3"),
        (1004, "张三4"),
        (1005, "张三5")
      ))
      println(s"rdd1的分区数：${rdd1.getNumPartitions}")

      val rdd2: RDD[(Int, String)] = sc.parallelize(List(
        (1006, "李四"),
        (1002, "方直"),
        (1007, "李四7"),
        (1008, "李四8"),
        (1009, "李四9")
      ))
      println(s"rdd2的分区数：${rdd2.getNumPartitions}")

      val rdd3: RDD[(String, Int)] = sc.parallelize(List(
        ("年龄1", 12),
        ("年龄2", 13),
        ("年龄3", 14),
        ("年龄4", 15),
        ("年龄5", 16)
      ))

      // Would not compile: rdd3's element type (String, Int) differs from rdd1's (Int, String).
      //    rdd1.union(rdd3)

      // union does not deduplicate — (1002, ...) appears once from each side.
      val resRDD: RDD[(Int, String)] = rdd1.union(rdd2)
      println(s"resRDD的分区数：${resRDD.getNumPartitions}")

      resRDD.foreach(println)
    } finally {
      // Release the SparkContext so the driver shuts down cleanly.
      sc.stop()
    }
  }
}
