package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates RDD `union`:
 *  1. `union` concatenates two (or more) RDDs into one.
 *  2. All RDDs involved must have the SAME element type — e.g. an
 *     RDD[(String, Int)] cannot be unioned with an RDD[(String, String)];
 *     the mismatched RDD has to be mapped to the common type first.
 *  3. `union` does not shuffle: the result simply holds the partitions of
 *     both inputs, so its partition count is the sum of the two inputs'.
 */
object Code07Union {
  def main(args: Array[String]): Unit = {
    // App name fixed: the original said "Mysql2Text", a copy-paste leftover
    // from an unrelated exercise, which mislabels this job in the Spark UI.
    val sc = new SparkContext(
      new SparkConf().setMaster("local").setAppName("Code07Union"))

    try {
      val tuples1: List[(String, Int)] = List(
        ("key1", 1),
        ("key2", 2),
        ("key3", 3)
      )
      val tuples2: List[(String, String)] = List(
        ("key1", "1"),
        ("key2", "1"),
        ("key3", "1")
      )

      // Would NOT compile — element types differ ((String, Int) vs (String, String)):
      //   sc.parallelize(tuples1).union(sc.parallelize(tuples2))

      // Convert RDD[(String, String)] -> RDD[(String, Int)] so the types line up.
      val list2RDD: RDD[(String, String)] = sc.parallelize(tuples2)
      val newList2RDD: RDD[(String, Int)] = list2RDD.map {
        case (key, value) => (key, value.toInt)
      }
      println("newList2RDD:" + newList2RDD.getNumPartitions)

      val list1RDD: RDD[(String, Int)] = sc.parallelize(tuples1)
      println("list1RDD:" + list1RDD.getNumPartitions)

      // Partition count of the union is the sum of both inputs' counts.
      val unionRDD: RDD[(String, Int)] = list1RDD
        .union(newList2RDD)
      println("unionRDD:" + unionRDD.getNumPartitions)

      unionRDD.foreach(println)
    } finally {
      // Was missing in the original: always release the SparkContext,
      // even if the job above throws.
      sc.stop()
    }
  }
}
