package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demo of the RDD `union` transformation.
 *
 * Reads a student file, draws two small samples with the same seed,
 * and shows that:
 *   - `union` does NOT deduplicate (it behaves like SQL's UNION ALL), and
 *   - appending `distinct()` gives SQL UNION semantics.
 * The partition count of the union equals the sum of both inputs' partition counts.
 */
object Demo14Union {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo14Union")

    val sc: SparkContext = new SparkContext(conf)

    // Stop the context even if the job throws, so local resources
    // (UI port, executor threads) are always released.
    try {
      val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

      // Same seed on both samples, so the two RDDs contain identical rows —
      // this makes the dedup effect of distinct() visible below.
      val rdd01: RDD[String] = stuRDD.sample(withReplacement = false, fraction = 0.02, seed = 111)
      val rdd02: RDD[String] = stuRDD.sample(withReplacement = false, fraction = 0.02, seed = 111)

      // Two RDDs with the same element type can be unioned.
      // The resulting RDD's partition count is the sum of both inputs'.
      rdd01.union(rdd02).foreach(println) // no dedup — equivalent to SQL UNION ALL
      rdd01.union(rdd02).distinct().foreach(println) // add distinct() for SQL UNION (dedup) semantics
    } finally {
      sc.stop()
    }
  }

}
