package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo07Union {
  def main(args: Array[String]): Unit = {
    /**
     * union: a transformation, similar to SQL's UNION ALL.
     * It concatenates two RDDs of the same element type.
     * Spark's union does NOT deduplicate the data;
     * use the distinct transformation when deduplication is required.
     *
     * distinct: a transformation that removes duplicate elements from an RDD.
     */

    val conf: SparkConf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo07Union")

    val sc: SparkContext = new SparkContext(conf)
    // Ensure the SparkContext is always released, even if an action fails.
    try {
      val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

      // Two samples drawn with the same seed produce identical subsets,
      // which makes the effect of distinct() after union() easy to observe.
      val stuRDD01: RDD[String] = stuRDD.sample(withReplacement = false, fraction = 0.01, seed = 1)
      println(stuRDD01.getNumPartitions)
      val stuRDD02: RDD[String] = stuRDD.sample(withReplacement = false, fraction = 0.01, seed = 1)
      println(stuRDD02.getNumPartitions)

      // The partition count of the union equals the sum of both inputs' partition counts.
      val unionRDD: RDD[String] = stuRDD01.union(stuRDD02)
      println(unionRDD.getNumPartitions)

      unionRDD.foreach(println)

      // Deduplicate the combined data (union alone keeps duplicates).
      val distinctUnionRDD: RDD[String] = unionRDD.distinct()
      distinctUnionRDD.foreach(println)
    } finally {
      // Fix: the original never stopped the context, leaking the driver's resources.
      sc.stop()
    }
  }

}
