package com.bdqn.spark.chapter05.value

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Spark09_RDD_Operator_Transform {
  def main(args: Array[String]): Unit = {
    // Local Spark context (all cores) for demonstrating the distinct transformation.
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("operator-distinct")
    val context = new SparkContext(conf)

    // Source data deliberately contains duplicates so distinct() has work to do.
    val numbers: RDD[Int] = context.makeRDD(List(1, 2, 3, 4, 1, 2, 3, 4))

    // Scala's collection `distinct` deduplicates via a HashSet's keys.
    // Spark implements distinct() differently, roughly as:
    //   map(x => (x, null)).reduceByKey((x, _) => x, numPartitions).map(_._1)
    // i.e. each element becomes a (value, null) pair, reduceByKey collapses
    // pairs sharing the same key to one, and the final map drops the null:
    //   (1,null)(1,null) -> (1,null) -> 1
    //   (2,null)(2,null) -> (2,null) -> 2
    //   ...

    // Transformation under study: distinct() removes duplicates across partitions.
    val distinctRDD: RDD[Int] = numbers.distinct()
    distinctRDD.collect.foreach(println)

    context.stop()
  }
}
