package com.doit.spark.day03

import com.doit.spark.day01.utils.SparkUtil
import com.doit.spark.day02.User
import org.apache.spark.rdd.RDD

/**
 * @DATE 2022/1/5/10:36
 * @Author MDK
 * @Version 2021.2.2
 * */
/**
 * Demonstrates RDD.distinct() on both primitive elements and case-class
 * elements. distinct() works on User because case classes provide
 * equals/hashCode, which HashPartitioner and the dedup shuffle rely on.
 */
object C03_Distinct {
  def main(args: Array[String]): Unit = {
    val sc = SparkUtil.getSc

    // Dedup over primitives: 10 ints with duplicates across 3 partitions.
    val rdd1 = sc.parallelize(List[Int](1, 2, 3, 4, 5, 6, 3, 4, 5, 7), 3)
    val rdd2 = rdd1.distinct()
    // After distinct() the partition count is unchanged; the deduplicated
    // data is redistributed by the default HashPartitioner.
//    rdd2.glom().map(_.toList).foreach(println)

    // Dedup over a case class: User(1,"zss") appears twice and collapses
    // to a single element because case classes define structural equality.
    val rdd3: RDD[User] = sc.parallelize(List[User](User(1, "zss"), User(1, "zss"), User(2, "lss")))
    val res = rdd3.distinct()
    res.foreach(println)

    // Release the SparkContext and its resources (the original leaked it).
    sc.stop()
  }
}
