package com.doit.spark.day05

import com.doit.spark.day01.utils.SparkUtil
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD



/**
 * Demonstrates Spark stage boundaries created by shuffle operations.
 *
 * @author MDK
 * @version 2021.2.2
 * @note created 2022/1/8 10:29
 */
object C02_Stage {

  /**
   * Word-count demo used to inspect how Spark splits a job into stages.
   * Prints the RDD lineage, runs two actions, then parks the driver so
   * the Spark web UI stays reachable.
   */
  def main(args: Array[String]): Unit = {
    // Obtain the shared SparkContext from the project utility.
    val sc = SparkUtil.getSc

    // Load the input file and break each line into individual words.
    val lines = sc.textFile("data/word.txt")
    val words: RDD[String] = lines.flatMap(_.split("\\s+"))

    // Pair each word with an initial count of 1.
    val pairs: RDD[(String, Int)] = words.map((_, 1))

    // Alternative (single shuffle): pairs.reduceByKey(_ + _, 2)

    // Explicitly hash-partition by key into 2 partitions, then aggregate.
    // NOTE(review): reduceByKey(_ + _, 2) builds a HashPartitioner(2) that
    // equals the one used by partitionBy, so presumably no additional
    // shuffle stage is introduced here — confirm via toDebugString output.
    val partitioned = pairs.partitionBy(new HashPartitioner(2))
    val counts = partitioned.reduceByKey(_ + _, 2)

    // Show the lineage / dependency chain of the final RDD.
    println(counts.toDebugString)

    // Two actions, two jobs: foreach prints on the executors; collect
    // pulls results to the driver (the returned array is intentionally
    // discarded — the action only exists to trigger a job).
    counts.foreach(println)
    counts.collect()

    // Keep the driver alive so the Spark web UI remains accessible.
    Thread.sleep(Int.MaxValue)
  }
}
