package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
  * Demo: how Spark splits a job into stages.
  *
  * A shuffle-producing transformation (here `reduceByKey`) creates a wide
  * dependency between `kvRDD` and `countRDD`, so the DAG scheduler cuts the
  * job into two stages at that boundary. Run this and inspect the DAG in the
  * Spark web UI (http://localhost:4040 by default).
  */
object Demo19Stage {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("Demo19Stage").setMaster("local")
    val sc = new SparkContext(conf)

    val linesRDD: RDD[String] = sc.textFile("spark/data/stage")

    // getNumPartitions returns how many partitions this RDD has; the number
    // of partitions determines the number of tasks per stage.
    println("linesRDD partition count: " + linesRDD.getNumPartitions)

    // Narrow transformations: no shuffle, so these stay in the same stage.
    val wordsRDD: RDD[String] = linesRDD.flatMap(line => line.split(","))

    val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

    /**
      * reduceByKey triggers a shuffle, so kvRDD -> countRDD is a wide
      * dependency; the job is split into two stages at this point.
      */
    val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

    countRDD.foreach(println)

    // Keep the JVM alive so the Spark web UI stays reachable for inspection.
    // Sleeping is equivalent to the classic `while (true) {}` idle loop but
    // does not busy-spin a CPU core. Terminate the process manually when done.
    Thread.sleep(Long.MaxValue)
  }

}
