package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates how RDD partition counts propagate through a Spark pipeline:
 *   - narrow transformations (flatMap/map) inherit the parent's partition count;
 *   - shuffle transformations (groupBy) take their partition count from
 *     spark.default.parallelism (set to 3 below);
 *   - each action (foreach) triggers a separate Spark job.
 *
 * Uses an explicit `main` instead of `extends App`: the App trait relies on
 * DelayedInit and has initialization-order pitfalls for non-trivial entry points.
 */
object Code26Partition2 {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setAppName("WordCount")
    conf.setMaster("local")
    // Set Spark's default shuffle parallelism to 3; this governs the
    // partition count of shuffle outputs such as groupBy below.
    conf.set("spark.default.parallelism", "3")

    val sc = new SparkContext(conf)

    val wordFileData: RDD[String] = sc.textFile("spark_code/data/word")
    println("wordFileDataRDDPartition:" + wordFileData.getNumPartitions)

    // Narrow transformation: partition count is inherited from the parent RDD.
    val flatMapWord: RDD[String] = wordFileData.flatMap(_.split(","))
    println("flatMapWordRDDPartition:" + flatMapWord.getNumPartitions)

    // e.g. [java,spark,java,hadoop] => the map logic runs once per word.
    val firstMap: RDD[String] = flatMapWord.map { word =>
      println("first map running...")
      word
    }

    // Shuffle transformation: the resulting partition count comes from
    // spark.default.parallelism (3), not from the parent RDD.
    val groupByWord: RDD[(String, Iterable[String])] = firstMap.groupBy(x => x)
    println("groupByWordRDDPartition:" + groupByWord.getNumPartitions)

    // Count occurrences per word from the grouped values.
    val mapRes: RDD[(String, Int)] = groupByWord.map {
      case (word, wordLine) => (word, wordLine.size)
    }
    println("mapResRDDPartition:" + mapRes.getNumPartitions)

    // Jobs are triggered by actions: each of the two foreach calls below
    // launches its own Spark job.
    mapRes.foreach(println)
    mapRes.foreach(println)

    // Keep the driver alive so the Spark UI (http://localhost:4040) can be
    // inspected; sleep instead of busy-spinning so we don't peg a CPU core.
    while (true) {
      Thread.sleep(1000L)
    }
  }

}
