package Spark原理.逻辑图_RDD计算链

import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

class WordCounts {

  /**
   * Word-count example used to study the logical plan (RDD lineage chain).
   *
   * Pipeline: create a SparkContext, read a text file, split lines into
   * words, pair each word with 1, sum counts per word, format the pairs as
   * strings, collect and print them. Uncomment the `toDebugString` line to
   * print the RDD dependency chain instead.
   */
  @Test
  def test(): Unit = {

    // 1. Create the SparkContext (local mode, 6 threads).
    val conf = new SparkConf().setMaster("local[6]").setAppName("wordCounts")
    val sc   = new SparkContext(conf)

    // Ensure the context is stopped even if a stage fails, so the local
    // scheduler threads and UI port are released.
    try {
      // 2. Read the input data.
      val data = sc.textFile("dataset/wordcount.txt")

      // 3. Process the data.
      val splitRDD  = data.flatMap(_.split(" "))   // 3.1 split lines into words
      val mapRDD    = splitRDD.map((_, 1))         // 3.2 attach an initial count of 1
      val reduceRDD = mapRDD.reduceByKey(_ + _)    // 3.3 sum counts per word (shuffle)
      val strRDD    = reduceRDD.map(item => s"${item._1},${item._2}") // 3.4 format as "word,count"

      // 4. Fetch the results to the driver and print them.
      strRDD.collect().foreach(println)
      //println(strRDD.toDebugString)

      /**
       * Sample lineage produced by `toDebugString`:
       * (2) MapPartitionsRDD[5] at map at WordCounts.scala:26 []
       * |  ShuffledRDD[4] at reduceByKey at WordCounts.scala:24 []
       * +-(2) MapPartitionsRDD[3] at map at WordCounts.scala:22 []
       * |  MapPartitionsRDD[2] at flatMap at WordCounts.scala:20 []
       * |  dataset/wordcount.txt MapPartitionsRDD[1] at textFile at WordCounts.scala:16 []
       * |  dataset/wordcount.txt HadoopRDD[0] at textFile at WordCounts.scala:16 []
       */
    } finally {
      // 5. Stop the SparkContext, releasing resources.
      sc.stop()
    }
  }

}
