package com.bdqn.spark.chapter02

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/** Placeholder companion class for the [[WordCount]] object; it declares no members. */
class WordCount
object WordCount {

  /**
   * Entry point: counts word occurrences in `data/word.txt` and prints each
   * `(word, count)` pair to the console.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Build the Spark runtime context.
    // Master options include: local, yarn, spark://hadoop182:7072, mesos.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("word-count")
    val sc: SparkContext = new SparkContext(conf)

    // Read the input file as lines, e.g.:
    //   hello spark
    //   hello scala
    val lineRDD: RDD[String] = sc.textFile("data/word.txt")

    // Split each line on spaces into individual words:
    //   "hello" "spark" "hello" "scala"
    val wordRDD: RDD[String] = lineRDD.flatMap(_.split(" "))

    // Pair each word with an initial count of 1:
    //   ("hello",1) ("spark",1) ("hello",1) ("scala",1)
    val wordToOneRDD: RDD[(String, Int)] = wordRDD.map((_, 1))

    // Sum counts per word. reduceByKey performs map-side combining before the
    // shuffle, unlike the previous groupBy + mutable-var summation, which
    // shuffled every (word, 1) pair, rebuilt each key from the grouped values,
    // and then discarded the outer key — same result, far more data movement.
    val resultRDD: RDD[(String, Int)] = wordToOneRDD.reduceByKey(_ + _)

    // Collect the final counts to the driver and print them.
    resultRDD.collect().foreach(println)

    // Release Spark resources.
    sc.stop()
  }
}
