package cn.whuc.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo showing what happens when one RDD (`wordToOne`) feeds two separate
 * actions WITHOUT persistence: the lineage is re-executed for each job, so
 * the `println` marker inside the `map` fires once per action. Compare with
 * a follow-up demo that adds `cache()`/`persist()` to avoid recomputation.
 */
object Demo_Persist1 {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkContext (local mode, all cores).
    val sc: SparkContext = new SparkContext(
      new SparkConf()
        .setMaster("local[*]")
        // Name the app so it is identifiable in the Spark UI and logs
        // (was a blank string, which makes the job anonymous).
        .setAppName("Demo_Persist1")
    )

    // 2. Build the transformation chain.
    // NOTE(review): assumes "input/word.txt" exists relative to the working
    // directory — the job fails at action time otherwise.
    val datas: RDD[String] = sc.textFile("input/word.txt")

    // Split each line into words on single spaces.
    val words: RDD[String] = datas.flatMap(line => {
      line.split(" ")
    })

    // Pair each word with 1. The println is a deliberate side-effect marker:
    // it shows on the executor console each time this map is (re)computed.
    val wordToOne: RDD[(String, Int)] = words.map(word => {
      println("##############")
      (word, 1)
    })

    // Action 1: word count via reduceByKey. Triggers the full lineage
    // (textFile -> flatMap -> map), so the "###" marker prints.
    val result1: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)
    result1.collect().foreach(println)

    println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")

    // Action 2: groupByKey on the SAME RDD. Because wordToOne is not
    // cached/persisted, the whole lineage runs again — the "###" marker
    // prints a second time. Calling wordToOne.cache() before the first
    // action would avoid this recomputation (the subject of this demo).
    val result2: RDD[(String, Iterable[Int])] = wordToOne.groupByKey()
    result2.collect().foreach(println)

    // 3. Shut down the context to release resources.
    sc.stop()
  }

}
