package com.atbeijing.bigdata.spark.mytest.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCount1 {
  /** Classic Spark word count: reads a text file, splits every line on single
    * spaces, counts occurrences of each word with `reduceByKey`, and prints
    * the resulting (word, count) pairs to stdout.
    *
    * @param args optional first argument: path to the input file; when absent,
    *             falls back to the original hard-coded default path, so
    *             existing no-arg invocations behave exactly as before.
    */
  def main(args: Array[String]): Unit = {
    // Allow the input path to be supplied on the command line instead of
    // being baked into the code; default preserves the previous behavior.
    val inputPath: String = args.headOption.getOrElse("/opt/module/spark-local/data/word.txt")

    val sparkConf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("wordCount1")
    val context = new SparkContext(sparkConf)

    try {
      // Lines of text, e.g. "hello word spark", "hello scala spark"
      val lines: RDD[String] = context.textFile(inputPath)

      // Split each line into individual words:
      // hello, word, spark, hello, scala, spark
      val words: RDD[String] = lines.flatMap(_.split(" "))

      // Pair each word with the count 1:
      // (hello,1), (word,1), (spark,1), (hello,1), (scala,1), (spark,1)
      val pairs: RDD[(String, Int)] = words.map((_, 1))

      // Group by key and sum the per-word 1s, yielding (word, totalCount).
      val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)

      counts.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if an action above throws;
      // otherwise the context (and its local executor threads) would leak.
      context.stop()
    }
  }
}
