package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}

object WordCount {

  /**
   * Classic Spark word count over a local text file.
   *
   * Reads comma-separated words from `data/word.txt`, counts occurrences
   * per word, and prints each result as "word\tcount".
   */
  def main(args: Array[String]): Unit = {

    // 1. Build the Spark configuration.
    val conf = new SparkConf()
      .setMaster("local")      // run locally, single JVM
      .setAppName("WordCount") // application name shown in the Spark UI

    // 2. Create the Spark context (entry point to the RDD API).
    val sc = new SparkContext(conf)

    try {
      // 3. Read the input (HDFS or local path) as an RDD of lines.
      val linesRDD = sc.textFile("data/word.txt")

      // Split each comma-separated line into individual words.
      val wordRDD = linesRDD.flatMap(_.split(","))

      // Pair each word with an initial count of 1.
      val kvRDD = wordRDD.map(word => (word, 1))

      // Sum the counts per word. reduceByKey combines partial sums on the
      // map side before shuffling, so it moves far less data over the
      // network than groupByKey followed by an in-memory sum.
      val countRDD = kvRDD
        .reduceByKey(_ + _)
        .map { case (word, count) => word + "\t" + count }

      // Print the results. With master "local" this runs on the driver;
      // on a real cluster foreach(println) would print on the executors.
      countRDD.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }

}
