package com.hngy.scala

import org.apache.spark.{SparkConf,SparkContext}


object WordCountScala {
  /**
    * Word count: reads a text file, splits each line into words on single
    * spaces, counts the occurrences of each word, and prints every
    * (word, count) pair to the console.
    *
    * @param args optional single argument — the path of the input file;
    *             when absent, falls back to the local default "D:\\b.txt"
    */
  def main(args: Array[String]): Unit = {
    // Step 1: create the SparkContext.
    val conf = new SparkConf()
      .setAppName("WordCountScala") // job name shown in the Spark UI
      .setMaster("local")           // run locally; comment out when packaging for a cluster
    val sc = new SparkContext(conf)

    // Ensure the SparkContext is stopped even if the job throws
    // (the original unconditionally leaked it on failure).
    try {
      // Input path: first CLI argument if supplied, otherwise the local default.
      val path = if (args.length == 1) args(0) else "D:\\b.txt"

      // Load the data: one RDD element per line of the file.
      val linesRDD = sc.textFile(path)
      // Split each line into individual words on single spaces.
      val wordsRDD = linesRDD.flatMap(_.split(" "))
      //wordsRDD.foreach(wordCount=>println(wordCount)) // debug print

      // Map each word to a (word, 1) pair.
      val pairRDD = wordsRDD.map((_, 1))
      // Group by key (the word itself) and sum the counts.
      val wordCountRDD = pairRDD.reduceByKey(_ + _)
      // Print the results to the console.
      wordCountRDD.foreach(wc => println(s"${wc._1}--${wc._2}"))
    } finally {
      // Stop the SparkContext in all cases to release resources.
      sc.stop()
    }
  }
}
