package helloo


import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.hadoop.io.Text
import org.apache.spark.SparkContext

object wordcount {

  /** Simple Spark word-count job.
    *
    * Reads a text file, counts occurrences of each space-separated word,
    * prints the counts on the driver, and writes them back out as text.
    *
    * Usage: wordcount [inputPath] [outputPath]
    * Both arguments are optional; the original hard-coded HDFS paths are
    * used as defaults, so existing invocations keep working.
    */
  def main(args: Array[String]): Unit = {
    // Allow the paths to be overridden on the command line; fall back to the
    // original hard-coded locations for backward compatibility.
    val inPath  = if (args.length > 0) args(0) else "hdfs://namenode:8020/user/words.txt"
    val outPath = if (args.length > 1) args(1) else "hdfs://namenode:8020/user/wordcount01"

    // 1. Build the Spark configuration.
    //    The master is deliberately NOT set here (no conf.setMaster("local")),
    //    so it is taken from spark-submit — cluster mode by default.
    val conf = new SparkConf().setAppName("WordCount")

    // 2. Create the SparkContext — the entry point to the cluster.
    val sc = new SparkContext(conf)

    try {
      // 3. Load the input file as an RDD of lines.
      val textFile = sc.textFile(inPath)

      // 4. Split each line into words, pair each word with 1, and sum the
      //    pairs per word to get the final (word, count) RDD.
      val wordcount = textFile
        .flatMap(_.split(" "))
        .map(word => (word, 1))
        .reduceByKey(_ + _)

      // BUG FIX: calling foreach(println) directly on an RDD executes the
      // println on the executors, so in cluster mode nothing appears in the
      // driver's output. collect() the (small) result to the driver first.
      wordcount.collect().foreach(println)

      // 5. Persist the counts to HDFS.
      //    NOTE: saveAsTextFile throws if the output directory already
      //    exists — delete it before re-running the job.
      wordcount.saveAsTextFile(outPath)
    } finally {
      // BUG FIX: the original never stopped the context; release cluster
      // resources even when the job fails.
      sc.stop()
    }
  }
}