package might


import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
/**
  * Created by hadoop on 21-5-14.
  */
/**
  * Spark word-count driver: reads a whitespace-separated text file,
  * counts occurrences of each word, and prints (word, count) pairs
  * to stdout on the driver.
  *
  * Created by hadoop on 21-5-14.
  */
object wordCountScala {

  /**
    * Entry point.
    *
    * @param args optional; args(0) overrides the input path. When absent,
    *             the original hard-coded HDFS location is used, preserving
    *             the previous behavior.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("wordCountScala")

    val sc = new SparkContext(conf)

    // Generalized: allow the input path on the command line; fall back to
    // the original hard-coded location for backward compatibility.
    val inputPath =
      if (args.nonEmpty) args(0)
      else "hdfs://localhost:9000/user/hadoop/localReceive/week2/week2Input.txt"

    try {
      val textFile = sc.textFile(inputPath)
      // Tokenize on single spaces, pair each word with 1, sum counts per key.
      // Note: the original bound this chain to an unused `val wordCount`
      // whose type was Unit (result of foreach) — dropped as misleading.
      textFile
        .flatMap(line => line.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)
        .collect() // materializes all counts on the driver; fine for small inputs
        .foreach(println)
    } finally {
      // Always release the SparkContext so resources are freed and the
      // JVM can exit cleanly, even if the job throws.
      sc.stop()
    }
  }

}
