package spark

class wordcount {
  /*
   * Example Spark word-count pipeline, kept commented out for reference.
   * NOTE(review): this code assumes a `spark` SparkSession is in scope — confirm before enabling.
   *
   * // The "file://" scheme makes textFile read from the local filesystem
   * // instead of the default filesystem (e.g. HDFS).
   * val file = spark.sparkContext.textFile("file:///root/hadoop/data/wc.txt")
   *
   * // Split each line on commas, pair every word with a count of 1,
   * // then sum the counts per word.
   * val wordCounts = file.flatMap(line => line.split(",")).map((word => (word, 1))).reduceByKey(_ + _)
   *
   * // Materialize the per-word counts on the driver as an Array[(String, Int)].
   * wordCounts.collect
   */
}
