package com.zha

import org.apache.spark.{SparkConf, SparkContext}
// Scala version
// Spark WordCount, intended to run on a Linux cluster

/**
 * Minimal Spark WordCount driver.
 *
 * Reads `/a.txt` (resolved against the default filesystem, normally HDFS,
 * so the HDFS cluster must be running), splits each line on commas,
 * counts occurrences of each word, and prints `word,count` pairs.
 *
 * NOTE(review): `foreach(println)` runs on the executors, so in cluster
 * mode the output appears in executor stdout, not the driver console.
 * That matches the original behavior and is kept as-is.
 */
object wordcounts {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("wordcounts")
      .set("spark.testing.memory", "2147480000") // ensure enough memory for the test/local run
    val context = new SparkContext(conf)
    try {
      // Path defaults to the HDFS root unless a scheme is given.
      val lines = context.textFile("/a.txt")
      val words = lines.flatMap(line => line.split(","))
      val pairs = words.map(word => (word, 1))
      val wordcount = pairs.reduceByKey(_ + _)
      wordcount.foreach(wordco => println(s"${wordco._1},${wordco._2}"))
    } finally {
      // Always release the SparkContext, even if the job fails.
      context.stop()
    }
  }
}
