package org.nerve.data.mining.spark

import org.apache.spark.SparkContext

/**
  * Created by zengxm on 2016/6/30.
  */
class FileWordCountModeling(sparkContext: SparkContext) extends CommonModeling(sparkContext){

  /**
    * Count word occurrences in the given file, print each (word, count)
    * pair to the driver's stdout, then print the file's total line count.
    *
    * Words are delimited by a single space character, matching the
    * original splitting behavior.
    *
    * @param filePath path to the input text file (any URI accepted by
    *                 `SparkContext.textFile`, e.g. local path or HDFS)
    */
  def compute(filePath: String): Unit = {
    // Cache the RDD: it is traversed by two separate actions below
    // (the reduceByKey/collect and the final count()).
    val textFile = sparkContext.textFile(filePath).cache()

    val wordCounts = textFile
      .flatMap(_.split(" "))
      .map(word => (word, 1))
      .reduceByKey(_ + _)

    // BUG FIX: a bare `rdd.foreach(println)` runs on the executors, so in
    // cluster mode nothing reaches the driver's stdout. collect() brings
    // the (already reduced, hence small) result set to the driver first.
    wordCounts.collect().foreach { case (word, count) =>
      println(word + "\t\t" + count)
    }

    println("total line on %s is %d".format(filePath, textFile.count()))

    // Release cached partitions so repeated compute() calls do not
    // accumulate cached RDDs in executor memory.
    textFile.unpersist()
  }
}