package org.shj.spark.core

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToOrderedRDDFunctions
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions


object WordCount {
  /**
   * Word-count driver: reads a text file, counts occurrences of each
   * whitespace-separated word, and prints the five most frequent words
   * as `(word, count)` pairs.
   *
   * @param args optional; `args(0)` overrides the default input path.
   *             With no arguments the behavior is unchanged from before.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("WordCountShj")
      .setMaster("local")
    val sc = new SparkContext(conf)
    //sc.setLogLevel("DEBUG")

    /**
     * The path may be a directory or contain wildcards, e.g.
     * "/my/directory" or "/my/directory/\*.txt".
     * By default Spark creates one partition per block. On HDFS, a 132 MB
     * file (default block size 128 MB) therefore yields 2 partitions.
     */
    val defaultPath = "D:\\workspace\\scala\\sparkscala\\sparkscala\\file\\wordcnt.txt"
    // Allow the input path to be supplied on the command line; fall back to the default.
    val inputPath = args.headOption.getOrElse(defaultPath)
    val lines = sc.textFile(inputPath)
    //val lines = sc.textFile("/usr/shj")

    // Split on runs of whitespace. Filter out the empty token that
    // split("\\s+") produces for lines with leading whitespace, which the
    // original pipeline counted as a "word".
    val words = lines.flatMap(line => line.split("\\s+")).filter(_.nonEmpty)
    val counts = words.map(word => (word, 1)).reduceByKey(_ + _)

    // top() selects the k largest elements by count without the full
    // shuffle/sort of the original swap -> sortByKey(false) -> swap-back
    // pipeline; the printed (word, count) tuples are the same.
    val top5 = counts.top(5)(Ordering.by(_._2))
    top5.foreach(println)

    sc.stop()
    println("======= after ===== stop()")
  }
}