import org.apache.spark.{SparkContext, SparkConf}

/**
  * Created by laiweiwei on 15/11/19.
  */
/**
  * Word-count driver: reads a text file from HDFS, counts word occurrences,
  * and prints the (word, count) pairs sorted by count in descending order.
  *
  * Created by laiweiwei on 15/11/19.
  */
object ViviWordCount {

  def main (args: Array[String]): Unit = {
    // Local single-threaded mode; switch to one of the cluster URLs below when deploying.
    val master = "local"
//    val master = "spark://localhost:7077"
//    val master = "spark://10.17.1.30:7077"
//    val master = "yarn-standalone"
    val conf = new SparkConf()
        .setAppName("Vivi's WordCount")
        .setMaster(master)
    val ctx = new SparkContext(conf)

    try {
      // Read the input file from HDFS.
      val hdfsBase = "hdfs://10.17.1.30:9000"
      val textFile = ctx.textFile(hdfsBase + "/vivi/java-vs-scala.txt")
//    val textFile = ctx.textFile("file:///Users/laiweiwei/git/icar-app/doc/api.txt")
//    val textFile = ctx.parallelize(Array("hello world", "it is time to have rest", "today is a good day"))

      // Classic word count: tokenize on runs of whitespace, pair each word
      // with 1, then sum the counts per word. `collect()` pulls the result
      // back to the driver (fine here — the vocabulary is small).
      val wc = textFile
        .flatMap(line => line.split("\\s+"))
        .map(word => (word, 1))
        .reduceByKey(_ + _)
        .collect()

      // Sort by count descending in a single pass and print each pair.
      wc.sortBy(-_._2).foreach(println)
    } finally {
      // Always release the SparkContext, even if the job above fails.
      ctx.stop()
    }
  }

}
