package com.apache.spark
// SparkContext: the origin of all jobs — the entry point for computation
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
// SparkConf: Spark configuration (app name, master URL, etc.)
import org.apache.spark.SparkConf
/**
  * Created by zhuge on 2017/8/29.
  *
  */

object WordCount {

  /** com.apache.spark.WordCount
    * Local-mode entry point for offline debugging: counts words from a
    * hard-coded HDFS input and writes the result to a local directory.
    * Not the cluster entry point — see `main`.
    *
    * @param args unused
    */
  def main2(args: Array[String]): Unit = {
    // Point Hadoop at the local (Windows) installation so winutils is found.
    System.setProperty("hadoop.home.dir", "D:\\bigdata\\hadoop-2.4.1\\")
    val conf = new SparkConf().setAppName("workcount").setMaster("local[1]")
    val sc = new SparkContext(conf)
    try {
      val counts = countWords(sc, "hdfs://hadoop-server-00:9000/wordcount.txt")
      // collect() brings the results back to the driver so println output
      // appears here instead of being scattered across executor logs.
      counts.collect().foreach(println)
      counts.saveAsTextFile("D:\\bigdata\\out004")

      // JVM options: -Xmx512M -DHADOOP_USER_NAME=root
      //counts.saveAsTextFile("hdfs://hadoop-server-00:9000/out001")
    } finally {
      sc.stop() // always release the context, even if a job fails
    }
  }

  /**
    * Cluster entry point: counts words in the input path and writes the
    * (word, count) pairs, sorted by descending frequency, to the output path.
    *
    * @param args args(0) = input path, args(1) = output path
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: WordCount <inputPath> <outputPath>")
    val conf = new SparkConf().setAppName("workcount")
    val sc = new SparkContext(conf)
    try {
      val counts = countWords(sc, args(0))
      counts.collect().foreach(println)
      counts.saveAsTextFile(args(1))

      // JVM options: -Xmx512M -DHADOOP_USER_NAME=root
      //counts.saveAsTextFile("hdfs://hadoop-server-00:9000/out001")
    } finally {
      sc.stop()
    }
  }

  /**
    * Shared pipeline: splits each line of `path` on single spaces, counts
    * occurrences of each token, and returns (word, count) pairs sorted by
    * count, highest first.
    */
  private def countWords(sc: SparkContext, path: String): RDD[(String, Int)] = {
    val lines: RDD[String] = sc.textFile(path)
    lines
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
  }

}
