package wc

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}


object WordCount {

  /** Spark word-count driver.
    *
    * Expects two arguments:
    *   args(0) — input text file path (e.g. an HDFS or local path)
    *   args(1) — output directory for the results (must not already exist)
    *
    * Reads the input, counts occurrences of each space-separated word,
    * sorts the (word, count) pairs by count ascending, and writes them out.
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
    // when the job is submitted without both paths.
    require(args.length >= 2, "Usage: WordCount <inputPath> <outputPath>")

    // Build the Spark configuration; the master URL is expected to be
    // supplied externally (e.g. via spark-submit --master).
    val sparkConf = new SparkConf().setAppName("WordCount")
    val sc: SparkContext = new SparkContext(sparkConf)

    try {
      // Read the input file; each RDD element is one line of text.
      val dataLine = sc.textFile(args(0))
      // Split each line on single spaces to produce individual words.
      val words = dataLine.flatMap(_.split(" "))
      // Tag every word with an initial count of 1, e.g. ("love", 1).
      val wordAnd1: RDD[(String, Int)] = words.map(word => (word, 1))
      // Sum the per-word counts.
      val wc: RDD[(String, Int)] = wordAnd1.reduceByKey(_ + _)
      // Sort by count, ascending — e.g. ((too,1), (love,3), (i,3), (u,3)).
      val sortData: RDD[(String, Int)] = wc.sortBy(_._2)
      // Persist the sorted results to the output path (e.g. on HDFS).
      sortData.saveAsTextFile(args(1))
    } finally {
      // Always release the SparkContext, even if a stage above fails.
      sc.stop()
    }
  }

}
