package cn.doitedu.day01

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object T02_LocalWordCount {

  /** Classic Spark word count: reads text lines from `args(0)`, counts word
    * occurrences, and writes the counts sorted by frequency (descending)
    * to `args(1)` (e.g. an HDFS path).
    */
  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an opaque
    // ArrayIndexOutOfBoundsException when the paths are missing.
    require(args.length >= 2, "Usage: T02_LocalWordCount <inputPath> <outputPath>")

    // Allows this local process to write to HDFS as the hadoop superuser.
    System.setProperty("HADOOP_USER_NAME", "root")

    // 1. Build the SparkConf.
    val conf = new SparkConf().setAppName("WordCount")
      .setMaster("local[4]") // setMaster must be commented out when submitting to a cluster

    // 2. Create the SparkContext.
    val sc = new SparkContext(conf)

    try {
      // 3. Create the source RDD from the input path.
      val lines: RDD[String] = sc.textFile(args(0))

      // 4. Transformations.
      // Split each line on spaces and flatten into individual words.
      val words: RDD[String] = lines.flatMap(_.split(" "))
      // Pair each word with an initial count of 1.
      val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
      // Sum the counts per word.
      val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
      // Sort by count, highest first (named param instead of a bare boolean).
      val sorted: RDD[(String, Int)] = reduced.sortBy(_._2, ascending = false)

      // 5. Action: trigger the job and write the result to the output path.
      sorted.saveAsTextFile(args(1))
    } finally {
      // 6. Always release cluster resources, even if a job above fails.
      sc.stop()
    }

  }

}
