package com.fwmagic.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  *  bin/spark-submit --master spark://hd1:7077 --executor-memory 512m --total-executor-cores 2 --class com.fwmagic.spark.core.WordCount /home/hadoop/apps/spark/spark-2.2.2/examples/myjars/fwmagic-spark-1.0.jar hdfs://hd1:9000/tmp/word.txt hdfs://hd1:9000/tmp/word_counts
  */
object WordCount {

    /**
      * Entry point: reads text from `args(0)`, counts word frequencies,
      * sorts by count descending, and writes the result to `args(1)`.
      *
      * @param args args(0) = input path (e.g. an HDFS file),
      *             args(1) = output directory (must not already exist)
      */
    def main(args: Array[String]): Unit = {

        // When writing to HDFS, run as the same user that owns the HDFS paths.
        System.setProperty("HADOOP_USER_NAME", "hadoop")
        val conf: SparkConf = new SparkConf()
                .setAppName(this.getClass.getSimpleName)
                // Local mode; "[*]" uses one worker thread per available core.
                // Remove this when submitting to a cluster via spark-submit.
                .setMaster("local[*]")

        val sc = new SparkContext(conf)

        val lines: RDD[String] = sc.textFile(args(0))

        // Split on runs of whitespace ("\\s+", not "\\s") so that multiple
        // consecutive spaces/tabs do not yield empty-string tokens; also drop
        // the empty leading token produced by lines that start with whitespace.
        val words: RDD[String] = lines.flatMap(_.split("\\s+")).filter(_.nonEmpty)

        // Classic word count: pair each word with 1, then sum per key.
        val wordAndOne: RDD[(String, Int)] = words.map((_, 1))

        val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)

        // Most frequent words first.
        val sorted: RDD[(String, Int)] = reduced.sortBy(_._2, ascending = false)

        sorted.saveAsTextFile(args(1))

        sc.stop()
    }

}
