package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Word-count job intended for submission to a YARN cluster.
 *
 * Reads newline-delimited, comma-separated words from HDFS at /data/words,
 * counts occurrences per word, and writes tab-separated "word\tcount" lines
 * to HDFS at /data/wordcount.
 */
object Demo15Submit {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()

    // Do NOT set the master here when submitting to a cluster:
    // spark-submit supplies it via --master. Uncomment only for local runs.
    //conf.setMaster("local")

    conf.setAppName("submit")

    val sc = new SparkContext(conf)

    try {
      // 1. Read lines from HDFS (path is relative to the configured default FS).
      val linesRDD: RDD[String] = sc.textFile("/data/words")

      // 2. Split each comma-separated line into individual words.
      val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

      // 3. Pair every word with an initial count of 1 (key-value form).
      val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

      // 4. Sum counts per word.
      val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

      // 5. Format each (word, count) pair as a tab-separated line.
      //    No type ascriptions needed — the tuple types are statically known.
      val resultRDD: RDD[String] = countRDD.map {
        case (word, count) => s"$word\t$count"
      }

      // 6. Write results to HDFS. Note: saveAsTextFile fails if the
      //    output directory already exists — remove it before re-running.
      resultRDD.saveAsTextFile("/data/wordcount")
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }

    /**
     * Submit to the cluster (yarn-client master syntax is deprecated;
     * use --master yarn with an explicit deploy mode):
     * spark-submit --class com.shujia.spark.core.Demo15Submit --master yarn --deploy-mode client spark-1.0-SNAPSHOT.jar
     */
  }

}
