package com.shujia.spark.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo02WordCntSubmit {
  /**
   * Word count job intended for cluster submission.
   *
   * Submitting Spark code to a cluster:
   * 1. Leave setMaster commented out and pass the master in the submit command instead.
   * 2. Input and output paths must be valid HDFS paths.
   * 3. Package the code into a jar with maven and upload it.
   * 4. Submit command:
   * spark-submit --master yarn --deploy-mode client --class com.shujia.spark.core.Demo02WordCntSubmit spark-1.0.jar
   *
   * Optional args: args(0) = input path, args(1) = output path
   * (defaults: /data/spark/words and /data/spark/wcnt).
   */
  def main(args: Array[String]): Unit = {
    // Allow overriding the paths from the command line; fall back to the original defaults.
    val inputPath: String = if (args.length >= 1) args(0) else "/data/spark/words"
    val outputPath: String = if (args.length >= 2) args(1) else "/data/spark/wcnt"

    val conf: SparkConf = new SparkConf()
    //    conf.setMaster("local")
    // App name kept consistent with the class name (was the stale "Demo01WordCnt").
    conf.setAppName("Demo02WordCntSubmit")
    val sc: SparkContext = new SparkContext(conf)
    try {
      val lineRDD: RDD[String] = sc.textFile(inputPath)
      val wordsRDD: RDD[String] = lineRDD.flatMap(_.split(","))

      // reduceByKey performs map-side combining before the shuffle, so only one
      // (word, partialCount) pair per word per partition crosses the network —
      // unlike groupBy, which shuffles every single word occurrence.
      val wordCntRDD: RDD[String] = wordsRDD
        .map(word => (word, 1))
        .reduceByKey(_ + _)
        .map { case (word, cnt) => s"$word,$cnt" }

      /**
       * Delete the output directory up front: saveAsTextFile fails if it exists.
       * Use the SparkContext's Hadoop configuration so the job and the HDFS
       * client share the same settings (namenode address, etc.).
       */
      val fs: FileSystem = FileSystem.get(sc.hadoopConfiguration)
      val outPath = new Path(outputPath)
      if (fs.exists(outPath)) {
        fs.delete(outPath, true)
      }

      wordCntRDD.saveAsTextFile(outputPath)
    } finally {
      // Release cluster resources even if the job throws.
      sc.stop()
    }
  }

}
