package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo20Submit {

  /**
    * Word-count job intended for cluster submission via spark-submit.
    *
    * Reads comma-separated words from HDFS, counts occurrences of each word,
    * and writes "word\tcount" lines back to HDFS.
    *
    * @param args optional overrides: args(0) = input path, args(1) = output path;
    *             defaults to "/data/words/" and "/data/wc" when not supplied,
    *             preserving the original hard-coded behavior.
    */
  def main(args: Array[String]): Unit = {

    // 1. Build the Spark environment.
    val conf = new SparkConf()
    // Job name shown in the Spark UI / history server.
    conf.setAppName("wc")

    // Master is deliberately NOT set here: when submitting to a cluster,
    // spark-submit supplies it. Uncomment only for local debugging.
    //conf.setMaster("local")

    // SparkContext is the entry point to Spark.
    val sc = new SparkContext(conf)

    try {
      // Allow paths to be overridden from the command line (backward compatible).
      val inputPath  = if (args.length > 0) args(0) else "/data/words/"
      val outputPath = if (args.length > 1) args(1) else "/data/wc"

      // 2. Read lines from HDFS and split each line into words.
      val linesRDD: RDD[String] = sc.textFile(inputPath)
      val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

      // 3. Count words with reduceByKey: unlike groupBy + size, this combines
      // partial counts on the map side before the shuffle and never
      // materializes the full Iterable of occurrences for each word.
      val countRDD: RDD[(String, Int)] = wordsRDD
        .map(word => (word, 1))
        .reduceByKey(_ + _)

      // 4. Format each pair as "word<TAB>count".
      val resultRDD: RDD[String] = countRDD.map {
        case (word, count) => word + "\t" + count
      }

      // 5. Save the result to HDFS.
      resultRDD.saveAsTextFile(outputPath)
    } finally {
      // Release cluster resources even if the job fails.
      sc.stop()
    }
  }
}
