package com.shujia.spark.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo20Submit {

  /**
    * Word-count driver meant to be submitted to a cluster via spark-submit.
    *
    * Reads lines from `/data/words`, splits them on commas, counts each word,
    * and writes tab-separated `word<TAB>count` lines to `data/wc`.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    // Spark configuration object; the master is intentionally NOT set here —
    // when running on a cluster it is supplied by spark-submit.
    // (Uncomment `setMaster("local")` only for local debugging.)
    val conf: SparkConf = new SparkConf()
    conf.setAppName("wc")
    //conf.setMaster("local")

    // SparkContext is the entry point for RDD operations.
    val sc = new SparkContext(conf)
    try {
      // Read the input text file; each element is one line.
      val linesRDD: RDD[String] = sc.textFile("/data/words")
      // Debug output: on a cluster this prints to the executors' stdout logs,
      // not the driver console.
      linesRDD.foreach(println)

      // Split each line into words (input is comma-separated).
      val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

      // Count occurrences with reduceByKey instead of groupBy: reduceByKey
      // combines partial counts on the map side before the shuffle, so only
      // (word, partialCount) pairs cross the network instead of every single
      // word occurrence.
      val countRDD: RDD[(String, Int)] = wordsRDD
        .map(word => (word, 1))
        .reduceByKey(_ + _)

      // Format each pair as "word<TAB>count" for the text output.
      val resultRDD: RDD[String] = countRDD.map {
        case (word, count) => s"$word\t$count"
      }

      // saveAsTextFile fails if the output directory already exists, so
      // delete any previous run's output first (this is what the Hadoop
      // FileSystem/Path imports are for). recursive = true removes part files.
      val outputPath = new Path("data/wc")
      val fs: FileSystem = FileSystem.get(new Configuration())
      if (fs.exists(outputPath)) {
        fs.delete(outputPath, true)
      }

      // Persist the result.
      resultRDD.saveAsTextFile("data/wc")
    } finally {
      // Always release cluster resources, even if the job fails.
      sc.stop()
    }
  }
}
