package com.doit.day05

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author:
 * @WX: 17710299606
 * @Tips: 学大数据 ,到多易教育
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description:
 * spark是一个分布式计算引擎
 * 可以加载HDFS上的数据 , 处理 , 输出结果
 */
object GetStarting {

  /**
   * Entry point: reads text from `args(0)`, counts whitespace-separated
   * words, and writes `(word, count)` pairs to `args(1)`.
   *
   * @param args args(0) = input path (e.g. HDFS dir/file),
   *             args(1) = output directory (must not already exist)
   */
  def main(args: Array[String]): Unit = {

    // Fail fast with a clear message instead of an opaque
    // ArrayIndexOutOfBoundsException on args(0)/args(1).
    require(args.length >= 2, "usage: GetStarting <inputPath> <outputPath>")

    val conf = new SparkConf
    // Master/appName are expected from spark-submit; uncomment for local runs:
    // conf.setMaster("local[*]")
    val sc = SparkContext.getOrCreate(conf)

    try {
      val data: RDD[String] = sc.textFile(args(0))

      // Lines with leading/trailing whitespace make split("\\s+") emit empty
      // tokens; drop them so "" is never counted as a word.
      val word: RDD[String] = data.flatMap(_.split("\\s+")).filter(_.nonEmpty)

      val wordOne: RDD[(String, Int)] = word.map((_, 1))

      // reduceByKey combines counts map-side before the shuffle; the previous
      // groupBy(_._1) + mapValues(_.size) shipped every (word, 1) pair across
      // the network and materialized whole groups in memory.
      val wordCnt: RDD[(String, Int)] = wordOne.reduceByKey(_ + _)

      wordCnt.saveAsTextFile(args(1))
    } finally {
      // Always release the SparkContext, even if the job fails.
      // (Removed the leftover Thread.sleep(1000000) debug pause that blocked
      // the driver for ~16 minutes before shutdown.)
      sc.stop()
    }
  }

}
