package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/*
  RDD: Resilient Distributed Dataset — Spark's core abstraction for a
  fault-tolerant, partitioned collection processed in parallel.
 */
/**
 * Classic word count over a pipe-delimited text file using the RDD API.
 *
 * Reads `spark/data/words.txt`, splits each line on `|`, counts the
 * occurrences of each word, and writes the (word, count) pairs to
 * `spark/data/word_count`.
 */
object Demo1WordCount {
  def main(args: Array[String]): Unit = {
    //1. Build the Spark environment
    //1.1 Create the configuration object
    val conf: SparkConf = new SparkConf()

    //1.2 Choose the run mode (local / Standalone / Mesos / YARN).
    //    local[2] would pin the core count; plain "local" uses a single thread.
    conf.setMaster("local")

    //1.3 Name the Spark application
    conf.setAppName("wc")

    //2. Create the Spark runtime context
    val sparkContext: SparkContext = new SparkContext(conf)

    try {
      //3. Read the input file (one element per line)
      val wordsLine: RDD[String] = sparkContext.textFile("spark/data/words.txt")

      //4. Split each line on the '|' delimiter ('|' must be escaped in a regex)
      val words: RDD[String] = wordsLine.flatMap(_.split("\\|"))

      //5. Pair each word with an initial count of 1
      val wordsTuple2: RDD[(String, Int)] = words.map((_, 1))

      //6. Sum the counts per word. reduceByKey combines map-side before the
      //   shuffle, unlike groupBy + size which ships every (word, 1) record
      //   across the network and materializes full per-key iterables.
      val wordCount: RDD[(String, Int)] = wordsTuple2.reduceByKey(_ + _)

      //7. Write results. NOTE: saveAsTextFile throws if the output
      //   directory already exists — delete it before re-running.
      wordCount.saveAsTextFile("spark/data/word_count")
    } finally {
      // Release the application's resources even if the job fails.
      sparkContext.stop()
    }
  }
}
