package com.gin.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object WordCountScala {

  /** Classic Spark word-count job.
   *
   *  Reads a text file, splits each line on single spaces, and prints
   *  every word together with its occurrence count.
   *
   *  @param args optional; `args(0)` may override the default input path
   *              (`scala/data/wordCount.txt`), keeping the original
   *              behavior when no argument is given.
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration object
    val conf = new SparkConf()
    // Application name (required)
    conf.setAppName("wordCountScala")
    // "local": run in-process on a single local thread
    conf.setMaster("local")

    // Spark context, loaded from the configuration above
    val sc = new SparkContext(conf)

    try {
      // Input path can be overridden via the first program argument;
      // defaults to the original hard-coded path for compatibility.
      val inputPath = if (args.nonEmpty) args(0) else "scala/data/wordCount.txt"

      // textFile yields one record per line (split on newlines by default)
      val fileRDD: RDD[String] = sc.textFile(inputPath)

      // Flatten each line into individual words, splitting on spaces
      // (e.g. "hello world" -> "hello", "world")
      val words: RDD[String] = fileRDD.flatMap(_.split(" "))

      // Map each word to a pair suited for counting:
      //   hello -> (hello, 1)
      //   world -> (world, 1)
      val pairWord: RDD[(String, Int)] = words.map(word => (word, 1))

      // Sum the counts per key (x: accumulated value, y: current value)
      val res: RDD[(String, Int)] = pairWord.reduceByKey(_ + _)

      // RDD transformations are lazy; foreach is the ACTION that
      // actually triggers execution of the pipeline above.
      res.foreach(println)

      // Equivalent one-liner using placeholder syntax:
      // fileRDD.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).foreach(println)
    } finally {
      // Always release Spark resources, even if the job fails
      sc.stop()
    }
  }

}
