package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo1WordCount {
  def main(args: Array[String]): Unit = {

    /**
      * Spark word-count example.
      * 1. Create the Spark environment.
      */

    val conf: SparkConf = new SparkConf()

    // Spark run mode. Use "local[*]" for local testing; "yarn" submits to a
    // YARN cluster. NOTE: the "yarn-client"/"yarn-cluster" master URLs were
    // removed in Spark 2.0 — the deploy mode is now selected with
    // --deploy-mode (or spark.submit.deployMode), not via the master URL.
    conf.setMaster("yarn")
    conf.setAppName("wc")

    // SparkContext: the entry point to Spark functionality.
    val sc: SparkContext = new SparkContext(conf)

    try {
      // 2. Read the input data.
      // RDD --> Resilient Distributed Dataset.
      val linesRDD: RDD[String] = sc.textFile("/data/words")

      // 3. Split every line into words; returns a new RDD.
      val words: RDD[String] = linesRDD.flatMap(line => line.split(","))

      // Convert the data into key/value format: (word, 1).
      val kvRDD: RDD[(String, Int)] = words.map(word => (word, 1))

      /**
        * reduceByKey aggregates the values for each key;
        * the RDD element type stays the same: (String, Int).
        */

      // Count the number of occurrences of each word.
      val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey((x, y) => x + y)

      // Format the result as "word,count".
      val resultRDD: RDD[String] = countRDD.map(kv => kv._1 + "," + kv._2)

      // Save the result.
      // NOTE(review): saveAsTextFile fails if the output path already exists —
      // delete /data/wc before re-running, or verify this is intended.
      resultRDD.saveAsTextFile("/data/wc")
    } finally {
      // Always release the cluster resources, even if a job above failed.
      sc.stop()
    }

    // One-liner equivalent:

    /* sc.textFile("spark/data/words.txt")
       .flatMap(_.split(","))
       .map((_, 1))
       .reduceByKey(_ + _)
       .map(kv => kv._1 + "," + kv._2)
       .saveAsTextFile("spark/data/wc1")*/

    /**
      * spark-submit --class com.shujia.spark.Demo1WordCount \
      *   --master yarn --deploy-mode client spark-1.0.jar
      */
  }
}
