package com.demo

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}


/**
  * Spark Streaming real-time word count.
  *
  * Watches a directory for newly created text files, splits each line on
  * commas, and prints per-batch word counts every 2 seconds.
  */
object SparkStreaming1 {
  def main(args: Array[String]): Unit = {
    // local[2]: receiver-based sources (e.g. the socketTextStream below) occupy
    // one thread for the receiver itself, so at least one more is required for
    // processing — local[1] would silently starve the job.
    val conf = new SparkConf().setAppName("SparkStreaming1").setMaster("local[2]")
    // 2-second micro-batch interval.
    val scc: StreamingContext = new StreamingContext(conf, Seconds(2))

    // Read the data: either from a socket ...
    //val lines: DStream[String] = scc.socketTextStream("47.105.147.253",9000)
    // ... or from new files appearing under this directory.
    val lines = scc.textFileStream("E:\\input")

    // Word count per batch: split each line on commas, pair each word with 1,
    // then sum the counts for each word.
    val result: DStream[(String, Int)] =
      lines.flatMap(_.split(",")).map(word => (word, 1)).reduceByKey(_ + _)
    result.print()

    scc.start()            // start receiving and processing the stream
    scc.awaitTermination() // block until the streaming job is stopped
  }
}

/**
  * Spark Core batch word count.
  *
  * Reads text files from args(0), splits lines on single spaces, counts
  * occurrences of each word, and writes the results to args(1).
  */
object WordCount {
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an opaque MatchError.
    require(args.length == 2, "usage: WordCount <inputPath> <outputPath>")
    val Array(inputPath, outputPath) = args

    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // reduceByKey combines counts map-side before the shuffle; the previous
      // groupBy(_._1).mapValues(_.size) shipped every (word, 1) pair across
      // the shuffle just to count them.
      val counts: RDD[(String, Int)] = sc.textFile(inputPath)
        .flatMap(_.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)
      counts.saveAsTextFile(outputPath)
    } finally {
      // Release the SparkContext even if the job fails.
      sc.stop()
    }
  }
}


