// import org.apache.spark._
// import org.apache.spark.SparkContext._

// object WordCount {
//     def main(args: Array[String]) {
//       val inputFile = args(0)
//       val outputFile = args(1)
//       val conf = new SparkConf().setAppName("wordCount")
//       // Create a Scala Spark Context.
//       val sc = new SparkContext(conf)
//       // Load our input data.
//       val input =  sc.textFile(inputFile)
//       // Split up into words.
//       val words = input.flatMap(line => line.split(" "))
//       // Transform into word and count.
//       val counts = words.map(word => (word, 1)).reduceByKey{case (x, y) => x + y}
//       // Save the word count back out to a text file, causing evaluation.
//       counts.saveAsTextFile(outputFile)
//     }
// }


import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}


object StreamingWordCount {

  /** Entry point: counts words arriving on a TCP socket (localhost:9999)
    * in 5-second micro-batches and prints each batch's counts to stdout.
    */
  def main(args: Array[String]): Unit = {

    // The receiver occupies one thread while running, so a streaming program
    // needs at least two threads ("local[2]") to avoid a starvation scenario.
    val conf: SparkConf = new SparkConf().setMaster("local[2]").setAppName("StreamingWordCount")

    // Main entry point for all streaming functionality; 5-second batch interval.
    val ssc: StreamingContext = new StreamingContext(conf, Seconds(5))

    // Discretized stream from a TCP source; each received record is a line of text.
    val stream: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)

    // Flatten the received text into words, pair each with 1, and aggregate counts per key.
    val dStream: DStream[(String, Int)] =
      stream.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)

    // Print the first elements of every batch to stdout.
    dStream.print()

    // Nothing runs until start(): the transformations above only build the
    // computation graph; no data is actually processed before this call.
    ssc.start()
    // Block until the computation is terminated (stopped externally or by an error).
    ssc.awaitTermination()
    // stop(true)  -> also stops the underlying SparkContext
    // stop(false) -> stops only the StreamingContext, keeps the SparkContext
    // NOTE: this line only executes after awaitTermination() returns, i.e. the
    // context has already been stopped elsewhere; kept as explicit cleanup.
    ssc.stop(true)
  }

}
