import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}


/**
 * Minimal Spark Streaming word count: listens on a TCP socket, splits each
 * line into words, and prints per-batch word frequencies every second.
 *
 * NOTE: this must be an `object`, not a `class` — `main` on a class instance
 * is not a static entry point, so `spark-submit` / the JVM launcher could
 * never invoke the original version.
 */
object StreamingWordCount {

  /**
   * Entry point.
   *
   * @param args optional overrides: args(0) = hostname, args(1) = port.
   *             Defaults preserve the original hard-coded values.
   */
  def main(args: Array[String]): Unit = {
    // Allow host/port to be supplied on the command line; fall back to the
    // original hard-coded endpoint for backward compatibility.
    val host = args.headOption.getOrElse("192.168.0.155")
    val port = args.lift(1).map(_.toInt).getOrElse(9999)

    // Initialize the Spark environment.
    val sparkConf = new SparkConf()
      .setMaster("local[6]")
      .setAppName("Test")
    // StreamingContext is the entry point; each batch covers one second.
    val ssc = new StreamingContext(sparkConf, Seconds(1))

    // socketTextStream creates a DStream that listens on a socket and treats
    // the incoming bytes as text. A DStream can be thought of as a streaming
    // sequence of RDDs.
    val lines: ReceiverInputDStream[String] = ssc.socketTextStream(
      hostname = host,
      port = port,
      storageLevel = StorageLevel.MEMORY_AND_DISK_SER
    )

    // Split each line into words.
    val words: DStream[String] = lines.flatMap(item => item.split(" "))
    // Pair each word with an initial count of 1.
    val tuples: DStream[(String, Int)] = words.map(item => (item, 1))
    // Sum the counts per word within each batch.
    val counts: DStream[(String, Int)] = tuples.reduceByKey(_ + _)
    // Print the first elements of each batch to stdout.
    counts.print()

    // Start the streaming computation.
    ssc.start()
    // main would otherwise return immediately; block until the streaming
    // context is stopped externally or fails.
    ssc.awaitTermination()
  }
}
