package com.offcn.bigdata.spark.streaming.p1

import org.apache.spark.SparkConf
import org.apache.spark.api.java.StorageLevels
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Introductory Spark Streaming example:
  * reads text from a socket source and performs a per-batch word count.
  */
object _01StreamingSocketApp {
    def main(args: Array[String]): Unit = {
        // In local mode a receiver permanently occupies one thread, so at
        // least two worker threads are needed for any processing to happen.
        val sparkConf = new SparkConf()
            .setMaster("local[2]")
            .setAppName("StreamingSocket")

        // Each micro-batch covers 2 seconds of ingested data.
        val ssc = new StreamingContext(sparkConf, Seconds(2))

        val socketStream: ReceiverInputDStream[String] =
            ssc.socketTextStream("bigdata01", 9999, StorageLevels.MEMORY_AND_DISK_SER_2)

        // Classic word count: split on whitespace, pair each word with 1,
        // then sum the counts per word within the batch.
        val wordCounts = socketStream
            .flatMap(line => line.split("\\s+"))
            .map(word => (word, 1))
            .reduceByKey(_ + _)

        wordCounts.print()

        // Kick off the streaming computation.
        ssc.start()
        /*
            IllegalStateException:
            Adding new inputs, transformations, and output operations after starting a context is not supported
            wordCounts.count()
        */
        // Block the main thread so the computation keeps running.
        ssc.awaitTermination()
    }
}
