package com.offcn.bigdata.spark.streaming.p2

import org.apache.spark.SparkConf
import org.apache.spark.api.java.StorageLevels
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  *  Spark Streaming introductory example (Driver HA variant):
  *     reads data from a socket and processes it, recovering the
  *     driver's state from a checkpoint directory on restart.
  */
object _05DriverHAApp {

    /**
      * Entry point. Reads lines from a TCP socket and prints per-batch
      * word counts.
      *
      * The StreamingContext is obtained through StreamingContext.getOrCreate,
      * so a restarted driver recovers its previous state from the checkpoint
      * directory instead of starting from scratch (Driver HA).
      *
      * Expected arguments: <batchInterval(seconds)> <host> <port> <checkpointDir>
      */
    def main(args: Array[String]): Unit = {
        import scala.util.Try

        if (args == null || args.length != 4) {
            println("Usage: <batchInterval> <host> <port> <checkpoint>")
            System.exit(-1)
        }
        val Array(batchInterval, host, port, checkpoint) = args

        // Fail fast with a clear message on malformed numeric arguments
        // instead of surfacing a raw NumberFormatException deep in setup.
        val intervalOpt = Try(batchInterval.toLong).toOption
        val portOpt = Try(port.toInt).toOption
        if (intervalOpt.isEmpty || portOpt.isEmpty) {
            println(s"<batchInterval> and <port> must be numeric, got: '$batchInterval' and '$port'")
            System.exit(-1)
        }

        val conf = new SparkConf()
            .setAppName("StreamingSocket")
        // One micro-batch of input is collected every `batchInterval` seconds.
        val batchDuration = Seconds(intervalOpt.get)

        // Builds a brand-new context; getOrCreate invokes this ONLY when the
        // checkpoint directory holds no recoverable state.
        def createFunc(): StreamingContext = {
            val ssc = new StreamingContext(conf, batchDuration)
            ssc.checkpoint(checkpoint)
            // Serialized MEMORY_AND_DISK storage lets received blocks spill
            // to disk rather than being dropped under memory pressure.
            val lines: ReceiverInputDStream[String] = ssc.
                socketTextStream(host, portOpt.get, StorageLevels.MEMORY_AND_DISK_SER)
            // Classic per-batch word count: split on whitespace, pair, reduce.
            val counts = lines.flatMap(_.split("\\s+")).map((_, 1)).reduceByKey(_ + _)

            counts.print()

            ssc
        }

        // Recover from the checkpoint if present; otherwise build a new context.
        val ssc = StreamingContext.getOrCreate(checkpoint, createFunc)

        ssc.start()            // start the streaming computation
        ssc.awaitTermination() // block the driver until the job is stopped or fails
    }
}
