package com.itcast.spark.flume

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.flume.{FlumeUtils, SparkFlumeEvent}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * DESC: Push-based Spark Streaming + Flume word-count example. A receiver
 * listens on 192.168.137.60:8888 for events pushed by a Flume sink, decodes
 * each event body to a String, and prints per-batch word counts every 5s.
 */
object SparkStreamingFlumePush {
  def main(args: Array[String]): Unit = {
    // 1. Build the Spark environment: local mode using all available cores.
    val sparkConf: SparkConf = new SparkConf()
      .setAppName("SparkStreamingFlumePush")
      .setMaster("local[*]")
    val sparkContext = new SparkContext(sparkConf)
    sparkContext.setLogLevel("WARN")

    // 2. StreamingContext with a 5-second micro-batch interval: incoming data
    //    is grouped and processed once every 5 seconds.
    val streamingContext = new StreamingContext(sparkContext, Seconds(5))

    // NOTE(review): FlumeUtils.createStream is the *push*-based integration —
    // a receiver is started here and Flume pushes events to it at
    // 192.168.137.60:8888 (the original comment said "poll", which does not
    // match this API; the poll approach would use createPollingStream).
    val flumeEvents: ReceiverInputDStream[SparkFlumeEvent] =
      FlumeUtils.createStream(streamingContext, "192.168.137.60", 8888)

    // Each SparkFlumeEvent wraps a Flume event; the payload lives in the
    // event body as bytes, so decode it to a String before processing.
    val lines: DStream[String] =
      flumeEvents.map(wrapper => new String(wrapper.event.getBody.array()))

    // 3. Simple word count over each micro-batch: split on spaces, pair each
    //    word with 1, then sum counts per word.
    val wordCounts: DStream[(String, Int)] =
      lines
        .flatMap(line => line.split(" "))
        .map(word => (word, 1))
        .reduceByKey(_ + _)

    // 4. Per-batch sorting of the results could be added via transform(func).
    // 5. Output: print each batch's counts to stdout.
    wordCounts.print()

    // 6. Start the execution of the streams.
    streamingContext.start()
    // 7. Block until the computation stops (user-triggered stop or an error).
    streamingContext.awaitTermination()
    // 8. Stop the context. First arg: also stop the underlying SparkContext.
    //    Second arg: graceful stop — finish processing already-received data
    //    before shutting down the receivers.
    streamingContext.stop(true, true)
  }
}
