package com.atguigu.bigdata.streaming

import java.io.{BufferedReader, InputStream, InputStreamReader}
import java.net.Socket

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.{Seconds, StreamingContext}

//自定义采集器
object SparkStreaming03_MyReceiver {
//  使用SparkStreaming 完成WordCount
  def main(args: Array[String]): Unit = {
//    spark配置对象
     val sparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming01_WordCount")
//   实时数据分析环境对象
//   采集周期，以指定的时间为周期采集实时数据
     val streamingContext = new StreamingContext(sparkConf,Seconds(3))
//   从指定文件夹中读取采集数据
     val receiverDStream = streamingContext.receiverStream(new MyReceiver("10.21.13.181",9999))
//   将采集的数据进行分解（扁平化）
     val wordDStream = receiverDStream.flatMap(line=>line.split(" "))
//   将数据进行结构的转换方便统计
     val mapDStream = wordDStream.map((_,1))
//
     val wordToSumDStream = mapDStream.reduceByKey(_+_)

    wordToSumDStream.print()
//    不能停止采集
//    streamingContext.stop()
//   启动采集器
    streamingContext.start()
//    Driver等待采集器的执行
    streamingContext.awaitTermination()
  }
}
//声明采集器
class MyReceiver(host:String,port:Int) extends  Receiver[String](StorageLevel.MEMORY_ONLY){
  var socket: Socket = null
  def receive():Unit={
    socket=new Socket(host,port)
    val reader = new BufferedReader(new InputStreamReader(socket.getInputStream,"UTF-8"))
    var line:String = null
    while((line = reader.readLine()) != null){
//      将采集到的数据存储到采集器的内部进行转换
      if("END".equals(line)){
        return
      }else{
        this.store(line)
      }

    }
  }
  override def onStart(): Unit = {
       new Thread(
    new Runnable {
      override def run(): Unit = {
        receive()
      }
    }
    ).start()
  }

  override def onStop(): Unit = {
    if(socket !=null){
      socket.close()
      socket=null
    }
  }
}
