package org.cancer.handler

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.cancer.bean.CancersData
import org.cancer.util.{MyKafkaUtil, SparkUtil}

object DataHandler_Laurel {

  // Shared streaming context obtained from the project's SparkUtil helper.
  val ssc: StreamingContext = SparkUtil.takeSSC()

  /**
   * Consumes the given Kafka topic with the given consumer group and parses
   * each record value (whitespace-separated fields) into a [[CancersData]].
   *
   * Records that do not split into exactly three fields are silently dropped,
   * so malformed ("dirty") input cannot crash the stream.
   *
   * @param groupId Kafka consumer-group id
   * @param topic   Kafka topic to subscribe to
   * @return a DStream of successfully parsed CancersData records
   */
  def KafkaDataHandler(groupId: String, topic: String): DStream[CancersData] = {
    val rawStream: InputDStream[ConsumerRecord[String, String]] =
      MyKafkaUtil.getKafkaStream(groupId, topic, ssc)

    val parsedStream: DStream[CancersData] = rawStream.flatMap { record =>
      // Split on runs of whitespace; keep only well-formed 3-field records.
      record.value().split("\\s+") match {
        case Array(first, second, third) => Some(CancersData(first, second, third))
        case _                           => None
      }
    }

    // Debug output: prints every parsed record (note: runs on the executors).
    parsedStream.foreachRDD(_.foreach(println))
    parsedStream
  }

  /** Starts the streaming context and blocks the main thread until termination. */
  def startAndAwait(): Unit = {
    ssc.start()
    ssc.awaitTermination()
  }
}
