package org.niit.handler

import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.niit.bean.AdClickData
import org.niit.util.{MyKafkaUtil, SparkUtil}

object DataHandler {

  // Shared StreamingContext obtained from the project's SparkUtil singleton.
  private val ssc = SparkUtil.takeSSC()

  /**
   * Subscribes to a Kafka topic and converts each record's comma-separated
   * value into an [[AdClickData]] case class.
   *
   * @param groupId Kafka consumer group id
   * @param topic   Kafka topic to subscribe to
   * @return a DStream of parsed [[AdClickData]] objects
   */
  def KafkaDataHandler(groupId: String, topic: String): DStream[AdClickData] = {
    val kfDataDS: InputDStream[ConsumerRecord[String, String]] =
      MyKafkaUtil.getKafkaStream(groupId, topic, ssc)
    kfDataDS.map(record => parseAdClickData(record.value()))
  }

  /**
   * Parses one CSV line (15 comma-separated columns) into an [[AdClickData]].
   *
   * Uses `split(",", -1)` so trailing empty fields are preserved: the default
   * `split(",")` drops trailing empty strings, so a record whose last columns
   * are empty would produce fewer than 15 elements and `fields(14)` would
   * throw ArrayIndexOutOfBoundsException.
   */
  private def parseAdClickData(line: String): AdClickData = {
    val fields = line.split(",", -1)
    AdClickData(fields(0), fields(1), fields(2), fields(3), fields(4),
      fields(5), fields(6), fields(7), fields(8), fields(9), fields(10),
      fields(11), fields(12), fields(13), fields(14))
  }

  /** Starts the streaming context and blocks the main thread until termination. */
  def startAndAwait(): Unit = {
    ssc.start()
    ssc.awaitTermination()
  }

}
