package org.cancer.handler

import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.cancer.bean.HighRiskIdentificationData
import org.cancer.util.{MyKafkaUtil, SparkUtil}

/**
 * Entry point for the streaming pipeline: reads tab-separated records from a
 * Kafka topic and parses each one into a [[HighRiskIdentificationData]].
 *
 * Expected record layout (tab-separated): `"<ts> <installment>\t<transfer>\t<complications>"`,
 * where the first field itself is space-separated into timestamp and installment.
 * Missing fields default to the empty string rather than failing the record.
 */
object DataHandler {
  // Single shared StreamingContext for the whole handler; also used by startAndAwait().
  private val ssc = SparkUtil.takeSSC()

  /**
   * Builds a DStream of parsed records from the given Kafka topic.
   *
   * NOTE(review): method name keeps its original UpperCamelCase spelling for
   * source compatibility with existing callers; idiomatic Scala would be
   * `kafkaDataHandler`.
   *
   * @param groupId Kafka consumer group id
   * @param topic   Kafka topic to subscribe to
   * @return a DStream of [[HighRiskIdentificationData]] parsed from raw values
   */
  def KafkaDataHandler(groupId: String, topic: String): DStream[HighRiskIdentificationData] = {
    println(s"正在从Kafka主题 $topic 读取数据...")
    val kafkaStream = MyKafkaUtil.getKafkaStream(groupId, topic, ssc)

    // Parse each raw Kafka value into the bean. `lift(i).getOrElse("")` yields
    // "" for any missing field instead of throwing on short records.
    kafkaStream.map(record => {
      val data = record.value()
      println(s"收到Kafka数据: $data")
      val fields = data.split("\t")
      // First tab-field packs "<ts> <installment>"; split on space.
      // String.split never returns an empty array, so fields(0) is always present.
      val tsAndInstallment = fields(0).split(" ")
      val ts            = tsAndInstallment.lift(0).getOrElse("")
      val installment   = tsAndInstallment.lift(1).getOrElse("")
      val transfer      = fields.lift(1).getOrElse("")
      val complications = fields.lift(2).getOrElse("")
      HighRiskIdentificationData(
        ts = ts,
        Installment = installment,
        transfer = transfer,
        Complications = complications
      )
    })
  }

  /**
   * Starts the streaming context and blocks the calling thread until the
   * streaming job terminates (or is stopped externally).
   */
  def startAndAwait(): Unit = {
    println("启动流式处理引擎...")
    ssc.start()
    ssc.awaitTermination()
  }
}
