package handler

import bean.MajorCategoryMatch
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import util.{MyKafkaUtil, SparkUtil}

object MajorCategoryHandler {
  private val ssc = SparkUtil.takeSSC()

  // Gson is thread-safe and stateless after construction, so a single shared
  // instance avoids allocating one per Kafka record inside the map closure.
  // As a member of a Scala `object` it is a per-JVM singleton on each executor,
  // so no closure serialization of the instance is required.
  private val gson = new Gson()

  /**
   * Builds a stream of major-category match records from a Kafka topic.
   *
   * Each Kafka record value is expected to be a JSON object carrying at least
   * the fields `resume_id` (numeric), `major_type` (string) and
   * `screening_result` (string) — assumed from the extraction below; confirm
   * against the producer's schema.
   *
   * @param groupId Kafka consumer group id
   * @param topic   Kafka topic to consume
   * @return a DStream of parsed [[MajorCategoryMatch]] values
   */
  def kafkaMajorCategoryDataHandler(groupId: String, topic: String): DStream[MajorCategoryMatch] = {
    val kfDataDS: InputDStream[ConsumerRecord[String, String]] =
      MyKafkaUtil.getKafkaStream(groupId, topic, ssc)
    kfDataDS.map(record => parseMajorCategory(record.value()))
  }

  // Parses one JSON payload into a MajorCategoryMatch.
  // The untyped java.util.Map boundary forces casts; Gson deserializes JSON
  // numbers into boxed numerics (Double for untyped maps), so we go through
  // java.lang.Number rather than assuming Double specifically.
  private def parseMajorCategory(json: String): MajorCategoryMatch = {
    val jsonObj = gson.fromJson(json, classOf[java.util.Map[String, AnyRef]])
    val resumeId = jsonObj.get("resume_id").asInstanceOf[Number].intValue()
    val majorType = jsonObj.get("major_type").asInstanceOf[String]
    val screeningResult = jsonObj.get("screening_result").asInstanceOf[String]
    MajorCategoryMatch(resumeId, majorType, screeningResult)
  }

}
