package com.example.bigdata.spark.Kafka

import java.time.Duration
import java.util.Properties

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecords, KafkaConsumer}
import org.apache.kafka.common.serialization.StringDeserializer

object DcKafkaTest {

    /** Entry point: consumes the HDFS-audit filebeat topic forever, printing each record. */
    def main(args: Array[String]): Unit = {
        val TOPIC_NAME = "filebeat_hdfs_audit_jd"
        dcConsumer(TOPIC_NAME)
    }

    /**
     * Builds the consumer configuration.
     *
     * Auto-commit is disabled; offsets are committed explicitly in
     * [[dcConsumer]] after each polled batch has been processed, so a restart
     * resumes from the last fully handled batch. (The previous
     * `auto.commit.interval.ms` setting was dropped — it is ignored when
     * auto-commit is off.)
     *
     * @return properties for a `KafkaConsumer[String, String]`
     */
    def consumerConfig(): Properties = {
        val props = new Properties
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.8.128.106:9092")
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "jd_hdfsaudit_group")
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
        // Key/value serialization: standard plain-string deserializers.
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer].getName)
        props
    }

    /**
     * Subscribes to `topic_name` and polls in an endless loop, printing every
     * record, committing offsets after each non-empty batch.
     *
     * @param topic_name the Kafka topic to subscribe to
     */
    def dcConsumer(topic_name: String): Unit = {
        val props: Properties = consumerConfig()
        val consumer: KafkaConsumer[String, String] = new KafkaConsumer[String, String](props)
        try {
            consumer.subscribe(java.util.Collections.singletonList(topic_name))
            while (true) {
                val records: ConsumerRecords[String, String] = consumer.poll(Duration.ofMillis(1000))
                // SAM conversion to java.util.function.Consumer (Scala 2.12+)
                // replaces the deprecated scala.collection.JavaConversions
                // implicits, which were removed in Scala 2.13.
                records.forEach { record =>
                    println(s"topic = ${record.topic()} , partition = ${record.partition} , offset = ${record.offset}, value = ${record.value} , timestamp = ${record.timestamp()}")
                }
                // enable.auto.commit=false: commit explicitly once the batch
                // has been fully handled.
                if (!records.isEmpty) consumer.commitSync()
            }
        } finally {
            // Unreachable in normal operation (infinite loop) but releases the
            // consumer's sockets/buffers if poll or processing throws.
            consumer.close()
        }
    }

}
