package com.atguigu.gmall.realtime.utils

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

import java.util.Properties
import scala.collection.mutable

/**
 * @author caodan
 * @date 2022-08-23 14:25
 * @version 1.0
 */

/**
 * kafka 工具类 用于生产和消费数据
 */
object KafkaUtil {

  /**
   * Base Kafka consumer configuration shared by every stream this object creates.
   *
   * Kept as a public mutable map for backward compatibility with existing callers,
   * but the factory methods below no longer mutate it: each call derives its own
   * immutable copy, so concurrent streams with different consumer groups cannot
   * clobber each other's `group.id`.
   *
   * NOTE(review): `enable.auto.commit=true` conflicts with the offset-specifying
   * overload below — when offsets are managed manually this should normally be
   * "false". Left unchanged to preserve current behavior; confirm with the callers.
   */
  val ConsumerParams: mutable.Map[String, String] = mutable.Map[String, String](
    ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop102:9092,hadoop103:9092,hadoop104:9092", // Kafka cluster address
    ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer", // key deserializer
    ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer", // value deserializer
    ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest",
    ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "true" // automatic offset commit
  )

  /**
   * Builds the per-stream parameter map: the shared base config plus the caller's
   * consumer group. Returns a fresh immutable map instead of mutating [[ConsumerParams]].
   */
  private def paramsWithGroup(group: String): Map[String, String] =
    ConsumerParams.toMap + (ConsumerConfig.GROUP_ID_CONFIG -> group)

  /**
   * Creates a direct Kafka DStream subscribed to a single topic, starting from the
   * position dictated by `auto.offset.reset` (here: "latest").
   *
   * @param topic topic to subscribe to
   * @param scc   the active StreamingContext
   * @param group consumer group id
   * @return a DStream of raw ConsumerRecords
   */
  def getKafkaDstream(topic: String, scc: StreamingContext, group: String): DStream[ConsumerRecord[String, String]] = {
    KafkaUtils.createDirectStream(
      scc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), paramsWithGroup(group))
    )
  }

  /**
   * Creates a direct Kafka DStream subscribed to a single topic, starting each
   * partition from an explicitly supplied offset (e.g. offsets replayed from an
   * external store such as Redis).
   *
   * @param topic      topic to subscribe to
   * @param scc        the active StreamingContext
   * @param group      consumer group id
   * @param paratition starting offset per TopicPartition (name kept — including its
   *                   typo — for backward compatibility with named-argument callers)
   * @return a DStream of raw ConsumerRecords
   */
  def getKafkaDstream(topic: String, scc: StreamingContext, group: String, paratition: Map[TopicPartition, Long]): DStream[ConsumerRecord[String, String]] = {
    KafkaUtils.createDirectStream(
      scc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), paramsWithGroup(group), paratition)
    )
  }

  /**
   * Creates a new idempotent String/String Kafka producer.
   * Prefer the shared [[send]]/[[flush]] helpers; call this only when a dedicated
   * producer instance is required.
   */
  def createKafkaProducer(): KafkaProducer[String, String] = {
    // Producer configuration
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092,hadoop103:9092,hadoop104:9092")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true") // exactly-once per-partition writes from this producer

    new KafkaProducer[String, String](props)
  }

  // Shared producer, never reassigned (was `var`); lazy so consumer-only users of
  // this object never open a producer connection.
  private lazy val producer: KafkaProducer[String, String] = createKafkaProducer()

  /**
   * Sends one message (no key) to the given topic asynchronously.
   * Buffered records are pushed out by [[flush]].
   */
  def send(topic: String, msg: String): Unit = {
    producer.send(new ProducerRecord[String, String](topic, msg))
  }

  /**
   * Blocks until all buffered records have been transmitted and acknowledged.
   */
  def flush(): Unit = {
    producer.flush()
  }
}
