package com.atguigu.gmall.realtime.utils

import java.util
import java.util.HashMap
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import scala.collection.mutable

/**
 * Kafka工具类，用于生产和消费
 */
object MyKafkaUtils {

    /**
     * Base consumer configuration, WITHOUT a group id.
     *
     * Kept immutable on purpose: the original version used a shared mutable map and
     * `put` the group id into it inside `getKafkaDStream`, which meant two streams
     * created with different group ids raced on the same map, and the last group id
     * written silently applied to any later use. Each call now derives its own copy.
     *
     * Keys come from Kafka's ConsumerConfig constants.
     */
    private val consumerParams: Map[String, Object] = Map[String, Object](
        // Kafka cluster location, read from external properties
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> MyPropsUtils.apply(MyConfig.KAFKA_BOOTSTRAP_SERVER),
        // Key / value deserializers
        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
        // Automatic offset commit every 5s.
        // NOTE(review): with Spark Streaming, auto-commit gives at-most-once semantics at
        // best (offsets may be committed before the batch is processed) — confirm this is
        // acceptable, otherwise manage offsets manually via commitAsync.
        ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "true",
        ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG -> "5000",
        // Where to start when there is no committed offset
        ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest"
    )

    /**
     * Create a direct Kafka DStream subscribed to `topic` for the given consumer group.
     *
     * @param ssc     the StreamingContext to attach the stream to
     * @param topic   the Kafka topic to subscribe to
     * @param groupId the consumer group id (applied per call, never shared state)
     * @return an InputDStream of String-keyed/String-valued ConsumerRecords
     */
    def getKafkaDStream(ssc: StreamingContext, topic: String, groupId: String): InputDStream[ConsumerRecord[String, String]] = {
        // Per-call copy with the group id added; the shared base map is never mutated.
        val params: Map[String, Object] = consumerParams + (ConsumerConfig.GROUP_ID_CONFIG -> groupId)
        KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Subscribe[String, String](Array(topic), params)
        )
    }

    /**
     * Shared producer instance.
     *
     * `lazy` so the connection to Kafka is only opened on first use — the original
     * eager `val` opened a producer connection as a side effect of merely loading
     * this object, even in consumer-only jobs. Access pattern for callers is unchanged.
     */
    lazy val producer: KafkaProducer[String, String] = createProducer()

    /**
     * Build a String/String KafkaProducer from external properties.
     *
     * Keys come from Kafka's ProducerConfig constants.
     */
    def createProducer(): KafkaProducer[String, String] = {
        val producerParams = new util.HashMap[String, AnyRef]()
        // Kafka cluster location
        producerParams.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, MyPropsUtils.apply(MyConfig.KAFKA_BOOTSTRAP_SERVER))
        // Key / value serializers
        producerParams.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        producerParams.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
        // acks=all is required for idempotence (enabled below)
        producerParams.put(ProducerConfig.ACKS_CONFIG, "all")
        // Retry transient send failures
        producerParams.put(ProducerConfig.RETRIES_CONFIG, "100")
        // Batch size in bytes
        producerParams.put(ProducerConfig.BATCH_SIZE_CONFIG, "16384")
        // No artificial batching delay
        producerParams.put(ProducerConfig.LINGER_MS_CONFIG, "0")
        // Idempotent producer: broker de-duplicates retried sends
        producerParams.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
        // partitioner.class / interceptor.classes intentionally left at defaults

        new KafkaProducer[String, String](producerParams)
    }

    /**
     * Send a message (no key — round-robin/sticky partitioning) to the given topic.
     *
     * @param topic target topic
     * @param msg   message value
     * @return the java.util.concurrent.Future returned by KafkaProducer.send
     */
    def send(topic: String, msg: String) =
        producer.send(new ProducerRecord[String, String](topic, msg))

    /**
     * Flush and close the shared producer. Safe to call multiple times.
     */
    def close(): Unit = {
        if (producer != null)
            producer.close()
    }
}
