package com.atguigu.gmall.realtime.utils

import java.util

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

import scala.collection.mutable

/**
 * Kafka utility object: consumes records from Kafka through Spark Streaming
 * direct streams, and produces records to Kafka through a single shared
 * producer instance (one connection per JVM, since this is an `object`).
 */
object MyKafkaUtils {

  /**
   * Base consumer configuration, shared by every stream this object creates.
   * Keys come from [[ConsumerConfig]] constants to avoid typos in property names.
   * This map is treated as read-only after initialization; per-call settings
   * (the consumer group id) are layered on top in [[kafkaParams]].
   */
  private val consumerConf: mutable.Map[String, Object] = mutable.Map(
    ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
    ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
    // Broker list resolved from the project properties file.
    ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> MyPropsUtil(MyConfig.KAFKA_SERVERS),
    // NOTE(review): auto-commit is enabled here, yet the offsets overload of
    // GetConsumerDStream replays offsets recovered from external storage
    // (e.g. Redis), which makes Kafka's own committed offsets redundant on
    // restart. Consider "false" if offsets are fully managed externally —
    // confirm with the owning jobs before changing.
    ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "true",
    // Where to start when the group has no committed offset:
    // earliest = from the beginning; latest = only records produced from now on.
    ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest",
    // Interval between automatic offset commits, in milliseconds.
    ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG -> "5000"
  )

  /**
   * Builds the effective Kafka parameter map for one stream.
   *
   * The original implementation wrote `group.id` into the shared
   * `consumerConf` map, leaking the most recent group id between calls and
   * making concurrent stream creation unsafe. Deriving an immutable copy per
   * call fixes both problems without changing what each stream receives.
   *
   * @param groupId consumer group id for this stream
   */
  private def kafkaParams(groupId: String): Map[String, Object] =
    consumerConf.toMap + (ConsumerConfig.GROUP_ID_CONFIG -> groupId)

  /**
   * Creates a direct DStream subscribed to `topic` for the given consumer
   * group. Starting offsets are chosen by Kafka according to the group's
   * committed offsets and `auto.offset.reset`.
   *
   * @param ssc     active StreamingContext
   * @param topic   topic to subscribe to
   * @param groupID consumer group id
   * @return a String/String direct input stream
   */
  def GetConsumerDStream(ssc: StreamingContext, topic: String,
                         groupID: String): InputDStream[ConsumerRecord[String, String]] = {
    // PreferConsistent spreads partitions evenly across available executors.
    // PreferBrokers only helps when executors are co-located with brokers,
    // which is rare, so the consistent strategy is the sensible default.
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParams(groupID))
    )
  }

  /**
   * Overload used when starting offsets were recovered from external storage
   * (e.g. Redis): consumption resumes exactly from the supplied positions.
   *
   * @param offsets starting offset for each topic-partition
   */
  def GetConsumerDStream(ssc: StreamingContext, topic: String, groupID: String,
                         offsets: Map[TopicPartition, Long]): InputDStream[ConsumerRecord[String, String]] = {
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParams(groupID), offsets)
    )
  }

  /**
   * Builds a new String/String [[KafkaProducer]] against the configured
   * brokers. Public so callers may hold an independent producer; the `send`
   * and `flush` helpers below use the single shared instance instead.
   */
  def GetProducer(): KafkaProducer[String, String] = {
    val producerConfig = new util.HashMap[String, AnyRef]()
    producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, MyPropsUtil(MyConfig.KAFKA_SERVERS))
    new KafkaProducer[String, String](producerConfig)
  }

  // Single shared producer. Lazy so consumer-only jobs never open a producer
  // connection at object initialization. (Renamed from the misspelled
  // "producter"; private, so the rename is interface-safe.)
  private lazy val producer: KafkaProducer[String, String] = GetProducer()

  /** Sends `msg` to `topic` asynchronously via the shared producer. */
  def send(topic: String, msg: String): Unit =
    producer.send(new ProducerRecord[String, String](topic, msg))

  /** Blocks until all buffered records of the shared producer are sent. */
  def flush(): Unit =
    producer.flush()
}
