package com.atguigu.gmall.realtime.utils

import java.util

import org.apache.kafka.clients.consumer
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.sql.catalyst.analysis.TypeCoercion.CaseWhenCoercion
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

/**
 * Kafka utility object: provides both consume-from-Kafka and produce-to-Kafka
 * helpers, so the Spark Streaming job can act as a Kafka consumer and a Kafka
 * producer at the same time.
 */
object MyKafkaUtils {

  // Base consumer configuration shared by both consume entry points.
  // Kept as a public mutable.Map to preserve the original interface, but it is
  // NO LONGER mutated per call: per-group settings are layered on top in
  // paramsFor(), so streams created with different consumer groups cannot
  // clobber each other's group.id (the original put() mutated shared state).
  val consumerParams : mutable.Map[String,Object] = mutable.Map[String,Object](
    // Kafka cluster location, read from the project properties file.
    ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> MyPropertiesUtils(Myconfig.KAFKA_BOOTSTRAP_SERVER),
    // Key/value deserializers (records are consumed as String/String).
    ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
    ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
    // Default offset handling: let Kafka auto-commit every 5000 ms.
    // Overridden to "false" when offsets are maintained externally (see the
    // offsets overload of getKafkaDstream below).
    ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "true",
    ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG -> "5000",
    // Where to start when no committed offset exists for the group.
    ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest"
  )

  /**
   * Builds an immutable parameter map for one consumer group without mutating
   * the shared base configuration.
   *
   * @param group            consumer group id to run under
   * @param enableAutoCommit whether the Kafka client should auto-commit offsets
   */
  private def paramsFor(group: String, enableAutoCommit: Boolean): Map[String, Object] =
    consumerParams.toMap ++ Map[String, Object](
      ConsumerConfig.GROUP_ID_CONFIG -> group,
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> enableAutoCommit.toString
    )

  /**
   * Consumes from Kafka relying on Kafka's own committed offsets
   * (auto-commit enabled, committed every 5000 ms).
   *
   * @param ssc   active StreamingContext
   * @param topic topic to subscribe to
   * @param group consumer group id
   * @return direct stream of String/String consumer records
   */
  def getKafkaDstream(ssc: StreamingContext, topic: String, group: String): InputDStream[ConsumerRecord[String, String]] = {
    KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), paramsFor(group, enableAutoCommit = true))
    )
  }

  /**
   * Consumes from Kafka starting at externally maintained offsets
   * (e.g. offsets stored in Redis by the caller).
   *
   * FIX: auto-commit is disabled for this overload. When offsets are managed
   * outside Kafka, letting the client also auto-commit to __consumer_offsets
   * is misleading and can mask a failure of the external offset store.
   *
   * @param ssc     active StreamingContext
   * @param topic   topic to subscribe to
   * @param group   consumer group id
   * @param offsets starting offset for each topic-partition
   * @return direct stream of String/String consumer records
   */
  def getKafkaDstream(ssc: StreamingContext, topic: String, group: String,
                      offsets: Map[TopicPartition, Long]): InputDStream[ConsumerRecord[String, String]] = {
    KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), paramsFor(group, enableAutoCommit = false), offsets)
    )
  }

  // Single shared producer for the whole application.
  // KafkaProducer is documented as thread-safe, so one instance is enough.
  val kafkaProducer: KafkaProducer[String, String] = createProducer()

  /**
   * Creates a String/String producer against the configured bootstrap servers.
   */
  def createProducer(): KafkaProducer[String, String] = {
    val producerParas = new util.HashMap[String, AnyRef]()
    // Cluster location.
    producerParas.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, MyPropertiesUtils(Myconfig.KAFKA_BOOTSTRAP_SERVER))
    // Key/value serializers.
    producerParas.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    producerParas.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    new KafkaProducer[String, String](producerParas)
  }

  /** Sends a message without a key; the partition is chosen by the default partitioner. */
  def send(topic: String, msg: String) =
    kafkaProducer.send(new ProducerRecord[String, String](topic, msg))

  /** Sends a keyed message; the same key always maps to the same partition. */
  def send(topic: String, key: String, msg: String) =
    kafkaProducer.send(new ProducerRecord[String, String](topic, key, msg))

  /**
   * Flushes the producer's in-memory buffer to the brokers.
   * Call before shutdown to avoid losing buffered records.
   */
  def flush(): Unit =
    if (kafkaProducer != null) kafkaProducer.flush()

}
