package com.wenge.datagroup.storage.util

import com.alibaba.fastjson.JSONObject
import org.apache.commons.lang3.StringUtils
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast

import java.util.{List => JavaList, Properties}
import java.util.concurrent.Future

/**
 * @author
 * Depp
 */
/**
 * Serializable wrapper around a lazily created [[KafkaProducer]] so a single
 * sink can be broadcast to Spark executors (see [[KafkaSink.getKafkaProducer]]).
 *
 * Must extend Serializable: the instance is shipped through `sc.broadcast`,
 * and Spark's default Java serializer rejects non-serializable values. The
 * producer itself is NOT serializable, which is why it is created lazily from
 * the (serializable) factory function on first use on each executor.
 *
 * @param createProducer factory that builds the underlying producer
 */
class KafkaSink(createProducer: () => KafkaProducer[String, String]) extends Serializable {
  // Created on first use on the executor, never on the driver.
  lazy val producer = createProducer()

  /**
   * Sends every JSON object in `list` to `topic`, one record per element.
   * Blank topics and null/empty lists are ignored.
   */
  def sendData(list: JavaList[JSONObject], topic: String): Unit = {
    if (StringUtils.isNotBlank(topic) && list != null && !list.isEmpty) {
      for (i <- 0 until list.size()) {
        send(topic, list.get(i).toJSONString)
      }
    }
  }

  /**
   * Asynchronously sends a single record. Failures are reported via the
   * callback instead of being silently dropped.
   *
   * @return the future completed by the Kafka client with record metadata
   */
  def send(topic: String, value: String): Future[RecordMetadata] = {
    val record = new ProducerRecord[String, String](topic, value)
    producer.send(
      record,
      new Callback() {
        override def onCompletion(recordMetadata: RecordMetadata, e: Exception): Unit = {
          // When the send failed, `recordMetadata` is null — guard before
          // dereferencing it, and surface the error rather than ignoring it.
          if (e != null) {
            System.err.println(s"Kafka send to topic [$topic] failed: ${e.getMessage}")
          }
        }
      }
    )
  }
}

object KafkaSink {

  /**
   * Builds a [[KafkaSink]] whose producer is created lazily on first use and
   * closed by a JVM shutdown hook.
   *
   * The `[K, V]` type parameters are retained for source compatibility with
   * existing callers, but keys and values are always `String`.
   *
   * @param broker comma-separated Kafka bootstrap server list
   */
  def apply[K, V](broker: String): KafkaSink = {
    val props: Properties = getPro(broker)
    val createProducerFunc = () => {
      val producer = new KafkaProducer[String, String](props)
      sys.addShutdownHook {
        // Ensure that, on executor JVM shutdown, the Kafka producer sends
        // any buffered messages to Kafka before shutting down.
        producer.close()
      }
      producer
    }
    new KafkaSink(createProducerFunc)
  }

  /** Assembles the producer configuration for the given broker list. */
  private def getPro(broker: String): Properties = {
    val props = new Properties
    props.put("bootstrap.servers", broker)
    // acks=all: wait for the full in-sync replica set to commit each record.
    props.put("acks", "all")
    // retries=0: retrying on failure could produce duplicate records.
    props.put("retries", Integer.valueOf(0))
    // Per-partition batch buffer size, in bytes.
    props.put("batch.size", Integer.valueOf(1638400))
    // How long to wait for a batch to fill; 1 adds at most 1 ms of latency.
    props.put("linger.ms", Integer.valueOf(1))
    // Total memory the producer may use for buffering, in bytes.
    props.put("buffer.memory", Integer.valueOf(1638400))

    // Wire format: keys and values are serialized as UTF-8 strings; the
    // consumer side must deserialize with the matching String deserializer.
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    /* // SASL authentication (ConfigUtil lives in this same package):
    props.put("security.protocol", ConfigUtil.get("security.protocol"))
    props.put("sasl.mechanism", ConfigUtil.get("sasl.mechanism"))
    props.put("sasl.jaas.config", ConfigUtil.get("sasl.jaas.config"))
    */
    props
  }

  /** Broadcasts a lazily initialised [[KafkaSink]] to all executors. */
  def getKafkaProducer(sc: SparkContext, broker: String): Broadcast[KafkaSink] = {
    sc.broadcast(KafkaSink[String, String](broker))
  }
}
