package com.baishancloud.log.common.connector.kafka

import com.baishancloud.log.common.util.ParamUtil
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaProducer, KafkaSerializationSchema}
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}

import java.io.Serializable
import java.nio.charset.StandardCharsets
import java.util.{Objects, Properties}
import java.{lang, util}
import scala.beans.BeanProperty

/**
 * Parameter object that builds Kafka sink connectors (Flink Kafka producers).
 *
 * @author ziqiang.wang
 * @date 2021/10/18 17:37
 */
class KafkaSinkConnector extends Serializable {

  /**
   * Required.
   * Kafka bootstrap servers, e.g. "broker1:9092,broker2:9092".
   */
  @BeanProperty var bootstrapServers: String = ""
  /**
   * Required.
   * Name of the destination topic.
   */
  @BeanProperty var topic: String = ""
  /**
   * Delivery semantic, defaults to exactly-once.<br>
   * NONE: no guarantee — records may be lost or duplicated.<br>
   * EXACTLY_ONCE: relies on checkpointing being enabled and uses Kafka transactions,
   * so it costs some write throughput.<br>
   * AT_LEAST_ONCE: records may be duplicated but are never lost.<br>
   */
  @BeanProperty var semantic: FlinkKafkaProducer.Semantic = FlinkKafkaProducer.Semantic.EXACTLY_ONCE
  /**
   * Transaction timeout, stored in milliseconds; defaults to 15 minutes.
   * Must not be set higher than this. (The companion builder reads the
   * command-line value in minutes and converts it.)
   */
  @BeanProperty var transactionMaxTimeoutMs: Long = 15 * 60 * 1000
  /**
   * Per-partition producer batch buffer, stored in bytes; defaults to 5 MiB.
   * Do not set it lower, otherwise exactly-once writes may fail when the
   * total size of a send grows too large.
   */
  @BeanProperty var batchSize: Long = 5 * 1024 * 1024
  /**
   * Maximum producer request size, stored in bytes; defaults to 6 MiB.
   * Must not be smaller than batch.size.
   */
  @BeanProperty var maxRequestSize: Long = 6 * 1024 * 1024
  /**
   * How many replica acknowledgements count as a successful write; defaults
   * to -1 (all replicas). Must be -1 when exactly-once writing is enabled.
   */
  @BeanProperty var acks: Int = -1
  /**
   * Producer compression codec. The brokers currently leave compression to the
   * producer; an empty string (the default) means no compression is configured.
   */
  @BeanProperty var compressionType: String = ""
  /**
   * Holds the assembled producer configuration.
   */
  val properties: Properties = new Properties()

  /**
   * Copies the configured fields into the internal properties object and
   * returns this parameter-object instance.
   */
  def build(): KafkaSinkConnector = {
    val settings = Seq(
      ProducerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
      ProducerConfig.TRANSACTION_TIMEOUT_CONFIG -> transactionMaxTimeoutMs.toString,
      ProducerConfig.BATCH_SIZE_CONFIG -> batchSize.toString,
      ProducerConfig.MAX_REQUEST_SIZE_CONFIG -> maxRequestSize.toString,
      ProducerConfig.ACKS_CONFIG -> acks.toString
    )
    settings.foreach { case (key, value) => properties.setProperty(key, value) }
    // Compression is only configured when explicitly requested.
    if (compressionType.nonEmpty) properties.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType)
    this
  }


  /**
   * Builds a sink that writes value-only records, UTF-8 encoded.
   *
   * @return kafka sink writing only the value
   */
  def sinkOnlyValue(): FlinkKafkaProducer[String] =
    newProducer(new KafkaSerializationSchema[String] {
      override def serialize(value: String, timestamp: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] =
        new ProducerRecord[Array[Byte], Array[Byte]](topic, value.getBytes(StandardCharsets.UTF_8))
    })

  /**
   * Builds a sink that writes (key, value) records, UTF-8 encoded.
   *
   * @return kafka sink writing key and value
   */
  def sinkKeyValue(): FlinkKafkaProducer[(String, String)] =
    newProducer(new KafkaSerializationSchema[(String, String)] {
      override def serialize(pair: (String, String), timestamp: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] = {
        val (key, value) = pair
        new ProducerRecord[Array[Byte], Array[Byte]](topic, key.getBytes(StandardCharsets.UTF_8), value.getBytes(StandardCharsets.UTF_8))
      }
    })

  /**
   * Builds a sink accepting fully-formed producer records.<br>
   * Note: both K and V of ProducerRecord are fixed to Array[Byte]; that is
   * hard-coded inside FlinkKafkaProducer.
   *
   * @return kafka sink writing arbitrary producer records
   */
  def sinkProducerRecord(): FlinkKafkaProducer[ProducerRecord[Array[Byte], Array[Byte]]] =
    newProducer(new KafkaSerializationSchema[ProducerRecord[Array[Byte], Array[Byte]]] {
      override def serialize(record: ProducerRecord[Array[Byte], Array[Byte]], timestamp: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] =
        record
    })

  /** Assembles a producer from the configured topic, properties and delivery semantic. */
  private def newProducer[T](schema: KafkaSerializationSchema[T]): FlinkKafkaProducer[T] =
    new FlinkKafkaProducer[T](topic, schema, properties, semantic)

}


object KafkaSinkConnector extends Serializable {

  /**
   * Builds a parameter object from the program arguments.
   *
   * @param parameterTool main-class parameter object
   * @param number        numeric suffix distinguishing multiple connectors ("" for none)
   * @return a populated [[KafkaSinkConnector]] (call build() afterwards to fill its properties)
   * @throws IllegalArgumentException when a required parameter is missing
   */
  def builder(parameterTool: ParameterTool, number: String = ""): KafkaSinkConnector = {
    val connector: KafkaSinkConnector = new KafkaSinkConnector()

    // Resolves the connector-specific (suffixed) name of a parameter.
    def nameOf(param: String): String = ParamUtil.paramName(param, number)

    // Fetches a mandatory parameter or fails with a descriptive error.
    def mandatory(param: String): String = {
      if (!parameterTool.has(nameOf(param))) {
        throw new IllegalArgumentException(s"未设置  --${param}  参数，可能是在sink后面加上了数字，但是初始化时没指定数字？")
      }
      parameterTool.get(nameOf(param))
    }

    connector.setBootstrapServers(mandatory(sinkBootstrapServersP))
    connector.setTopic(mandatory(sinkTopicP))

    if (parameterTool.has(nameOf(sinkSemanticP))) {
      // Read the value once instead of re-fetching it for every comparison.
      val semantic: String = parameterTool.get(nameOf(sinkSemanticP)).toLowerCase()
      if (Objects.equals(semantic, semanticNone)) {
        connector.setSemantic(FlinkKafkaProducer.Semantic.NONE)
      } else if (Objects.equals(semantic, semanticExactlyOnce)) {
        connector.setSemantic(FlinkKafkaProducer.Semantic.EXACTLY_ONCE)
      } else if (Objects.equals(semantic, semanticAtLeastOnce)) {
        connector.setSemantic(FlinkKafkaProducer.Semantic.AT_LEAST_ONCE)
      }
      // Any other value silently keeps the EXACTLY_ONCE default (original behavior).
    }

    // Minutes on the command line, milliseconds internally.
    if (parameterTool.has(nameOf(sinkTransactionMaxTimeoutMsP))) {
      connector.setTransactionMaxTimeoutMs(parameterTool.getLong(nameOf(sinkTransactionMaxTimeoutMsP)) * 60 * 1000)
    }

    // MiB on the command line, bytes internally.
    if (parameterTool.has(nameOf(sinkBatchSize))) {
      connector.setBatchSize(parameterTool.getLong(nameOf(sinkBatchSize)) * 1024 * 1024)
    }

    if (parameterTool.has(nameOf(sinkMaxRequestSizeP))) {
      connector.setMaxRequestSize(parameterTool.getLong(nameOf(sinkMaxRequestSizeP)) * 1024 * 1024)
    }

    if (parameterTool.has(nameOf(sinkAcks))) {
      connector.setAcks(parameterTool.getInt(nameOf(sinkAcks)))
    }

    if (parameterTool.has(nameOf(sinkCompressionTypeP))) {
      connector.setCompressionType(parameterTool.get(nameOf(sinkCompressionTypeP)))
    }

    // Pass-through tuning parameters: --sink{number}.other.<kafka.key> <value>.
    // The prefix must match this connector exactly. The previous check
    // (key.contains(".other.") && key.startsWith("sink" + number)) also captured
    // other connectors' keys — e.g. "sink1.other.x" when number is "", or
    // "sink10.other.x" when number is "1" — leaking properties between connectors.
    val otherPrefix: String = s"sink$number.other."
    parameterTool.toMap.entrySet().forEach((entry: util.Map.Entry[String, String]) => {
      val key: String = entry.getKey
      if (key.startsWith(otherPrefix)) {
        connector.properties.setProperty(key.substring(otherPrefix.length), parameterTool.get(key))
      }
    })

    connector
  }

}
