package com.alibaba.otter.node.etl.load.loader.mq.producer;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.function.BiConsumer;

/**
 * Kafka implementation of {@link MQProducer}.
 *
 * <p>Wraps a single {@link org.apache.kafka.clients.producer.KafkaProducer} configured from
 * {@link MQProperties}. Sends are synchronous from the caller's point of view: every send path
 * flushes and waits on the returned future so that broker-side failures surface to the caller
 * (rollback / {@code false} return) instead of being lost in the async pipeline.
 *
 * <p>Not thread-safe with respect to {@link #init(MQProperties)} / {@link #stop()}; the
 * underlying Kafka producer itself is thread-safe for sends.
 */
public class KafkaProducer implements MQProducer {

    private static final Logger logger = LoggerFactory.getLogger(KafkaProducer.class);

    /** Underlying Kafka client; created in {@link #init(MQProperties)}, closed in {@link #stop()}. */
    private Producer<String, String> producer;

    private MQProperties kafkaProperties;

    /**
     * Message-map key naming the destination topic.
     */
    public final static String TOPIC_NAME = "topic";

    /**
     * Message-map key naming the destination partition.
     */
    public final static String PARTITION = "partition";

    /**
     * Message-map key naming the record key.
     */
    public final static String KEY = "key";

    /**
     * Builds the Kafka producer from the supplied properties and, when enabled, wires up
     * Kerberos (SASL_PLAINTEXT) authentication via JVM system properties.
     *
     * @param kafkaProperties connection, batching and security settings
     * @throws RuntimeException if Kerberos is enabled but the krb5/jaas files do not exist
     */
    @Override
    public void init(MQProperties kafkaProperties) {

        this.kafkaProperties = kafkaProperties;
        Properties properties = new Properties();
        // Kafka bootstrap servers
        properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getServers());
        // acknowledgement level
        properties.put(ProducerConfig.ACKS_CONFIG, kafkaProperties.getAcks());
        // compression: none/gzip/snappy
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, kafkaProperties.getCompressionType());
        // batch size per send
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, kafkaProperties.getBatchSize());
        // linger time, lets records accumulate for batched sends
        properties.put(ProducerConfig.LINGER_MS_CONFIG, kafkaProperties.getLingerMs());
        // maximum size of a single request
        properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, kafkaProperties.getMaxRequestSize());
        // total memory available for buffering unsent records
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaProperties.getBufferMemory());
        // key serializer
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // value serializer
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // capped at 1 so that retries cannot reorder records within a partition
        properties.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
        // retry count
        properties.put(ProducerConfig.RETRIES_CONFIG, kafkaProperties.getRetries());

        // user-supplied overrides win over the defaults above
        if (!kafkaProperties.getProperties().isEmpty()) {
            properties.putAll(kafkaProperties.getProperties());
        }
        if (kafkaProperties.isKerberosEnable()) {
            File krb5File = new File(kafkaProperties.getKerberosKrb5FilePath());
            File jaasFile = new File(kafkaProperties.getKerberosJaasFilePath());
            if (krb5File.exists() && jaasFile.exists()) {
                // Kerberos configuration requires absolute paths.
                // NOTE(review): these are JVM-global system properties — they affect every
                // Kerberos client in this process, not just this producer.
                System.setProperty("java.security.krb5.conf", krb5File.getAbsolutePath());
                System.setProperty("java.security.auth.login.config", jaasFile.getAbsolutePath());
                System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
                properties.put("security.protocol", "SASL_PLAINTEXT");
                properties.put("sasl.kerberos.service.name", "kafka");
            } else {
                String errorMsg = "ERROR # The kafka kerberos configuration file does not exist! please check it";
                logger.error(errorMsg);
                throw new RuntimeException(errorMsg);
            }
        }
        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(properties);
    }

    /**
     * Closes the underlying producer, swallowing (but logging) any shutdown failure.
     */
    @Override
    public void stop() {
        try {
            logger.info("## stop the kafka producer");
            if (producer != null) {
                producer.close();
            }
        } catch (Throwable e) {
            logger.warn("##something goes wrong when stopping kafka producer:", e);
        } finally {
            logger.info("## kafka producer is down.");
        }
    }

    /**
     * Sends one message to the configured default topic/partition, committing the callback on
     * success and rolling it back on any failure.
     *
     * @param message  payload, serialized to JSON
     * @param callback transaction hook invoked exactly once (commit or rollback)
     */
    @Override
    public void send(Map<String, Object> message, Callback callback) {
        try {
            doSend(kafkaProperties.getPartition(), null, kafkaProperties.getDefaultTopic(), message);
            callback.commit();
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
            callback.rollback();
        }
    }

    /**
     * Sends one message whose routing (topic, partition, key) is carried inside the map itself
     * under {@link #TOPIC_NAME}, {@link #PARTITION} and {@link #KEY}.
     *
     * @param message payload plus routing entries; serialized to JSON as-is
     * @return {@code true} on success, {@code false} on any failure (already logged)
     */
    @Override
    public boolean send(Map<String, Object> message) {

        try {
            Integer partition = null;
            String key = null;
            // dynamic topic taken from the message itself
            String topic = (String) message.get(TOPIC_NAME);
            if (message.containsKey(PARTITION)) {
                partition = (Integer) message.get(PARTITION);
            }
            if (message.containsKey(KEY)) {
                key = (String) message.get(KEY);
            }
            // Valid characters are '-', '.', '_'.
            doSend(partition, key, topic, message);
            return true;
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
            return false;
        }
    }

    /**
     * Sends one message with explicit routing.
     *
     * @return {@code true} on success, {@code false} on any failure (already logged)
     */
    @Override
    public boolean send(Integer partition, String key, String topic, Map<String, Object> data) {
        try {
            doSend(partition, key, topic, data);
            return true;
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
            return false;
        }
    }

    /**
     * Single-record send: serializes the message to JSON (keeping null values), sends, flushes
     * and waits for the broker acknowledgement.
     *
     * @throws RuntimeException wrapping any send/flush failure so callers can roll back
     */
    private void doSend(Integer partition, String key, String topicName, Map<String, Object> message) {

        ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topicName, partition, key,
                JSON.toJSONString(message, SerializerFeature.WriteMapNullValue));
        Future<RecordMetadata> future;
        try {
            // Asynchronous send; records were already merged per partition during partition
            // hashing upstream, so in-partition ordering needs no extra handling here.
            future = producer.send(producerRecord);
            if (logger.isDebugEnabled()) {
                logger.debug("Send  message to kafka topic: [{}], packet: {}", topicName, producerRecord);
            }
            // flush itself may fail asynchronously; block on the future so that a broker-side
            // failure surfaces here and triggers the caller's rollback.
            producer.flush();
            future.get();
        } catch (InterruptedException e) {
            // restore the interrupt flag for callers before converting to unchecked
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Batch send with explicit routing; reports per-record success/failure through
     * {@code biConsumer(recordIndex, success)}.
     *
     * @return {@code true} if all records were dispatched (individual failures are reported via
     *         the consumer), {@code false} only if building/dispatching the batch itself failed
     */
    @Override
    public boolean send(Integer partition, String key, String topic, List<Map<String, Object>> messages,
                        BiConsumer<Integer, Boolean> biConsumer) {
        try {
            List<ProducerRecord<String, String>> batchData = new ArrayList<>(messages.size());
            for (Map<String, Object> message : messages) {
                ProducerRecord<String, String> producerRecord = new ProducerRecord<>(topic, partition, key,
                        JSON.toJSONString(message, SerializerFeature.WriteMapNullValue));
                batchData.add(producerRecord);
            }
            doSend(topic, batchData, biConsumer);
            return true;
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
            return false;
        }
    }

    /**
     * Dispatches a prepared batch, flushes, then waits on every future, reporting each record's
     * outcome to {@code biConsumer} in order.
     */
    private void doSend(String topicName, List<ProducerRecord<String, String>> records,
                        BiConsumer<Integer, Boolean> biConsumer) {

        List<Future<RecordMetadata>> futures = new ArrayList<>(records.size());
        try {
            // Asynchronous sends; records were already merged per partition during partition
            // hashing upstream, so in-partition ordering needs no extra handling here.
            for (ProducerRecord<String, String> record : records) {
                futures.add(producer.send(record));
            }
        } finally {
            if (logger.isDebugEnabled()) {
                for (ProducerRecord<String, String> record : records) {
                    logger.debug("Send  message to kafka topic: [{}], packet: {}", topicName, record);
                }
            }
            // flush whatever was accepted, even if a send call above threw
            producer.flush();
        }
        // flush may also fail asynchronously; check every future and report failures so the
        // caller can roll back the affected records.
        for (int i = 0; i < futures.size(); i++) {
            try {
                futures.get(i).get();
                biConsumer.accept(i, true);
            } catch (InterruptedException e) {
                // restore the interrupt flag; keep reporting remaining records as failed/ok
                Thread.currentThread().interrupt();
                logger.error("batch send error", e);
                biConsumer.accept(i, false);
            } catch (ExecutionException e) {
                logger.error("batch send error", e);
                biConsumer.accept(i, false);
            }
        }
    }
}
