package com.kafka.sync2;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/**
 * Mirrors messages from a source Kafka topic to a target Kafka topic,
 * optionally appending every forwarded message value to a local backup file.
 *
 * @author quan.xiong
 * @since 2020/10/23 15:28
 */
public class KafkaSync {

    /**
     * Logger. Static but deliberately non-final: it must be created in
     * {@link #initLog()} only after the "base.dir" and "name" system
     * properties (consumed by the logging configuration) have been set.
     */
    private static Logger logger;

    /**
     * Application configuration properties (keys defined by {@code Config}).
     */
    private final Properties config;

    /**
     * Kafka consumer reading from the source cluster.
     */
    private KafkaConsumer<String, String> consumer;

    /**
     * Kafka producer writing to the target cluster.
     */
    private KafkaProducer<String, String> producer;

    /**
     * Optional writer appending each synced message value to a backup file;
     * {@code null} when no backup file is configured.
     */
    private BufferedWriter writer;

    /**
     * Creates the synchronizer and eagerly initializes logging, the consumer,
     * the producer and (when configured) the backup file writer.
     *
     * @param config configuration containing the source/target Kafka settings
     *               referenced by {@code Config}
     */
    public KafkaSync(Properties config) {
        this.config = config;
        init();
    }

    /**
     * Initializes all collaborators. Logging must come first because the
     * other init steps log their progress.
     *
     * @throws RuntimeException if the configured backup file cannot be opened
     */
    private void init() {
        initLog();
        initConsumer();
        initProducer();
        if (config.containsKey(Config.FILE_BACKUP.value())) {
            String filename = config.getProperty(Config.FILE_BACKUP.value());
            if (filename != null && !filename.trim().isEmpty()) {
                try {
                    // Append mode so restarts do not truncate earlier backups.
                    writer = new BufferedWriter(new FileWriter(filename, true));
                } catch (IOException e) {
                    logger.error("文件打开失败", e);
                    // Preserve the original cause instead of throwing a bare RuntimeException.
                    throw new RuntimeException("文件打开失败: " + filename, e);
                }
            }
        }
    }

    /**
     * Sets the system properties consumed by the logging configuration and
     * only then creates the logger.
     */
    private void initLog() {
        File programPath = ClassUtils.getCurrentProgramPath();
        String path = programPath.getPath();
        // Logger does not exist yet at this point, so print the resolved base dir directly.
        System.out.println(path);
        System.setProperty("base.dir", path);
        System.setProperty("name", config.getProperty(Config.NAME.value()));
        logger = LoggerFactory.getLogger(KafkaSync.class);
    }

    /**
     * Initializes the producer for the target cluster, enabling
     * SASL/Kerberos authentication when the target auth flag is configured.
     */
    private void initProducer() {
        Properties properties = new Properties();
        // Kerberos (krb5) authentication for the target cluster, when configured.
        if (config.containsKey(Config.T_AUTH.value())) {
            System.setProperty("java.security.krb5.conf", config.getProperty(Config.T_KRB5_CONF.value()));
            System.setProperty("java.security.auth.login.config", config.getProperty(Config.T_JAAS_CONF.value()));
            // Security protocol and Kerberos service name required for SASL.
            properties.put("security.protocol", "SASL_PLAINTEXT");
            properties.put("sasl.kerberos.service.name", "kafka");
        }

        // Target cluster connection.
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, config.getProperty(Config.T_KAFKA_BROKE_URL.value()));

        // Key and value serialization.
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        // Maximum time a batch may linger before being sent.
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 50);

        producer = new KafkaProducer<>(properties);
        logger.info("kafkaProducer init success!");
    }

    /**
     * Initializes the consumer for the source cluster, enabling
     * SASL/Kerberos authentication when the source auth flag is configured.
     */
    private void initConsumer() {
        Properties properties = new Properties();
        // Kerberos (krb5) authentication for the source cluster, when configured.
        if (config.containsKey(Config.S_AUTH.value())) {
            System.setProperty("java.security.krb5.conf", config.getProperty(Config.S_KRB5_CONF.value()));
            System.setProperty("java.security.auth.login.config", config.getProperty(Config.S_JAAS_CONF.value()));
            // Security protocol and Kerberos service name required for SASL.
            properties.put("security.protocol", "SASL_PLAINTEXT");
            properties.put("sasl.kerberos.service.name", "kafka");
        }

        // Source cluster connection.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.getProperty(Config.S_KAFKA_BROKE_URL.value()));

        // Key and value deserialization.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        // Offsets are committed automatically every second.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");

        // Start from the earliest offset when no committed offset exists.
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // Consumer group id.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, config.getProperty(Config.S_KAFKA_CONSUMER_GROUP.value()));
        // Upper bound on the number of records returned by a single poll().
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);

        consumer = new KafkaConsumer<>(properties);
        logger.info("kafkaConsumer init success!");
    }

    /**
     * Runs the synchronization loop forever: polls the source topic, forwards
     * each record to the target topic and optionally appends the value to the
     * backup file. Never returns under normal operation.
     */
    public void sync() {
        // Target topic is constant for the lifetime of the loop; resolve it once.
        String targetTopic = config.getProperty(Config.T_KAFKA_TOPIC.value());
        // Subscribe to the source topic.
        consumer.subscribe(Collections.singletonList(config.getProperty(Config.S_KAFKA_TOPIC.value())));
        while (true) {
            // Poll waits at most 1s and may return up to max.poll.records messages.
            ConsumerRecords<String, String> polls = consumer.poll(Duration.ofMillis(1000));
            if (polls.isEmpty()) {
                continue;
            }
            for (ConsumerRecord<String, String> poll : polls) {
                // Not named "record": that is a contextual keyword since Java 16.
                ProducerRecord<String, String> producerRecord = new ProducerRecord<>(targetTopic
                        , poll.key(), poll.value());
                // Asynchronous send; log delivery failures instead of silently dropping them.
                producer.send(producerRecord, (metadata, exception) -> {
                    if (exception != null) {
                        logger.error("消息发送失败: {}", producerRecord.value(), exception);
                    }
                });
                if (writer != null) {
                    try {
                        writer.write(poll.value());
                        writer.newLine();
                    } catch (IOException e) {
                        logger.error("记录写入失败: {}", producerRecord.value(), e);
                    }
                }
            }
            if (writer != null) {
                try {
                    // Flush once per batch so the backup survives a process crash.
                    writer.flush();
                } catch (IOException e) {
                    logger.error("备份文件刷新失败", e);
                }
            }
            logger.info("同步记录数: {}", polls.count());
        }
    }

    /**
     * Releases the consumer, the producer and the backup writer. Intended to
     * be called from a JVM shutdown hook; safe to call even if {@link #sync()}
     * never ran.
     */
    public void close() {
        if (consumer != null) {
            consumer.close();
        }
        if (producer != null) {
            // Flushes any buffered records before closing.
            producer.close();
        }
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                logger.error("备份文件关闭失败", e);
            }
        }
    }

}
