package com.sbp.message.consumer;

import com.sbp.message.api.service.MessageSysInfoService;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

/**
 * Created by wangmin on 2019/4/19.
 */
/**
 * Consumer thread that polls a single real Kafka topic on behalf of one (group, topic)
 * pair, commits offsets BEFORE processing (at-most-once delivery), verifies the commit
 * landed, and then hands each message to the {@link MessageConsumer}.
 *
 * <p>Thread-safety: {@link KafkaConsumer} is not thread-safe. All consumer calls except
 * {@code wakeup()} happen on this thread inside {@link #run()}; {@link #shutdown()} may
 * be invoked from any thread and only flips the run flag and calls {@code wakeup()}.
 * The consumer is closed exactly once, in {@code run()}'s finally block.
 */
public class KafkaExclusiveConsumerThread extends KafkaBaseConsumerThread {
    private static final Logger logger = LoggerFactory.getLogger(KafkaExclusiveConsumerThread.class);

    private final MessageConsumer messageConsumer;
    private final String group;
    private final String topic;
    private final KafkaConsumer<String, String> kafkaConsumer;

    /**
     * @param messageConsumer callback that receives each message payload
     * @param group           logical group name; mapped to the real Kafka group id
     * @param kafkaProperties base consumer properties; mutated here to set {@code group.id}
     */
    public KafkaExclusiveConsumerThread(MessageConsumer messageConsumer, String group, String topic,
            Properties kafkaProperties) {
        this.messageConsumer = messageConsumer;
        this.group = group;
        this.topic = topic;

        kafkaProperties.put("group.id", MessageSysInfoService.generateKafkaRealGroupId(group));
        this.kafkaConsumer = new KafkaConsumer<>(kafkaProperties);
    }

    @Override
    public void run() {
        try {
            kafkaConsumer.subscribe(
                    Collections.singleton(MessageSysInfoService.generateKafkaRealTopicName(topic)));

            while (isRun) {
                try {
                    // Long-poll; an empty result just loops back to re-check isRun.
                    ConsumerRecords<String, String> records = kafkaConsumer.poll(10000);
                    if (records.isEmpty()) {
                        continue;
                    }
                    // At-most-once semantics: commit the whole batch before processing, so a
                    // crash during processing skips messages rather than redelivering them.
                    kafkaConsumer.commitSync();

                    // Process EVERY polled record. (The previous code consumed only the first
                    // record of the batch while the commit above covered all of them, silently
                    // dropping the rest whenever poll() returned more than one record —
                    // presumably it relied on max.poll.records=1; this no longer matters.)
                    for (ConsumerRecord<String, String> record : records) {
                        if (!isRun) {
                            break;
                        }

                        // Verify the commit actually advanced past this record; skip it otherwise.
                        TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
                        OffsetAndMetadata committedOffsetAndMetadata = kafkaConsumer.committed(topicPartition);
                        // committed() returns null when the group has no committed offset yet.
                        if (committedOffsetAndMetadata == null
                                || committedOffsetAndMetadata.offset() <= record.offset()) {
                            logger.warn("committedOffsetAndMetadata.offset={}, record.offset={}, message={}",
                                        committedOffsetAndMetadata == null
                                                ? null : committedOffsetAndMetadata.offset(),
                                        record.offset(), record.value());
                            continue;
                        }

                        try {
                            messageConsumer.consumeMessage(record.value(), group);
                        } catch (Exception e) {
                            // A bad message must not kill the consumer thread; log and move on.
                            logger.warn("message = {}", record.value());
                            logger.warn("", e);
                        }
                    }
                } catch (WakeupException e) {
                    // Expected: shutdown() called wakeup() to break out of poll()/commitSync().
                    // isRun is already false, so the while-loop exits on the next check.
                } catch (Exception e) {
                    logger.error("", e);
                }
            }
        } finally {
            // Sole close point — run() owns the consumer.
            kafkaConsumer.close();
        }
    }

    @Override
    public String threadName() {
        return "KafkaExclusiveConsumer_" + group + "_" + topic;
    }

    /**
     * Requests this thread to stop. Safe to call from any thread: wakeup() is the only
     * KafkaConsumer method documented as thread-safe. (Closing the consumer here, as the
     * previous code did, raced with poll() on the consumer thread and caused a
     * ConcurrentModificationException plus a double close().)
     */
    @Override
    public void shutdown() {
        isRun = false;
        kafkaConsumer.wakeup();
    }

}

