package mylab.starters.mq.impl.kafka.loop;

import com.google.common.base.Charsets;
import lombok.extern.slf4j.Slf4j;
import mylab.starters.mq.api.Message;
import mylab.starters.mq.api.MessageListener;
import mylab.starters.mq.config.MqConst;
import mylab.utils.common.ThreadUtil;
import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.header.Header;

import java.time.Duration;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * ### KafkaConsumer is not safe for multi-threaded access
 * org.apache.kafka.clients.consumer.KafkaConsumer
 * <p>
 * ### 自动提交：消费者会自动把从 poll() 方法接收到的 最大 偏移量提交上去。
 * 参数：
 * enable.auto.comnit=true：在消费者close()的时候也会自动提交
 * auto.commit.interval.ms=默认5s，没过5秒就会提交偏移量，但是在4秒发生了分区在均衡，偏移量还没来得及提交，他们这四秒的消息就会被重复消费
 * 问题：自动提交虽然方便 , 但是很明显是一种基于时间提交的方式 , 不过并没有为我们留有余地来避免重复处理消息。
 * ###
 * <p>
 * ### 异步提交commitAsync
 * 1、commitAsync()不会重试提交偏移量，重试提交可能会导致重复消费
 * 2、commitAsync()也支持回调,在 broker 作出响应时会执行回调。回调经常被用于记录提交错误或生成度量指标。
 * <p>
 * <p>
 * ### 手动提交（同步提交）commitsync：使用commitsync()提交poll()返回最新偏移量
 * 参数：
 * auto.commit.offset = false
 * 1、处理完业务之后，一定要手动调用commitsync（）
 * 2、如果发生了在均衡，由于当前commitsync偏移量还未提交，所以消息会被重复消费
 * 3、commitsync会阻塞直到提交成功
 */
@Slf4j
public class PullLoop implements Runnable {

    static final Duration DURATION = Duration.ofMillis(10);

    KafkaConsumer<String, String> consumer;
    MessageListener listener;
    String[] tags;

    AtomicBoolean shutdown;
    CountDownLatch shutdownLatch;
    ExecutorService executorService;

    public PullLoop(KafkaConsumer<String, String> consumer, MessageListener listener, String... tags) {
        assert consumer != null;
        assert listener != null;
        this.consumer = consumer;
        this.listener = listener;
        this.tags = tags;
    }

    public void start() {
        this.shutdown = new AtomicBoolean(false);
        this.shutdownLatch = new CountDownLatch(1);

        executorService = ThreadUtil.newExecutor();
        executorService.execute(this);
    }

    public void stop() {
        try {
            shutdown.set(true);
            shutdownLatch.await();
            executorService.shutdown();
        } catch (Exception e) {
            onError(e);
        } finally {

            consumer.unsubscribe();
            consumer.close();
            consumer = null;

            shutdown = null;
            shutdownLatch = null;
            executorService = null;
        }
    }

    @Override
    public void run() {
        try {
            while (!shutdown.get()) {
                ConsumerRecords<String, String> records = consumer.poll(DURATION);
                //处理收到的消息
                records.forEach(record -> {
                    System.out.println("### ConsumerRecord: " + record);
                    onRecord(record);
                });

                //避免等待，异步提交
                consumer.commitAsync();
            }
        } catch (CommitFailedException e) {
            log.error("kakfa消息确认提交失败");
            e.printStackTrace();
            onError(e);
        } catch (Exception e) {
            log.error("kakfa消费异常");
            onError(e);
        } finally {
            //在消费者关闭前一般会组合使用 commitAsync() 和 commitsync()
            //同步一定会提交成功，异步可能会失败
            try {
                //最后一次提交，确保成功，同步提交
                consumer.commitSync();
            } finally {
                consumer.close();
                shutdownLatch.countDown();
            }
        }
    }

    void onError(Throwable t) {
        listener.onFailure(t);
    }


    void onRecord(ConsumerRecord<String, String> record) {
        log.debug(String.format("消费消息：topic=%s, partition=%d, offset=%d, key=%s, value=%s\n",
                record.topic(), record.partition(), record.offset(), record.key(), record.value()));

        String tag = getTagValue(record);
        Message message = new Message(record.topic(), record.value());
        message.setKey(record.key());
        message.setTag(tag);
        listener.onSuccess(message);
    }

    String getTagValue(ConsumerRecord<String, String> record) {
        Header header = record.headers().lastHeader(MqConst.KAFKA_MESSAGE_TAG);
        if (header != null && header.value() != null) {
            return new String(header.value(), Charsets.UTF_8);
        }
        return null;
    }


}