package com.qupeng.demo.kafka.kafkaapache.transaction;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.*;

public class KafkaTransaction<K, V> {

    private final static Logger logger = LoggerFactory.getLogger(KafkaTransaction.class);

    // Single source of truth for the transactional.id, so the producer config
    // and the fencing error message can never drift apart.
    private static final String TRANSACTIONAL_ID = "your_transactional_id";

    /**
     * Runs a transactional consume-transform-produce loop: reads from
     * {@code input_topic}, transforms each record with {@link #processMessage},
     * writes the result to {@code output_topic}, and commits the consumed
     * offsets inside the same Kafka transaction (exactly-once semantics,
     * paired with {@code isolation.level=read_committed} on downstream
     * consumers).
     *
     * <p>NOTE(review): despite its name, this method never returns a producer —
     * it blocks in an infinite poll loop and only exits by throwing. The name
     * and signature are kept for backward compatibility with existing callers.
     *
     * @throws KafkaException on consumer wakeup (shutdown request) or when this
     *                        producer is fenced by another instance using the
     *                        same {@code transactional.id}
     */
    public <K, V> KafkaProducer<K, V> createKafkaProducer() {
        Properties producerProps = new Properties();
        producerProps.put("bootstrap.servers", "your_broker_list");
        producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producerProps.put("transactional.id", TRANSACTIONAL_ID);

        Producer<String, String> producer = new KafkaProducer<>(producerProps);

        Properties consumerProps = new Properties();
        consumerProps.put("bootstrap.servers", "your_broker_list");
        consumerProps.put("group.id", "your_group_id");
        // Offsets are committed only through producer.sendOffsetsToTransaction.
        consumerProps.put("enable.auto.commit", "false");
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.put("isolation.level", "read_committed");

        Consumer<String, String> consumer = new KafkaConsumer<>(consumerProps);

        // FIX: the original closed both clients in a finally block INSIDE the
        // poll loop, so the second iteration polled an already-closed consumer.
        // Close exactly once, when the loop exits (normally or by throwing).
        try {
            producer.initTransactions();
            consumer.subscribe(Collections.singletonList("input_topic"));
            while (true) {
                ConsumerRecords<String, String> records = null;
                // Tracks whether beginTransaction() succeeded, so the error
                // path never calls abortTransaction() without an open
                // transaction (that would throw IllegalStateException).
                boolean inTransaction = false;
                try {
                    records = consumer.poll(Duration.ofMillis(100));
                    if (!records.isEmpty()) {
                        producer.beginTransaction();
                        inTransaction = true;
                        for (ConsumerRecord<String, String> record : records) {
                            // Transform the message.
                            String processedValue = processMessage(record.value());
                            // Forward the transformed message to the output topic.
                            producer.send(new ProducerRecord<>("output_topic", record.key(), processedValue));
                        }
                        // Commit the consumed offsets atomically with the
                        // produced records, in the same transaction.
                        producer.sendOffsetsToTransaction(consumerOffsets(records), consumer.groupMetadata());
                        producer.commitTransaction();
                        inTransaction = false;
                    }
                } catch (WakeupException e) {
                    // External shutdown request; propagate with the cause attached.
                    throw new KafkaException("Consumer wakeup requested, shutting down.", e);
                } catch (ProducerFencedException | InvalidProducerEpochException e) {
                    // This instance has become a zombie (fenced by a newer
                    // producer with the same transactional.id) — it must exit.
                    throw new KafkaException(
                            String.format("The transactional.id %s is used by another process.", TRANSACTIONAL_ID), e);
                } catch (KafkaException e) {
                    // Any other Kafka error: abort the open transaction (if
                    // any), rewind to the last committed positions, and retry.
                    logger.warn("Transaction failed; aborting and retrying.", e);
                    if (inTransaction) {
                        producer.abortTransaction();
                    }
                    resetToLatestCommittedPositions(consumer, records);
                }
            }
        } finally {
            producer.close();
            consumer.close();
        }
    }

    /**
     * Rewinds the consumer to the last committed position of every assigned
     * partition so an aborted batch is re-consumed on the next poll.
     *
     * <p>FIX: the original seeked to the first offset of the polled batch,
     * which (a) NPEs when {@code poll} itself failed ({@code records == null})
     * and (b) does not match the method's contract of "latest committed
     * positions". This version asks the broker for the committed offsets.
     *
     * @param consumer the consumer to rewind
     * @param records  the batch being retried; may be {@code null} when the
     *                 failure happened during {@code poll} (kept in the
     *                 signature for backward compatibility)
     */
    private void resetToLatestCommittedPositions(Consumer<String, String> consumer, ConsumerRecords<String, String> records) {
        Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(consumer.assignment());
        for (TopicPartition partition : consumer.assignment()) {
            OffsetAndMetadata offset = committed.get(partition);
            if (offset != null) {
                consumer.seek(partition, offset.offset());
            } else {
                // Nothing committed yet for this partition: restart from the beginning.
                consumer.seekToBeginning(Collections.singleton(partition));
            }
        }
    }

    /**
     * Builds the per-partition offsets to commit for the given batch.
     *
     * <p>FIX: the committed offset must be the NEXT offset to read, i.e. the
     * last consumed offset + 1. The original committed the last consumed
     * offset itself, replaying one record per partition after every commit.
     *
     * @param records the batch whose offsets are being committed
     * @return a map of partition → offset of the next record to consume
     */
    private Map<TopicPartition, OffsetAndMetadata> consumerOffsets(ConsumerRecords<String, String> records) {
        Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMap = new HashMap<>();
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            offsetAndMetadataMap.put(partition, new OffsetAndMetadata(lastConsumedOffset + 1));
        }

        return offsetAndMetadataMap;
    }

    /**
     * Placeholder transformation applied to each consumed value.
     *
     * @param value the raw message value (currently ignored)
     * @return the transformed message
     */
    private String processMessage(String value) {
        return "Message has been handled.";
    }
}
