package transaction;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.*;

/**
 * @Author:RenPu
 * @Date: 2020/3/21 16:17
 * @Version: 1.0
 * @description:消费者与生产者并存的事务
 */
public class ConsumerTransferProducer {

    public static void main(String[] args) {

        KafkaProducer<Integer, String> producer = initProducer();

        KafkaConsumer<Integer, String> consumer = initConsumer();


        //1订阅消费主体
        consumer.subscribe(Arrays.asList("t5"));

        //2初始化事务
        producer.initTransactions();

        while (true){

            //开启kafka事务环境
            producer.beginTransaction();
            try {
                //消费者同步kafka集群的最新的消息数据，同时设置时间间隔
                ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofSeconds(5));
                Map<TopicPartition, OffsetAndMetadata> map = new HashMap<>(20);
                for (ConsumerRecord<Integer, String> record : records) {
                    System.out.println(record.key()+"---->"+record.value());

                    //偏移量+1代表每次提交的，都是当前偏移量的+1的值
                    map.put(new TopicPartition(record.topic(),record.partition()),new OffsetAndMetadata(record.offset()+1));

                    //3消费什么内容，就发布什么内容
                    producer.send(new ProducerRecord<Integer, String>("t6",record.partition(),record.value()));

                }

                //4将事务内消费的偏移量进行提交
                producer.sendOffsetsToTransaction(map,"g1");
                //5提交kafka事务
                producer.commitTransaction();
            } catch (ProducerFencedException e) {
                e.printStackTrace();

                //回滚事务
                producer.abortTransaction();
            }

        }

    }

    /**
     * 初始化生产者方法
     * @return
     */
    public static KafkaProducer<Integer,String> initProducer(){

        //创建配置对象
        Properties properties = new Properties();

        //配置与kafka集群连接的参数
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"node1:9092,node2:9092,node3:9092");

        //配置key序列化器
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);

        //配置value的序列化器
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        //配置批量发送数据的大小的限制 1kb=1024字节
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG,2048);

        //配置批量发送数据的时间限制
        properties.put(ProducerConfig.LINGER_MS_CONFIG,1000);

        //配置幂等性支持
        properties.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);

        //配置ack机制的策略
        properties.put(ProducerConfig.ACKS_CONFIG,"all");

        //配置重试的次数
        properties.put(ProducerConfig.RETRIES_CONFIG,3);

        //配置请求超时时间
        properties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,3000);

        //配置事务的唯一ID值
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, UUID.randomUUID().toString());

        //配置事务的超时时间
        properties.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,3000);

        return new KafkaProducer<Integer, String>(properties);

    }


    /**
     * 初始化消费者方法
     * @return
     */
    public static KafkaConsumer<Integer,String> initConsumer(){

        //创建配置对象
        Properties properties = new Properties();

        //配置与kafka集群连接的参数
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"node1:9092,node2:9092,node3:9092");

        //配置key反序列化器
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);

        //配置value的反序列化器
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        //配置消费者组的唯一标识
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"g1");

        //配置禁用自动提交offerset
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);

        //配置事务的隔离级别
        properties.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");

        return new KafkaConsumer<Integer, String>(properties);






    }

}
