package com.ex.kafka.consumer;

import com.alibaba.fastjson.JSONObject;
import com.ex.kafka.pojo.User;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Component;

import java.time.Duration;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Kafka consumer that polls {@link User} JSON messages into a bounded in-memory
 * buffer, pausing the subscribed partitions once the buffer reaches its threshold
 * and resuming them when it drains. Offsets are committed manually (auto-commit
 * is disabled) so nothing is acknowledged before it has been buffered.
 *
 * @Author longdeng
 * @Date 2020/3/29 0029 10:05
 */
@Component
@Slf4j
public class MyKafkaConsumer {

    // NOTE: @Value requires the ${...} placeholder syntax; a bare string such as
    // "zhouxl.kafka.bootstrap.servers" injects that literal text, not the property value.
    @Value("${zhouxl.kafka.bootstrap.servers}")
    private String bootstrapServers;

    @Value("${zhouxl.kafka.group.id}")
    private String groupId;

    @Value("${zhouxl.kafka.key.deserializer}")
    private String keyDeserializer;

    @Value("${zhouxl.kafka.value.deserializer}")
    private String valueDeserializer;

    @Value("${zhouxl.kafka.topic}")
    private String topic;

    // Offsets are committed manually; auto-commit stays off.
    private final boolean autoCommitOffset = false;

    private List<PartitionInfo> partitionInfoList = new ArrayList<>();

    private List<TopicPartition> topicPartitionList = new ArrayList<>();

    private org.apache.kafka.clients.consumer.KafkaConsumer<String, String> consumer;

    // Must be an instance field: Spring never injects @Value into static fields,
    // so the previous static declaration was always 0.
    @Value("${zhouxl.kafka.user.max.size}")
    private int userMaxSize;

    // Bounded buffer of parsed users; size is compared against userMaxSize to
    // drive the pause/resume back-pressure logic below.
    private final List<User> userList = new ArrayList<>();

    /**
     * Builds the KafkaConsumer from the injected properties, subscribes to the
     * configured topic and caches its TopicPartitions for pause/resume calls.
     */
    public void initKafkaConsumer() {
        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        // group.id identifies the consumer group this consumer belongs to.
        props.put("group.id", groupId);
        props.put("key.deserializer", keyDeserializer);
        props.put("value.deserializer", valueDeserializer);
        // The real Kafka config key is "enable.auto.commit";
        // "auto.commit.offset" is not a recognized consumer property and was ignored.
        props.put("enable.auto.commit", String.valueOf(autoCommitOffset));
        consumer = new org.apache.kafka.clients.consumer.KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList(topic));
        partitionInfoList = consumer.partitionsFor(topic);
        topicPartitionList = partitionInfoList.stream()
                .map(info -> new TopicPartition(info.topic(), info.partition()))
                .collect(Collectors.toList());
    }

    /**
     * Endless poll loop (runs on an @Async thread). While the buffer is below the
     * threshold, partitions are resumed and each record is parsed into a User and
     * buffered; once the threshold is hit, the current offsets are committed and
     * the partitions are paused until the buffer drains.
     */
    @Async
    public void onMessage() {
        initKafkaConsumer();
        while (true) {
            if (userList.size() < userMaxSize) {    // below threshold: keep fetching
                consumer.resume(topicPartitionList);
            }
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            for (ConsumerRecord<String, String> record : records) {
                Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
                // Commit the offset of the NEXT record to consume, hence the +1.
                currentOffsets.put(
                        new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1, "no metadata")
                );
                if (userList.size() < userMaxSize) {
                    String userStr = record.value();
                    try {
                        // parseObject deserializes the JSON text into a User.
                        // (JSONObject.toJSON on a String returns the String itself,
                        // so the previous cast to User threw ClassCastException.)
                        User user = JSONObject.parseObject(userStr, User.class);
                        userList.add(user);
                    } catch (Exception e) {
                        // Keep the cause: a swallowed exception hid every parse failure.
                        log.warn("{}->{}->{}", "MyKafkaConsumer", "onMessage", "JSON解析对象失败", e);
                    }
                    // No break here: poll() already advanced the position, so skipping
                    // the rest of this batch would silently lose those messages.
                } else {
                    /*
                    提交偏移量 — commit what has been buffered, then apply back-pressure.
                     */
                    consumer.commitAsync(currentOffsets, null);
                    // pause(...) actually suspends fetching; paused() only queries
                    // the set of paused partitions and had no effect.
                    consumer.pause(topicPartitionList);
                }
            }
        }
    }

}
