package com.yukefms.message;

import com.yukefms.common.Timer;
import com.yukefms.engine.EngineConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.stereotype.Component;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * @Description
 * @Author Ping
 * @Date 2021/6/25 11:30
 **/

/**
 * Polls a Kafka topic in a dedicated thread and pushes every record value
 * into a shared {@link MessageContainer}. The consumer stops itself when no
 * record has been read for {@code EngineConfig.MAX_WAITING_MS}, or when
 * {@link #stop()} is invoked from another thread.
 */
public class MessageConsumer implements Runnable {
    public static final Logger logger = LogManager.getLogger(MessageConsumer.class);

    /** Kafka consumer subscribed to {@code KafkaConfig.TOPIC_TEST}; closed when run() exits. */
    final KafkaConsumer<String, String> consumer;
    /**
     * Tracks how long the loop has gone without reading any record
     * (for the read-waiting timeout).
     */
    final Timer timer;
    /**
     * The cache receiving every message value read from the topic.
     */
    private final MessageContainer<String> container;
    /**
     * Flag controlling the poll loop. Declared volatile because stop() is
     * expected to be called from a thread other than the one running run();
     * without volatile the polling thread may never observe the change.
     */
    private volatile boolean continue_flag = true;

    /**
     * Creates a consumer subscribed to the configured topic.
     *
     * @param container shared cache that receives every record value read
     */
    public MessageConsumer(MessageContainer<String> container) {
        this.consumer = new KafkaConsumer<String, String>(KafkaConfig.props);
        this.consumer.subscribe(Arrays.asList(KafkaConfig.TOPIC_TEST));
        this.container = container;
        this.timer = new Timer(System.currentTimeMillis(), EngineConfig.MAX_WAITING_MS);
    }

    /**
     * Resets the offsets to the end of each partition, then polls until
     * stopped or until the waiting timer expires. The consumer is always
     * closed on exit so connections and buffers are released.
     */
    @Override
    public void run() {
        try {
            resetOffset();
            logger.info("MessageConsumer started to run and offset was reset.");
            while (continue_flag) {
                // A small positive timeout lets poll() block briefly instead of
                // busy-spinning the CPU when no records are available.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                if (records != null && !records.isEmpty()) {
                    timer.update();
                    for (ConsumerRecord<String, String> record : records) {
                        this.container.add(record.value());
                    }
                } else if (timer.isExpired()) {
                    logger.warn("The message consumer has been STOPPED, since it DID NOT read any record for Maximal_Waiting_times");
                    stop();
                }
            }
        } finally {
            // Release sockets/buffers even if poll() throws.
            consumer.close();
        }
        logger.info("The message consumer was STOPPED successfully.");
    }

    /**
     * Requests the poll loop to terminate; safe to call from another thread.
     */
    public void stop() {
        this.continue_flag = false;
    }

    /**
     * @return true once the consumer has been asked to stop (or has stopped itself)
     */
    public boolean isEnded() {
        return !this.continue_flag;
    }

    /**
     * Resets the offset for each partition of the topic to the end, so only
     * records produced after startup are read.
     */
    private void resetOffset() {
        // Poll once so the consumer joins the group and gets its partition
        // assignment before seeking. NOTE(review): unlike the deprecated
        // poll(long), poll(Duration) does not block until assignment
        // completes — a short positive timeout is used to give it time;
        // confirm assignment is reliable in the target environment.
        consumer.poll(Duration.ofMillis(100));
        Collection<TopicPartition> topicPartitions =
                getTopicPartitions(consumer.partitionsFor(KafkaConfig.TOPIC_TEST));
        consumer.seekToEnd(topicPartitions);
    }

    /**
     * Converts partition metadata into the {@code TopicPartition} collection
     * expected by {@code seekToEnd}.
     *
     * @param partitionInfos partition metadata for the topic
     * @return one {@code TopicPartition} per input {@code PartitionInfo}
     */
    private static Collection<TopicPartition> getTopicPartitions(List<PartitionInfo> partitionInfos) {
        ArrayList<TopicPartition> topicPartitions = new ArrayList<>(partitionInfos.size());
        for (PartitionInfo partitionInfo : partitionInfos) {
            topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }
        return topicPartitions;
    }

}
