package com.bonc.wkafka.utils.consumer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.Deserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.bonc.wkafka.utils.KafkaProps.FirstPollOffsetStrategy;
import com.bonc.wkafka.utils.KafkaProps.ProcessingGuarantee;
import com.bonc.wkafka.utils.consumer.handler.BadRecordsHandler;
import com.bonc.wkafka.utils.consumer.handler.DefaultBadRecordsHandler;
import com.bonc.wkafka.utils.consumer.subscription.Subscription;
import com.bonc.wkafka.utils.consumer.translator.KafkaRecordTranslator;
import com.bonc.wkafka.utils.consumer.translator.KafkaRecordsTranslator;
import com.bonc.wkafka.utils.timer.Timer;

/**
 * kafka consumer<br>
 * 封装了几个功能：<br>
 * 同时支持standalone和group的消费模式<br>
 * 同时支持AMS/ALS/EOS三种语义<br>
 * @author WYB
 *
 */
public class WKafkaConsumer<K, V> {
	
	private static final Logger logger = LoggerFactory.getLogger(WKafkaConsumer.class);
	
	/** Underlying Kafka consumer; created once in the constructor, closed in {@link #close()}. */
	private final KafkaConsumer<K, V> consumer;

	/** Subscription strategy (standalone manual assignment vs. group subscribe). */
	private final Subscription subscription;

	/** Which delivery guarantee the consume loop enforces (AT_MOST_ONCE / AT_LEAST_ONCE / ...). */
	private final ProcessingGuarantee processingGuarantee;

	/**
	 * Identifier written into the metadata of every committed offset so that
	 * commits made by other applications on the same group can be detected
	 * (see {@code DefaultConsumerRebalanceListener#doSeek}).
	 */
	private final String consumerId;

	/** Where to start consuming when no usable committed offset exists. */
	private final FirstPollOffsetStrategy firstPollOffsetStrategy;

	/** Max time in ms a single {@code poll} blocks waiting for records. */
	private final long pollIntervalMs;

	/** Per-record callback (used when batchConsume == false). */
	private final KafkaRecordTranslator recordTranslator;
	
	/** Whole-batch callback (used when batchConsume == true). */
	private final KafkaRecordsTranslator recordsTranslator;

	private final KafkaConsumerProps props;

	/** Period in ms between manual partition-assignment refreshes (standalone mode only). */
	private final long partitionRefreshPeriodMs;

	private final Timer refreshSubscriptionTimer;
	
	/** Receives records that still fail after all retries; defaults to a no-op-style handler. */
	private BadRecordsHandler<K, V> handler = new DefaultBadRecordsHandler<K, V>();

	/** true: hand the whole poll result to recordsTranslator; false: record-by-record. */
	private final boolean batchConsume;


	/**
	 * Builds the wrapper and the underlying {@link KafkaConsumer} from the supplied properties.
	 *
	 * @param props all configuration, including deserializers, subscription and guarantee
	 */
	public WKafkaConsumer(KafkaConsumerProps props) {
		this.props = props;
		this.subscription = props.getSubscription();
		this.processingGuarantee = props.getProcessingGuarantee();
		this.consumerId = props.getConsumerId();
		this.firstPollOffsetStrategy = props.getFirstPollOffsetStrategy();
		this.pollIntervalMs = props.getPollIntervalMs();
		this.recordTranslator = props.getRecordTranslator();
		this.partitionRefreshPeriodMs = props.getPartitionRefreshPeriodMs();
		this.batchConsume = props.isBatchConsume();
		this.recordsTranslator = props.getRecordsTranslator();
		if (props.getHandler() != null) {
			this.handler = props.getHandler();
		}
		this.refreshSubscriptionTimer = new Timer(partitionRefreshPeriodMs, partitionRefreshPeriodMs, TimeUnit.MILLISECONDS);
		consumer = new KafkaConsumer<K, V>(props.getProps(), this.props.getKeyDeserializer(), this.props.getValueDeserializer());
	}
	
	/**
	 * Subscribes (or assigns, in standalone mode) according to the configured
	 * {@link Subscription}, installing the rebalance listener that keeps offsets safe.
	 */
	public void subscribe() {
		subscription.subscribe(consumer, new DefaultConsumerRebalanceListener());
	}
	
	/**
	 * Blocking consume loop. Never returns; call {@link #close()} from another
	 * thread (or rely on process shutdown) to stop consuming.
	 */
	public void consume() {
		while (true) {
			consumeOnce();
		}
	}
	
	/**
	 * Performs one poll/process/commit cycle according to the configured guarantee.
	 */
	public void consumeOnce() {
		// 只有在standalone的消费模式下，手动周期性调用rebalance
		// (only in standalone mode: periodically refresh the manual assignment)
		if (subscription.needRefresh() && this.refreshSubscriptionTimer.isExpiredResetOnTrue()) {
			subscription.refreshAssignment();
		}
		ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(pollIntervalMs));
		if (processingGuarantee == ProcessingGuarantee.AT_MOST_ONCE) {
			// BUGFIX: commit synchronously BEFORE processing. With commitAsync the
			// commit could silently fail and the records would be re-delivered after
			// a restart, breaking the at-most-once guarantee.
			consumer.commitSync(getCommitOffsets());
			if (this.batchConsume) {
				recordsTranslator.handle(records);
			} else {
				for (ConsumerRecord<K, V> record : records) {
					recordTranslator.handle(record);
				}
			}
		}
		if (processingGuarantee == ProcessingGuarantee.AT_LEAST_ONCE) {
			aloCommit(records);
		}
	}
	
	/**
	 * At-least-once processing: commit an offset only after its record(s) were
	 * handled successfully; records that still fail after the configured number of
	 * retries are passed to the {@link BadRecordsHandler}.
	 */
	private void aloCommit(ConsumerRecords<K, V> records) {
		final int maxRetries = handler.getRetryTimes();
		if (this.batchConsume) {
			for (int attempt = 0; attempt < maxRetries; attempt++) {
				if (recordsTranslator.handle(records)) {
					consumer.commitAsync(getCommitOffsets(), null);
					return;
				}
			}
			// whole batch failed every attempt — hand it over for dead-lettering etc.
			handler.handle(records.iterator());
		} else {
			List<ConsumerRecord<K, V>> badRecords = new ArrayList<ConsumerRecord<K, V>>();
			for (ConsumerRecord<K, V> record : records) {
				// BUGFIX: the retry budget is reset for EVERY record. Previously a
				// single shared counter was decremented across the whole batch, so
				// once one record exhausted it, all following records were sent to
				// the bad-records handler without a single processing attempt.
				boolean handled = false;
				for (int attempt = 0; attempt < maxRetries && !handled; attempt++) {
					handled = recordTranslator.handle(record);
				}
				if (handled) {
					Map<TopicPartition, OffsetAndMetadata> map = new HashMap<TopicPartition, OffsetAndMetadata>();
					// commit offset+1: the committed offset is the NEXT offset to fetch
					map.put(new TopicPartition(record.topic(), record.partition()),
							new OffsetAndMetadata(record.offset() + 1, this.consumerId));
					consumer.commitAsync(map, null);
				} else {
					badRecords.add(record);
				}
			}
			if (!badRecords.isEmpty()) {
				handler.handle(badRecords.iterator());
			}
		}
	}

	/**
	 * 获得提交的分区，以及要提交的offset
	 * (offsets to commit for every currently assigned partition)
	 * @return partition → next-fetch offset, tagged with this consumer's id
	 */
	private Map<TopicPartition, OffsetAndMetadata> getCommitOffsets() {
		Set<TopicPartition> assignment = consumer.assignment();
		return getCommitOffsets(assignment);
	}
	
	/**
	 * 获得提交的分区，以及要提交的offset
	 * @param partitions partitions whose current position should be committed
	 * @return partition → next-fetch offset, tagged with this consumer's id
	 */
	private Map<TopicPartition, OffsetAndMetadata> getCommitOffsets(Collection<TopicPartition> partitions) {
		Map<TopicPartition, OffsetAndMetadata> map = new HashMap<TopicPartition, OffsetAndMetadata>();
		// 要在offset元数据上添加的consumerId，为了区分是否有别的应用提交offset
		// (the consumerId metadata lets doSeek() tell our own commits from foreign ones)
		for (TopicPartition tp : partitions) {
			long offset = consumer.position(tp);
			map.put(tp, new OffsetAndMetadata(offset, consumerId));
		}
		return map;
	}
	
	/**
	 * Rebalance listener: flushes pending offsets on revocation and seeks newly
	 * assigned partitions to the correct starting position.
	 * @author WYB
	 */
	private class DefaultConsumerRebalanceListener implements ConsumerRebalanceListener {

        // assignment held before the last rebalance; kept for diagnostics
        private Collection<TopicPartition> previousAssignment = new HashSet<>();

        /* (non-Javadoc)
         * @see org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsRevoked(java.util.Collection)
         * 将rebalance期间当前consumer由于阻塞未提交的offset全部提交
         * (commit everything this consumer has processed but not yet committed)
         */
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        	// rebalance之前，该consumer消费的partitions
            previousAssignment = partitions;

            if (processingGuarantee == ProcessingGuarantee.AT_LEAST_ONCE) {
            	Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<TopicPartition, OffsetAndMetadata>();
            	logger.info("Partitions revoked: {}", partitions);
				for (TopicPartition tp : partitions) {
					// 该consumer准备fetch的下一个offset，注意不是committed的offset
					// (next offset to fetch, NOT the last committed offset)
					long offset = consumer.position(tp);
					logger.debug("revoking tp: {}-{}, offset: {}", tp.topic(), tp.partition(), offset);
					// CONSISTENCY FIX: tag with consumerId like every other commit,
					// otherwise doSeek() would treat our own commit as a foreign one.
					offsets.put(tp, new OffsetAndMetadata(offset, consumerId));
				}
				// 将未提交的offset全部提交
				// 提交的前提是poll之前的全部操作都已经完成，
				// 如果想确保严格意义上的安全，可以不提交，rebalance之后会从上一次提交的位置消费，让所有未提交的操作重新执行一遍
				// BUGFIX: must be commitSync here — an async commit issued during
				// revocation may never complete before the partitions are handed to
				// another consumer, silently losing the commit.
				consumer.commitSync(offsets);
            }
        }

        /* (non-Javadoc)
         * @see org.apache.kafka.clients.consumer.ConsumerRebalanceListener#onPartitionsAssigned(java.util.Collection)
         * 将rebalance之后分配给consumer的partitions都seek到正确的offset上，便于下一次poll
         */
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        	logger.info("Partitions assigned: {}", partitions);
        	// rebalance之后，分配给consumer的partitions
        	for (TopicPartition tp : partitions) {
        		// 获得这个tp上一次commit的位置（整个group的，不一定是这个consumer提交的）
        		// BUGFIX: committed() returns null when the group has never committed
        		// for this partition — the old code dereferenced it unconditionally
        		// (NPE); doSeek already handles the null case via FirstPollOffsetStrategy.
        		OffsetAndMetadata committed = consumer.committed(tp);
        		// seek到committed的位置开始下一次的消费
        		long fetchOffset = doSeek(tp, committed);
        		logger.debug("tp: {}-{}, seek to offset: {}", tp.topic(), tp.partition(), fetchOffset);
        	}
        }

        /**
         * Sets the cursor to the location dictated by the first poll strategy and returns the fetch offset.
         *
         * @param newTp           newly assigned partition
         * @param committedOffset last committed offset for the group, or null if none exists
         * @return the position the next poll will fetch from
         */
        private long doSeek(TopicPartition newTp, OffsetAndMetadata committedOffset) {
            if (committedOffset != null) {
            	if (consumerId.equals(committedOffset.metadata())) {
            		// our own commit — resume exactly where we left off
            		consumer.seek(newTp, committedOffset.offset());
                } else {
                	// 如果不是一个应用提交的offset，则本应用提交的offset可能被覆盖，只能使用FirstPollOffsetStrategy来决定下一次消费的位置
                	// (a different application committed last; fall back to the strategy)
                    if (firstPollOffsetStrategy.equals(FirstPollOffsetStrategy.EARLIEST)) {
                        consumer.seekToBeginning(Collections.singleton(newTp));
                    } else if (firstPollOffsetStrategy.equals(FirstPollOffsetStrategy.LATEST)) {
                        consumer.seekToEnd(Collections.singleton(newTp));
                    } else {
                        // Resume polling at the last committed offset, i.e. the first offset that is not marked as processed.
                        consumer.seek(newTp, committedOffset.offset());
                    }
                }
            } else {
                // no offset commits have ever been done for this consumer group and topic-partition,
                // so start at the beginning or end depending on FirstPollOffsetStrategy
                if (firstPollOffsetStrategy.equals(FirstPollOffsetStrategy.EARLIEST) || firstPollOffsetStrategy.equals(FirstPollOffsetStrategy.UNCOMMITTED_EARLIEST)) {
                    consumer.seekToBeginning(Collections.singleton(newTp));
                } else if (firstPollOffsetStrategy.equals(FirstPollOffsetStrategy.LATEST) || firstPollOffsetStrategy.equals(FirstPollOffsetStrategy.UNCOMMITTED_LATEST)) {
                    consumer.seekToEnd(Collections.singleton(newTp));
                }
            }
            return consumer.position(newTp);
        }
    }

	/**
	 * Moves the fetch position for the given partition; affects the next poll only,
	 * nothing is committed here.
	 */
	public void seek(TopicPartition tp, OffsetAndMetadata offset) {
		consumer.seek(tp, offset);
	}

	/** Closes the underlying consumer, committing/leaving the group per Kafka semantics. */
	public void close() {
		if (consumer != null) {
			consumer.close();
		}
	}
}
