package com.shux.kafka.consumer;

import java.util.Map;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingDeque;

import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Description: Tracks the consumption state of offsets, used to decide whether an
 * offset has already been consumed so that duplicate consumption can be avoided.
 *
 * <pre>HISTORY
 * ****************************************************************************
 *  ID   DATE           PERSON          REASON
 *  1    2017-09-22     Simba.Hua       Create
 * ****************************************************************************
 * </pre>
 * @author Simba.Hua
 */
public class OffsetStatusHolder {
	private TopicPartition topicPartition;
	// Offsets recorded so far, in the order they were seen; the tail is the most recent.
	// NOTE(review): this deque and the map below grow without bound — confirm a
	// caller eventually discards this holder (e.g. on rebalance) or add pruning.
	private BlockingDeque<Long> offsetsQueue = new LinkedBlockingDeque<>();
	// Offsets whose processing has completed; value is always TRUE (used as a set).
	private Map<Long, Boolean> finishedOffset = new ConcurrentHashMap<>();
	// SLF4J convention: one static final logger per class.
	private static final Logger logger = LoggerFactory.getLogger(OffsetStatusHolder.class);

	public OffsetStatusHolder() {
	}

	public OffsetStatusHolder(TopicPartition topicPartition) {
		this.topicPartition = topicPartition;
	}

	/**
	 * Checks whether the given offset has already been consumed; if it has not,
	 * records it as the latest seen offset.
	 *
	 * <p>NOTE(review): the peek-then-offer sequence is not atomic, so two threads
	 * could both pass the check for the same offset — presumably this is only
	 * called from a single consumer thread; confirm against the caller.
	 *
	 * @param offset the offset to check and record
	 * @return {@code true} if {@code offset} is older than the last recorded
	 *         offset (treated as already consumed), {@code false} otherwise
	 */
	public boolean checkIsConsumered(Long offset) {
		Long lastOffset = offsetsQueue.peekLast();
		if (lastOffset != null && lastOffset.compareTo(offset) > 0) { // already consumed
			// Guard against NPE: topicPartition is null when the no-arg constructor was used.
			logger.warn("current offset is smaller than last offset,maybe the current offset has consumered,"
					+ "lastOffset:{} currentOffset:{} topic:{} partition:{}",
					lastOffset, offset,
					topicPartition == null ? null : topicPartition.topic(),
					topicPartition == null ? null : topicPartition.partition());
			return true;
		}
		// This deque is unbounded, so the non-blocking offer() always succeeds.
		// This replaces a put() retry loop that swallowed InterruptedException
		// without re-interrupting the thread.
		offsetsQueue.offer(offset);
		return false;
	}

	/**
	 * Marks the given offset as fully processed.
	 *
	 * @param offset the offset whose processing has finished
	 */
	public void complete(Long offset) {
		finishedOffset.put(offset, Boolean.TRUE);
	}

	/**
	 * Returns the most recently recorded offset if its processing has been
	 * marked complete, otherwise -1.
	 *
	 * @return the latest completed offset, or -1 if no offset has been recorded
	 *         yet or the latest one has not finished processing
	 */
	public Long getCurrentOffset() {
		// peekLast() returns null on an empty deque; the previous getLast()
		// threw NoSuchElementException before any offset was recorded.
		Long currentOffset = offsetsQueue.peekLast();
		if (currentOffset != null && finishedOffset.containsKey(currentOffset)) {
			return currentOffset;
		}
		return -1L;
	}

	/** @return the topic/partition this holder tracks; may be null if unset */
	public TopicPartition getTopicPartition() {
		return topicPartition;
	}

}
