package com.shux.kafka.consumer;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

/**
 * Description: continuously commits the offsets of already-consumed messages to Kafka or ZooKeeper.
 * 
 * <pre>HISTORY
 * ****************************************************************************
 *  ID   DATE           PERSON          REASON
 *  1    2017-09-22     Simba.Hua       Create
 * ****************************************************************************
 * </pre>
 * @author Simba.Hua
 */
public class CommitOffsetThread<T> extends RepeatThread {

	/** Consumer whose raw {@code consumer} field is used to issue the synchronous commits. */
	private ShuxKafkaConsumer<T> kafkaConsumer;
	/** Per-partition holders tracking the latest fully-processed offset (-1 means nothing yet). */
	private Map<Integer, OffsetStatusHolder> finishOffsets;

	/**
	 * Creates a commit thread without a consumer; {@link #repeatWork()} is a no-op
	 * until the consumer and offset map are provided.
	 *
	 * @param sleepSeconds pause between commit rounds, forwarded to {@link RepeatThread}
	 */
	public CommitOffsetThread(Integer sleepSeconds) {
		super(sleepSeconds);
	}

	/**
	 * Creates a commit thread that periodically commits the offsets recorded in
	 * {@code finishOffsets} for the given consumer's topic.
	 *
	 * @param sleepSeconds  pause between commit rounds, forwarded to {@link RepeatThread}
	 * @param kafkaConsumer consumer used to perform the commits
	 * @param finishOffsets shared map of partition -> processed-offset holder
	 */
	public CommitOffsetThread(Integer sleepSeconds, ShuxKafkaConsumer<T> kafkaConsumer,
			Map<Integer, OffsetStatusHolder> finishOffsets) {
		super(sleepSeconds);
		this.kafkaConsumer = kafkaConsumer;
		this.finishOffsets = finishOffsets;
	}

	/**
	 * One commit round: for every partition whose holder reports a processed offset,
	 * synchronously commits the NEXT offset to consume (processed offset + 1).
	 */
	@Override
	protected void repeatWork() throws InterruptedException {
		// The no-arg constructor leaves both references null; nothing to do then.
		if (finishOffsets == null || kafkaConsumer == null) {
			return;
		}
		String topic = kafkaConsumer.getConsumerConfig().getTopic();
		finishOffsets.forEach((partition, holder) -> {
			Long currentOffset = holder.getCurrentOffset();
			// Null guard prevents an unboxing NPE; -1 is the "nothing consumed yet" marker.
			if (currentOffset != null && currentOffset != -1L) {
				// Kafka expects the offset of the next message to be consumed, hence +1.
				long nextOffset = currentOffset + 1L;
				// Single-arg OffsetAndMetadata uses empty metadata; passing null metadata
				// can fail during offset serialization in kafka-clients.
				kafkaConsumer.consumer.commitSync(Collections.singletonMap(
						new TopicPartition(topic, partition),
						new OffsetAndMetadata(nextOffset)));
			}
		});
	}

}
