package com.gdlt.mq2db;

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Created by CM on 2017/3/14.
 *
 */
/**
 * Reads records from a single, manually-assigned partition of a Kafka topic.
 *
 * <p>Auto-commit is disabled; the caller is expected to persist offsets
 * externally (see {@link #getOffset()}) and pass the stored offset back in
 * via the constructor on restart.
 *
 * <p>Not thread-safe: each instance wraps one {@link KafkaConsumer}, which
 * itself must only be used from a single thread.
 */
public class KafkaReader {
	KafkaConsumer<String, String> consumer;
	long offset;
	// Instance-level open/closed state. NOTE(review): this was previously
	// static, which made close() on one KafkaReader mark every instance
	// closed and blocked other instances from ever closing their consumers.
	private final AtomicBoolean open = new AtomicBoolean(true);
	TopicPartition partition;
	// Buffer reused across getData() calls; cleared at the start of each call.
	List<ConsumerRecord<String, String>> list = new ArrayList<ConsumerRecord<String, String>>();

	/**
	 * Creates a consumer bound to exactly one partition of the configured topic
	 * and positions it at the given offset.
	 *
	 * @param partitionId partition of the configured topic to read from
	 * @param offset      offset to seek to; pass -1 to start from the earliest
	 *                    available offset (used when no offset is stored yet)
	 */
	public KafkaReader(int partitionId, long offset) {
		Configuration conf = Kafka2pgConfig.getConf();
		Properties props = new Properties();
		// Resolve configuration values (falling back to defaults where defined).
		String topic = conf.get(Kafka2pgConfig.TOPIC_CONF_NAME, Kafka2pgConfig.TOPIC_DEFAULT_VALUE);
		String serValue = conf.get(Kafka2pgConfig.SERVERS_CONF_NAME, Kafka2pgConfig.SERVERS_DEFAULT_VALUE);
		String groupID = conf.get(Kafka2pgConfig.GROUP_ID_CONF_NAME, Kafka2pgConfig.GROUP_ID_DEFAULT_VALUE);
		String keyDeser = conf.get(Kafka2pgConfig.KEY_DESER_CONF_NAME);
		String valueDeser = conf.get(Kafka2pgConfig.VALUE_DESER_CONF_NAME);

		props.put("bootstrap.servers", serValue);
		props.put("group.id", groupID);
		// Offsets are committed externally (to the database), never by Kafka.
		props.put("enable.auto.commit", "false");
		props.put("auto.commit.interval.ms", "1000");
		props.put("key.deserializer", keyDeser);
		props.put("value.deserializer", valueDeser);
		props.put("auto.offset.reset", "earliest");

		consumer = new KafkaConsumer<String, String>(props);
		System.out.println("in 1");

		// Manually assign just one partition to this consumer (no group
		// rebalancing; partition ownership is managed by the caller).
		partition = new TopicPartition(topic, partitionId);
		ArrayList<TopicPartition> partitions = new ArrayList<TopicPartition>();
		partitions.add(partition);
		consumer.assign(partitions);

		// Override the fetch offset used on the next poll(timeout).
		if (offset == -1) {
			// No stored offset in the database: start from the beginning.
			consumer.seekToBeginning(partitions);
		} else {
			consumer.seek(partition, offset);
		}
	}

	/**
	 * Polls the broker once and returns the records fetched by this call.
	 *
	 * <p>The returned list is an internal buffer that is overwritten on the
	 * next call — copy it if the caller needs to retain it.
	 *
	 * @return the records fetched by this poll (possibly empty on timeout)
	 */
	public List<ConsumerRecord<String, String>> getData() {
		// Reset the buffer so each call returns only newly fetched records.
		// (Previously the buffer was never cleared, so every call re-returned
		// all records fetched since construction.)
		list.clear();

		ConsumerRecords<String, String> poll = consumer.poll(1000); // poll timeout in ms
		for (ConsumerRecord<String, String> kv : poll) {
			list.add(kv);
		}

		return list;
	}

	/**
	 * Returns the offset of the next record that will be fetched for the
	 * assigned partition, for the caller to persist.
	 *
	 * @return the consumer's current position on the assigned partition
	 */
	public long getOffset() {
		offset = consumer.position(partition);
		return offset;
	}

	/**
	 * @return true once {@link #close()} has been called.
	 */
	public boolean isClosed() {
		// NOTE(review): previously returned the raw flag, which is true while
		// the reader is still OPEN — the result was inverted.
		return !open.get();
	}

	/**
	 * Closes the underlying consumer. Idempotent: only the first call closes.
	 */
	public void close() {
		if (open.compareAndSet(true, false)) {
			consumer.close();
		}
	}

}
