package com.gdlt.mq2db;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Runnable that reads a single, manually assigned Kafka topic partition and
 * buffers the polled records into a shared queue for the database-writer side.
 *
 * Created by CM on 2017/3/14.
 */
public class KafkaReader implements Runnable {

	private static final Logger LOG = LoggerFactory.getLogger(KafkaReader.class);

	/** In-memory buffer handing records off to the database-writer side. */
	private final BlockingQueue<ConsumerRecord<String, String>> queue;

	KafkaConsumer<String, String> consumer;
	TopicPartition partition;

	/** Last position reported by {@link #getOffset()}. */
	long offset;
	/** Max milliseconds a single poll() blocks waiting for records. */
	long timeout;

	/**
	 * true while this reader is open; flipped exactly once by {@link #close()}.
	 * Fixed: this used to be a STATIC field, so closing one reader marked
	 * every KafkaReader instance closed — it is now per-instance state.
	 */
	private final AtomicBoolean open = new AtomicBoolean(true);

	/**
	 * Builds a consumer manually assigned a single partition of the configured
	 * topic and positioned at the given offset.
	 *
	 * @param partitionId partition of the configured topic this reader owns
	 * @param offset      offset to resume from, or -1 to seek to the earliest
	 *                    available record (no checkpoint found in the database)
	 * @param queue       shared buffer the polled records are pushed into
	 */
	public KafkaReader(int partitionId, long offset, BlockingQueue<ConsumerRecord<String, String>> queue) {
		this.queue = queue;

		Configuration conf = Kafka2pgConfig.getConf();
		String topic = conf.get(Kafka2pgConfig.TOPIC_CONF_NAME, Kafka2pgConfig.TOPIC_DEFAULT_VALUE);
		String servers = conf.get(Kafka2pgConfig.SERVERS_CONF_NAME, Kafka2pgConfig.SERVERS_DEFAULT_VALUE);
		String groupId = conf.get(Kafka2pgConfig.GROUP_ID_CONF_NAME, Kafka2pgConfig.GROUP_ID_DEFAULT_VALUE);
		String keyDeser = conf.get(Kafka2pgConfig.KEY_DESER_CONF_NAME);
		String valueDeser = conf.get(Kafka2pgConfig.VALUE_DESER_CONF_NAME);
		timeout = conf.getLong(Kafka2pgConfig.TIMEOUT_CONF_NAME, Kafka2pgConfig.TIMEOUT_DEFAULT_VALUE);

		Properties props = new Properties();
		props.put("bootstrap.servers", servers);
		props.put("group.id", groupId);
		// Offsets are checkpointed externally via getOffset(), not by Kafka.
		props.put("enable.auto.commit", "false");
		props.put("key.deserializer", keyDeser);
		props.put("value.deserializer", valueDeser);
		props.put("max.poll.records", 8000);

		consumer = new KafkaConsumer<>(props);

		// Manually assign exactly one partition so each reader thread owns one.
		partition = new TopicPartition(topic, partitionId);
		List<TopicPartition> assignment = Collections.singletonList(partition);
		consumer.assign(assignment);

		// Override the fetch position the consumer uses on the next poll().
		if (offset == -1) {
			consumer.seekToBeginning(assignment);
		} else {
			consumer.seek(partition, offset);
		}
	}

	/**
	 * Polls once (blocking up to {@code timeout} ms) and pushes every received
	 * record into the shared queue, blocking while the queue is full.
	 */
	public void getData() {
		ConsumerRecords<String, String> records = consumer.poll(timeout);
		for (ConsumerRecord<String, String> record : records) {
			try {
				queue.put(record); // blocks on a full queue -> natural back-pressure
			} catch (InterruptedException e) {
				// Restore the interrupt status (previously swallowed with
				// printStackTrace) so run() can observe it and exit.
				Thread.currentThread().interrupt();
				LOG.warn("Interrupted while buffering record at offset {}", record.offset(), e);
				return;
			}
		}
	}

	/**
	 * @return the consumer's current position on the assigned partition, i.e.
	 *         the offset of the next record that will be fetched
	 */
	public long getOffset() {
		offset = consumer.position(partition);
		return offset;
	}

	/**
	 * @return true once {@link #close()} has been called.
	 *         Fixed: this previously returned the raw open-flag and therefore
	 *         reported {@code true} while the reader was still open.
	 */
	public boolean isClosed() {
		return !open.get();
	}

	/**
	 * Closes the underlying consumer at most once (idempotent via CAS).
	 * NOTE(review): KafkaConsumer is not thread-safe — call this from the
	 * polling thread, or only after run() has returned; confirm call sites.
	 */
	public void close() {
		if (open.compareAndSet(true, false)) {
			consumer.close();
		}
	}

	/**
	 * Poll loop: keeps fetching until the reader is closed or the thread is
	 * interrupted. Fixed: the loop previously ran forever, so after close()
	 * the next poll() on the closed consumer would throw and kill the thread.
	 */
	public void run() {
		LOG.info("Kafka reader thread started, id={}", Thread.currentThread().getId());
		while (open.get() && !Thread.currentThread().isInterrupted()) {
			getData();
		}
	}

}
