package job;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import job.Producer.Topic;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import play.jobs.Every;
import play.jobs.Job;
import play.jobs.On;
import play.jobs.OnApplicationStart;
import service.impl.FullReturnSchedulePriceMessageQueue;
import service.impl.SchedulePriceMessageQueue;
import utils.PropertyUtil;
import utils.SystemParam;
import utils.TopicUtil;

//@OnApplicationStart(async=true)
@Every(value = "5s")
public class FullReturnSchedulePriceConsumer extends Job {
	private final ConsumerConnector consumer;
	private final String topic;
	private final FullReturnSchedulePriceMessageQueue schedulequeue = FullReturnSchedulePriceMessageQueue.getInstance();

	// The legacy high-level consumer allows createMessageStreams() to be called
	// at most once per connector instance; @Every("5s") re-invokes doJob() on
	// the same job instance, so stream creation must be guarded or the second
	// invocation would throw.
	private volatile boolean streamsCreated = false;

	/**
	 * Creates the ZooKeeper-backed consumer connector and registers it with the
	 * full-return schedule-price message queue singleton.
	 */
	public FullReturnSchedulePriceConsumer() {
		System.out.println("before create consumer");
		// Establish the connection to ZooKeeper.
		consumer = kafka.consumer.Consumer
				.createJavaConsumerConnector(createConsumerConfig());
		System.out.println("after create consumer");
		this.topic = TopicUtil.getTopic(Topic.FullReturnSchedules);
		schedulequeue.setConsumer(consumer);
	}

	/**
	 * Builds the consumer configuration: manual offset commits
	 * ("auto.commit.enable" = false), starting from the earliest available
	 * offset ("auto.offset.reset" = smallest) when no committed offset exists.
	 *
	 * @return the configuration for the high-level consumer connector
	 */
	private ConsumerConfig createConsumerConfig() {
		Properties props = new Properties();
		props.put("zookeeper.connect", KafkaProperties.zkConnect); // ZooKeeper ensemble to connect to
		props.put("group.id", KafkaProperties.groupId); // consumer group this consumer belongs to
		props.put("zookeeper.session.timeout.ms", KafkaProperties.zSessionTimeout);
		props.put("auto.commit.enable", "false");
		props.put("auto.offset.reset", "smallest");

		return new ConsumerConfig(props);
	}

	/**
	 * Creates the message stream (exactly once) and drains incoming messages
	 * into the schedule queue. The consumer iterator blocks in hasNext() while
	 * no messages are available, so this method normally does not return.
	 *
	 * @throws Exception if stream consumption or the parent job hook fails
	 */
	@Override
	public void doJob() throws Exception {
		if (streamsCreated) {
			// Streams already exist for this connector; calling
			// createMessageStreams() again would throw, so bail out.
			return;
		}
		streamsCreated = true;

		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		topicCountMap.put(topic, Integer.valueOf(1)); // topic to consume; value = number of streams to create
		System.out.println("before create message stream");
		Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
				.createMessageStreams(topicCountMap, new StringDecoder(null), new StringDecoder(null));
		System.out.println("after create message stream");
		List<KafkaStream<String, String>> list = consumerMap.get(topic); // streams for our topic
		System.out.println("after get topic:" + topic);
		KafkaStream<String, String> kafkaStream = list.get(0); // we requested exactly one stream
		System.out.println("after get kafkaStream");
		ConsumerIterator<String, String> it = kafkaStream.iterator(); // hasNext() blocks until a message arrives
		MessageAndMetadata<String, String> data = null;

		ConsumerTimer.getInstanse().monitorSynprocess(Topic.FullReturnSchedules); // time-based monitoring of the message queue
		while (it.hasNext()) {
			data = it.next();
			schedulequeue.addMessage(data.message());
			// Offsets are not auto-committed (auto.commit.enable=false);
			// committing is presumably handled by the queue consumer — TODO confirm.
		}

		super.doJob();
	}
}
