package job;

import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import play.jobs.Every;
import play.jobs.Job;
import utils.DateUtils;
import utils.PropertyUtil;
import utils.TopicUtil;
import utils.SystemParam.Topic;
import bp.NetSaleBP;

/**
 * 
 * <b>类描述：废票消费者</b><br/>
 * <b>类名称：</b>CancelConsumer<br/>
 * <b>创建人：</b>张兵<br/>
 * <b>关键修改：</b><br/>
 * <b>修改时间：</b><br/>
 * <b>修改人：</b><br/>
 * 
 */
@SuppressWarnings("rawtypes")
@Every("3s")
public class CancelConsumer extends Job {

	/** High-level Kafka consumer connector; owns the ZooKeeper connection and offset state. */
	private final ConsumerConnector consumer;

	/** Name of the topic carrying cancelled-ticket ("废票") messages, resolved via TopicUtil. */
	private final String topic;

	/**
	 * Creates the Kafka consumer connector (connecting to ZooKeeper) and resolves
	 * the "Cancel" topic name.
	 */
	public CancelConsumer() {
		System.out.println("before create consumer");
		// Connect to ZooKeeper and join the consumer group defined in createConsumerConfig().
		consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig());
		System.out.println("after create consumer");
		this.topic = TopicUtil.getTopic("Cancel");
	}

	/**
	 * Builds the configuration for the high-level Kafka consumer.
	 *
	 * @return consumer configuration: ZooKeeper address, consumer group, session
	 *         timeout, earliest-offset reset, and manual offset commits.
	 */
	private ConsumerConfig createConsumerConfig() {
		Properties props = new Properties();
		props.put("zookeeper.connect", KafkaProperties.zkConnect); // ZooKeeper address to connect to
		props.put("group.id", KafkaProperties.groupId); // consumer group this consumer belongs to
		props.put("zookeeper.session.timeout.ms", KafkaProperties.zSessionTimeout); // ZooKeeper session timeout
		// Start from the earliest available offset when no committed offset exists.
		props.put("auto.offset.reset", "smallest");
		// Auto-commit is disabled; offsets are committed manually in doJob() after each message.
		props.put("auto.commit.enable", "false");

		return new ConsumerConfig(props);
	}

	/**
	 * Consumes cancelled-ticket messages from the configured topic. Each message
	 * is handed to {@code NetSaleBP.cancelticket(...)}; on failure (false return
	 * or any exception) the raw message is forwarded to the error topic with a
	 * "CACL" prefix and a timestamp. The offset is committed after every message,
	 * whether processing succeeded or not.
	 *
	 * NOTE(review): the while-loop blocks indefinitely on the consumer iterator,
	 * so this method never returns; despite @Every("3s") it effectively runs once —
	 * confirm Play does not re-enter a still-running job.
	 * NOTE(review): createMessageStreams can typically be called only once per
	 * connector in the 0.8 high-level API; a second doJob() invocation would fail.
	 *
	 * @throws Exception propagated from stream creation or {@code super.doJob()}.
	 */
	@Override
	public void doJob() throws Exception {
		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		// One stream for this topic. Integer.valueOf avoids the deprecated new Integer(1).
		topicCountMap.put(topic, Integer.valueOf(1));
		System.out.println("before create message stream");
		// Create the message streams; keys/values are decoded as strings.
		Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap, new StringDecoder(null), new StringDecoder(null));
		System.out.println("after create message stream");
		List<KafkaStream<String, String>> list = consumerMap.get(topic); // streams for our topic
		System.out.println("after get topic");
		KafkaStream<String, String> kafkaStream = list.get(0); // exactly one stream was requested above
		System.out.println("after get kafkaStream");
		ConsumerIterator<String, String> it = kafkaStream.iterator(); // hasNext() blocks until a message arrives
		MessageAndMetadata<String, String> data = null;
		while (it.hasNext()) {
			data = it.next();
			boolean flag = false;
			try {
				flag = new NetSaleBP().cancelticket(data.message());
			} catch (Exception e) {
				// Deliberate best-effort: a processing failure must not kill the consumer
				// loop; the message is forwarded to the error topic below instead.
				flag = false;
			}
			if (!flag) {
				ErrorMessageConsumer.setOnline("CACL");
				ErrorMessageConsumer.send(TopicUtil.getTopic("ErrorConsumer"), "CACL#0#" + DateUtils.formatDatetime(new Date()) + "#" + data.message());
			}
			consumer.commitOffsets(); // manual commit of this consumer's offset (auto-commit is off)
		}

		super.doJob();
	}

}
