package com.mutouren.modules.datapush;

import javax.annotation.Resource;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import com.mutouren.common.log.LogAlias;
import com.mutouren.common.log.LogManager;
import com.mutouren.common.log.Logger;
import com.mutouren.common.mq.kafka.NewConsumer;
import com.mutouren.common.mq.kafka.Producer;
import com.mutouren.common.mq.kafka.NewConsumer.NewConsumerCallback;
import com.mutouren.common.utils.JsonUtils;
import com.mutouren.modules.datapush.base.DataPushLogAlias;
import com.mutouren.modules.datapush.base.PushMessage;

/**
 * Listener.
 *
 * Notes:
 * 1. Listens for pushed data on the data-push Kafka topic; each received
 *    record is decoded into a {@link PushMessage} and handed to
 *    {@link ServerProxy#receive}, retrying until the proxy accepts it.
 */
@Service
public class Listener implements InitializingBean, DisposableBean {
	
	@Value("${datapush.kafka.bootstrapServers}")
	private String bootstrapServers;
	
	private static final String GROUP_ID = "group_datapush";
	private static final String TOPIC = "mtr-datapush";
	private static final boolean IS_SMALLEST_OFFSET = true;
	
	// Pause between delivery attempts when ServerProxy rejects a message.
	private static final long RETRY_INTERVAL_MS = 5000L;
	
	private NewConsumer<Integer, byte[]> consumer;
	// Created lazily in backToQueue(); only needed when a message must be
	// re-queued during shutdown. Guarded by single-threaded callback usage —
	// NOTE(review): confirm NewConsumer dispatches callbacks on one thread.
	private Producer<Integer, byte[]> kafkaProducer;
	
	private static final Logger errorLogger = LogManager.getLogger(LogAlias.SystemError.name());
	private static final Logger runLogger = LogManager.getLogger(LogAlias.SystemRun.name());
	private static final Logger receiveLogger = LogManager.getLogger(DataPushLogAlias.DataPushReceive.name());
	
	@Resource
	private ServerProxy serverProxy;

	/**
	 * Spring lifecycle hook: creates the Kafka consumer and registers the
	 * listener callback for {@link #TOPIC}.
	 *
	 * @throws Exception if consumer creation or listener registration fails
	 */
	@Override
	public void afterPropertiesSet() throws Exception {
		
		consumer = new NewConsumer<Integer, byte[]>(bootstrapServers, GROUP_ID, IS_SMALLEST_OFFSET,
				Integer.class, byte[].class);
					
		consumer.addListener(TOPIC, new NewConsumerCallback<Integer, byte[]>() {

			@Override
			public void callback(ConsumerRecord<Integer, byte[]> record) {
				String body = "";
				try {
					PushMessage message = PushMessage.decode(record.value());
					body = JsonUtils.beanToJson(message);
					deliverWithRetry(message);
				} catch(Throwable t) {
					errorLogger.error("Listener callback error", t);
				} finally {
					// Always record what was received, even on failure
					// (body stays "" if decoding/serialization failed).
					writeReceiveLog(record, body);	
				}
			}
		});	
	}
	
	/**
	 * Hands the message to {@link ServerProxy}, sleeping
	 * {@link #RETRY_INTERVAL_MS} ms between attempts until it is accepted.
	 * If the retry sleep is interrupted (typically during system shutdown),
	 * the unprocessed message is sent back to the queue, the thread's
	 * interrupt status is restored, and the interruption is propagated.
	 *
	 * @param message decoded push message to deliver
	 * @throws InterruptedException if interrupted while waiting to retry
	 */
	private void deliverWithRetry(PushMessage message) throws InterruptedException {
		while (!serverProxy.receive(message)) {
			try {
				Thread.sleep(RETRY_INTERVAL_MS);
			} catch (InterruptedException e) {
				// System is closing: return the message to the queue so it
				// is not lost, and restore the interrupt flag that catching
				// the exception cleared.
				backToQueue(message);
				Thread.currentThread().interrupt();
				throw e;
			}
		}
	}
	
	/**
	 * Re-publishes an undelivered message to {@link #TOPIC} so it can be
	 * consumed again later. Best-effort: failures are logged, not thrown.
	 *
	 * @param message message to return to the queue
	 */
	private void backToQueue(PushMessage message) {
		try {
			if (kafkaProducer == null) {
				kafkaProducer = new Producer<Integer, byte[]>(bootstrapServers, byte[].class);
			}
			kafkaProducer.send(TOPIC, message.encode());
		} catch (Throwable t) {
			errorLogger.error("Listener backToQueue error: " + message.getGuid(), t);
		}
	}
	
	/**
	 * Writes one receive-audit line (partition, offset, JSON body) to the
	 * data-push receive log.
	 *
	 * @param record  the Kafka record that was received
	 * @param content JSON form of the decoded message ("" if decoding failed)
	 */
	private static void writeReceiveLog(ConsumerRecord<Integer, byte[]> record, String content) {
		int partition = record.partition();
		long offset = record.offset();
		
		// %d for offset (was %s) — same output for a long, consistent style.
		String log = String.format("partition=%d,offset=%d, content=%s", partition, offset, content);
		receiveLogger.info(log);
	}		
	
	/**
	 * Spring lifecycle hook: closes the consumer and (if it was ever
	 * created) the re-queue producer. Close errors are logged, not thrown.
	 *
	 * @throws Exception declared by {@link DisposableBean}; not thrown here
	 */
	@Override
	public void destroy() throws Exception {		
		try {
			runLogger.info("Listener close begin");
			if (consumer != null) {
				consumer.close();
			}
			if (kafkaProducer != null) {
				kafkaProducer.close();
			}			
			runLogger.info("Listener close end");
		} catch(Throwable t) {
			errorLogger.error("Listener close error", t);
		}
	}
}
