package com.ibm.cps.storm;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.ibm.cps.kafka.KafkaTopicConstructorForMultiTenants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.base.Throwables;
import com.ibm.cps.kafka.KafkaTopicConstructor;
import com.ibm.cps.kafka.KafkaTopicConsumeTemplate;
import com.ibm.cps.kafka.listener.DeleteProcessorListener;
import com.ibm.cps.message.MessageFactory;
import com.ibm.cps.message.MetadataTopologySort;
import com.ibm.cps.newmessage.AbstractMetadata;
import com.ibm.cps.newmessage.BasicMetadata;
import com.ibm.cps.newmessage.MetadataFactory;
import com.ibm.cps.newmessage.OptimizedMetadata;
import com.ibm.util.TopologyMessageFieldIds;
import com.ibm.util.TopologyStreamIds;
import com.ibm.util.exception.CPSException;

public class ProcessorSpout extends BaseRichSpout {

	private static final long serialVersionUID = 6867427239389924285L;

	// Created once at class-load time (SLF4J idiom). The previous code kept a
	// mutable static that was re-assigned from every open() call, which is racy
	// when multiple spout tasks run in the same worker JVM.
	private static final Logger logger = LoggerFactory.getLogger(ProcessorSpout.class);

	private SpoutOutputCollector collector;
	private KafkaTopicConsumeTemplate processorConsumer;
	private String tenantid;
	// transient: ExecutorService is not serializable, and it is only ever
	// created in open(), which Storm calls after deserializing the spout onto
	// a worker. Kept as a field so close() can shut the listener thread down.
	private transient ExecutorService deleteListenerExecutor;
	private KafkaTopicConstructorForMultiTenants kafkaTopicConstructorForMultiTenants = new KafkaTopicConstructorForMultiTenants();

	/**
	 * Initializes the spout: wires a Kafka consumer for the tenant's
	 * metadata-add topic, starts a background listener for the
	 * metadata-delete topic, and replays any already-persisted processors.
	 *
	 * @param conf      topology configuration; reads "zookeeper" and "tenantid"
	 * @param context   Storm task context (unused)
	 * @param collector collector used to emit metadata tuples downstream
	 */
	@SuppressWarnings("rawtypes")
	@Override
	public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
		this.collector = collector;
		String zookeeper = (String) conf.get("zookeeper");
		tenantid = (String) conf.get("tenantid");
		String metadataTopic = kafkaTopicConstructorForMultiTenants.getMetadataAddTopic(tenantid);

		processorConsumer = new KafkaTopicConsumeTemplate(zookeeper, metadataTopic) {

			@Override
			public void process(byte[] message) throws CPSException {
				// NOTE(review): uses the platform default charset to decode the
				// Kafka payload — confirm whether UTF-8 should be forced here.
				sendMessage(new String(message));
			}
		};

		// Dedicated single thread for the delete-topic listener; it emits
		// directly on the collector from the listener's own loop.
		deleteListenerExecutor = Executors.newFixedThreadPool(1);
		String deletedMetadataTopic = kafkaTopicConstructorForMultiTenants.getMetadataDeleteTopic(tenantid);
		deleteListenerExecutor.execute(new DeleteProcessorListener(zookeeper, deletedMetadataTopic, collector));

		loadingExistedProcessors();
	}

	/**
	 * Stops the background delete-listener thread when Storm closes the spout.
	 * The previous code leaked this executor because it was a local variable.
	 */
	@Override
	public void close() {
		if (deleteListenerExecutor != null) {
			deleteListenerExecutor.shutdownNow();
		}
	}

	/**
	 * Replays metadata that already existed for this tenant before the
	 * topology started: loads it, sorts it topologically, logs every entry,
	 * then emits them in sorted order. Failures are logged, not rethrown, so
	 * the spout still starts even when the replay fails.
	 */
	private void loadingExistedProcessors() {
		try {
			Collection<String> messages = MessageFactory.loadExistedMessage(tenantid);
			List<ObjectNode> outputs = MetadataTopologySort.getSortedMetadatas(messages);
			if (outputs != null) {
				// Log everything first so the full replay set is visible even
				// if emitting aborts partway through.
				for (ObjectNode metadata : outputs) {
					logger.info("Load processor {}", metadata);
				}
				for (ObjectNode metadata : outputs) {
					sendMessage(metadata.toString());
				}
			}
		} catch (CPSException e) {
			logger.error(Throwables.getStackTraceAsString(e));
		}
	}

	/**
	 * Parses a JSON metadata string and emits it on the processor stream.
	 *
	 * @param metadata JSON-encoded metadata payload
	 * @throws CPSException if the payload cannot be parsed
	 */
	private void sendMessage(String metadata) throws CPSException {
		AbstractMetadata message = MetadataFactory.parseJsonMetadata(metadata);
		// TODO JointMetadata: currently every metadata type (Basic, Optimized,
		// Joint, ...) is emitted identically; the original if/else had two
		// byte-identical branches, collapsed here.
		collector.emit(TopologyStreamIds.PROCESSOR_SPOUT_STREAM, new Values(message));
	}

	/**
	 * Pulls the next batch of messages from the metadata-add consumer.
	 * Consume errors are logged and swallowed so Storm keeps calling us.
	 */
	@Override
	public void nextTuple() {
		try {
			processorConsumer.consumeMessage();
		} catch (CPSException e) {
			logger.error(Throwables.getStackTraceAsString(e));
		}
	}

	/**
	 * Declares the two output streams (add + delete), each carrying a single
	 * MESSAGE field.
	 */
	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declareStream(TopologyStreamIds.PROCESSOR_SPOUT_STREAM, new Fields(TopologyMessageFieldIds.MESSAGE));
		declarer.declareStream(TopologyStreamIds.PROCESSOR_DELETE_SPOUT_STREAM,
				new Fields(TopologyMessageFieldIds.MESSAGE));
	}

}
