package com.ibm.cps.storm;

import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

import com.google.common.base.Throwables;
import com.ibm.cps.kafka.KafkaTopicConstructor;
import com.ibm.cps.kafka.KafkaTopicConstructorForMultiTenants;
import com.ibm.cps.kafka.KafkaTopicConsumeTemplate;
import com.ibm.cps.kafka.listener.SpoutDeleteDataSourceListener;
import com.ibm.cps.newmessage.DataSourceMetadataFactory;
import com.ibm.cps.newmessage.SerializableDataSourceMetadata;
import com.ibm.util.TopologyMessageFieldIds;
import com.ibm.util.TopologyStreamIds;
import com.ibm.util.exception.CPSException;

/**
 * Storm spout that emits data-source metadata tuples for a single tenant.
 *
 * <p>On {@link #open}, it (a) starts a background listener thread that forwards
 * data-source <em>delete</em> events from Kafka to the collector, (b) builds a
 * Kafka consumer for the data-source <em>add</em> topic, and (c) replays all
 * already-existing data sources so downstream bolts start with a full view.
 * {@link #nextTuple} then drains the add-topic consumer.
 */
public class DataSourceSpout extends BaseRichSpout {
	private static final long serialVersionUID = 1L;
	// Static fields are not serialized with the spout, so a static final logger
	// is safe here and avoids re-assigning a shared static from open() on every
	// executor (the previous pattern raced across spout instances).
	private static final Logger logger = LoggerFactory.getLogger(DataSourceSpout.class);

	private final KafkaTopicConstructorForMultiTenants kafkaTopicConstructorForMultiTenants =
			new KafkaTopicConstructorForMultiTenants();
	private KafkaTopicConsumeTemplate dataSourceConsumer;
	// Kept package-private to preserve the original field visibility for any
	// same-package callers; read from loadExistedDataSource().
	String tenantid;
	private SpoutOutputCollector collector;
	// transient: ExecutorService is not serializable; created in open() and
	// stopped in close() so worker shutdown does not leak the listener thread.
	private transient ExecutorService listenerExecutor;

	/**
	 * Storm lifecycle entry point. Reads "tenantid" and "zookeeper" from the
	 * topology configuration, starts the delete-event listener thread, prepares
	 * the add-topic consumer, and replays existing data sources.
	 */
	@SuppressWarnings("rawtypes")
	@Override
	public void open(Map conf, TopologyContext context, final SpoutOutputCollector collector) {
		this.collector = collector;

		tenantid = (String) conf.get("tenantid");
		String zookeeper = (String) conf.get("zookeeper");

		// Exactly one long-running listener task is ever submitted, so a
		// single-thread executor suffices (the old fixed pool of 2 never used
		// its second thread).
		listenerExecutor = Executors.newSingleThreadExecutor();
		String deleteTopic = kafkaTopicConstructorForMultiTenants.getDataSourceDeleteTopic(tenantid);
		listenerExecutor.execute(new SpoutDeleteDataSourceListener(zookeeper, deleteTopic, collector));

		String metadataTopic = kafkaTopicConstructorForMultiTenants.getDataSourceAddTopic(tenantid);
		dataSourceConsumer = new KafkaTopicConsumeTemplate(zookeeper, metadataTopic) {

			@Override
			public void process(byte[] message) throws CPSException {
				// Decode with an explicit charset: new String(byte[]) uses the
				// platform default, which can differ across worker hosts.
				// Assumes producers write UTF-8 — TODO confirm with the producer side.
				String dataSourceMetadata = new String(message, StandardCharsets.UTF_8);
				logger.info("Receive data source metadata {}", dataSourceMetadata);
				SerializableDataSourceMetadata metadata = new SerializableDataSourceMetadata(dataSourceMetadata);
				collector.emit(TopologyStreamIds.DATASOURCE_SPOUT_STREAM, new Values(metadata));
			}
		};

		loadExistedDataSource();
	}

	/**
	 * Emits one tuple per already-registered data source for this tenant so the
	 * topology does not depend on the add-topic replaying history. Failures are
	 * logged and swallowed: a replay failure must not prevent the spout from
	 * serving live events.
	 */
	private void loadExistedDataSource() {
		try {
			Collection<String> existedDs = DataSourceMetadataFactory.loadExistedDataSource(tenantid);
			if (existedDs != null) {
				for (String datasource : existedDs) {
					SerializableDataSourceMetadata dsMetadata = new SerializableDataSourceMetadata(datasource);
					collector.emit(TopologyStreamIds.DATASOURCE_SPOUT_STREAM, new Values(dsMetadata));
				}
			}
		} catch (CPSException e) {
			logger.error(Throwables.getStackTraceAsString(e));
		}
	}

	/**
	 * Polls the add-topic consumer; tuples are emitted from process() above.
	 * Exceptions are logged rather than rethrown so Storm keeps calling us.
	 */
	@Override
	public void nextTuple() {
		try {
			dataSourceConsumer.consumeMessage();
		} catch (Exception e) {
			logger.error(Throwables.getStackTraceAsString(e));
		}
	}

	/**
	 * Stops the delete-listener executor so worker shutdown does not leak its
	 * thread. (The previous implementation never shut the pool down.)
	 */
	@Override
	public void close() {
		if (listenerExecutor != null) {
			listenerExecutor.shutdownNow();
		}
	}

	/** Declares the add and delete streams, each carrying a single MESSAGE field. */
	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		declarer.declareStream(TopologyStreamIds.DATASOURCE_SPOUT_STREAM, new Fields(TopologyMessageFieldIds.MESSAGE));
		declarer.declareStream(TopologyStreamIds.DATASOURCE_DELETE_SPOUT_STREAM,
				new Fields(TopologyMessageFieldIds.MESSAGE));
	}

}
