/*
 * This file is a part of qloudgen-esb. 
 * You can redistribute qloudgen-esb and/or modify it under the terms of the Lesser GNU General Public License version 3. 
 * qloudgen-esb is distributed WITHOUT ANY WARRANTY. 
 * 
 * See the Lesser GNU General Public License for more details at http://www.gnu.org/licenses/.
 *  
 * Copyright (C) 2014 Qloudgen Technology
*/

package com.qloudgen.csb;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;

import org.apache.log4j.Logger;
import org.zeromq.ZContext;

/**
 * Bridges a Kafka topic to a pool of ZeroMQ dispatchers. {@link #run()}
 * creates the high-level Kafka consumer, pulls messages in batches of
 * {@code bufferSize}, and hands one message to each {@link ZmqDispatcher}.
 * Dispatchers report delivery results back through {@link #ack(int, int)};
 * once every slot of a batch has reported, the consumed offsets are
 * committed and the next batch is fetched.
 */
public class ZmqConsumerP extends ZmqConsumerBase implements Runnable {
	// Number of ack() callbacks received for the current batch; reset after commit.
	private int count = 0;
	private ConsumerIterator<byte[], byte[]> it;
	private ConsumerConnector consumer;

    private static final Logger LOG = Logger.getLogger(ZmqConsumerP.class.getName());

    // One dispatcher per batch slot; disps.get(i) handles the i-th message of a batch.
    private final List<ZmqDispatcher> disps = new ArrayList<ZmqDispatcher>();
    // Kafka offset of the message currently dispatched on each slot.
    private final List<Long> offsets = new ArrayList<Long>();

    /**
     * Callback invoked by a dispatcher when delivery of the message on slot
     * {@code pos} has finished. Failed deliveries are reported to the error
     * handler together with the message's Kafka offset. When all
     * {@code bufferSize} slots have reported, the batch's offsets are
     * committed and the next batch is dispatched.
     *
     * @param pos    index of the dispatcher slot reporting back
     * @param status delivery status code (UNREACHABLE, TIMEOUT, INTERRUPTED
     *               count as failures; anything else as success)
     */
    public synchronized void ack(int pos, int status) {
    	if (status == UNREACHABLE || status == TIMEOUT || status == INTERRUPTED) {
    		errHandler.OnError(status, offsets.get(pos));
    	}

    	count++;
    	if (count >= bufferSize) {
    		// Whole batch acknowledged: persist consumed offsets, then refill.
    		consumer.commitOffsets();
    		count = 0;
    		disp();
    	}
    }

    /**
     * @param topic      Kafka topic to consume
     * @param config     Kafka high-level consumer configuration
     * @param bufferSize batch size and number of dispatcher sockets
     * @param ctx        shared ZeroMQ context
     * @param conn       ZeroMQ endpoint the dispatchers connect to
     * @param timeout    per-delivery timeout for a dispatcher
     * @param retry      delivery retry count for a dispatcher
     * @param errHandler sink for failed-delivery notifications
     */
    public ZmqConsumerP(String topic, ConsumerConfig config,
    	int bufferSize, ZContext ctx, String conn,
    		int timeout, int retry, ErrorHandler errHandler) {
    	super(topic, config, bufferSize, ctx, conn, timeout, retry, errHandler);
        id = CSBUtils.sockId();
    }

    /**
     * Pulls up to {@code bufferSize} messages from the Kafka stream and
     * dispatches one to each slot. A message of the form
     * {@code %rid%payload} carries an explicit reply id that is split out
     * and passed alongside the payload.
     */
    private void disp() {
    	offsets.clear();
        for (int i = 0; i < bufferSize; i++) {
        	if (it.hasNext()) {
        		MessageAndMetadata<byte[], byte[]> mam = it.next();
        		byte[] message = mam.message();
        		long offset = mam.offset();
        		offsets.add(i, offset);

        		// ISO-8859-1 maps bytes 0-255 one-to-one onto chars, so the
        		// byte[] -> String -> byte[] round trip below is lossless even
        		// for binary payloads (the platform default charset was not).
        		String msg = new String(message, StandardCharsets.ISO_8859_1);
        		byte[] rid = null;
            	if (msg.startsWith("%")) {
            		int index = msg.indexOf("%", 1);
            		// NOTE(review): (1, index - 1) drops the last character of
            		// the rid, and substring(index) keeps the trailing '%' in
            		// the payload — suspected off-by-one; kept as-is because
            		// the producer's wire format is not visible here. Confirm
            		// against the producer before changing.
            		rid = msg.substring(1, index - 1).getBytes(StandardCharsets.ISO_8859_1);
            		msg = msg.substring(index);
            		message = msg.getBytes(StandardCharsets.ISO_8859_1);
            	}
        		disps.get(i).dispatch(message, rid);
        	}
        }
    }

    /**
     * Creates the Kafka consumer and the dispatcher pool, pushes the first
     * batch, then parks until the thread is interrupted, at which point the
     * dispatchers are disconnected and the consumer shut down.
     */
    @Override
    public void run() {
    	consumer = kafka.consumer.Consumer.
            	createJavaConsumerConnector(config);
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        // Integer.valueOf instead of the deprecated new Integer(int).
        topicCountMap.put(topic, Integer.valueOf(1));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        		consumer.createMessageStreams(topicCountMap);
        KafkaStream<byte[], byte[]> stream =
        		consumerMap.get(topic).get(0);
        it = stream.iterator();
        LOG.info(id + ": consumer created, topic: " + topic);

        for (int i = 0; i < bufferSize; i++) {
        	disps.add(new ZmqDispatcher(ctx, conn, timeout, retry, this, i));
        }

        count = 0;
        disp();

        // Park until interrupted; all real work happens on dispatcher threads
        // and in ack()/disp().
        while (!Thread.currentThread().isInterrupted()) {
        	try {
				Thread.sleep(1000 * 60);
			} catch (InterruptedException e) {
				// Catching InterruptedException clears the interrupt flag;
				// restore it so the loop condition observes the interrupt and
				// the thread can actually shut down (previously it never could).
				Thread.currentThread().interrupt();
			}
        }

        for (int i = 0; i < bufferSize; i++) {
        	disps.get(i).disconnect();
        }
        consumer.shutdown();
    }
}

