/*
 * This file is a part of qloudgen-esb. 
 * You can redistribute qloudgen-esb and/or modify it under the terms of the Lesser GNU General Public License version 3. 
 * qloudgen-esb is distributed WITHOUT ANY WARRANTY. 
 * 
 * See the Lesser GNU General Public License for more details at http://www.gnu.org/licenses/.
 *  
 * Copyright (C) 2014 Qloudgen Technology
*/

package com.qloudgen.csb;

import java.io.FileInputStream;
import java.io.IOException;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import kafka.producer.ProducerConfig;

import org.I0Itec.zkclient.ZkClient;
import org.I0Itec.zkclient.serialize.BytesPushThroughSerializer;
import org.apache.log4j.Logger;
import org.json.JSONObject;
import org.zeromq.ZContext;

/**
 * Runnable that keeps a pool of {@link ZmqProducer} workers alive, forwarding
 * messages from a ZeroMQ connection into Kafka. Broker addresses are
 * discovered from ZooKeeper at startup; dead worker tasks are resubmitted
 * every {@code CSB.SENDER_CHECK_INTERVAL} milliseconds.
 */
public class Sender implements Runnable {
    private final String zkConnect;   // ZooKeeper connect string used for broker discovery
    private final int numThreads;     // size of the producer worker pool
    private final int ackMode;        // Kafka "request.required.acks" value
    private final ZContext ctx;       // shared ZeroMQ context for the workers
    private final String conn;        // ZeroMQ endpoint the workers consume from
    private final String id = CSBUtils.sockId();  // instance tag used in log lines

    private ExecutorService executor;
    private ProducerConfig config;

    private static final Logger LOG = Logger.getLogger(Sender.class.getName());

    /**
     * @param zkConnect  ZooKeeper connect string (host:port[,host:port...])
     * @param numThreads number of concurrent ZmqProducer workers
     * @param ackMode    Kafka request.required.acks setting (e.g. 0, 1, -1)
     * @param ctx        shared ZeroMQ context
     * @param conn       ZeroMQ endpoint passed to each worker
     */
    public Sender(String zkConnect, int numThreads, int ackMode,
            ZContext ctx, String conn) {
        this.zkConnect = zkConnect;
        this.numThreads = numThreads;
        this.ackMode = ackMode;
        this.ctx = ctx;
        this.conn = conn;
    }

    /**
     * Builds the producer configuration, launches the worker pool, then
     * supervises it: any finished or cancelled worker is restarted on each
     * check interval. The loop exits on interruption or a check failure.
     */
    @Override
    public void run() {
        config = createProducerConfig();

        Future<?>[] tasks = new Future[numThreads];
        executor = Executors.newFixedThreadPool(numThreads);
        for (int i = 0; i < numThreads; i++) {
            tasks[i] = executor.submit(new ZmqProducer(config, ctx, conn));
        }

        while (true) {
            try {
                checkTasks(tasks);
                Thread.sleep(CSB.SENDER_CHECK_INTERVAL);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe it.
                Thread.currentThread().interrupt();
                LOG.error(id + ": task check error, exit loop...", e);
                break;
            } catch (Exception e) {
                LOG.error(id + ": task check error, exit loop...", e);
                break;
            }
        }
    }

    /**
     * Resubmits a ZmqProducer for every slot whose task is missing, done,
     * or cancelled, keeping the pool at {@code numThreads} live workers.
     */
    private void checkTasks(Future<?>[] tasks) throws Exception {
        for (int i = 0; i < numThreads; i++) {
            if (tasks[i] == null
                || tasks[i].isDone()
                || tasks[i].isCancelled()) {
                tasks[i] = executor.submit(new ZmqProducer(config, ctx, conn));
                LOG.warn(id + ": producer stopped, restarting...");
            }
        }
    }

    /**
     * Loads producer settings from {@code conf/csbproducer.properties},
     * discovers up to three live brokers from ZooKeeper's
     * {@code /brokers/ids} registry, and assembles the Kafka
     * {@link ProducerConfig}.
     */
    private ProducerConfig createProducerConfig() {
        Properties props = new Properties();
        FileInputStream fis = null;
        try {
            fis = new FileInputStream("conf/csbproducer.properties");
            props.load(fis);
        } catch (IOException e) {
            // Best-effort load: fall back to defaults below, but don't hide the failure.
            LOG.warn(id + ": failed to load conf/csbproducer.properties, using defaults", e);
        } finally {
            if (fis != null) {
                try {
                    fis.close();
                } catch (IOException ignored) {
                    // Nothing useful to do if close fails.
                }
            }
        }

        // ZooKeeper timeouts are consumed here, not by the Kafka producer,
        // so strip them from the properties afterwards.
        int sessionTimeout = Integer.parseInt(props.getProperty("zookeeper.session.timeout.ms", "6000"));
        props.remove("zookeeper.session.timeout.ms");
        int connTimeout = Integer.parseInt(props.getProperty("zookeeper.connection.timeout.ms", "6000"));
        props.remove("zookeeper.connection.timeout.ms");

        // Discover broker host:port pairs from the /brokers/ids registry,
        // capped at three entries for the bootstrap list.
        String blist = "";
        ZkClient zkClient = new ZkClient(zkConnect, sessionTimeout,
                connTimeout, new BytesPushThroughSerializer());
        try {
            List<String> brokers = zkClient.getChildren("/brokers/ids");
            if (brokers != null && !brokers.isEmpty()) {
                int n = Math.min(brokers.size(), 3);
                for (int i = 0; i < n; i++) {
                    JSONObject jsonObj = new JSONObject(
                        new String((byte[]) zkClient.readData(
                            "/brokers/ids/" + brokers.get(i))));
                    String host = jsonObj.getString("host");
                    String port = String.valueOf(jsonObj.getInt("port"));
                    if (!host.isEmpty() && !port.isEmpty()) {
                        String broker = host + ":" + port;
                        blist = blist.isEmpty() ? broker : blist + "," + broker;
                    }
                }
            }
        } finally {
            // Release the ZooKeeper session; it is only needed for discovery.
            zkClient.close();
        }

        props.put("metadata.broker.list", blist);
        // Properties values must be Strings: a boxed Integer is invisible to
        // getProperty(), so the ack setting would be silently dropped.
        props.put("request.required.acks", String.valueOf(ackMode));
        props.put("producer.type", "sync");
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");
        props.put("serializer.class", "kafka.serializer.DefaultEncoder");
        props.put("partitioner.class", "kafka.producer.DefaultPartitioner");
        props.put("client.id", Sender.class.getName());

        return new ProducerConfig(props);
    }
}

