package com.gree.grih.datastream;

import clojure.lang.IFn;
import org.apache.log4j.Logger;
import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.topology.TopologyBuilder;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/**
 * DataStreamTopology — wires a Kafka spout to an HBase bolt and submits the
 * resulting topology to a Storm cluster.
 * Created by wander on 21 Apr 2017.
 */
public class DataStreamTopology {

    private static final Logger logger = Logger.getLogger(DataStreamTopology.class);
    public Properties configs;
    public BoltBuilder boltBuilder;
    public SpoutBuilder spoutBuilder;


    /**
     * Loads the topology configuration and prepares the spout/bolt builders.
     *
     * <p>Fix: the original ignored {@code configFile} and always loaded the
     * hard-coded classpath resource, making the command-line argument dead.
     * We now try the given path on disk first and fall back to the bundled
     * {@code /config.properties}. The stream is closed via try-with-resources
     * (the original leaked it).
     *
     * @param configFile path to a properties file on disk; if absent, the
     *                   classpath resource {@code /config.properties} is used
     * @throws Exception declared for caller compatibility; load failures are
     *                   logged and terminate the JVM with a non-zero status
     */
    public DataStreamTopology(String configFile) throws Exception {
        configs = new Properties();
        try (InputStream in = openConfigStream(configFile)) {
            configs.load(in);
            boltBuilder = new BoltBuilder(configs);
            spoutBuilder = new SpoutBuilder(configs);
        } catch (Exception ex) {
            // Log with the cause attached (instead of printStackTrace) and exit
            // non-zero so supervisors/schedulers can detect the failure; the
            // original used System.exit(0), which signals success.
            logger.error("Exception while loading configuration from " + configFile, ex);
            System.exit(1);
        }
    }

    /**
     * Opens the configuration stream: the on-disk {@code configFile} when it
     * exists, otherwise the classpath resource {@code /config.properties}.
     *
     * @throws IOException if neither source is available
     */
    private static InputStream openConfigStream(String configFile) throws IOException {
        File candidate = new File(configFile);
        if (candidate.isFile()) {
            return new FileInputStream(candidate);
        }
        InputStream in = DataStreamTopology.class.getResourceAsStream("/config.properties");
        if (in == null) {
            throw new IOException("Config not found on disk (" + configFile
                    + ") or on classpath (/config.properties)");
        }
        return in;
    }

    /**
     * Builds the topology (Kafka spout feeding the HBase bolt via shuffle
     * grouping) and submits it to the Storm cluster.
     *
     * @throws Exception if a required *_COUNT property is missing/non-numeric
     *                   (NumberFormatException/NPE) or submission fails
     */
    private void submitTopology() throws Exception {
        TopologyBuilder builder = new TopologyBuilder();
        KafkaSpout kafkaSpout = spoutBuilder.buildKafkaSpout();
        HbaseBolt hbaseBolt = boltBuilder.buildHbaseBolt();

        // Parallelism hint: number of executors/threads spawned for the spout.
        int kafkaSpoutCount = Integer.parseInt(configs.getProperty(KeysConfig.KAFKA_SPOUT_COUNT));
        builder.setSpout(configs.getProperty(KeysConfig.KAFKA_SPOUT_ID), kafkaSpout, kafkaSpoutCount);

        // Sink bolt: consumes the spout's stream with shuffle grouping.
        int hbaseBoltCount = Integer.parseInt(configs.getProperty(KeysConfig.HBASE_BOLT_COUNT));
        builder.setBolt(configs.getProperty(KeysConfig.HBASE_BOLT_ID), hbaseBolt, hbaseBoltCount)
                .shuffleGrouping(configs.getProperty(KeysConfig.KAFKA_SPOUT_ID));

        Config conf = new Config();
        String topologyName = configs.getProperty(KeysConfig.TOPOLOGY_NAME);

        // Number of worker processes created for the topology in the cluster.
        conf.setNumWorkers(1);
        StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
    }

    /**
     * Entry point. Uses {@code args[0]} as the config file path when supplied,
     * otherwise falls back to "config.properties".
     */
    public static void main(String[] args) throws Exception {
        String configFile;
        if (args.length == 0) {
            System.out.println("Missing input : config file location, using default");
            configFile = "config.properties";

        } else{
            configFile = args[0];
        }

        DataStreamTopology topology = new DataStreamTopology(configFile);
        topology.submitTopology();
    }
}
