package test.sunyu.storm.demo.demo3;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.*;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;
import org.slf4j.Logger;
import sunyu.kit.common.LogKit;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * @author 孙宇
 */
/**
 * Topology entry point: consumes messages from a Kafka topic via {@code KafkaSpout}
 * and forwards them to {@code Bolt1}.
 *
 * <p>Run with a single argument (the topology name) to submit to the cluster;
 * run with no arguments to execute in a {@link LocalCluster} for one hour.
 *
 * @author 孙宇
 */
public class TopologyMain {

    private static final Logger logger = LogKit.getLogger();

    /**
     * Builds the topology and submits it to the cluster (if {@code args[0]} names
     * the topology) or to a local cluster otherwise.
     *
     * @param args optional; {@code args[0]} is the topology name for cluster submission
     */
    public static void main(String[] args) {
        // Production cluster addresses, kept for reference:
        //   kafka: cdh-kafka1:9092,cdh-kafka2:9092
        //   zk:    cdh0:2181,cdh1:2181,cdh2:2181
        String kafkaCluster = "127.0.0.1:9092";
        String zkCluster = "127.0.0.1:2181";
        String kafkaTopic = "US_GENERAL";
        String kafkaZkRoot = "/stormkafka"; // ZK root where the spout persists consumed offsets
        String kafkaId = "test.guobiao";    // consumer id stored under kafkaZkRoot

        Config config = new Config();
        Map<String, String> brokerProps = new HashMap<>();
        brokerProps.put("metadata.broker.list", kafkaCluster);
        brokerProps.put("serializer.class", "kafka.serializer.DefaultEncoder");
        config.put("kafka.broker.properties", brokerProps);

        // Split "host:port,host:port" into a host list plus a single port.
        // NOTE(review): the spout takes one port for all ZK servers; this keeps
        // the last port seen — assumes every ZK node uses the same port.
        List<String> zkHosts = new ArrayList<>();
        int zkPort = 2181;
        for (String hostPort : zkCluster.split(",")) {
            String[] parts = hostPort.split(":");
            zkHosts.add(parts[0]);
            zkPort = Integer.parseInt(parts[1]); // parseInt: no needless Integer boxing
        }

        TopologyBuilder builder = new TopologyBuilder();

        BrokerHosts brokerHosts = new ZkHosts(zkCluster, "/brokers");
        SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, kafkaTopic, kafkaZkRoot, kafkaId);
        spoutConfig.zkServers = zkHosts;
        spoutConfig.zkPort = zkPort;
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());

        builder.setSpout("kafkaSpout", new KafkaSpout(spoutConfig));
        builder.setBolt("bolt1", new Bolt1()).localOrShuffleGrouping("kafkaSpout");

        if (args != null && args.length > 0) {
            logger.info("正在提交拓扑到集群处理");
            config.setNumWorkers(3);
            try {
                StormSubmitter.submitTopology(args[0], config, builder.createTopology());
            } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
                // Log through SLF4J with the cause attached instead of printStackTrace().
                logger.error("提交拓扑失败: {}", args[0], e);
            }
        } else {
            logger.info("正在提交拓扑到本地处理");
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("topo", config, builder.createTopology());

            // Let the local topology run for one hour, then tear it down cleanly.
            Utils.sleep(1000 * 60 * 60);

            cluster.killTopology("topo");
            cluster.shutdown();
        }
    }


}
