package com.icreate;

import com.icreate.storm.bolt.IndicatorBoltBuilder;
import com.icreate.storm.bolt.XdrTypeBolt;
import com.icreate.storm.methods.indicators.fieldCreater.*;
import com.icreate.storm.spout.KafkaCITY_RC_MMSpout;
import com.icreate.storm.spout.KafkaCITY_SHARE_LTE_UP_MMSpout;
import com.icreate.storm.spout.KafkaCITY_SHARE_MMSpout;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.topology.IRichSpout;
import org.apache.storm.topology.TopologyBuilder;

import java.util.*;

/**
 * Entry point that wires up the XDR-processing Storm topology: Kafka spouts
 * (one per city topic) feed {@link XdrTypeBolt}s that dispatch each record to
 * the matching {@code FieldCreater}, and indicator bolts persist results to
 * MongoDB via {@link IndicatorBoltBuilder}.
 *
 * Created by hadoop on 17-4-5.
 */
public class XDRKafkaStorm {

    // XDR type name -> field parser for records on the CITY_RC_MM topic.
    private static final Map<String, FieldCreater> xdrsRCMap = new HashMap<>();
    // XDR type name -> field parser for records on the CITY_SHARE_MM topic.
    private static final Map<String, FieldCreater> xdrsSHAREMap = new HashMap<>();
    // XDR type name -> field parser for records on the CITY_SHARE_LTE_UP_MM topic.
    private static final Map<String, FieldCreater> xdrsSHARELTEMap = new HashMap<>();

    static {
        // Plain static initializer instead of double-brace initialization: the
        // anonymous-subclass idiom is a known serialization/classloading hazard
        // for objects shipped to Storm workers.
        // NOTE(review): a "Cell_MR" entry was commented out in the original and
        // stays disabled here.
        xdrsRCMap.put("UE_MR", new UE_MRfieldCreater());
        xdrsRCMap.put("Uu", new UufieldCreater());
        xdrsRCMap.put("X2_XDR", new X2fieldCreater());
        xdrsSHAREMap.put("MLTE_S1_MME", new S1_MMEfieldCreater());
        xdrsSHARELTEMap.put("MLTE_S1U_HTTP", new S1_UfieldCreater());
    }

    private static final String TOPOLOGY_NAME = "icreate-xdr-topology";

    // How long (ms) a locally-submitted topology runs before being killed.
    private static final long LOCAL_RUN_MILLIS = 60000L;

    /**
     * Builds and submits the production topology fed by the Maoming Kafka
     * cluster.
     *
     * @param args non-empty: submit to the remote cluster via
     *             {@link StormSubmitter}; empty or null: run in an in-process
     *             {@link LocalCluster} for about one minute, then shut down
     * @throws InterruptedException if the local-run sleep is interrupted
     */
    public static void main(String[] args)
            throws InvalidTopologyException, AuthorizationException, AlreadyAliveException, InterruptedException {

        TopologyBuilder builder = new TopologyBuilder();
        TimeZone.setDefault(TimeZone.getTimeZone("GMT+8"));

        // Maoming Kafka spouts, one per source topic.
        builder.setSpout("kafka-CITY_RC_MM-spout", newMMKafkaSpout("CITY_RC_MM", "kafka-CITY_RC_MM"), 2);
        builder.setSpout("kafka-CITY_SHARE_MM-spout", newMMKafkaSpout("CITY_SHARE_MM", "kafka-CITY_SHARE_MM"), 2);
        builder.setSpout("kafka-CITY_SHARE_LTE_UP_MM-spout", newMMKafkaSpout("CITY_SHARE_LTE_UP_MM", "kafka-CITY_SHARE_LTE_UP_MM"), 2);

        // Type-dispatch bolts; localOrShuffleGrouping keeps tuples on the same
        // worker when possible to avoid network hops.
        builder.setBolt("CITY_RC_MM-type-bolt", new XdrTypeBolt(xdrsRCMap), 3)
                .localOrShuffleGrouping("kafka-CITY_RC_MM-spout");
        builder.setBolt("CITY_SHARE_MM-type-bolt", new XdrTypeBolt(xdrsSHAREMap), 5)
                .localOrShuffleGrouping("kafka-CITY_SHARE_MM-spout");
        builder.setBolt("CITY_SHARE_LTE_UP_MM-type-bolt", new XdrTypeBolt(xdrsSHARELTEMap), 3)
                .localOrShuffleGrouping("kafka-CITY_SHARE_LTE_UP_MM-spout");

        addIndicatorBolts(builder, "mongodb://hadoop-slave1.icreate.com:27017/test");

        Config conf = new Config();
        if (args != null && args.length > 0) {
            // NOTE(review): with setNumAckers(0) tuples are never acked, so
            // max-spout-pending throttling has no effect; both settings are
            // kept unchanged for config parity — confirm intent.
            conf.setMaxSpoutPending(10000);
            conf.setNumAckers(0);
            conf.setNumWorkers(4);
            StormSubmitter.submitTopologyWithProgressBar(TOPOLOGY_NAME, conf, builder.createTopology());
        } else {
            runLocally(conf, builder);
        }
    }

    /**
     * Builds and submits a test topology using the project-local test Kafka
     * spouts instead of the Maoming cluster.
     *
     * @param args non-empty: args[0] is the Nimbus host for remote submission;
     *             empty or null: run in an in-process {@link LocalCluster}
     * @throws InterruptedException if the local-run sleep is interrupted
     */
    public static void test(String[] args)
            throws InvalidTopologyException, AuthorizationException, AlreadyAliveException, InterruptedException {

        TopologyBuilder builder = new TopologyBuilder();
        TimeZone.setDefault(TimeZone.getTimeZone("GMT+8"));

        // Test Kafka spouts (fixed test-broker configuration inside each class).
        builder.setSpout("kafka-CITY_RC_MM-spout", new KafkaCITY_RC_MMSpout());
        builder.setSpout("kafka-CITY_SHARE_MM-spout", new KafkaCITY_SHARE_MMSpout());
        builder.setSpout("kafka-CITY_SHARE_LTE_UP_MM-spout", new KafkaCITY_SHARE_LTE_UP_MMSpout());

        builder.setBolt("CITY_RC_MM-type-bolt", new XdrTypeBolt(xdrsRCMap))
                .shuffleGrouping("kafka-CITY_RC_MM-spout");
        builder.setBolt("CITY_SHARE_MM-type-bolt", new XdrTypeBolt(xdrsSHAREMap))
                .shuffleGrouping("kafka-CITY_SHARE_MM-spout");
        builder.setBolt("CITY_SHARE_LTE_UP_MM-type-bolt", new XdrTypeBolt(xdrsSHARELTEMap))
                .shuffleGrouping("kafka-CITY_SHARE_LTE_UP_MM-spout");

        addIndicatorBolts(builder, "mongodb://192.168.7.73:27017/test");

        Config conf = new Config();
        if (args != null && args.length > 0) {
            // Nimbus host name passed from command line.
            conf.put(Config.NIMBUS_HOST, args[0]);
            conf.setNumWorkers(4);
            StormSubmitter.submitTopologyWithProgressBar(TOPOLOGY_NAME, conf, builder.createTopology());
        } else {
            runLocally(conf, builder);
        }
    }

    /**
     * Registers the five indicator bolts that compute KPIs and write them to
     * the given MongoDB instance. Shared by {@link #main} and {@link #test}.
     */
    private static void addIndicatorBolts(TopologyBuilder builder, String mongoUri) {
        IndicatorBoltBuilder indicatorBoltBuilder = new IndicatorBoltBuilder(builder, mongoUri);
        indicatorBoltBuilder.setUE_MR();
        indicatorBoltBuilder.setUu();
        indicatorBoltBuilder.setX2();
        indicatorBoltBuilder.setS1_MME();
        indicatorBoltBuilder.setS1_U();
    }

    /**
     * Runs the topology in an in-process cluster for {@link #LOCAL_RUN_MILLIS},
     * then kills it and shuts the cluster down.
     */
    private static void runLocally(Config conf, TopologyBuilder builder) throws InterruptedException {
        conf.setMaxTaskParallelism(4);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createTopology());
        Thread.sleep(LOCAL_RUN_MILLIS);
        cluster.killTopology(TOPOLOGY_NAME);
        cluster.shutdown();
    }

    /**
     * Creates a Kafka spout consuming {@code topic} from the Maoming broker
     * cluster, tracking offsets in ZooKeeper under {@code spoutId}.
     *
     * @param topic   Kafka topic to consume (e.g. "CITY_RC_MM")
     * @param spoutId consumer id; also the ZooKeeper child node for offsets
     * @return a configured {@link KafkaSpout}
     */
    private static IRichSpout newMMKafkaSpout(String topic, String spoutId) {
        // ZooKeeper ensemble that knows the Kafka brokers.
        BrokerHosts brokerHosts = new ZkHosts("188.5.26.35:2181,188.5.26.36:2181,188.5.26.37:2181");
        // ZooKeeper root node for offset storage (must start with "/",
        // otherwise ZooKeeper rejects the path).
        String zkRoot = "/ICREATE_HADOOP_GZ_GROUP";
        SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
        // A custom Scheme could be set on spoutConfig here; the default
        // raw-bytes scheme is used.
        return new KafkaSpout(spoutConfig);
    }

}
