package main.java.top_ip;

import lombok.SneakyThrows;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.*;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

import java.util.Arrays;

/**
 * TopIpTopology
 *
 * @author zhangyimin
 * @version 1.0
 * @date 2018-11-13 2:48 PM
 */
public class TopIpTopology {

    /** ZooKeeper host shared by the broker lookup and the spout's offset storage. */
    private static final String ZK_HOST = "10.16.7.36";
    /** ZooKeeper client port. */
    private static final int ZK_PORT = 2181;
    /** Kafka topic to consume; the same value is reused as the spout's consumer id. */
    private static final String TOPIC = "top_ip";
    /** Storm's root path in ZooKeeper. */
    private static final String ZK_ROOT = "/storm";

    /**
     * Builds and submits the {@code top_ip} topology to an in-process
     * {@link LocalCluster}:
     * <pre>kafka-reader (spout) -&gt; split_bolt (shuffle) -&gt; count_bolt (grouped by "ip")</pre>
     * The cluster keeps running until the JVM is terminated.
     *
     * @param args unused
     */
    @SneakyThrows
    public static void main(String[] args) {
        BrokerHosts brokerHosts = new ZkHosts(ZK_HOST + ":" + ZK_PORT);
        SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, TOPIC, ZK_ROOT, TOPIC);
        // Deserialize every Kafka message as a plain String; a custom payload
        // type would require its own Scheme implementation.
        spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
        // ZooKeeper servers the spout uses to persist consumer offsets.
        spoutConfig.zkServers = Arrays.asList(ZK_HOST);
        spoutConfig.zkPort = ZK_PORT;

        TopologyBuilder builder = new TopologyBuilder();
        // Spout: pulls raw records from Kafka.
        builder.setSpout("kafka-reader", new KafkaSpout(spoutConfig));
        // First-level bolt: splits input, tuples distributed randomly.
        builder.setBolt("split_bolt", new TopIpSplitBolt()).shuffleGrouping("kafka-reader");
        // Second-level bolt: fieldsGrouping on "ip" guarantees all tuples for
        // the same IP reach the same task, so per-IP counts are correct.
        builder.setBolt("count_bolt", new TopIpCountBolt()).fieldsGrouping("split_bolt", new Fields("ip"));

        Config config = new Config();
        // In-process cluster for local development; intentionally never shut
        // down here — kill the JVM to stop the topology.
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(TOPIC, config, builder.createTopology());
    }

}
