/*
 * Licensed to the Apache Software Foundation (ASF) under one
 *   or more contributor license agreements.  See the NOTICE file
 *   distributed with this work for additional information
 *   regarding copyright ownership.  The ASF licenses this file
 *   to you under the Apache License, Version 2.0 (the
 *   "License"); you may not use this file except in compliance
 *   with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */

package org.apache.storm.kafka;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;

import java.io.Serializable;

/**
 * This example sets up two topologies: one that puts test data into Kafka via the KafkaBolt,
 * and one that reads from a Kafka topic using the KafkaSpout and processes the data.
 *
 * Start ZooKeeper: cd $KAFKA_HOME && nohup bin/zookeeper-server-start.sh config/zookeeper.properties &
 * Start Kafka:     cd $KAFKA_HOME && nohup bin/kafka-server-start.sh config/server.properties &
 * Run locally:     java -jar target/storm-cpp-integration-1.1.1.jar org.apache.storm.kafka.Main
 */
public class Main {

    /** Default Kafka broker address used when none is given on the command line. */
    private static final String KAFKA_LOCAL_BROKER = "127.0.0.1:9092";

    /** Kafka topic that both the producer and consumer topologies operate on. */
    public static final String TOPIC_0 = "kafka-2-shell-topic";

    /**
     * Submits a producer topology (writes test data to Kafka) and a consumer
     * topology (subscribes to that data and processes it).
     *
     * @param args optional; {@code args[0]} overrides the Kafka broker URL,
     *             otherwise {@value #KAFKA_LOCAL_BROKER} is used
     * @throws Exception if topology submission fails
     */
    public static void main(String[] args) throws Exception {
        final String brokerUrl = args.length > 0 ? args[0] : KAFKA_LOCAL_BROKER;
        System.out.println("Running with broker url: " + brokerUrl);

        Config tpConf = getConfig();

        // NOTE(review): the original code also created an unused LocalCluster here;
        // it was never submitted to nor shut down, so it has been removed.

        // Send test data to Kafka.
        StormSubmitter.submitTopology(TOPIC_0 + "-producer", tpConf,
                ProducerTopology.newProducerTopology(brokerUrl, TOPIC_0));

        // Subscribe to the test data from Kafka and process it.
        StormSubmitter.submitTopology(TOPIC_0 + "-consumer", tpConf,
                ConsumerTopology.newConsumerTopology(brokerUrl, TOPIC_0));
    }

    /**
     * Builds the shared topology configuration.
     *
     * @return a {@link Config} with debug logging enabled so tuple flow is
     *         visible while experimenting
     */
    protected static Config getConfig() {
        Config config = new Config();
        config.setDebug(true);
        return config;
    }
}
