package kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Tuple;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;

/**
 * Storm topology that reads messages from a Kafka topic and logs each record's
 * key and value through a simple terminal bolt.
 *
 * <p>Run with a topology name as the first CLI argument to submit to a remote
 * Storm cluster; run with no arguments to start an in-process
 * {@link LocalCluster} for local testing.
 */
public class ReadFromKafka {
    private static final Logger LOGGER = LoggerFactory.getLogger(ReadFromKafka.class);
    private static final String BOOTSTRAP_SERVERS = "liu:9092";
    private static final String TOPIC_NAME = "storm-kafka-topic";
    private static final String TOPOLOGY_NAME = "read-from-kafka-topology";

    public static void main(String[] args) throws Exception {
        KafkaSpoutConfig<String, String> spoutConfig = KafkaSpoutConfig.builder(BOOTSTRAP_SERVERS, TOPIC_NAME)
                // Everything except the group ID is optional. The group ID must be set,
                // otherwise the consumer throws InvalidGroupIdException.
                .setProp(ConsumerConfig.GROUP_ID_CONFIG, "kafkaSpoutGroup")
                .build();

        KafkaSpout<String, String> kafkaSpout = new KafkaSpout<>(spoutConfig);
        PrintBolt printBolt = new PrintBolt();

        // Wire the topology: kafkaSpout -> printBolt (shuffle grouping).
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafkaSpout", kafkaSpout);
        builder.setBolt("printBolt", printBolt).shuffleGrouping("kafkaSpout");

        Config config = new Config();

        if (args != null && args.length > 0) {
            // Cluster mode: the first CLI argument is the topology name.
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            // Local mode for testing.
            // Start a console producer to feed test data, e.g.:
            // kafka-console-producer.sh --bootstrap-server node-1:9092 --topic storm-topic --property parse.key=true
            //
            // NOTE(review): the LocalCluster is deliberately left open so the
            // topology keeps consuming until the JVM is terminated.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(TOPOLOGY_NAME, config, builder.createTopology());
        }
    }

    /** Bolt that logs the Kafka record key and value of every incoming tuple, then acks it. */
    private static class PrintBolt extends BaseRichBolt {
        private OutputCollector collector;

        @Override
        public void prepare(Map<String, Object> map, TopologyContext topologyContext, OutputCollector outputCollector) {
            collector = outputCollector;
        }

        @Override
        public void execute(Tuple tuple) {
            String key = tuple.getStringByField("key");
            String value = tuple.getStringByField("value");

            // Parameterized logging: avoids eager string concatenation when the
            // INFO level is disabled.
            LOGGER.info("key received from kafka: {}", key);
            LOGGER.info("value received from kafka: {}", value);

            // Must ack, otherwise the spout re-emits and the Kafka messages are
            // consumed repeatedly.
            collector.ack(tuple);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            // Terminal bolt: emits nothing downstream, so no fields are declared.
        }
    }

}