package com.zbj.storm.kafka;

import com.google.common.collect.Maps;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.kafka.spout.ByTopicRecordTranslator;
import org.apache.storm.kafka.spout.Func;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * KafKaTopic — local Storm word-count topology fed by a Kafka spout.
 * Reference links:
 *  http://blog.itpub.net/31506529/viewspace-2215095/
 *  https://www.cnblogs.com/ye-hcj/p/10264092.html
 * @author weigang
 * @create 2019-09-18
 **/
public class KafKaTopic {

    public static final String TOPIC = "kafkaStorm1";

    /**
     * Builds a word-count topology that consumes string records from Kafka,
     * runs it on a {@link LocalCluster} for 10 minutes, then tears it down.
     *
     * @param args unused
     * @throws Exception if topology submission or the sleep is interrupted
     */
    public static void main(String[] args) throws Exception {

        String spoutId = "kafkaSpout";

        // Kafka spout configuration: bootstrap server(s) + topic(s) to consume.
        // Parameterized Builder<K, V> instead of the raw type.
        KafkaSpoutConfig.Builder<String, String> builder = new KafkaSpoutConfig.Builder<>(
                "172.31.15.175:9092", Arrays.asList(TOPIC));
        //"172.31.15.175:9092,172.31.15.175:9093,172.31.15.175:9094", Arrays.asList(TOPIC));

        // Start from the latest offset on first poll (skip any historical messages).
        builder.setFirstPollOffsetStrategy(KafkaSpoutConfig.FirstPollOffsetStrategy.LATEST);

        // Translates each incoming Kafka record into a Storm tuple (value, topic).
        // JDK 8+ lambda equivalent:
        //   new ByTopicRecordTranslator<>((r) -> new Values(r.value(), r.topic()), new Fields("values", TOPIC));
        ByTopicRecordTranslator<String, String> recordTranslator = new ByTopicRecordTranslator<>(new Func<ConsumerRecord<String, String>, List<Object>>() {
            @Override
            public List<Object> apply(ConsumerRecord<String, String> record) {
                return new Values(record.value(), record.topic());
            }
        }, new Fields("values", TOPIC));

        builder.setRecordTranslator(recordTranslator);

        // Kafka consumer properties: string (de)serialization and consumer group id.
        Map<String, Object> consumerMap = Maps.newHashMap();
        consumerMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerMap.put(ConsumerConfig.GROUP_ID_CONFIG, "zbj-kafka");
        builder.setProp(consumerMap);

        KafkaSpoutConfig<String, String> spoutConfig = builder.build();
        TopologyBuilder topologyBuilder = new TopologyBuilder();

        // Built-in KafkaSpout reads records from Kafka and emits them into the topology;
        // no custom spout implementation is needed.
        KafkaSpout<String, String> kafkaSpout = new KafkaSpout<>(spoutConfig);
        topologyBuilder.setSpout(spoutId, kafkaSpout);

        // Split each incoming line into words.
        topologyBuilder.setBolt("word-split", new WordSpliter()).shuffleGrouping(spoutId);

        // Four writer bolts, each producing a UUID-named output file; fieldsGrouping on
        // "word" guarantees the same word is always routed to the same bolt instance.
        topologyBuilder.setBolt("writer", new WriterBolt(), 4).fieldsGrouping("word-split", new Fields("word"));

        Config config = new Config();
        config.setNumWorkers(4);
        config.setNumAckers(0); // no acking: at-most-once processing
        config.setDebug(false);

        // Run locally for 10 minutes, then kill the topology and shut the cluster
        // down even if the sleep is interrupted (previously the cluster was leaked).
        LocalCluster cluster = new LocalCluster();
        try {
            cluster.submitTopology("wordCount", config, topologyBuilder.createTopology());
            TimeUnit.MINUTES.sleep(10);
            cluster.killTopology("wordCount");
        } finally {
            cluster.shutdown();
        }

        // To run on a real Storm cluster instead:
        //StormSubmitter.submitTopology("wordCount", config, topologyBuilder.createTopology());
    }
}