package com.roy.kafka;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.spout.ByTopicRecordTranslator;
import org.apache.storm.kafka.spout.DefaultRecordTranslator;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

import java.util.Properties;

/**
 * Submit with: storm jar *.jar com.roy.kafka.KafkaTopology [cluster]
 * (pass the argument "cluster" to submit to a remote cluster; omit it to run locally)
 */
public class KafkaTopology {

    public static final String ZK_NODES = "master:2181,slave1:2181,slave2:2181";
    public static final String BOOTSTRAP_SERVERS = "master:9092,slave1:9092,slave2:9092";
    public static final String TOPIC="track";
    public static void main(String[] args) {

        ByTopicRecordTranslator<String,String> brt =
                new ByTopicRecordTranslator<>( (r) -> new Values(r.value(),r.topic()),new Fields("values",TOPIC));
        brt.forTopic(TOPIC,(r) -> new Values(r.value(),r.topic()), new Fields("values",TOPIC));

        Properties properties = new Properties();
//        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        properties.put("group.id", "track");
//        properties.put("enable.auto.commit", "false");
//        properties.put("auto.commit.interval.ms", "60000");
//        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaSpoutConfig.Builder<String, String> kafkaBuilder = new KafkaSpoutConfig.Builder<>(BOOTSTRAP_SERVERS,TOPIC);
        kafkaBuilder.setProp(properties)
                .setRecordTranslator(new DefaultRecordTranslator<>())//使用这个默认转换器的字段为record.topic(), record.partition(), record.offset(), record.key(), record.value()
                .setFirstPollOffsetStrategy(KafkaSpoutConfig.FirstPollOffsetStrategy.LATEST)
                .setOffsetCommitPeriodMs(1000);

        KafkaSpoutConfig spoutConfig = kafkaBuilder.build();

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafka_spout", new KafkaSpout(spoutConfig), 1);
        builder.setBolt("bolt1",new MyKafkaBolt(),1).shuffleGrouping("kafka_spout");


        Config config = new Config();
        config.setDebug(true);
//        config.setMaxSpoutPending(4);
        config.setNumWorkers(4);
        config.setNumAckers(0);
//        config.put("kafka.topic.wildcard.match",true);

        if(args.length>0 && "cluster".equals(args[0])){
            //集群提交
            try {
                StormSubmitter.submitTopology("StormKafkaClient", config, builder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            } catch (AuthorizationException e) {
                e.printStackTrace();
            }
        }else{
            //本地提交
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("kafkatest",config,builder.createTopology());
        }
    }
}
