package com.imooc.bigdata.integration.kafka;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.topology.TopologyBuilder;

import java.util.UUID;

/**
 * Kafka + Storm integration test topology.
 *
 * <p>Wires a {@link KafkaSpout} (reading from the {@code project_topic} topic)
 * to a {@code LogProcessBolt} and runs the topology in an in-process
 * {@link LocalCluster} for local testing.
 *
 * Created by zghgchao 2018/2/5 21:58
 */
public class StormKafkaTopo {

    public static void main(String[] args) {
        // ZooKeeper quorum used by Kafka
        ZkHosts brokerZk = new ZkHosts("hadoop000:2181");

        // Kafka topic the spout consumes from
        String topicName = "project_topic";

        // ZK root path under which the KafkaSpout stores its read position (offset),
        // plus a per-run consumer id (random UUID, so offsets do not persist across runs)
        String offsetRoot = "/" + topicName;
        String consumerId = UUID.randomUUID().toString();

        SpoutConfig spoutCfg = new SpoutConfig(brokerZk, topicName, offsetRoot, consumerId);
        // Start consuming from the latest offset rather than from the beginning
        spoutCfg.startOffsetTime = kafka.api.OffsetRequest.LatestTime();

        String spoutId = KafkaSpout.class.getSimpleName();
        String boltId = LogProcessBolt.class.getSimpleName();

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout(spoutId, new KafkaSpout(spoutCfg));
        builder.setBolt(boltId, new LogProcessBolt()).shuffleGrouping(spoutId);

        // Run inside an in-process cluster; blocks until the JVM is killed
        new LocalCluster().submitTopology(
                StormKafkaTopo.class.getSimpleName(),
                new Config(),
                builder.createTopology());
    }

    /*
     * Storm + Kafka integration steps
     * 1) Create the KafkaSpout
     * 2) In the Bolt, read the data emitted by the KafkaSpout; the field name is "bytes"
     *
     * Errors seen while integrating Storm with Kafka
     * 1) Exception in thread "main"
     *    java.lang.NoClassDefFoundError: Kafka/api/OffsetRequest
     *
     *    Fix: add the kafka dependency
     *      <dependency>
     *          <groupId>org.apache.kafka</groupId>
     *          <artifactId>kafka_2.11</artifactId>
     *          <version>0.9.0.0</version>
     *      </dependency>
     *
     * 2) java.lang.NoClassDefFoundError:org/curator/shaded/com/google/common/cache/CacheBuilder
     *      <dependency>
     *       <groupId>org.apache.curator</groupId>
     *       <artifactId>curator-client</artifactId>
     *       <version>2.12.0</version>
     *       <exclusions>
     *       <exclusion>
     *       <groupId>org.slf4j</groupId>
     *       <artifactId>slf4j-log4j12</artifactId>
     *       </exclusion>
     *       </exclusions>
     *     </dependency>
     *
     * 3) java.lang.NoClassDefFoundError:Could not initialize class org.apache.log4j.Log4jLoggerFactory
     *
     * logstash + kafka integration
     *   1) Why did it keep consuming Kafka messages from the beginning?
     *      // configure the starting offset:
     *      spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime();
     *   2) Simplify the messages logstash sends via configuration
     *
     *      vi project.conf
     *       input {
     *       file {
     *       path => "/home/hadoop/data/logs/access.log"
     *       }
     *       }
     *
     *       output {
     *       kafka {
     *       topic_id => "project_topic"
     *       bootstrap_servers => "hadoop000:9092"
     *       batch_size => 1
     *       codec => plain {
     *       format => "%{message}"
     *       }
     *       }
     */
}
