/*
 * TOP SECRET Copyright 2006-2015 Transsion.com All rights reserved. This software is the confidential and proprietary
 * information of Transsion.com ("Confidential Information"). You shall not disclose such Confidential Information and
 * shall use it only in accordance with the terms of the license agreement you entered into with Transsion.com.
 */
package com.palmplaystore.bigdata.storm.appupdate;

import java.io.IOException;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import com.palmplaystore.bigdata.redis.RedisCluHelper;

@Component
public class AppUpdateTopo {

    /** SLF4J convention: one static final logger per class. */
    private static final Logger LOGGER = LoggerFactory.getLogger(AppUpdateTopo.class);

    private static final String KAFKA_SPOUT_ID = "kafka-spout";
    private static final String APP_SPLIT_BOLT = "app-split-bolt";
    private static final String APP_SAVE_BOLT  = "app-save-bolt";
    private static final String TOPOLOGY_NAME  = "app-update-topology";
    private static final String STORM_MODE     = "cluster";
    /** How long (ms) a local-mode run is kept alive before the topology is killed. */
    private static final long   LOCAL_RUN_MILLIS = 1_000_000L;

    @Value("#{settings['spring.kafka.consumer.group-id']}")
    private String groupId;

    @Value("#{settings['storm.kafka.consumer.appupdate_topic']}")
    private String appUpdateTopic;

    @Value("#{settings['spring.kafka.bootstrap-servers']}")
    private String bootstrapServers;

    @Value("#{settings['storm.kafka.consumer.topology']}")
    private String topology;

    @Value("#{settings['storm.kafka.cluster.mode']}")
    private String clusterMode;

    // Numeric settings are injected directly as int: Spring converts the property
    // text for us, which removes the scattered Integer.parseInt calls the previous
    // version performed at every use site.
    @Value("#{settings['storm.kafka.spout.parallelism_hint']}")
    private int parallelismHint;

    @Value("#{settings['spring.kafka.consumer.auto-offset-reset']}")
    private String autoOffsetReset;

    @Value("#{settings['storm.num.workers']}")
    private int numWorkers;

    @Value("#{settings['storm.kafka.topology.max.spout.pending']}")
    private int maxSpoutPending;

    @Value("#{settings['storm.kafka.work.heap.memory.mb']}")
    private int workHeapMemory;

    @Value("#{settings['storm.kafka.topology.work.heap.memory.mb']}")
    private int topologyWorkHeapMemory;

    @Value("#{settings['app.split.bolt.task.num']}")
    private int appSplitBoltTaskNum;

    @Value("#{settings['app.save.bolt.task.num']}")
    private int appSaveBoltTaskNum;

    @Value("#{settings['topology.message.timeout.secs']}")
    private int topologyMessageTimeoutSecs;

    @Value("#{settings['storm.nimbus.retry.times']}")
    private int stormNimbusRetryTimes;

    @Value("#{settings['kafka.max.poll.records']}")
    private int kafkaMaxPollRecords;

    @Autowired
    RedisCluHelper redisCluHelper;

    /**
     * Builds the app-update Storm topology (Kafka spout -> split bolt -> save bolt)
     * and submits it either to the cluster or to an in-process {@link LocalCluster},
     * depending on the configured {@code storm.kafka.cluster.mode}.
     *
     * @throws AuthorizationException   if the Nimbus submission is not authorized
     * @throws AlreadyAliveException    if a topology with the same name is already running
     * @throws InvalidTopologyException if the assembled topology is rejected by Storm
     * @throws IOException              declared for compatibility with the original signature
     */
    public void startConsumeKafka() throws AuthorizationException, AlreadyAliveException, InvalidTopologyException, IOException {
        Config conf = newStormConfig();
        TopologyBuilder builder = newTopologyBuilder();

        if (STORM_MODE.equals(clusterMode)) {
            // Cluster mode: submit under the configured topology name.
            // NOTE(review): cluster mode uses the injected `topology` name while local
            // mode uses the TOPOLOGY_NAME constant — presumably intentional, but worth
            // confirming the two names are meant to differ.
            StormSubmitter.submitTopology(topology, conf, builder.createTopology());
        } else {
            // Local mode: run in-process for LOCAL_RUN_MILLIS, then tear down.
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createTopology());
            Utils.sleep(LOCAL_RUN_MILLIS);
            cluster.killTopology(TOPOLOGY_NAME);
            cluster.shutdown();
        }
    }

    /** Wires the Kafka spout and the two bolts into a TopologyBuilder. */
    private TopologyBuilder newTopologyBuilder() {
        TopologyBuilder builder = new TopologyBuilder();

        // parallelismHint: number of executors for the spout; each task runs on some
        // thread in some worker process across the cluster.
        builder.setSpout(KAFKA_SPOUT_ID, new KafkaSpout<>(newKafkaSpoutConfig()), parallelismHint);

        // The split bolt subscribes to the spout's stream; shuffleGrouping distributes
        // the spout's tuples uniformly at random across the AppSplitBolt instances.
        builder.setBolt(APP_SPLIT_BOLT, new AppSplitBolt(), appSplitBoltTaskNum)
               .shuffleGrouping(KAFKA_SPOUT_ID);

        // The save bolt likewise receives the split bolt's output via shuffle grouping.
        builder.setBolt(APP_SAVE_BOLT, new AppSaveBolt(), appSaveBoltTaskNum)
               .shuffleGrouping(APP_SPLIT_BOLT);

        return builder;
    }

    /** Builds the Kafka spout configuration from the injected consumer settings. */
    private KafkaSpoutConfig<String, String> newKafkaSpoutConfig() {
        KafkaSpoutConfig<String, String> kafkaSpoutConfig = KafkaSpoutConfig.builder(bootstrapServers,
                                                                                     appUpdateTopic).build();
        // The consumer properties are applied to the built config's property map,
        // matching how the spout reads them at open() time.
        kafkaSpoutConfig.getKafkaProps().put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        kafkaSpoutConfig.getKafkaProps().put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        kafkaSpoutConfig.getKafkaProps().put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, kafkaMaxPollRecords);
        return kafkaSpoutConfig;
    }

    /** Assembles the Storm runtime configuration (workers, backpressure, memory, timeouts). */
    private Config newStormConfig() {
        Config conf = new Config();
        conf.setNumWorkers(numWorkers);

        // Spouts usually emit faster than downstream bolts consume. When a spout task
        // has TOPOLOGY_MAX_SPOUT_PENDING un-acked tuples in flight, it pauses until
        // some are acked; the limit applies per spout task.
        conf.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, maxSpoutPending);

        // Heap memory allotted to each worker, and the per-worker ceiling.
        conf.put(Config.WORKER_HEAP_MEMORY_MB, workHeapMemory);
        conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, topologyWorkHeapMemory);

        // Tuple processing timeout (seconds) before a tuple is considered failed.
        conf.put(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS, topologyMessageTimeoutSecs);

        // Number of retries when connecting to Nimbus.
        conf.put(Config.STORM_NIMBUS_RETRY_TIMES, stormNimbusRetryTimes);
        return conf;
    }
}
