package com.seasky.solphire.storm.demo.hdfs;

import com.seasky.solphire.storm.demo.utils.ParamUtils;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.hdfs.bolt.HdfsBolt;
import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
import org.apache.storm.hdfs.bolt.format.FileNameFormat;
import org.apache.storm.hdfs.bolt.format.RecordFormat;
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.TimedRotationPolicy.TimeUnit;
import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;
import org.apache.storm.hdfs.bolt.sync.SyncPolicy;
import org.apache.storm.kafka.*;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

import java.util.Arrays;

/**
 * This class does the following:
 * 1. Reads messages from Kafka via a KafkaSpout.
 * 2. Processes the messages in Storm: (a) WordSplitterBolt splits each message
 *    into words, (b) WordCounterBolt aggregates the word counts.
 * 3. Writes the results to HDFS via an HdfsBolt.
 *  @author junhong
 *  @date 2018-07-16
 */
public class KafkaStormHdfsTopology {

    /**
     * Entry point.
     *
     * @param args optional: args[0] = nimbus hostname, args[1] = topology name.
     *             With at least one argument the topology is submitted to a
     *             remote cluster; with none it runs in a LocalCluster for one
     *             minute and is then shut down.
     * @throws AlreadyAliveException   if a topology with this name is already running
     * @throws InvalidTopologyException if the topology definition is rejected
     * @throws AuthorizationException  if the remote submission is not authorized
     * @throws InterruptedException    if the local-mode sleep is interrupted
     */
    public static void main(String[] args) throws AlreadyAliveException,
            InvalidTopologyException, InterruptedException, AuthorizationException {

        String zkHostStr = ParamUtils.getString("zk.server");
        int zkPort = ParamUtils.getInt("zk.port");
        String zks = joinHosts(zkHostStr, zkPort);

        String topic = ParamUtils.getString("kafka.topic.test1");
        // Default root path under which Storm keeps its state in ZooKeeper.
        String zkRoot = "/storm";
        String id = "word";

        // 1. Spout that reads messages from Kafka.
        SpoutConfig spoutConf = buildSpoutConfig(zks, zkHostStr, zkPort, topic, zkRoot, id);

        // 2. Bolt that writes results to HDFS.
        HdfsBolt hdfsBolt = buildHdfsBolt();

        // 3. Wire spout and bolts into a topology.
        TopologyBuilder builder = buildTopology(spoutConf, hdfsBolt);

        // 4. Launch: remote submission when a nimbus host is given, else local mode.
        Config conf = new Config();
        // args[1] overrides the default topology name (the class's simple name).
        // Check args for null/length BEFORE indexing into it.
        String topologyName = (args != null && args.length > 1)
                ? args[1]
                : KafkaStormHdfsTopology.class.getSimpleName();

        if (args != null && args.length > 0) {
            conf.put(Config.NIMBUS_HOST, args[0]);
            conf.setNumWorkers(3);
            // Propagate failures (incl. AuthorizationException) instead of
            // swallowing them with printStackTrace().
            StormSubmitter.submitTopologyWithProgressBar(topologyName, conf,
                    builder.createTopology());
        } else {
            conf.setMaxTaskParallelism(3);
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(topologyName, conf, builder.createTopology());
            // Let the local topology run for one minute before shutting down.
            Thread.sleep(60000);
            cluster.shutdown();
        }
    }

    /**
     * Joins comma-separated hosts with the given port into a ZooKeeper connect
     * string, e.g. "h1:2181,h2:2181" — no trailing-comma trimming needed.
     */
    private static String joinHosts(String commaSeparatedHosts, int port) {
        StringBuilder joined = new StringBuilder();
        for (String host : commaSeparatedHosts.split(",")) {
            if (joined.length() > 0) {
                joined.append(',');
            }
            joined.append(host).append(':').append(port);
        }
        return joined.toString();
    }

    /** Configures a KafkaSpout that reads string messages from the given topic. */
    private static SpoutConfig buildSpoutConfig(String zks, String zkHostStr,
            int zkPort, String topic, String zkRoot, String id) {
        // Kafka broker metadata lives under /brokers in ZooKeeper by default.
        BrokerHosts brokerHosts = new ZkHosts(zks, "/brokers");
        SpoutConfig spoutConf = new SpoutConfig(brokerHosts, topic, zkRoot, id);
        spoutConf.scheme = new SchemeAsMultiScheme(new StringScheme());
        spoutConf.zkServers = Arrays.asList(zkHostStr.split(","));
        spoutConf.zkPort = zkPort;
        return spoutConf;
    }

    /**
     * Builds the HdfsBolt that writes tab-delimited records under /tmp/storm/.
     * Files sync after every 1000 tuples and rotate once per minute — whichever
     * condition is met first triggers the corresponding action.
     */
    private static HdfsBolt buildHdfsBolt() {
        RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter("\t");
        // Sync to HDFS after every 1000 tuples.
        SyncPolicy syncPolicy = new CountSyncPolicy(1000);
        // Rotate output files once a minute.
        FileRotationPolicy rotationPolicy =
                new TimedRotationPolicy(1.0f, TimeUnit.MINUTES);
        FileNameFormat fileNameFormat = new DefaultFileNameFormat()
                .withPath("/tmp/storm/").withPrefix("app_").withExtension(".log");
        return new HdfsBolt()
                .withFsUrl(ParamUtils.getString("hadoop.default.fs"))
                .withFileNameFormat(fileNameFormat)
                .withRecordFormat(format)
                .withRotationPolicy(rotationPolicy)
                .withSyncPolicy(syncPolicy);
    }

    /** Wires spout -> word splitter -> word counter -> HDFS writer. */
    private static TopologyBuilder buildTopology(SpoutConfig spoutConf,
            HdfsBolt hdfsBolt) {
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafka-reader", new KafkaSpout(spoutConf), 5); // Kafka consumer
        builder.setBolt("word-splitter", new WordSplitterBolt(), 2)
                .shuffleGrouping("kafka-reader");
        builder.setBolt("word-counter", new WordCounterBolt())
                .fieldsGrouping("word-splitter", new Fields("word"));
        builder.setBolt("hdfs-bolt", hdfsBolt, 2)
                .shuffleGrouping("word-counter");
        return builder;
    }
}