package org.nbict.iot.trident.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.storm.kafka.spout.Func;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff;
import org.apache.storm.kafka.spout.KafkaSpoutRetryService;
import org.apache.storm.kafka.spout.trident.KafkaTridentSpoutOpaque;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

import java.io.Serializable;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;

/**
 * Utility for building Kafka spout configurations.
 * Created by songseven on 18/6/25.
 */
public class KafkaSpout4ParsedTools {

    /** Shared translator instance that emits a (topic, value) tuple per record. */
    private static final Func<ConsumerRecord<String, String>, List<Object>>
            JUST_VALUE_FUNC = new JustValueFunc();

    /** Utility class; not meant to be instantiated. */
    private KafkaSpout4ParsedTools() {
    }

    /**
     * Maps a consumed Kafka record to a Storm tuple of (topic, value).
     * Needs to be serializable because Storm ships the translator to worker JVMs.
     */
    private static class JustValueFunc implements Func<ConsumerRecord<String, String>, List<Object>>, Serializable {

        @Override
        public List<Object> apply(ConsumerRecord<String, String> record) {
            // Positions correspond to the output fields "platform_id" (topic) and "packet" (value).
            return new Values(record.topic(), record.value());
        }
    }

    /**
     * Builds a spout config with the default first-poll strategy
     * ({@code UNCOMMITTED_LATEST}).
     *
     * @param bootstrapServers Kafka bootstrap servers, e.g. {@code "host1:9092,host2:9092"}
     * @param topicPattern     regex matched against topic names to subscribe to
     * @param groupId          Kafka consumer group id
     * @return a configured {@link KafkaSpoutConfig} for String key/value records
     */
    public static KafkaSpoutConfig<String, String> newKafkaSpoutConfig(String bootstrapServers, String topicPattern, String groupId) {
        return newKafkaSpoutConfig(bootstrapServers, topicPattern, groupId,
                KafkaSpoutConfig.FirstPollOffsetStrategy.UNCOMMITTED_LATEST);
    }

    /**
     * Builds a spout config subscribing (by pattern) to topics with String
     * deserializers, the (topic, value) record translator, an exponential-backoff
     * retry policy, a 3s offset-commit period and a 200ms poll timeout.
     *
     * @param bootstrapServers Kafka bootstrap servers, e.g. {@code "host1:9092,host2:9092"}
     * @param topicPattern     regex matched against topic names to subscribe to
     * @param groupId          Kafka consumer group id
     * @param strategy         where the spout starts polling on (re)deploy
     * @return a configured {@link KafkaSpoutConfig} for String key/value records
     */
    public static KafkaSpoutConfig<String, String> newKafkaSpoutConfig(String bootstrapServers, String topicPattern, String groupId, KafkaSpoutConfig.FirstPollOffsetStrategy strategy) {

        KafkaSpoutConfig.Builder<String, String> kafkaBuilder = new KafkaSpoutConfig.Builder<>(
                bootstrapServers, StringDeserializer.class, StringDeserializer.class,
                Pattern.compile(topicPattern));

        kafkaBuilder.setGroupId(groupId);
        // Field names must match what downstream bolts/trident streams expect.
        kafkaBuilder.setRecordTranslator(JUST_VALUE_FUNC, new Fields("platform_id", "packet"));
        kafkaBuilder.setRetry(newRetryService());
        kafkaBuilder.setOffsetCommitPeriodMs(3000L);
        // Default is UNCOMMITTED_EARLIEST: the spout resumes from each partition's last
        // committed offset; if that offset is missing or expired it falls back to EARLIEST.
        kafkaBuilder.setFirstPollOffsetStrategy(strategy); //!important
        kafkaBuilder.setPollTimeoutMs(200L);

        return kafkaBuilder.build();
    }

    /**
     * Wraps a spout config in an opaque trident Kafka spout.
     *
     * @param spoutConfig the spout configuration to use
     * @return a new {@link KafkaTridentSpoutOpaque} backed by {@code spoutConfig}
     */
    public static KafkaTridentSpoutOpaque<String, String> newKafkaTridentSpoutOpaque(KafkaSpoutConfig<String, String> spoutConfig) {
        return new KafkaTridentSpoutOpaque<>(spoutConfig);
    }


    /**
     * Exponential-backoff retry policy: 500µs initial delay, 2ms delay period,
     * capped at 10s, with unlimited ({@code Integer.MAX_VALUE}) retries.
     */
    protected static KafkaSpoutRetryService newRetryService() {
        return new KafkaSpoutRetryExponentialBackoff(
                KafkaSpoutRetryExponentialBackoff.TimeInterval.microSeconds(500),
                KafkaSpoutRetryExponentialBackoff.TimeInterval.milliSeconds(2),
                Integer.MAX_VALUE,
                KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(10));
    }
}
