package org.nbict.iot.trident.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.storm.kafka.spout.Func;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff;
import org.apache.storm.kafka.spout.KafkaSpoutRetryService;
import org.apache.storm.kafka.spout.trident.KafkaTridentSpoutOpaque;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

import java.io.Serializable;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;

/**
 * Factory helpers for building Storm Trident Kafka spouts that emit each
 * record's topic name together with its raw (un-deserialized) byte payload.
 *
 * <p>Created by songseven on 18/7/6.
 */
public class KafkaSpout4RawTools {

    /** Utility class — not meant to be instantiated. */
    private KafkaSpout4RawTools() {
    }

    /** Shared stateless translator; safe to reuse across spout configs. */
    private static final Func<ConsumerRecord<String, byte[]>, List<Object>>
            JUST_VALUE_FUNC = new JustValueFunc();

    /**
     * Maps a consumer record to the tuple (topic, rawBytes).
     *
     * <p>Needs to be serializable: Storm ships the translator to worker JVMs.
     */
    private static class JustValueFunc
            implements Func<ConsumerRecord<String, byte[]>, List<Object>>, Serializable {

        @Override
        public List<Object> apply(ConsumerRecord<String, byte[]> record) {
            // Emitted under the fields ("platform_id", "raw") — see newKafkaSpoutConfig.
            return new Values(record.topic(), record.value());
        }
    }

    /**
     * Builds a spout configuration that subscribes to every topic matching
     * {@code topicPattern} and emits tuples of ("platform_id", "raw").
     *
     * <p>NOTE(review): the declared value type is {@code String}, but the config
     * is built with a {@link ByteArrayDeserializer}, so records actually carry
     * {@code byte[]} values at runtime. The generic signature is kept as-is for
     * source compatibility with existing callers; consider migrating the API to
     * {@code KafkaSpoutConfig<String, byte[]>}.
     *
     * @param bootstrapServers Kafka bootstrap servers, e.g. "host1:9092,host2:9092"
     * @param topicPattern     regex matched against topic names to subscribe to
     * @param groupId          Kafka consumer group id
     * @param strategy         where to start polling when no committed offset
     *                         exists (or it is out of range)
     * @return the assembled spout configuration
     */
    @SuppressWarnings("unchecked")
    public static KafkaSpoutConfig<String, String> newKafkaSpoutConfig(
            String bootstrapServers, String topicPattern, String groupId,
            KafkaSpoutConfig.FirstPollOffsetStrategy strategy) {

        KafkaSpoutConfig.Builder<String, byte[]> kafkaBuilder =
                new KafkaSpoutConfig.Builder(bootstrapServers,
                        StringDeserializer.class, ByteArrayDeserializer.class,
                        Pattern.compile(topicPattern));

        kafkaBuilder.setGroupId(groupId);
        kafkaBuilder.setRecordTranslator(JUST_VALUE_FUNC, new Fields("platform_id", "raw"));
        kafkaBuilder.setRetry(newRetryService());
        kafkaBuilder.setOffsetCommitPeriodMs(3000L);
        // Default is UNCOMMITTED_EARLIEST: the spout resumes from each partition's
        // last committed offset, falling back to EARLIEST when the offset is
        // missing or expired. The caller-supplied strategy overrides this.
        kafkaBuilder.setFirstPollOffsetStrategy(strategy); // !important
        // kafkaBuilder.setMaxUncommittedOffsets(1000000); // cap on tuples awaiting commit before the next poll
        kafkaBuilder.setPollTimeoutMs(200L); // timeout for each Kafka poll

        // kafkaBuilder.setMaxPartitionFectchBytes()
        // Unchecked: build() yields KafkaSpoutConfig<String, byte[]>; see NOTE above.
        return (KafkaSpoutConfig<String, String>) (KafkaSpoutConfig) kafkaBuilder.build();
    }

    /**
     * Wraps a spout configuration in an opaque transactional Trident Kafka spout.
     *
     * @param spoutConfig configuration, typically from {@link #newKafkaSpoutConfig}
     * @return a new {@link KafkaTridentSpoutOpaque}
     */
    public static KafkaTridentSpoutOpaque<String, String> newKafkaTridentSpoutOpaque(
            KafkaSpoutConfig<String, String> spoutConfig) {
        return new KafkaTridentSpoutOpaque<>(spoutConfig);
    }

    /**
     * Retry policy: exponential backoff starting at 500 microseconds with a
     * 2 ms step, retrying indefinitely, capped at a 10 second delay.
     * (Mirrors the canonical storm-kafka-client example configuration.)
     */
    protected static KafkaSpoutRetryService newRetryService() {
        return new KafkaSpoutRetryExponentialBackoff(
                new KafkaSpoutRetryExponentialBackoff.TimeInterval(500L, TimeUnit.MICROSECONDS),
                KafkaSpoutRetryExponentialBackoff.TimeInterval.milliSeconds(2),
                Integer.MAX_VALUE,
                KafkaSpoutRetryExponentialBackoff.TimeInterval.seconds(10));
    }
}
