/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.streaming.kafka;

import com.alibaba.fastjson.JSONObject;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.ConsumerStrategy;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.ipig.commons.conf.kafka.consumer.KafkaConsumerConf;
import org.ipig.commons.conf.kafka.consumer.KafkaOffsetsInfo;
import org.ipig.computing.commons.helper.KafkaHelper;
import org.ipig.computing.commons.pojo.kafka.KafkaRecordInfo;
import org.ipig.computing.commons.pojo.kafka.RecordInfo;
import org.ipig.computing.spark.streaming.SparkStreamPartitionService;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * SparkStreamKafkaExecutorService
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
public interface SparkStreamKafkaExecutorService extends SparkStreamPartitionService<ConsumerRecord<String, String>> {

    /**
     * Hook invoked once before a partition's records are processed. Default: no-op.
     *
     * @param partition iterator over the partition's raw Kafka records
     */
    @Override
    default void initEachPartition(final Iterator<ConsumerRecord<String, String>> partition) {
    }

    /**
     * Iterates a partition, converting each raw record via
     * {@link KafkaHelper#toKafkaRecordInfo} and dispatching it to
     * {@link #doEachRecord} when {@link #checkIsContinueProcess} approves it.
     * Records that fail conversion (null result) are skipped silently.
     *
     * @param partition iterator over the partition's raw Kafka records
     */
    @Override
    default void doEachPartition(Iterator<ConsumerRecord<String, String>> partition) {
        while (partition.hasNext()) {
            ConsumerRecord<String, String> recordLine = partition.next();
            KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo =
                    KafkaHelper.toKafkaRecordInfo(recordLine);
            if (recInfo == null) {
                // Conversion failed; skip this record.
                continue;
            }
            if (checkIsContinueProcess(recInfo)) {
                doEachRecord(recInfo);
            }
        }
    }

    /**
     * 检查是否继续处理 — decides whether a decoded record should be processed.
     * Default implementation accepts every record.
     *
     * @param recInfo the decoded record
     * @return boolean true: process the record, false: skip it
     */
    default boolean checkIsContinueProcess(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo) {
        return true;
    }

    /**
     * Hook invoked once after a partition's records have been processed.
     *
     * @param partition iterator over the partition's raw Kafka records
     */
    @Override
    void postEachPartition(Iterator<ConsumerRecord<String, String>> partition);

    /**
     * 业务处理每条记录 — business-level handling of a single decoded record.
     *
     * @param recInfo KafkaRecordInfo<String, RecordInfo>
     */
    void doEachRecord(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo);

    /**
     * Builds the streaming context with the configured micro-batch duration.
     *
     * @param sc the underlying Spark context
     * @return a JavaStreamingContext whose batch interval comes from the job conf
     */
    @Override
    default JavaStreamingContext getJavaStreamingContext(JavaSparkContext sc) {
        return new JavaStreamingContext(sc, Durations.seconds(this.getConf().getDurationsSeconds()));
    }

    /**
     * Creates the Kafka direct input stream from the consumer configuration.
     * <p>
     * When auto-commit is enabled, Kafka manages offsets (commit interval taken
     * from the conf). Otherwise, {@link #getKafkaOffsetsInfos} may supply stored
     * offsets to resume from; if none are available, the stream subscribes from
     * the configured {@code auto.offset.reset} position.
     *
     * @param jsc the streaming context
     * @return the direct input stream of String/String consumer records
     */
    @Override
    default JavaInputDStream<ConsumerRecord<String, String>> fromInputDStream(JavaStreamingContext jsc) {
        KafkaConsumerConf kafkaConsumerConf = getConf().getSparkKafkaConf().getConsumerConf();
        String autoCommitEnable = kafkaConsumerConf.getAutoCommitEnable();
        Collection<String> topicsSet = new HashSet<>(Arrays.asList(kafkaConsumerConf.getTopicName()));
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConsumerConf.getBootstrapServers());
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConsumerConf.getGroupId());
        kafkaParams.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerConf.getAutoOffsetReset());
        kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaConsumerConf.getKeyDeserializer());
        // FIX: was getKeyDeserializer() — copy-paste bug; the value deserializer
        // must come from its own conf entry. NOTE(review): confirm KafkaConsumerConf
        // exposes getValueDeserializer().
        kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaConsumerConf.getValueDeserializer());
        kafkaParams.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommitEnable);
        // max.poll.records 【500 条】
        kafkaParams.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "500");
        // max.poll.interval.ms【5 分钟】300000ms
        kafkaParams.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "300000");
        // request.timeout.ms【30 秒钟】30000ms
        kafkaParams.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
        // session.timeout.ms【10 秒钟】10000ms
        kafkaParams.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
        // heartbeat.interval.ms【3 秒钟】3000ms
        kafkaParams.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000");
        // fetch.max.wait.ms【0.5秒钟】500ms
        kafkaParams.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");

        // Conf-supplied overrides take precedence over the defaults above.
        if (kafkaConsumerConf.getMap() != null && !kafkaConsumerConf.getMap().isEmpty()) {
            kafkaParams.putAll(kafkaConsumerConf.getMap());
        }
        Map<TopicPartition, Long> fromOffsets = new HashMap<>();
        if (StringUtils.equalsIgnoreCase(autoCommitEnable, "true")) {
            kafkaParams.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafkaConsumerConf.getAutoCommitMs());
        } else {
            // Manual offset management: resume from externally stored offsets if any.
            List<KafkaOffsetsInfo> fromOffsetsList = getKafkaOffsetsInfos(jsc);
            if (fromOffsetsList != null && !fromOffsetsList.isEmpty()) {
                for (KafkaOffsetsInfo info : fromOffsetsList) {
                    TopicPartition topicPartition = new TopicPartition(info.getTopic(), info.getPartition());
                    fromOffsets.put(topicPartition, info.getOffset());
                }
            }
        }
        // Parameterized strategy (was a raw ConsumerStrategy) to match the typed stream below.
        ConsumerStrategy<String, String> consumerStrategy;
        if (fromOffsets.isEmpty()) {
            consumerStrategy = ConsumerStrategies.Subscribe(topicsSet, kafkaParams);
        } else {
            consumerStrategy = ConsumerStrategies.Subscribe(topicsSet, kafkaParams, fromOffsets);
        }
        return KafkaUtils.createDirectStream(
                jsc, LocationStrategies.PreferConsistent(), consumerStrategy);
    }

    /**
     * Supplies stored per-partition offsets to resume consumption from when
     * auto-commit is disabled. Default: none (null), meaning subscribe without
     * explicit offsets.
     *
     * @param jsc the streaming context
     * @return List<KafkaOffsetsInfo> stored offsets, or null if unavailable
     */
    default List<KafkaOffsetsInfos> getKafkaOffsetsInfos(JavaStreamingContext jsc) {
        return null;
    }
}

