/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.streaming.kafka;

import com.mongodb.spark.MongoSpark;
import com.mongodb.spark.config.ReadConfig;
import com.mongodb.spark.config.WriteConfig;
import com.mongodb.spark.rdd.api.java.JavaMongoRDD;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.ConsumerStrategy;
import org.apache.spark.streaming.kafka010.HasOffsetRanges;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.apache.spark.streaming.kafka010.OffsetRange;
import org.bson.Document;
import org.ipig.commons.conf.kafka.consumer.KafkaConsumerConf;
import org.ipig.commons.conf.kafka.consumer.KafkaOffsetsInfo;
import org.ipig.commons.conf.mongo.MongoConf;
import org.ipig.commons.conf.redis.RedisConf;
import org.ipig.commons.helper.DateTimeHelper;
import org.ipig.commons.helper.GsonHelper;
import org.ipig.commons.helper.SparkMongoHelper;
import org.ipig.commons.helper.StringHelper;
import org.ipig.computing.commons.pojo.kafka.KafkaRecordInfo;
import org.ipig.computing.commons.pojo.kafka.RecordInfo;
import org.ipig.computing.constant.context.SparkContext;
import org.ipig.computing.spark.SparkLauncherService;
import org.ipig.computing.spark.conf.SparkExecutorConf;
import org.ipig.computing.spark.conf.SparkLauncherConf;
import org.ipig.computing.spark.conf.SparkMongoConf;
import org.ipig.computing.spark.streaming.SparkStreamExecutorLauncher;
import org.ipig.constants.JavaCnst;
import org.ipig.constants.SymbolCnst;
import org.ipig.data.redis.RedisManager;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * AbstractSparkStreamKafkaToMongoExecutor
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
@Slf4j
public abstract class AbstractSparkStreamKafkaToMongoExecutor implements SparkStreamKafkaExecutorService, SparkLauncherService<SparkExecutorConf> {
    @Getter
    @Setter
    private SparkExecutorConf conf;
    /** Lazily created in {@link #getSession()} via double-checked locking; volatile is required for that pattern. */
    private volatile SparkSession sparkSession;

    /**
     * Per-partition post-processing hook. Intentionally a no-op here; subclasses
     * override it when work must be done after each partition is consumed.
     *
     * @param partition consumer records of one Kafka partition for the current batch
     */
    @Override
    public void postEachPartition(Iterator<ConsumerRecord<String, String>> partition) {
        // Intentional no-op: extension point for subclasses.
    }

    /**
     * Returns this executor's {@link SparkSession}, creating it on first use
     * (double-checked locking on the volatile {@code sparkSession} field).
     * The session is configured from {@code conf} (timeouts, Kryo serialization),
     * forked via {@code newSession()} so each thread owns an isolated session,
     * and given a per-class checkpoint directory.
     *
     * @return the lazily initialized session
     */
    @Override
    public SparkSession getSession() {
        log.debug("获取会话");
        if (sparkSession == null) {
            synchronized (this) {
                if (sparkSession == null) {
                    String master = conf.getMaster();
                    // App name embeds the creating thread's identity and a timestamp so each
                    // launching thread gets a distinguishable Spark application name.
                    String appName = conf.getAppName() + SymbolCnst.MIDLINE + Thread.currentThread().getName() + SymbolCnst.MIDLINE
                            + Thread.currentThread().getId() + SymbolCnst.UNDERLINE + DateTimeHelper.getDateTime();
                    log.warn("获取新会话={}", appName);
                    SparkConf sparkConf = new SparkConf().setAppName(appName);
                    sparkConf.set(SparkContext.Network.NETWORK_TIMEOUT.key, conf.getNetworkTimeout());
                    sparkConf.set(SparkContext.ExecutionBehavior.EXECUTOR_HEARTBEATINTERVAL.key, conf.getExecutorHeartbeatinterval());
                    sparkConf.set(SparkContext.Sql.SQL_BROADCAST_TIMEOUT.key, conf.getSqlBroadcastTimeout());

                    // Use Kryo instead of Java serialization: smaller network payloads,
                    // lower memory usage, faster (de)serialization.
                    sparkConf.set(SparkContext.CompressionAndSerialization.SERIALIZER.key, conf.getSerializer());
                    sparkConf.registerKryoClasses(new Class[]{SparkLauncherConf.class, SparkExecutorConf.class, KafkaRecordInfo.class,
                            RecordInfo.class, MongoConf.class, SparkMongoConf.class, RedisConf.class, RedisManager.class, this.getClass()});
                    SparkSession.Builder sessionBuilder = SparkSession.builder().config(sparkConf);
                    if (StringUtils.startsWithIgnoreCase(master, SparkContext.WorkingMode.LOCAL.code)) {
                        sessionBuilder.master(SparkContext.DEFAULT_SPARK_MASTER);
                    }
                    // Always fork a fresh session for this executor: a plain getOrCreate()
                    // would hand back a session shared with other threads, which could then
                    // mutate its state or close it underneath us. (Removed the dead
                    // "newSession" flag whose else-branch was unreachable, and a leftover
                    // System.out.println debug statement.)
                    sparkSession = sessionBuilder.getOrCreate().newSession();
                    // Bind the freshly forked session to the current thread.
                    SparkSession.setActiveSession(sparkSession);
                    // Checkpoint directory: configured value, falling back to java.io.tmpdir,
                    // namespaced by the concrete executor class.
                    String tmpDir = this.getConf().getCheckpointDir();
                    if (StringUtils.isBlank(tmpDir)) {
                        tmpDir = System.getProperty(JavaCnst.IO_TMP_DIR);
                    }
                    sparkSession.sparkContext().setCheckpointDir(tmpDir + File.separator + "checkpoint" + File.separator + this.getClass().getName());
                }
            }
        }
        return sparkSession;
    }

    /**
     * @return fully qualified name of the launcher entry class submitted to Spark
     */
    @Override
    public String getMainClass() {
        return SparkStreamExecutorLauncher.class.getName();
    }

    /**
     * Serializes a deep clone of the executor configuration (with the concrete
     * executor class name injected) to JSON and UTF-8-encodes it so it can be
     * passed as the single launcher argument.
     *
     * @return UTF-8 encoded JSON of the executor configuration
     */
    @Override
    public String getMainArg() {
        // Deep-clone so the serialized copy can be mutated without touching the live conf.
        SparkExecutorConf executorConf = this.getConf().deepClone();
        executorConf.setExecutorClass(this.getExecutorClass());
        // TODO(dev-only): a random consumer-group id may be injected here for local testing;
        // it must remain disabled before test and production deployments.
        String json = GsonHelper.toJson(executorConf, SparkExecutorConf.class);
        // NOTE(review): logs the full configuration at WARN level — confirm it contains no secrets.
        log.warn(json);
        return StringHelper.encodeByUTF8(json);
    }

    /**
     * @return the concrete executor class name (the runtime subclass), used to
     *         re-instantiate the executor on the cluster side
     */
    @Override
    public String getExecutorClass() {
        return this.getClass().getName();
    }

    /**
     * Builds the Kafka direct input stream for this job from the configured
     * consumer settings. When auto-commit is disabled, previously persisted
     * offsets from {@link #getKafkaOffsetsInfos(JavaStreamingContext)} are used
     * as the starting positions; otherwise the broker-managed offsets apply.
     *
     * @param jsc streaming context to attach the stream to
     * @return direct stream of String-keyed / String-valued consumer records
     */
    @Override
    public JavaInputDStream<ConsumerRecord<String, String>> fromInputDStream(JavaStreamingContext jsc) {
        KafkaConsumerConf kafkaConsumerConf = getConf().getSparkKafkaConf().getConsumerConf();
        String autoCommitEnable = kafkaConsumerConf.getAutoCommitEnable();
        Collection<String> topicsSet = new HashSet<>(Arrays.asList(kafkaConsumerConf.getTopicName()));
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConsumerConf.getBootstrapServers());
        kafkaParams.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConsumerConf.getGroupId());
        kafkaParams.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerConf.getAutoOffsetReset());
        kafkaParams.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaConsumerConf.getKeyDeserializer());
        // BUG FIX: the value deserializer was previously populated from getKeyDeserializer().
        // TODO confirm KafkaConsumerConf exposes getValueDeserializer().
        kafkaParams.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaConsumerConf.getValueDeserializer());
        kafkaParams.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommitEnable);
        // max.poll.records [500 records]
        kafkaParams.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "500");
        // max.poll.interval.ms [5 minutes] 300000ms
        kafkaParams.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "300000");
        // request.timeout.ms [30 seconds] 30000ms
        kafkaParams.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");
        // session.timeout.ms [10 seconds] 10000ms
        kafkaParams.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");
        // heartbeat.interval.ms [3 seconds] 3000ms
        kafkaParams.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000");
        // fetch.max.wait.ms [0.5 second] 500ms
        kafkaParams.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "500");

        // Free-form overrides from the configuration win over the defaults above.
        if (kafkaConsumerConf.getMap() != null && !kafkaConsumerConf.getMap().isEmpty()) {
            kafkaParams.putAll(kafkaConsumerConf.getMap());
        }
        Map<TopicPartition, Long> fromOffsets = new HashMap<>();
        if (org.apache.commons.lang3.StringUtils.equals(autoCommitEnable, "true")) {
            kafkaParams.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafkaConsumerConf.getAutoCommitMs());
        } else {
            // Manual offset management: resume from persisted offsets when available.
            List<KafkaOffsetsInfo> fromOffsetsList = getKafkaOffsetsInfos(jsc);
            if (fromOffsetsList != null && !fromOffsetsList.isEmpty()) {
                for (KafkaOffsetsInfo info : fromOffsetsList) {
                    fromOffsets.put(new TopicPartition(info.getTopic(), info.getPartition()), info.getOffset());
                }
            }
        }
        // Parameterized (was a raw ConsumerStrategy).
        ConsumerStrategy<String, String> consumerStrategy;
        if (fromOffsets.isEmpty()) {
            consumerStrategy = ConsumerStrategies.Subscribe(topicsSet, kafkaParams);
        } else {
            consumerStrategy = ConsumerStrategies.Subscribe(topicsSet, kafkaParams, fromOffsets);
        }
        return KafkaUtils.createDirectStream(jsc, LocationStrategies.PreferConsistent(), consumerStrategy);
    }

    /**
     * Loads every {@link KafkaConsumerConf} document from the "skKafkaConsumerConf"
     * MongoDB collection, using the executor's Spark-Mongo read configuration.
     *
     * @param jsc streaming context whose SparkContext backs the Mongo read
     * @return all consumer configurations stored in MongoDB
     */
    public List<KafkaConsumerConf> getKafkaConsumerConfList(JavaStreamingContext jsc) {
        SparkExecutorConf executorConf = this.getConf();
        // Raw Map kept to match SparkMongoHelper's declared return type.
        Map readParamsMap = SparkMongoHelper.getSparkMongoConnectorParams(executorConf.getSparkMongoConf().getReadConf(), "skKafkaConsumerConf");
        ReadConfig readConfig = ReadConfig.create(readParamsMap);
        // (Removed an unused WriteConfig, an unused SparkConf, dead aggregation-pipeline
        // example documents, and System.out.println debug prints — none affected the result.)
        JavaMongoRDD<KafkaConsumerConf> rdd = MongoSpark.load(jsc.sparkContext(), readConfig, KafkaConsumerConf.class);
        return rdd.collect();
    }

    /**
     * Loads the persisted Kafka offsets ({@link KafkaOffsetsInfo}) from the
     * "skKafkaOffsetsInfo" MongoDB collection, using the executor's Spark-Mongo
     * read configuration. Consumed by
     * {@link #fromInputDStream(JavaStreamingContext)} when auto-commit is off.
     *
     * @param jsc streaming context whose SparkContext backs the Mongo read
     * @return persisted topic/partition/offset records
     */
    @Override
    public List<KafkaOffsetsInfo> getKafkaOffsetsInfos(JavaStreamingContext jsc) {
        SparkExecutorConf executorConf = this.getConf();
        // Raw Map kept to match SparkMongoHelper's declared return type.
        Map readParamsMap = SparkMongoHelper.getSparkMongoConnectorParams(executorConf.getSparkMongoConf().getReadConf(), "skKafkaOffsetsInfo");
        ReadConfig readConfig = ReadConfig.create(readParamsMap);
        // (Removed an unused WriteConfig, an unused SparkConf, dead aggregation-pipeline
        // example documents, and System.out.println debug prints — none affected the result.)
        JavaMongoRDD<KafkaOffsetsInfo> rdd = MongoSpark.load(jsc.sparkContext(), readConfig, KafkaOffsetsInfo.class);
        return rdd.collect();
    }

    /**
     * Batch-level hook: logs the Kafka offset ranges of the just-processed RDD.
     * Implementers persisting offsets themselves should do so transactionally:
     * begin transaction, update results, update offsets where the stored end
     * matches this batch's start, assert the update succeeded, then commit.
     *
     * @param rdd the batch RDD; must originate from a direct Kafka stream so its
     *            underlying RDD implements {@link HasOffsetRanges}
     */
    @Override
    public void postEachRDD(JavaRDD<ConsumerRecord<String, String>> rdd) {
        log.debug("{}_{} 可以进行累加器值的获取", Thread.currentThread().getName(), Thread.currentThread().getId());
        OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
        for (OffsetRange offsetRange : offsetRanges) {
            // Was System.out.println: route offset diagnostics through the logger instead.
            log.info("{} {}", offsetRange.topicPartition(), offsetRange);
        }
    }
}

