/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.streaming.kafka;

import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.Record;
import com.aerospike.client.policy.GenerationPolicy;
import com.aerospike.client.policy.RecordExistsAction;
import com.aerospike.client.policy.WritePolicy;
import com.alibaba.fastjson.JSONObject;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.HasOffsetRanges;
import org.apache.spark.streaming.kafka010.OffsetRange;
import org.ipig.commons.conf.kafka.consumer.KafkaConsumerConf;
import org.ipig.commons.conf.kafka.consumer.KafkaOffsetsInfo;
import org.ipig.commons.conf.mongo.MongoConf;
import org.ipig.commons.conf.redis.RedisConf;
import org.ipig.commons.conf.spike.SpikeConf;
import org.ipig.commons.helper.DateTimeHelper;
import org.ipig.computing.commons.pojo.kafka.KafkaRecordInfo;
import org.ipig.computing.commons.pojo.kafka.RecordInfo;
import org.ipig.computing.constant.SpikeCnst;
import org.ipig.computing.constant.context.SparkContext;
import org.ipig.computing.spark.conf.ApplicationConf;
import org.ipig.computing.spark.conf.SparkExecutorConf;
import org.ipig.computing.spark.conf.SparkKafkaConf;
import org.ipig.computing.spark.conf.SparkLauncherConf;
import org.ipig.computing.spark.conf.SparkMongoConf;
import org.ipig.computing.spark.conf.SparkRedisConf;
import org.ipig.computing.spark.conf.SparkSpikeConf;
import org.ipig.constants.JavaCnst;
import org.ipig.constants.SparkCnst;
import org.ipig.constants.SymbolCnst;
import org.ipig.data.as.impl.SpikeManager;
import org.ipig.data.redis.RedisManager;
import org.ipig.model.proc.DimMetricsMolel;
import org.ipig.model.proc.FlowDimMetricsConf;
import org.ipig.model.proc.FlowProcessorConf;
import org.ipig.model.proc.FlowRequesterConf;

import java.io.File;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * AbstractSparkStreamKafkaToSpikeExecutor
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
@Slf4j
public abstract class AbstractSparkStreamKafkaToSpikeExecutor implements SparkStreamKafkaToSpikeExecutorService {
    @Getter
    @Setter
    private SparkExecutorConf conf;
    /** Lazily created; volatile to support double-checked locking in {@link #getSession()}. */
    private volatile SparkSession sparkSession;
    private Broadcast<SpikeManager> broadcastSpikeClient;
    private SpikeManager spikeManager;

    /**
     * Returns the Aerospike (Spike) client manager, resolving it lazily.
     * <p>
     * Resolution order: the cached instance, then the value broadcast by
     * {@link #init(JavaStreamingContext)}, then a freshly built manager from the write
     * configuration. The null guard on the broadcast prevents an NPE when this is
     * called before {@code init(...)} ever ran (e.g. on an executor that was not
     * initialized from the driver).
     *
     * @return a usable {@link SpikeManager}, never {@code null}
     */
    public SpikeManager getSpikeManager() {
        if (spikeManager == null) {
            // Broadcast may be absent if init(...) was never invoked; fall back to a local build.
            if (broadcastSpikeClient != null) {
                spikeManager = broadcastSpikeClient.getValue();
            }
            if (spikeManager == null) {
                spikeManager = new SpikeManager(this.getConf().getSparkSpikeConf().getWriteConf());
            }
        }
        return spikeManager;
    }

    /**
     * Builds the Spike manager on the driver and broadcasts it so executors can reuse it.
     *
     * @param jsc the streaming context whose underlying Spark context carries the broadcast
     */
    @Override
    public void init(JavaStreamingContext jsc) {
        log.debug(jsc.ssc().conf().get(SparkContext.Application.APP_NAME.key));
        JavaSparkContext javaSparkContext = jsc.sparkContext();
        spikeManager = new SpikeManager(this.getConf().getSparkSpikeConf().getWriteConf());
        broadcastSpikeClient = javaSparkContext.broadcast(spikeManager);
    }

    /**
     * Per-partition setup: materializes the Spike manager on the executor side.
     *
     * @param partition the Kafka records of the current partition (not consumed here)
     */
    @Override
    public void initEachPartition(final Iterator<ConsumerRecord<String, String>> partition) {
        spikeManager = getSpikeManager();
    }

    /**
     * Per-partition teardown hook; intentionally a no-op in this base class.
     *
     * @param partition the Kafka records of the current partition
     */
    @Override
    public void postEachPartition(final Iterator<ConsumerRecord<String, String>> partition) {
        // No per-partition cleanup required by default; subclasses may override.
    }

    /**
     * Decides whether a record should continue through the processing pipeline.
     * Redis-based de-duplication is currently disabled, so every record passes.
     *
     * @param recInfo the wrapped Kafka record
     * @return always {@code true}
     */
    @Override
    public boolean checkIsContinueProcess(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo) {
        return true;
    }

    /**
     * Returns the {@link SparkSession} for this executor, creating it on first use.
     * <p>
     * Uses double-checked locking on the volatile {@code sparkSession} field. Always
     * creates a dedicated child session per thread via {@code newSession()} — the former
     * {@code newSession} flag was hard-coded to {@code true}, so the shared-session
     * branch was dead code and has been removed. The new session is then bound to the
     * calling thread and its checkpoint directory is configured.
     *
     * @return the lazily initialized session
     */
    @Override
    public SparkSession getSession() {
        log.debug("获取会话");
        if (sparkSession == null) {
            synchronized (this) {
                if (sparkSession == null) {
                    String master = conf.getMaster();
                    // Unique app name per thread so concurrent executors are distinguishable in the UI.
                    String appName = conf.getAppName() + SymbolCnst.UNDERLINE + Thread.currentThread().getName() + SymbolCnst.UNDERLINE
                            + Thread.currentThread().getId() + SymbolCnst.UNDERLINE + DateTimeHelper.getDateTime();
                    log.warn("获取新会话={}", appName);
                    SparkConf sparkConf = new SparkConf().setAppName(appName);
                    sparkConf.set(SparkContext.Network.NETWORK_TIMEOUT.key, conf.getNetworkTimeout());
                    sparkConf.set(SparkContext.ExecutionBehavior.EXECUTOR_HEARTBEATINTERVAL.key, conf.getExecutorHeartbeatinterval());
                    sparkConf.set(SparkContext.Sql.SQL_BROADCAST_TIMEOUT.key, conf.getSqlBroadcastTimeout());

                    // Streaming rate control and graceful-shutdown behavior.
                    sparkConf.set(SparkContext.Streaming.STREAMING_RECEIVER_MAX_RATE.key, getConf().getStreamingReceiverMaxRate());
                    sparkConf.set(SparkContext.Streaming.STREAMING_STOP_GRACE_FULLY_ON_SHUTDOWN.key, getConf().getStreamingStopGracefullyOnShutdown());
                    sparkConf.set(SparkContext.Streaming.STREAMING_BACKPRESSURE_ENABLED.key, getConf().getStreamingBackpressureEnabled());
                    sparkConf.set(SparkContext.Streaming.STREAMING_BACKPRESSURE_INITIALRATE.key, getConf().getStreamingBackpressureInitialRate());
                    sparkConf.set(SparkContext.Streaming.STREAMING_KAFKA_MAX_RATE_PER_PARTITION.key, getConf().getStreamingKafkaMaxRatePerPartition());

                    // Kryo instead of Java serialization: less network traffic and memory, faster serialization.
                    sparkConf.set(SparkContext.CompressionAndSerialization.SERIALIZER.key, conf.getSerializer());
                    sparkConf.registerKryoClasses(new Class[]{SparkLauncherConf.class, SparkExecutorConf.class, KafkaRecordInfo.class,
                            RecordInfo.class, MongoConf.class, SparkMongoConf.class, RedisConf.class, SpikeConf.class,
                            DimMetricsMolel.class, FlowRequesterConf.class, FlowDimMetricsConf.class, FlowProcessorConf.class,
                            SparkRedisConf.class, SparkSpikeConf.class, SparkKafkaConf.class, ApplicationConf.class,
                            RedisManager.class, SpikeManager.class, this.getClass()});
                    SparkSession.Builder sessionBuilder = SparkSession.builder().config(sparkConf);
                    if (StringUtils.startsWithIgnoreCase(master, SparkContext.WorkingMode.LOCAL.code)) {
                        sessionBuilder.master(SparkContext.DEFAULT_SPARK_MASTER);
                    }
                    log.debug("creating session for {}", this.getClass());
                    // Create a new child session per thread and bind it to the current thread.
                    sparkSession = sessionBuilder.getOrCreate().newSession();
                    SparkSession.setActiveSession(sparkSession);
                    String tmpDir = this.getConf().getCheckpointDir();
                    if (StringUtils.isBlank(tmpDir)) {
                        tmpDir = System.getProperty(JavaCnst.IO_TMP_DIR);
                    }
                    sparkSession.sparkContext().setCheckpointDir(tmpDir + File.separator + SparkCnst.CHECKPOINT + File.separator + this.getClass().getName());
                }
            }
        }
        return sparkSession;
    }

    /**
     * Loads the persisted Kafka offsets for this requester/topic from Aerospike.
     * <p>
     * The record key is {@code <requesterCode>@<topicName>}; each bin name is a partition
     * number and each bin value the stored offset. Returns an empty list when no offset
     * record exists yet — previously this dereferenced a {@code null} record and threw an
     * NPE on the very first run.
     *
     * @param jsc the streaming context (unused; retained for the interface contract)
     * @return one {@link KafkaOffsetsInfo} per stored partition, possibly empty
     */
    @Override
    public List<KafkaOffsetsInfo> getKafkaOffsetsInfos(JavaStreamingContext jsc) {
        KafkaConsumerConf kafkaConsumerConf = this.getConf().getSparkKafkaConf().getConsumerConf();
        List<KafkaOffsetsInfo> list = new ArrayList<>();
        String rowKey = this.getConf().getRequesterConf().getCode() + SymbolCnst.AT + kafkaConsumerConf.getTopicName();
        Key key = new Key(SpikeCnst.Namespace.ORIG.code, SpikeCnst.Set.KAFKA_OFFSET.code, rowKey);
        Record record = getSpikeManager().getClient().get(null, key);
        if (record == null || record.bins == null) {
            // No offsets stored yet: let the consumer start from its configured defaults.
            return list;
        }
        for (Map.Entry<String, Object> entry : record.bins.entrySet()) {
            list.add(new KafkaOffsetsInfo(kafkaConsumerConf.getTopicName(), Integer.valueOf(entry.getKey()),
                    Long.valueOf(String.valueOf(entry.getValue()))));
        }
        return list;
    }

    /**
     * Persists the processed offset ranges of this micro-batch to Aerospike.
     * <p>
     * Builds one bin per partition ({@code partition -> untilOffset}). The write is only
     * attempted when at least one partition actually advanced, and uses optimistic
     * concurrency: the record's generation must match what was read
     * ({@code EXPECT_GEN_EQUAL} with {@code REPLACE_ONLY}/{@code CREATE_ONLY}), so a
     * concurrent writer cannot be silently overwritten. A failed offset commit is logged
     * and does not fail the batch.
     *
     * @param rdd the just-processed batch RDD carrying its Kafka offset ranges
     */
    @Override
    public void postEachRDD(JavaRDD<ConsumerRecord<String, String>> rdd) {
        OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
        Bin[] bins = new Bin[offsetRanges.length];
        int i = 0;
        boolean opAble = false;
        for (OffsetRange offsetRange : offsetRanges) {
            bins[i++] = new Bin(String.valueOf(offsetRange.partition()), offsetRange.untilOffset());
            // Only persist when some partition consumed at least one record in this batch.
            if (!opAble && offsetRange.untilOffset() > offsetRange.fromOffset()) {
                opAble = true;
            }
        }
        if (opAble) {
            try {
                KafkaConsumerConf kafkaConf = this.getConf().getSparkKafkaConf().getConsumerConf();
                String rowKey = this.getConf().getRequesterConf().getCode() + SymbolCnst.AT + kafkaConf.getTopicName();
                Key key = new Key(SpikeCnst.Namespace.ORIG.code, SpikeCnst.Set.KAFKA_OFFSET.code, rowKey);
                Record record = getSpikeManager().getClient().get(null, key);
                WritePolicy writePolicy = new WritePolicy();
                writePolicy.generationPolicy = GenerationPolicy.EXPECT_GEN_EQUAL;
                if (record != null) {
                    log.debug("current offset record={}", record);
                    // Replace only if nobody else touched the record since we read it.
                    writePolicy.generation = record.generation;
                    writePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY;
                } else {
                    writePolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY;
                }
                getSpikeManager().getClient().put(writePolicy, key, bins);
            } catch (Exception ex) {
                // Offset persistence is best-effort; log with cause instead of printStackTrace.
                log.error("failed to persist kafka offsets, rowKey topic={}", this.getConf().getSparkKafkaConf().getConsumerConf().getTopicName(), ex);
            }
        }
    }
}

