/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.streaming.kafka;

import com.alibaba.fastjson.JSONObject;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.HasOffsetRanges;
import org.apache.spark.streaming.kafka010.OffsetRange;
import org.ipig.commons.conf.kafka.consumer.KafkaOffsetsInfo;
import org.ipig.commons.conf.mongo.MongoConf;
import org.ipig.commons.conf.redis.RedisConf;
import org.ipig.commons.helper.DateTimeHelper;
import org.ipig.computing.commons.pojo.ModelMetricValue;
import org.ipig.computing.commons.pojo.kafka.KafkaRecordInfo;
import org.ipig.computing.commons.pojo.kafka.RecordInfo;
import org.ipig.computing.constant.context.SparkContext;
import org.ipig.computing.spark.conf.SparkExecutorConf;
import org.ipig.computing.spark.conf.SparkLauncherConf;
import org.ipig.computing.spark.conf.SparkMongoConf;
import org.ipig.constants.JavaCnst;
import org.ipig.constants.SymbolCnst;
import org.ipig.data.redis.RedisManager;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Transaction;

import java.io.File;
import java.io.IOException;
import java.util.List;

/**
 * AbstractSparkStreamKafkaToRedisExecutor
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
@Slf4j
public abstract class AbstractSparkStreamKafkaToRedisExecutor implements SparkStreamKafkaToRedisExecutorService {

    /** Default expiry time: one day, in seconds. */
    public static final int TIMEOUT = 86400;
    /** Redis key namespace prefix for processed-record markers. */
    public static final String ZJP = "ZJP";

    @Getter
    @Setter
    private SparkExecutorConf conf;
    /** Lazily created session, shared across threads — volatile for double-checked locking in {@link #getSession()}. */
    private volatile SparkSession sparkSession;
    /** Broadcast handle so executors can recover the RedisManager; populated in {@link #init(JavaStreamingContext)}. */
    private Broadcast<RedisManager> broadcastRedisManager;
    private RedisManager redisManager;
    private Jedis jedis;

    /**
     * Returns a lazily-initialized Jedis connection, recovering the RedisManager
     * from the broadcast variable when running on an executor.
     *
     * @return the cached default Jedis resource
     */
    @Override
    public Jedis getJedis() {
        if (jedis == null) {
            if (redisManager == null) {
                redisManager = broadcastRedisManager.getValue();
            }
            jedis = redisManager.getDefaultResource();
        }
        return jedis;
    }

    /**
     * Returns the RedisManager, resolving it from the broadcast variable on first use.
     *
     * @return the shared RedisManager
     */
    @Override
    public RedisManager getRedisManager() {
        if (redisManager == null) {
            redisManager = broadcastRedisManager.getValue();
        }
        return redisManager;
    }

    /**
     * Builds the Redis marker key for a record: {@code ZJP:<topic>:<code>:<pos>}.
     *
     * @param recInfo the Kafka record wrapper (topic comes from the wrapper, code/pos from its value)
     * @return the colon-delimited Redis key
     */
    private String getKey(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo) {
        RecordInfo<JSONObject, JSONObject> recordInfo = recInfo.getValue();
        return new StringBuilder(ZJP)
                .append(SymbolCnst.COLON).append(recInfo.getTopic())
                .append(SymbolCnst.COLON).append(recordInfo.getCode())
                .append(SymbolCnst.COLON).append(recordInfo.getPos())
                .toString();
    }

    /**
     * Persists metric values to Redis inside a MULTI/EXEC transaction and marks
     * the source record as processed (key set with NX so an existing marker is kept).
     *
     * @param recInfo the Kafka record the metrics were derived from
     * @param mmvList metric increments to apply; no-op when empty
     */
    public void saveData(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo, List<ModelMetricValue> mmvList) {
        if (mmvList.isEmpty()) {
            return;
        }
        // Markers live three times the default expiry so they outlast any normal reprocessing window.
        int timeout = TIMEOUT * 3;
        String key = getKey(recInfo);
        Transaction tx = getJedis().multi();
        try {
            for (ModelMetricValue mmv : mmvList) {
                tx.hincrBy(mmv.getKeyAndModel(), mmv.getMetricField(), mmv.getMetricValue());
                tx.expire(mmv.getKeyAndModel(), timeout);
            }
            // NX: only set the processed-marker if absent; EX: expire after `timeout` seconds.
            tx.set(key, "", "NX", "EX", timeout);
            tx.exec();
        } catch (Exception e) {
            log.error("saveData redis transaction failed, key={}", key, e);
            tx.discard();
        } finally {
            try {
                tx.close();
            } catch (IOException e) {
                log.error("failed to close redis transaction", e);
            }
        }
    }

    /**
     * A record should be processed only when its marker key is absent from Redis.
     *
     * @param recInfo the Kafka record wrapper
     * @return {@code true} if the record has not been processed yet
     */
    @Override
    public boolean checkIsContinueProcess(KafkaRecordInfo<String, RecordInfo<JSONObject, JSONObject>> recInfo) {
        return !getJedis().exists(getKey(recInfo));
    }

    /**
     * Creates the RedisManager on the driver and broadcasts it to the executors.
     *
     * @param jsc the streaming context used to obtain the JavaSparkContext
     */
    @Override
    public void init(JavaStreamingContext jsc) {
        log.debug(jsc.ssc().conf().get(SparkContext.Application.APP_NAME.key));
        JavaSparkContext javaSparkContext = jsc.sparkContext();
        redisManager = new RedisManager(this.getConf().getRedisConf());
        broadcastRedisManager = javaSparkContext.broadcast(this.redisManager);
    }

    /**
     * Lazily builds (double-checked locking) a per-thread SparkSession configured
     * from {@link SparkExecutorConf}: network/streaming tuning, Kryo serialization,
     * and a per-class checkpoint directory.
     *
     * @return the active SparkSession for the current thread
     */
    @Override
    public SparkSession getSession() {
        log.debug("acquiring spark session");
        if (sparkSession == null) {
            synchronized (this) {
                if (sparkSession == null) {
                    String master = conf.getMaster();
                    // Make the app name unique per thread so concurrent launches are distinguishable.
                    String appName = conf.getAppName() + SymbolCnst.MIDLINE + Thread.currentThread().getName() + SymbolCnst.MIDLINE
                            + Thread.currentThread().getId() + SymbolCnst.UNDERLINE + DateTimeHelper.getDateTime();
                    log.warn("creating new spark session={}", appName);
                    SparkConf sparkConf = new SparkConf().setAppName(appName);
                    sparkConf.set(SparkContext.Network.NETWORK_TIMEOUT.key, conf.getNetworkTimeout());
                    sparkConf.set(SparkContext.ExecutionBehavior.EXECUTOR_HEARTBEATINTERVAL.key, conf.getExecutorHeartbeatinterval());
                    sparkConf.set(SparkContext.Sql.SQL_BROADCAST_TIMEOUT.key, conf.getSqlBroadcastTimeout());

                    // Streaming back-pressure and rate limits.
                    sparkConf.set(SparkContext.Streaming.STREAMING_RECEIVER_MAX_RATE.key, getConf().getStreamingReceiverMaxRate());
                    sparkConf.set(SparkContext.Streaming.STREAMING_STOP_GRACE_FULLY_ON_SHUTDOWN.key, getConf().getStreamingStopGracefullyOnShutdown());
                    sparkConf.set(SparkContext.Streaming.STREAMING_BACKPRESSURE_ENABLED.key, getConf().getStreamingBackpressureEnabled());
                    sparkConf.set(SparkContext.Streaming.STREAMING_BACKPRESSURE_INITIALRATE.key, getConf().getStreamingBackpressureInitialRate());
                    sparkConf.set(SparkContext.Streaming.STREAMING_KAFKA_MAX_RATE_PER_PARTITION.key, getConf().getStreamingKafkaMaxRatePerPartition());

                    // Kryo instead of Java serialization: smaller payloads, faster (de)serialization.
                    sparkConf.set(SparkContext.CompressionAndSerialization.SERIALIZER.key, conf.getSerializer());
                    sparkConf.registerKryoClasses(new Class[]{SparkLauncherConf.class, SparkExecutorConf.class, KafkaRecordInfo.class,
                            RecordInfo.class, MongoConf.class, SparkMongoConf.class, RedisConf.class, RedisManager.class, this.getClass()});
                    SparkSession.Builder sessionBuilder = SparkSession.builder().config(sparkConf);
                    if (StringUtils.startsWithIgnoreCase(master, SparkContext.WorkingMode.LOCAL.code)) {
                        sessionBuilder.master(SparkContext.DEFAULT_SPARK_MASTER);
                    }
                    log.debug("executor class={}", this.getClass());
                    // newSession(): give each thread its own session so no other thread can
                    // mutate its state or close it from underneath us.
                    sparkSession = sessionBuilder.getOrCreate().newSession();
                    SparkSession.setActiveSession(sparkSession);
                    String tmpDir = this.getConf().getCheckpointDir();
                    if (StringUtils.isBlank(tmpDir)) {
                        tmpDir = System.getProperty(JavaCnst.IO_TMP_DIR);
                    }
                    sparkSession.sparkContext().setCheckpointDir(tmpDir + File.separator + "checkpoint" + File.separator + this.getClass().getName());
                }
            }
        }
        return sparkSession;
    }

    /**
     * Looks up persisted Kafka offsets.
     *
     * @param jsc the streaming context
     * @return the stored offsets; currently always {@code null} — not yet implemented
     */
    @Override
    public List<KafkaOffsetsInfo> getKafkaOffsetsInfos(JavaStreamingContext jsc) {
        SparkExecutorConf executorConf = this.getConf();
        //@TODO fetch the persisted offsets from redis using executorConf
        log.debug("getKafkaOffsetsInfos not yet implemented, conf={}", executorConf);
        return null;
    }

    /**
     * Post-batch hook: logs the offset ranges of the consumed RDD and records a
     * batch marker in Redis inside a transaction.
     *
     * @param rdd the batch RDD whose underlying RDD carries the Kafka offset ranges
     */
    @Override
    public void postEachRDD(JavaRDD<ConsumerRecord<String, String>> rdd) {
        log.debug("{}_{} accumulator values can now be read",
                Thread.currentThread().getName(), Thread.currentThread().getId());
        OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
        for (OffsetRange offsetRange : offsetRanges) {
            log.debug("partition={} range={}", offsetRange.topicPartition(), offsetRange);
        }

        //@TODO transactionally update offsets in redis:
        // begin your transaction
        // update results
        // update offsets where the end of existing offsets matches the beginning of this batch of offsets
        // assert that offsets were updated correctly
        // end your transaction

        if (offsetRanges != null && offsetRanges.length > 0) {
            int timeout = TIMEOUT * 3;
            String key = new StringBuilder(ZJP).append(SymbolCnst.COLON).toString();
            Transaction tx = getJedis().multi();
            try {
                // NX: only create the marker if absent; EX: expire after `timeout` seconds.
                tx.set(key, "", "NX", "EX", timeout);
                tx.exec();
            } catch (Exception e) {
                log.error("postEachRDD redis transaction failed, key={}", key, e);
                tx.discard();
            } finally {
                try {
                    tx.close();
                } catch (IOException e) {
                    log.error("failed to close redis transaction", e);
                }
            }
        }
    }
}

