/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.streaming.kafka;

import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.SparkSession;
import org.ipig.commons.conf.mongo.MongoConf;
import org.ipig.commons.conf.redis.RedisConf;
import org.ipig.commons.helper.DateTimeHelper;
import org.ipig.commons.helper.GsonHelper;
import org.ipig.commons.helper.StringHelper;
import org.ipig.computing.commons.pojo.kafka.KafkaRecordInfo;
import org.ipig.computing.commons.pojo.kafka.RecordInfo;
import org.ipig.computing.constant.context.SparkContext;
import org.ipig.computing.spark.SparkLauncherService;
import org.ipig.computing.spark.conf.SparkExecutorConf;
import org.ipig.computing.spark.conf.SparkLauncherConf;
import org.ipig.computing.spark.conf.SparkMongoConf;
import org.ipig.computing.spark.streaming.SparkStreamExecutorLauncher;
import org.ipig.constants.JavaCnst;
import org.ipig.constants.SymbolCnst;
import org.ipig.data.redis.RedisManager;

import java.io.File;
import java.util.Iterator;

/**
 * AbstractSparkStreamKafkaExecutor
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
@Slf4j
@Slf4j
public abstract class AbstractSparkStreamKafkaExecutor implements SparkStreamKafkaExecutorService, SparkLauncherService<SparkExecutorConf> {
    /** Executor configuration; must be set before {@link #getSession()} is called. */
    @Getter
    @Setter
    private SparkExecutorConf conf;
    /** Lazily created session; volatile so the double-checked locking in {@link #getSession()} is safe. */
    private volatile SparkSession sparkSession;

    /**
     * Hook invoked after each Kafka partition has been processed.
     * The default implementation is a deliberate no-op; subclasses override it
     * when per-partition post-processing is required.
     *
     * @param partition records of the partition that was just consumed
     */
    @Override
    public void postEachPartition(Iterator<ConsumerRecord<String, String>> partition) {
        // Intentionally empty: override in subclasses when needed.
    }

    /**
     * Returns the {@link SparkSession} for this executor, creating and configuring it
     * on first use. Thread-safe via double-checked locking on the volatile
     * {@code sparkSession} field, so the session is built at most once per instance.
     *
     * <p>The application name embeds the current thread name/id and a timestamp so each
     * executor thread gets a distinguishable app name. A fresh {@code newSession()} is
     * always created from the shared underlying context, because a shared
     * {@code getOrCreate()} session could be closed or have its state mutated by other
     * threads.
     *
     * @return the (possibly newly created) Spark session
     */
    @Override
    public SparkSession getSession() {
        log.debug("获取会话");
        if (sparkSession == null) {
            synchronized (this) {
                if (sparkSession == null) {
                    String master = conf.getMaster();
                    String appName = conf.getAppName() + SymbolCnst.MIDLINE + Thread.currentThread().getName() + SymbolCnst.MIDLINE
                            + Thread.currentThread().getId() + SymbolCnst.UNDERLINE + DateTimeHelper.getDateTime();
                    log.warn("获取新会话={}", appName);
                    SparkConf sparkConf = new SparkConf().setAppName(appName);
                    sparkConf.set(SparkContext.Network.NETWORK_TIMEOUT.key, conf.getNetworkTimeout());
                    sparkConf.set(SparkContext.ExecutionBehavior.EXECUTOR_HEARTBEATINTERVAL.key, conf.getExecutorHeartbeatinterval());
                    sparkConf.set(SparkContext.Sql.SQL_BROADCAST_TIMEOUT.key, conf.getSqlBroadcastTimeout());

                    // Use Kryo instead of Java serialization to cut network/memory overhead and speed up serialization.
                    sparkConf.set(SparkContext.CompressionAndSerialization.SERIALIZER.key, conf.getSerializer());
                    sparkConf.registerKryoClasses(new Class[]{SparkLauncherConf.class, SparkExecutorConf.class, KafkaRecordInfo.class,
                            RecordInfo.class, MongoConf.class, SparkMongoConf.class, RedisConf.class, RedisManager.class, this.getClass()});
                    SparkSession.Builder sessionBuilder = SparkSession.builder().config(sparkConf);
                    if (StringUtils.startsWithIgnoreCase(master, SparkContext.WorkingMode.LOCAL.code)) {
                        sessionBuilder.master(SparkContext.DEFAULT_SPARK_MASTER);
                    }
                    log.debug("executor class={}", this.getClass());
                    // Always create a per-thread session. The former `newSession` flag was hard-coded
                    // to true, so the shared-session (plain getOrCreate) branch was dead code and has
                    // been removed.
                    sparkSession = sessionBuilder.getOrCreate().newSession();
                    SparkSession.setActiveSession(sparkSession); // bind the new session to the current thread
//                    sparkSession.sparkContext().setLogLevel(conf.getLogLevel());
                    String tmpDir = this.getConf().getCheckpointDir();
                    if (StringUtils.isBlank(tmpDir)) {
                        // Fall back to the JVM temp dir when no checkpoint dir is configured.
                        tmpDir = System.getProperty(JavaCnst.IO_TMP_DIR);
                    }
                    sparkSession.sparkContext().setCheckpointDir(tmpDir + File.separator + "checkpoint" + File.separator + this.getClass().getName());
                }
            }
        }
        return sparkSession;
    }

    /**
     * @return the launcher entry-point class name submitted to Spark
     */
    @Override
    public String getMainClass() {
        return SparkStreamExecutorLauncher.class.getName();
    }

    /**
     * Serializes a deep clone of the executor configuration (with the concrete executor
     * class name filled in) to JSON and UTF-8-encodes it for use as the launcher's main
     * argument. Cloning keeps this instance's {@code conf} untouched.
     *
     * @return UTF-8 encoded JSON form of the executor configuration
     */
    @Override
    public String getMainArg() {
        SparkExecutorConf executorConf = this.getConf().deepClone();
        // TODO dev-only: randomizing the consumer group must remain disabled for test/production.
//        String group = RandomStringUtils.randomAlphanumeric(10);
//        executorConf.getSparkKafkaConf().getConsumerConf().setGroupId(group);
        executorConf.setExecutorClass(this.getExecutorClass());
        String json = GsonHelper.toJson(executorConf, SparkExecutorConf.class);
        // Parameterized so the payload is never interpreted as an SLF4J format string.
        // NOTE(review): this logs the full config at WARN — confirm it contains no secrets.
        log.warn("{}", json);
        return StringHelper.encodeByUTF8(json);
    }

    /**
     * @return the concrete executor implementation class name
     */
    @Override
    public String getExecutorClass() {
        return this.getClass().getName();
    }

    /**
     * Hook invoked after each RDD micro-batch; accumulator values may be read here.
     * The default implementation only logs; subclasses may override.
     *
     * @param rdd the micro-batch that was just processed
     */
    @Override
    public void postEachRDD(JavaRDD<ConsumerRecord<String, String>> rdd) {
        // Parameterized logging: no string concatenation when DEBUG is disabled; output is unchanged.
        log.debug("{}_{} 可以进行累加器值的获取", Thread.currentThread().getName(), Thread.currentThread().getId());
    }
}

