/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.spark.conf;

import lombok.Data;
import org.ipig.commons.conf.ConfService;
import org.ipig.commons.helper.GsonHelper;
import org.ipig.computing.constant.context.SparkContext;

import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;

/**
 * Spark Launcher configuration.
 *
 * <p>Aggregates the Spark properties used when launching an application: serialization,
 * SQL broadcast, dynamic allocation, shuffle, memory management, execution behavior,
 * networking, streaming, YARN, and driver/executor settings. Getters, setters,
 * {@code equals}/{@code hashCode} and {@code toString} are generated by Lombok {@code @Data}.
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
@Data
public class SparkLauncherConf extends ApplicationConf implements Serializable, ConfService, Cloneable {

    /** Explicit UID: the class is Serializable, so pin it to avoid implicit-UID drift across builds. */
    private static final long serialVersionUID = 1L;

    /** spark.serializer */
    private String serializer = SparkContext.CompressionAndSerialization.SERIALIZER.defaultValue;
    /** sql.autoBroadcastJoinThreshold */
    private String sqlAutoBroadcastJoinThreshold = SparkContext.Sql.SQL_AUTO_BROADCAST_JOIN_THRESHOLD.defaultValue;
    /** spark.sql.broadcastTimeout */
    private String sqlBroadcastTimeout = SparkContext.Sql.SQL_BROADCAST_TIMEOUT.defaultValue;

    /** Whether verbose launcher output is enabled. */
    private String verbose = "true";
    /** Whether dynamic resource allocation is enabled. */
    private String dynamicAllocationEnabled = SparkContext.DynamicAllocation.DYNAMICALLOCATION_ENABLED.defaultValue;
    /** Executor removal policy: with dynamic allocation on, an executor idle longer than this is removed. */
    private String dynamicExecutorIdleTimeout = SparkContext.DynamicAllocation.DYNAMICALLOCATION_EXECUTOR_IDLE_TIMEOUT.defaultValue;
    /** Initial number of executors when dynamic allocation is enabled. */
    private String dynamicAllocationInitialExecutors = SparkContext.DynamicAllocation.DYNAMICALLOCATION_INITIAL_EXECUTORS.defaultValue;
    /** Upper bound on executors; defaults to the maximum number of executors requested. */
    private String dynamicAllocationMaxExecutor = SparkContext.DynamicAllocation.DYNAMICALLOCATION_MAX_EXECUTOR.defaultValue;
    /** Lower bound on executors: the minimum number to keep alive. */
    private String dynamicAllocationMinExecutor = SparkContext.DynamicAllocation.DYNAMICALLOCATION_MIN_EXECUTOR.defaultValue;
    /** Executor scale-up policy: if tasks stay backlogged longer than this, start requesting executors. */
    private String dynamicAllocationSchedulerBacklogTimeout = SparkContext.DynamicAllocation.DYNAMICALLOCATION_SCHEDULER_BACKLOG_TIMEOUT.defaultValue;
    /** With dynamic allocation on, executors holding cached data are removed after this idle timeout. */
    private String dynamicAllocationCachedExecutorIdleTimeout = SparkContext.DynamicAllocation.DYNAMICALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT.defaultValue;
    /** Backlog timeout for the second and subsequent executor requests. */
    private String dynamicAllocationSustainedSchedulerBacklogTimeout = SparkContext.DynamicAllocation.DYNAMICALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT.defaultValue;
    /** Shuffle service toggle. */
    private String shuffleServiceEnabled = SparkContext.Shuffle.SHUFFLE_SERVICE_ENABLED.defaultValue;
    /** Shuffle service port. */
    private String shuffleServicePort = SparkContext.Shuffle.SHUFFLE_SERVICE_PORT.defaultValue;
    /** Shuffle implementation: either sort or hash. */
    private String shuffleManager = SparkContext.Shuffle.SHUFFLE_MANAGER.defaultValue;
    /** In-memory buffer size per shuffle file output stream; these buffers reduce the number of IO system calls. */
    private String shuffleFileBuffer = SparkContext.Shuffle.SHUFFLE_FILE_BUFFER.defaultValue;
    /** Fraction of memory used for execution and storage. */
    private String memoryFraction = SparkContext.MemoryManagement.MEMORY_FRACTION.defaultValue;
    /** Within the execution/storage region, the fraction reserved for cached (storage) data. */
    private String memorystorageFraction = SparkContext.MemoryManagement.MEMORY_STORAGE_FRACTION.defaultValue;
    /** Block size per broadcast piece; too large lowers broadcast parallelism, too small may bottleneck the BlockManager. */
    private String behaviorBroadcastBlocksize = SparkContext.ExecutionBehavior.BROADCAST_BLOCKSIZE.defaultValue;
    /** Maximum number of bytes a single RDD partition can hold. */
    private String behaviorFilesMaxPartitionBytes = SparkContext.ExecutionBehavior.FILES_MAX_PARTITION_BYTES.defaultValue;
    /** Default parallelism. */
    private String behaviorDefaultParallelism = SparkContext.ExecutionBehavior.DEFAULT_PARALLELISM.defaultValue;
    /** Heartbeat interval between each executor and the driver. */
    private String executorHeartbeatinterval = SparkContext.ExecutionBehavior.EXECUTOR_HEARTBEATINTERVAL.defaultValue;

    /** Default timeout for all network interactions. */
    private String networkTimeout = SparkContext.Network.NETWORK_TIMEOUT.defaultValue;

    /** Graceful shutdown: finish processing the in-flight batch before stopping, so a kill never interrupts mid-batch and loses unprocessed data. */
    private String streamingStopGracefullyOnShutdown = SparkContext.Streaming.STREAMING_STOP_GRACE_FULLY_ON_SHUTDOWN.defaultValue;
    /** Whether the backpressure mechanism is enabled (enabled by default). */
    private String streamingBackpressureEnabled = SparkContext.Streaming.STREAMING_BACKPRESSURE_ENABLED.defaultValue;
    /** With backpressure on, the initial maximum receive rate (integer) for each receiver's first batch; caps the first batch on cold start so a large backlog is not drained all at once and does not stall the system. */
    private String streamingBackpressureInitialRate = SparkContext.Streaming.STREAMING_BACKPRESSURE_INITIALRATE.defaultValue;
    /** Maximum records per second (integer) each consumer thread reads from each Kafka partition. */
    private String streamingKafkaMaxRatePerPartition = SparkContext.Streaming.STREAMING_KAFKA_MAX_RATE_PER_PARTITION.defaultValue;
    /** Throughput control: caps maximum records per second, e.g. a limit of 1000 means at most 1000 records/second. */
    private String streamingReceiverMaxRate = SparkContext.Streaming.STREAMING_RECEIVER_MAX_RATE.defaultValue;

    /**
     * <li>In cluster mode, how long the YARN application master waits for SparkContext initialization.
     * <li>In client mode, how long the master waits for the driver to connect to it.
     */
    private String yarnAmWaitTime = SparkContext.Yarn.YARN_AM_WAIT_TIME.defaultValue;
    /** YARN dependency jars. */
    private String yarnJars = SparkContext.Yarn.YARN_JARS.defaultValue;
    /** Number of times the RM attempts to start the AM (i.e. SparkContext init attempts); exceeding it fails the launch. */
    private String yarnApplicationMasterWaitTries = SparkContext.Yarn.YARN_APPLICATION_MASTER_WAIT_TRIES.defaultValue;

    /** sparkHome */
    private String sparkHome = "";
    /** javaHome */
    private String javaHome = "";
    /** The Spark master for the application. */
    private String master = SparkContext.DEFAULT_SPARK_MASTER;

    /** [*] The driver class path. */
    private String driverExtraClasspath = "";
    /** driverMemory */
    private String driverMemory = SparkContext.Driver.DRIVER_MEMORY.defaultValue;
    /** The driver VM options. */
    private String driverExtraJavaOptions = "";
    /** The driver native library path. */
    private String driverExtraLibraryPath = "";
    /** Whether the driver may create multiple SparkContext instances. */
    private String driverAllowMultipleContexts = SparkContext.Driver.DRIVER_ALLOW_MULTIPLE_CONTEXTS.defaultValue;
    /** [*] The executor class path. */
    private String executorExtraClasspath = "";
    /** executorMemory */
    private String executorMemory = SparkContext.Executor.EXECUTOR_MEMORY.defaultValue;
    /** executorCores */
    private String executorCores = SparkContext.Executor.EXECUTOR_CORES.defaultValue;
    /** The executor VM options. */
    private String executorExtraJavaOptions = "";
    /** The executor native library path. */
    private String executorExtraLibraryPath = "";
    /** Additional Spark properties in key/value form. */
    private Map<String, String> confMap = new HashMap<>();

    /** Checkpoint directory. */
    private String checkpointDir = "";
    /** Log level. */
    private String logLevel = "WARN";

    /**
     * Deserializes a {@code SparkLauncherConf} from its JSON representation.
     *
     * @param json JSON produced by {@link #toJson()}
     * @return the deserialized configuration
     */
    public static SparkLauncherConf toObject(String json) {
        return GsonHelper.fromJson(json, SparkLauncherConf.class);
    }

    /**
     * Serializes this configuration to JSON.
     *
     * @return the JSON representation of this conf
     */
    public String toJson() {
        return GsonHelper.toJson(this, this.getClass());
    }

    /**
     * Returns a field-for-field copy of this configuration.
     *
     * <p>This override makes cloning accessible to callers ({@link Object#clone()} alone is
     * {@code protected}) and deep-copies the mutable {@link #confMap} so the copy and the
     * original do not share map state.
     *
     * @return an independent copy of this configuration
     */
    @Override
    public SparkLauncherConf clone() {
        try {
            SparkLauncherConf copy = (SparkLauncherConf) super.clone();
            copy.confMap = new HashMap<>(this.confMap);
            return copy;
        } catch (CloneNotSupportedException e) {
            // Unreachable: this class implements Cloneable.
            throw new AssertionError(e);
        }
    }
}
