/*
 *  Copyright 2020-2025 the original author or authors.
 *  You cannot use this file unless authorized by the author.
 */

package org.ipig.computing.constant.context;

import org.apache.commons.lang3.StringUtils;
import org.ipig.commons.helper.AssertHelper;
import org.ipig.commons.helper.StringHelper;
import org.ipig.constants.SymbolCnst;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Properties;

/**
 * SparkContext
 *
 * @author <a href="mailto:comchnts@163.com">chinats</a>
 * @since 1.0
 */
public class SparkContext {
    /**
     * spark-submit CLI flag for the application main class (padded with spaces for concatenation).
     */
    public static final String APP_CLASS = " --class ";
    /**
     * spark-submit CLI flag for the application name.
     */
    public static final String APP_NAME = " --name ";
    /**
     * spark-submit CLI flag for the master URL.
     */
    public static final String APP_MASTER = " --master ";
    /**
     * spark-submit CLI flag for the deploy mode.
     */
    public static final String APP_DEPLOY_MODE = " --deploy-mode ";
    /**
     * Name of the standard Java entry-point method.
     */
    public static final String MAIN_FUNCTION = "main";
    /**
     * String literal used to switch boolean Spark properties on.
     */
    public static final String ENABLED = "true";
    /**
     * Default Spark master: local mode using all available cores.
     */
    public static final String DEFAULT_SPARK_MASTER = "local[*]";
    /**
     * Default deploy mode (cluster).
     */
    public static final String DEFAULT_DEPLOY_MODE = DeployMode.CLUSTER.code;
    /**
     * Fully-qualified class used to launch spark-submit programmatically.
     */
    public static final String SPARK_SUBMIT_CLASS = "org.apache.spark.deploy.SparkSubmit";
    /** Default Spark properties file name, resolved under the Spark conf directory. */
    public static final String DEFAULT_PROPERTIES_FILE = "spark-defaults.conf";
    /** Environment variable pointing at the Spark installation directory. */
    public static final String ENV_SPARK_HOME = "SPARK_HOME";
    /** Environment variable overriding the Spark configuration directory. */
    public static final String ENV_SPARK_CONF_DIR = "SPARK_CONF_DIR";
    // Cache of the parsed Spark properties file; lazily populated by loadSparkProperties.
    private static Properties sparkProps = null;

    /**
     * 应用属性
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Application {
        /**
         * Spark application name property.
         */
        APP_NAME("spark.app.name", "");

        /** Spark configuration property key. */
        public final String key;
        /** Value used when the property is not configured. */
        public final String defaultValue;

        /**
         * @param propertyKey the Spark configuration key
         * @param fallback    the default value for that key
         */
        Application(String propertyKey, String fallback) {
            this.key = propertyKey;
            this.defaultValue = fallback;
        }
    }

    /**
     * 网络核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Network {
        /**
         * Timeout, in seconds, applied to all network interactions.
         */
        NETWORK_TIMEOUT("spark.network.timeout", "300");

        /** Spark configuration property key. */
        public final String key;
        /** Value used when the property is not configured. */
        public final String defaultValue;

        /**
         * @param propertyKey the Spark configuration key
         * @param fallback    the default value for that key
         */
        Network(String propertyKey, String fallback) {
            this.key = propertyKey;
            this.defaultValue = fallback;
        }
    }

    /**
     * Yarn核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Yarn {
        /**
         * <li>In cluster mode, how long the YARN Application Master waits for the
         * SparkContext to be initialized.
         * <li>In client mode, how long the Application Master waits for the driver
         * to connect to it.
         */
        YARN_AM_WAIT_TIME("spark.yarn.am.waitTime", "180000"),// 180s
        /**
         * Jars the YARN application depends on.
         */
        YARN_JARS("spark.yarn.jars", ""),
        /**
         * Number of times the ResourceManager will attempt to start the AM (i.e.
         * SparkContext initializations); launching fails beyond this count.
         */
        YARN_APPLICATION_MASTER_WAIT_TRIES("spark.yarn.applicationMaster.waitTries", "10"),
        /**
         * Whether to stop the NodeManager when the Spark Shuffle Service fails to
         * initialize. Prevents application failures caused by containers running on
         * NodeManagers where the shuffle service is not running.
         */
        YARN_SHUFFLE_STOP_ON_FAILURE("spark.yarn.shuffle.stopOnFailure", "false");


        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        Yarn(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * 执行行为核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum ExecutionBehavior {
        /**
         * Size of each block of a broadcast. Too large a value reduces broadcast
         * parallelism; too small a value may make the BlockManager a bottleneck.
         */
        BROADCAST_BLOCKSIZE("spark.broadcast.blockSize", "4m"),
        /**
         * Maximum number of bytes a single RDD partition can hold when reading files.
         */
        FILES_MAX_PARTITION_BYTES("spark.files.maxPartitionBytes", "134217728"), // 128 MB
        /**
         * Heartbeat interval between executor and driver, intended in seconds.
         * NOTE(review): the value "30" carries no unit suffix; Spark may interpret
         * unit-less time values differently per property — confirm the intended unit.
         */
        EXECUTOR_HEARTBEATINTERVAL("spark.executor.heartbeatInterval", "30"),
        /**
         * Default parallelism.
         */
        DEFAULT_PARALLELISM("spark.default.parallelism", "72");// normally computed by the system
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        ExecutionBehavior(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * SparkSql核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Sql {
        /**
         * Threshold (bytes) below which a joined relation is automatically broadcast
         * by Spark SQL; default here is 100 MB.
         */
        SQL_AUTO_BROADCAST_JOIN_THRESHOLD("spark.sql.autoBroadcastJoinThreshold", "104857600"),
        /**
         * Broadcast wait timeout, in seconds.
         */
        SQL_BROADCAST_TIMEOUT("spark.sql.broadcastTimeout", "1200");

        /** Spark configuration property key. */
        public final String key;
        /** Value used when the property is not configured. */
        public final String defaultValue;

        /**
         * @param propertyKey the Spark configuration key
         * @param fallback    the default value for that key
         */
        Sql(String propertyKey, String fallback) {
            this.key = propertyKey;
            this.defaultValue = fallback;
        }
    }

    /**
     * Streaming核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Streaming {
        /**
         * Ensures that when the job is killed, the final batch of data is fully
         * processed before the program shuts down, so a forced kill cannot interrupt
         * processing or lose in-flight data.
         */
        STREAMING_STOP_GRACE_FULLY_ON_SHUTDOWN("spark.streaming.stopGracefullyOnShutdown", "true"),
        /**
         * Finer-grained subdivision of the batch interval: data within one batch
         * interval is split into smaller blocks, each mapping to one Spark partition.
         * Default 200 ms.
         */
        STREAMING_BLOCKINTERVAL("spark.streaming.blockInterval", "200"),
        /**
         * Whether to enable the write-ahead log. Disabled by default.
         */
        STREAMING_RECEIVER_WRITEAHEADLOG_ENABLE("spark.streaming.receiver.writeAheadLog.enable", "false"),
        /**
         * Whether to enable the back-pressure mechanism. Enabled by default.
         */
        STREAMING_BACKPRESSURE_ENABLED("spark.streaming.backpressure.enabled", "true"),
        /**
         * Initial maximum rate (integer) at which each receiver ingests the first
         * batch when back pressure is enabled. Limits how much the first batch
         * consumes: on a cold start the queue may hold a large backlog, and reading
         * it all at once could block the system.
         */
        STREAMING_BACKPRESSURE_INITIALRATE("spark.streaming.backpressure.initialRate", "1000"),
        /**
         * Throughput control: maximum records per second (integer) that each consumer
         * thread reads from each Kafka partition.
         */
        STREAMING_KAFKA_MAX_RATE_PER_PARTITION("spark.streaming.kafka.maxRatePerPartition", "2000"),
        /**
         * Throughput control: caps the maximum records per second; e.g. a limit of
         * 1000 means at most 1000 records per second.
         */
        STREAMING_RECEIVER_MAX_RATE("spark.streaming.receiver.maxRate", "2000");

        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        Streaming(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * 内存管理核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum MemoryManagement {
        /**
         * Fraction of heap used for execution and storage. The smaller the value,
         * the less execution memory is available and the likelier cached data is
         * evicted. The remainder holds Spark metadata and user data structures;
         * the default is recommended.
         */
        MEMORY_FRACTION("spark.memory.fraction", "0.75"),
        /**
         * Within the unified execution/storage region, the fraction reserved for
         * storage (cache); larger values leave less memory for execution.
         */
        MEMORY_STORAGE_FRACTION("spark.memory.storageFraction", "0.5");
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        MemoryManagement(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * 压缩与序列化核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum CompressionAndSerialization {
        /**
         * Class used for object serialization.
         */
        SERIALIZER("spark.serializer", "org.apache.spark.serializer.KryoSerializer"),
        /**
         * Maximum Kryo serialization buffer size.
         */
        KRYOSERIALIZER_BUFFER_MAX("spark.kryoserializer.buffer.max", "64m"),
        /**
         * Initial Kryo serialization buffer size.
         */
        KRYOSERIALIZER_BUFFER("spark.kryoserializer.buffer", "64k"),
        /**
         * Whether to serialize (compress) RDD partitions, trading significant CPU
         * for reduced storage space.
         */
        RDD_COMPRESS("spark.rdd.compress", "false"),
        /**
         * Whether broadcast variables are compressed.
         */
        BROADCAST_COMPRESS("spark.broadcast.compress", "true");
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        CompressionAndSerialization(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * 动态资源分配核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum DynamicAllocation {
        /**
         * Whether dynamic resource allocation is enabled.
         */
        DYNAMICALLOCATION_ENABLED("spark.dynamicAllocation.enabled", "true"),
        /**
         * Executor removal policy: with dynamic allocation enabled, an executor
         * that has been idle longer than this timeout is removed.
         */
        DYNAMICALLOCATION_EXECUTOR_IDLE_TIMEOUT("spark.dynamicAllocation.executorIdleTimeout", "60s"),
        /**
         * Initial number of executors when dynamic allocation is enabled.
         */
        DYNAMICALLOCATION_INITIAL_EXECUTORS("spark.dynamicAllocation.initialExecutors", "2"),
        /**
         * Upper bound on the number of executors.
         * Key fixed: Spark's property is "maxExecutors" (plural), not "maxExecutor".
         */
        DYNAMICALLOCATION_MAX_EXECUTOR("spark.dynamicAllocation.maxExecutors", "64"),
        /**
         * Lower bound on the number of executors kept alive.
         * Key fixed: Spark's property is "minExecutors" (plural), not "minExecutor".
         */
        DYNAMICALLOCATION_MIN_EXECUTOR("spark.dynamicAllocation.minExecutors", "0"),
        /**
         * Executor acquisition: how long pending tasks may wait before new
         * executors start being requested.
         */
        DYNAMICALLOCATION_SCHEDULER_BACKLOG_TIMEOUT("spark.dynamicAllocation.schedulerBacklogTimeout", "10s"),
        /**
         * With dynamic allocation enabled, an executor holding cached data is
         * removed once this idle timeout elapses.
         */
        DYNAMICALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT("spark.dynamicAllocation.cachedExecutorIdleTimeout", ""),
        /**
         * Timeout for the second and subsequent executor requests while the
         * backlog persists.
         */
        DYNAMICALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT(
                "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout", "10s");
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        DynamicAllocation(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * Shuffle核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Shuffle {
        /**
         * Shuffle service on/off switch.
         */
        SHUFFLE_SERVICE_ENABLED("spark.shuffle.service.enabled", "true"),
        /**
         * Port on which the external shuffle service listens.
         */
        SHUFFLE_SERVICE_PORT("spark.shuffle.service.port", "7337"),
        /**
         * Shuffle implementation: "sort" or "hash". Sort has higher memory
         * utilization and is the default since version 1.2.
         */
        SHUFFLE_MANAGER("spark.shuffle.manager", "sort"),
        /**
         * Size of the in-memory buffer for each shuffle file output stream;
         * these buffers reduce the number of system IO calls.
         */
        SHUFFLE_FILE_BUFFER("spark.shuffle.file.buffer", "32k");
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        Shuffle(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * Driver核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Driver {
        /**
         * Configuration key for the driver memory.
         */
        DRIVER_MEMORY("spark.driver.memory", ""),
        /**
         * Configuration key for the driver class path.
         */
        DRIVER_EXTRA_CLASSPATH("spark.driver.extraClassPath", ""),
        /**
         * Configuration key for the driver VM options.
         */
        DRIVER_EXTRA_JAVA_OPTIONS("spark.driver.extraJavaOptions", ""),
        /**
         * Configuration key for the driver native library path.
         */
        DRIVER_EXTRA_LIBRARY_PATH("spark.driver.extraLibraryPath", ""),
        /**
         * Whether multiple SparkContext instances may be created in the driver.
         */
        DRIVER_ALLOW_MULTIPLE_CONTEXTS("spark.driver.allowMultipleContexts", "false");
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        Driver(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * Executor核心key与默认值
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum Executor {
        /**
         * Configuration key for the executor memory.
         */
        EXECUTOR_MEMORY("spark.executor.memory", ""),
        /**
         * Configuration key for the number of executor CPU cores.
         */
        EXECUTOR_CORES("spark.executor.cores", ""),
        /**
         * Configuration key for the executor class path.
         */
        EXECUTOR_EXTRA_CLASSPATH("spark.executor.extraClassPath", ""),
        /**
         * Configuration key for the executor VM options.
         */
        EXECUTOR_EXTRA_JAVA_OPTIONS("spark.executor.extraJavaOptions", ""),
        /**
         * Heartbeat interval between executor and driver.
         */
        EXECUTOR_HEARTBEAT_INTERVAL("spark.executor.heartbeatInterval", ""),
        /**
         * Configuration key for the executor native library path.
         */
        EXECUTOR_EXTRA_LIBRARY_PATH("spark.executor.extraLibraryPath", "");
        /**
         * Spark configuration property key.
         */
        public final String key;
        /**
         * Value used when the property is not configured.
         */
        public final String defaultValue;

        /**
         * @param key          the Spark configuration key
         * @param defaultValue the default value for that key
         */
        Executor(String key, String defaultValue) {
            this.key = key;
            this.defaultValue = defaultValue;
        }
    }

    /**
     * Spark的工作模式
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum WorkingMode {
        /**
         * Local single-JVM mode.
         */
        LOCAL("local", "local[*]", "本地单机模式"),
        /**
         * Cluster mode on YARN.
         */
        YARN("yarn", "yarn", "集群yarn模式"),
        /**
         * Cluster mode on Mesos.
         */
        MESOS("mesos", "mesos", "集群mesos模式");

        /** Short code identifying the mode. */
        public final String code;
        /** Default master value for the mode. */
        public final String defaultValue;
        /** Human-readable title. */
        public final String title;

        /**
         * @param modeCode  the short mode code
         * @param masterDef the default master value
         * @param modeTitle the display title
         */
        WorkingMode(String modeCode, String masterDef, String modeTitle) {
            this.code = modeCode;
            this.defaultValue = masterDef;
            this.title = modeTitle;
        }

        /**
         * Resolves a mode from its short code.
         *
         * @param code the mode code to look up (may be null)
         * @return the matching mode, or {@code null} when no mode has that code
         */
        public static WorkingMode parse(String code) {
            for (WorkingMode candidate : values()) {
                // code fields are non-null literals, so equals() is null-safe here
                if (candidate.code.equals(code)) {
                    return candidate;
                }
            }
            return null;
        }
    }

    /**
     * 发布模式
     *
     * @author <a href="mailto:tansheng@zjport.gov.cn">tansheng</a>
     * @version $Id: SparkContext.java 1872 2019-09-30 09:04:06Z tansheng $
     * @since 1.0
     */
    public enum DeployMode {
        /**
         * Client mode.
         */
        CLIENT("client", "客户端模式"),
        /**
         * Cluster mode (e.g. on YARN).
         */
        CLUSTER("cluster", "集群模式");

        /** Short code identifying the deploy mode. */
        public final String code;
        /** Human-readable title. */
        public final String title;

        /**
         * @param modeCode  the short mode code
         * @param modeTitle the display title
         */
        DeployMode(String modeCode, String modeTitle) {
            this.code = modeCode;
            this.title = modeTitle;
        }

        /**
         * Resolves a deploy mode from its short code.
         *
         * @param code the mode code to look up (may be null)
         * @return the matching mode; defaults to {@link #CLUSTER} when unknown
         */
        public static DeployMode parse(String code) {
            for (DeployMode candidate : values()) {
                // code fields are non-null literals, so equals() is null-safe here
                if (candidate.code.equals(code)) {
                    return candidate;
                }
            }
            // Unknown or null codes fall back to cluster mode.
            return CLUSTER;
        }
    }


    /**
     * getSparkOnYarnUploadDir
     * @return
     */
    public static String getSparkOnYarnUploadDir() {
        String uploadAppLibDir = "";
        try {
            String yarnJars = loadSparkProperties().getProperty(SparkContext.Yarn.YARN_JARS.key);
            String substring = StringUtils.substring(yarnJars, StringUtils.indexOf(yarnJars, SymbolCnst.SLASH,
                    StringUtils.indexOf(yarnJars, SymbolCnst.DBL_SLASH) + 2));
            int index = StringUtils.lastIndexOf(substring, SymbolCnst.ASTERISK);
            uploadAppLibDir = substring;
            if (index > 0) {
                uploadAppLibDir = StringUtils.substring(substring, 0, index);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return uploadAppLibDir;
    }

    /**
     * loadSparkProperties
     *
     * @param fileName
     * @return
     * @throws IOException
     */
    public static Properties loadSparkProperties(String fileName) throws IOException {
        if (sparkProps == null) {
            File propsFile = null;
            if (StringUtils.isNotBlank(fileName)) {
                propsFile = new File(fileName);
            } else {
                propsFile = new File(getSparkConfDir(), DEFAULT_PROPERTIES_FILE);
            }
            if (propsFile.isFile()) {
                try (InputStreamReader isr = new InputStreamReader(new FileInputStream(propsFile), StandardCharsets.UTF_8)) {
                    sparkProps=new Properties();
                    sparkProps.load(isr);
                    for (Map.Entry<Object, Object> e : sparkProps.entrySet()) {
                        e.setValue(e.getValue().toString().trim());
                    }
                }catch (Exception ex){
                    ex.printStackTrace();
                }
            }
        }
        return sparkProps;
    }

    /**
     * loadSparkProperties
     *
     * @return
     * @throws IOException
     */
    /**
     * Loads the Spark properties from the default location
     * ({@code <conf dir>/spark-defaults.conf}).
     *
     * @return the cached Spark properties
     * @throws IOException if the properties file cannot be read
     */
    public static Properties loadSparkProperties() throws IOException {
        return loadSparkProperties(null);
    }

    /**
     * getSparkLibDir
     *
     * @return
     */
    /**
     * Builds the path to the Spark jar directory: {@code <SPARK_HOME>/jars}.
     *
     * @return the Spark library directory path
     */
    public static String getSparkLibDir() {
        return StringHelper.join(File.separator, getSparkHome(), "jars");
    }
    /**
     * getSparkConfDir
     *
     * @return
     */
    /**
     * Resolves the Spark configuration directory: the {@code SPARK_CONF_DIR}
     * environment variable when set, otherwise {@code <SPARK_HOME>/conf}.
     *
     * @return the Spark configuration directory path
     */
    public static String getSparkConfDir() {
        final String fromEnv = System.getenv(ENV_SPARK_CONF_DIR);
        return StringUtils.isBlank(fromEnv)
                ? StringHelper.join(File.separator, getSparkHome(), "conf")
                : fromEnv;
    }

    /**
     * getSparkHome
     *
     * @return
     */
    /**
     * Reads the Spark installation directory from the {@code SPARK_HOME}
     * environment variable.
     *
     * @return the Spark home directory, never blank
     *         (AssertHelper.hasText rejects a missing/blank value; the exact
     *         exception type is defined by AssertHelper)
     */
    public static String getSparkHome() {
        String sparkHome = System.getenv(SparkContext.ENV_SPARK_HOME);
        AssertHelper.hasText(sparkHome, "Set the SPARK_HOME environment variable!");
        return sparkHome;
    }
}

