package com.apex.core;

import com.alibaba.fastjson.JSONObject;
import com.apex.env.Execution;
import com.apex.spark.SparkBaseSink;
import com.apex.spark.SparkBaseSource;
import com.apex.spark.SparkBaseTransform;
import com.apex.spark.SparkEnvironment;
import com.apex.spark.batch.SparkBatchExecution;
import com.apex.spark.stream.SparkStreamingExecution;
import com.apex.spark.structuredstream.SparkStructuredStreamingExecution;
import com.apex.spark.utils.ConfigKeyName;
import com.apex.utils.CliUtils;
import com.apex.utils.PluginType;
import com.apex.utils.RegisteredPlugin;
import com.apex.utils.TableSchema;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * mvn clean package -Dmaven.test.skip=true
 */
/**
 * Entry point for the Spark engine runner.
 *
 * <p>Build: {@code mvn clean package -Dmaven.test.skip=true}
 *
 * <p>Reads a JSON job configuration (path taken from the command line), registers the
 * configured source/transform/sink plugins, prepares the Spark environment and starts the
 * selected execution mode (batch, streaming, or structured streaming).
 */
public class RunSparkEngine {
    private static final Logger logger = LoggerFactory.getLogger(RunSparkEngine.class);

    public static void main(String[] args) {
        if (args.length < 1) {
            logger.error("*.json not found, please check your config file argument then retry");
            // Non-zero exit status so schedulers/shells can detect the failure.
            System.exit(1);
        }
        try {
            // Resolve the configuration file location from the command line.
            String configFile = CliUtils.parseCli(args);
            // Initialize the base Spark environment.
            SparkEnvironment environment = new SparkEnvironment();
            // Parse the job configuration file.
            Config config = ConfigFactory.parseFile(new File(configFile));

            // Plugin lookup keys.
            String environmentEngine = config.getString(ConfigKeyName.JOB_ENGINE);
            String sourceEngine = config.getString(ConfigKeyName.JOB_SOURCE_PLUGIN_NAME);
            String transformEngine = config.getString(ConfigKeyName.JOB_TRANSFORM_PLUGIN_NAME);
            String sinkEngine = config.getString(ConfigKeyName.JOB_SINK_PLUGIN_NAME);

            // Optional JSON table schema for the registered temp table.
            applyTableSchema(config, configFile, environment);
            // Optional user-supplied SQL script.
            applyUserSql(config, environment);

            // Plugin registration.
            RegisteredPlugin registeredPlugin = new RegisteredPlugin();
            registeredPlugin.configEngine(environmentEngine);

            List<SparkBaseSource> sources =
                    registeredPlugin.createPlugins(PluginType.SOURCE, sourceEngine);
            List<SparkBaseTransform> transforms =
                    registeredPlugin.createPlugins(PluginType.TRANSFORM, transformEngine);
            List<SparkBaseSink> sinks = registeredPlugin.createPlugins(PluginType.SINK, sinkEngine);

            // Configure the base execution environment.
            environment.setConfig(config);
            environment.prepare(config.getBoolean(ConfigKeyName.SPARK_RUN_HIVE_MODE));

            Execution execution = createExecution(config, environment);

            // Push the configuration into each registered plugin.
            RegisteredPlugin.prepare(environment, sources, transforms, sinks);
            // Launch the application.
            execution.start(sources, transforms, sinks);

        } catch (Exception e) {
            // Log the full stack trace through SLF4J (not printStackTrace) and exit non-zero
            // so the failure is visible both in logs and in the process status.
            logger.error("spark engine job failed", e);
            System.exit(1);
        }
    }

    /**
     * Registers an optional JSON table schema (and its temp-table name) on the environment.
     * Does nothing when no schema path is configured or when parsing yields no entries.
     */
    private static void applyTableSchema(Config config, String configFile, SparkEnvironment environment) {
        if (!config.hasPath(ConfigKeyName.SPARK_TABLE_SCHEMA)) {
            return;
        }
        Map<String, JSONObject> schemaMap = TableSchema.parseTableSchema(configFile);
        if (schemaMap == null || schemaMap.isEmpty()) {
            // Guard the empty case: the old code would have registered a null schema
            // under the empty table name "".
            return;
        }
        // NOTE(review): the original iterated all keys and kept the last one, so the map is
        // presumably expected to hold exactly one temp-table entry — confirm against
        // TableSchema.parseTableSchema. Behavior (last key wins) is preserved here.
        String tableName = "";
        for (String key : schemaMap.keySet()) {
            tableName = key;
        }
        environment.setStructType(schemaMap.get(tableName));
        environment.setQueryTable(tableName);
    }

    /**
     * Loads an optional user-supplied SQL script file (resolved inside the configured
     * workspace) into the environment. Does nothing when no script is configured.
     *
     * @throws IOException if the script file cannot be read
     */
    private static void applyUserSql(Config config, SparkEnvironment environment) throws IOException {
        if (!config.hasPath(ConfigKeyName.SPARK_RUN_SQLFile)) {
            return;
        }
        String sqlFilePath = config.getString(ConfigKeyName.SPARK_RUN_SQLFile);
        if (StringUtils.isBlank(sqlFilePath)) {
            return;
        }
        String workSpace = config.getString(ConfigKeyName.SPARK_RUN_WORKSPACE);
        // Paths.get joins the segments portably instead of manual "/" concatenation.
        List<String> sqlAll = Files.readAllLines(Paths.get(workSpace, sqlFilePath));
        environment.setSqlAll(sqlAll);
    }

    /**
     * Creates the execution strategy matching the configured runtime mode.
     *
     * @throws IllegalArgumentException when the mode is not one of
     *     batch / streaming / structuredstreaming
     */
    private static Execution createExecution(Config config, SparkEnvironment environment) {
        String mode = config.getString(ConfigKeyName.SPARK_RUNTIME_MODE).toLowerCase();
        switch (mode) {
            case "batch":
                return new SparkBatchExecution(environment);
            case "structuredstreaming":
                return new SparkStructuredStreamingExecution(environment);
            case "streaming":
                return new SparkStreamingExecution(environment);
            default:
                // Fail with a clear message instead of System.exit(0), which reported
                // success for an unsupported mode.
                throw new IllegalArgumentException(
                        "unsupported runtime mode '" + mode
                                + "'; expected one of: batch, structuredstreaming, streaming");
        }
    }
}
