package com.hub.realtime.flink.submit.base;

import cn.hutool.core.date.DateUtil;
import com.alibaba.fastjson.JSON;
import com.hub.realtime.common.core.domain.model.ClusterInfo;
import com.hub.realtime.common.exception.UtilException;
import com.hub.realtime.common.model.*;
import com.hub.realtime.common.utils.JobUtils;
import com.hub.realtime.common.utils.hadoop.HdfsUtil;
import com.hub.realtime.flink.submit.model.FlinkSubmitCommon;
import com.hub.realtime.flink.submit.model.YarnRunEnv;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.JobID;
import org.apache.flink.client.cli.ClientOptions;
import org.apache.flink.client.deployment.ClusterClientFactory;
import org.apache.flink.client.deployment.ClusterRetrieveException;
import org.apache.flink.client.deployment.DefaultClusterClientServiceLoader;
import org.apache.flink.client.deployment.application.ApplicationConfiguration;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.configuration.*;
import org.apache.flink.runtime.util.HadoopUtils;
import org.apache.flink.util.FlinkException;
import org.apache.flink.yarn.Utils;
import org.apache.flink.yarn.YarnClientYarnClusterInformationRetriever;
import org.apache.flink.yarn.YarnClusterClientFactory;
import org.apache.flink.yarn.YarnClusterDescriptor;
import org.apache.flink.yarn.configuration.YarnConfigOptions;
import org.apache.flink.yarn.configuration.YarnDeploymentTarget;
import org.apache.flink.yarn.configuration.YarnLogConfigUtil;
import org.apache.flink.yarn.executors.YarnSessionClusterExecutor;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import static com.hub.realtime.common.constant.HdfsConstants.*;
import static org.apache.flink.configuration.HighAvailabilityOptions.HA_CLUSTER_ID;
import static org.apache.flink.yarn.configuration.YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE;

@Slf4j
public abstract class FlinkYarnSubmitBase extends FlinkSubmitBase {

    /**
     * Stops a running Flink-on-YARN job, optionally taking a savepoint first.
     * <p>
     * Behaviour matrix (isSavePoint / isDrain):
     * <ul>
     *   <li>{@code false / false} — plain cancel</li>
     *   <li>{@code true / false}  — cancel with savepoint</li>
     *   <li>{@code true / true}   — stop with savepoint, draining the pipeline</li>
     *   <li>{@code false / true}  — no action is taken (returns an empty string)</li>
     * </ul>
     * If the graceful path fails or times out, the whole YARN application is
     * killed as a fallback.
     *
     * @param flinkStopRequest stop parameters (YARN app id, job id, savepoint options)
     * @return the savepoint path on success, or an empty string when no savepoint was taken
     */
    @Override
    public String stop(FlinkStopRequest flinkStopRequest) {
        String stopSuccessSavepoint = "";
        Configuration flinkConfiguration = getDefaultFLinkConf(flinkStopRequest.getFlinkHome() + "/conf");
        flinkConfiguration.set(YarnConfigOptions.APPLICATION_ID, flinkStopRequest.getYid());
        YarnClusterClientFactory yarnClusterClientFactory = new YarnClusterClientFactory();
        ApplicationId applicationId = yarnClusterClientFactory.getClusterId(flinkConfiguration);
        if (applicationId == null) {
            throw new UtilException("getClusterClient error. No cluster id was specified. Please specify a cluster to which you would like to connect！");
        }
        JobID jobID = parseJobId(flinkStopRequest.getJid());
        YarnRunEnv yarnRunEnv = createyarnClusterDescriptor(flinkStopRequest.getClusterInfo(), flinkConfiguration, null);
        ClusterClient<ApplicationId> clusterClient;
        try {
            clusterClient = yarnRunEnv.getYarnClusterDescriptor().retrieve(applicationId).getClusterClient();
        } catch (ClusterRetrieveException e) {
            // FIX: the original swallowed this exception and went on to dereference a
            // null clusterClient, surfacing as a misleading NullPointerException.
            log.error("retrieve cluster client failed, applicationId={}", applicationId, e);
            throw new UtilException(e);
        }
        String savePointDir = "";
        try {
            if (flinkStopRequest.getIsSavePoint()) {
                if (StringUtils.isNotEmpty(flinkStopRequest.getSavePointPath())) {
                    savePointDir = HdfsUtil.getDefaultFS(flinkStopRequest.getClusterInfo()).concat(flinkStopRequest.getSavePointPath());
                } else {
                    // No explicit path: fall back to the application-level default savepoint dir.
                    savePointDir = HdfsUtil.getDefaultFS(flinkStopRequest.getClusterInfo()).concat(APP_SAVEPOINTS);
                }
            }
            // cancel() yields Acknowledge, the savepoint variants yield the savepoint
            // path as a String; we only need toString() of whichever completes.
            CompletableFuture<?> savepointPathFuture = null;
            if (!flinkStopRequest.getIsSavePoint() && !flinkStopRequest.getIsDrain()) {
                savepointPathFuture = clusterClient.cancel(jobID);
            }
            if (flinkStopRequest.getIsSavePoint() && !flinkStopRequest.getIsDrain()) {
                savepointPathFuture = clusterClient.cancelWithSavepoint(jobID, savePointDir);
            }
            if (flinkStopRequest.getIsSavePoint() && flinkStopRequest.getIsDrain()) {
                savepointPathFuture = clusterClient.stopWithSavepoint(jobID, flinkStopRequest.getIsDrain(), savePointDir);
            }
            if (savepointPathFuture != null) {
                Duration duration = flinkConfiguration.get(ClientOptions.CLIENT_TIMEOUT);
                try {
                    log.info("====获取到的duration:" + duration.toMillis());
                    stopSuccessSavepoint = savepointPathFuture.get(duration.toMillis(), TimeUnit.MILLISECONDS).toString();
                } catch (Exception e) {
                    // Graceful stop failed or timed out: kill the whole YARN application.
                    yarnRunEnv.getYarnClusterDescriptor().killCluster(applicationId);
                    log.error("正常取消失败,直接kill掉集群：" + e.getMessage(), e);
                }
            }
        } catch (Exception ex) {
            log.error("stop job failed, falling back to killing the cluster, applicationId={}", applicationId, ex);
            try {
                yarnRunEnv.getYarnClusterDescriptor().killCluster(applicationId);
            } catch (Exception e) {
                log.error("kill集群失败：" + e.getMessage(), e);
                throw new UtilException(e);
            }
        }
        return stopSuccessSavepoint;
    }


    /**
     * Builds the effective Flink {@link Configuration} for a run request by layering,
     * in order: the flink-conf defaults from the CLI frontend, YARN descriptor options,
     * per-request overrides (memory, slots, dynamic options), the generic job
     * configuration, provided-lib directories, the entry class and program arguments.
     *
     * @param flinkSubmitCommon shared submit context (CLI frontend, job jars, default config)
     * @param flinkRunRequest   the run request being submitted
     * @return the merged configuration used to deploy the job
     */
    protected Configuration getEffectiveConfiguration(FlinkSubmitCommon flinkSubmitCommon, FlinkRunRequest flinkRunRequest) {
        Configuration effectiveConfiguration = new Configuration(flinkSubmitCommon.getCliFrontend().getConfiguration());
        // YARN descriptor options (queue, application name/type, HA cluster id, node label, ...).
        Configuration yarnFlinkConf = new Configuration();
        applyDescriptorOptionToConfig(flinkRunRequest, yarnFlinkConf);
        // Common YARN-related settings taken from the request.
        if (flinkRunRequest.getYarnConf() != null) {
            YarnConf yarnConf = flinkRunRequest.getYarnConf();
            if (StringUtils.isNotEmpty(yarnConf.getYarnapplicationId())) {
                ApplicationId applicationId = ConverterUtils.toApplicationId(yarnConf.getYarnapplicationId());
                if (applicationId != null) {
                    // A parseable application id means we attach to an existing session cluster.
                    if (StringUtils.isNotEmpty(yarnConf.getZookeeperNamespace())) {
                        effectiveConfiguration.setString(HA_CLUSTER_ID, yarnConf.getZookeeperNamespace());
                    }
                    effectiveConfiguration.setString(
                            YarnConfigOptions.APPLICATION_ID, ConverterUtils.toString(applicationId));
                    effectiveConfiguration.setString(
                            DeploymentOptions.TARGET, YarnSessionClusterExecutor.NAME);
                } else {
                    effectiveConfiguration.setString(DeploymentOptions.TARGET, flinkRunRequest.getExecuteMode());
                }
            } else {
                effectiveConfiguration.setString(DeploymentOptions.TARGET, flinkRunRequest.getExecuteMode());
            }

            // JobManager total process memory; a bare number is interpreted as MB.
            if (StringUtils.isNotEmpty(yarnConf.getJmMemory())) {
                String jmMemoryVal = yarnConf.getJmMemory();
                if (!MemorySize.MemoryUnit.hasUnit(jmMemoryVal)) {
                    jmMemoryVal += "m";
                }
                effectiveConfiguration.set(
                        JobManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.parse(jmMemoryVal));
            }

            // TaskManager total process memory; a bare number is interpreted as MB.
            if (StringUtils.isNotEmpty(yarnConf.getTmMemory())) {
                String tmMemoryVal = yarnConf.getTmMemory();
                if (!MemorySize.MemoryUnit.hasUnit(tmMemoryVal)) {
                    tmMemoryVal += "m";
                }
                effectiveConfiguration.set(
                        TaskManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.parse(tmMemoryVal));
            }

            if (yarnConf.getYarnslots() != null) {
                effectiveConfiguration.setInteger(
                        TaskManagerOptions.NUM_TASK_SLOTS,
                        yarnConf.getYarnslots());
            }

            // Dynamic -D style options; empty values are skipped.
            if (yarnConf.getOptions() != null && !yarnConf.getOptions().isEmpty()) {
                for (Map.Entry<String, String> op : yarnConf.getOptions().entrySet()) {
                    if (StringUtils.isNotEmpty(op.getValue())) {
                        effectiveConfiguration.setString(op.getKey(), op.getValue());
                    }
                }
            }

        } else {
            effectiveConfiguration.setString(DeploymentOptions.TARGET, flinkRunRequest.getExecuteMode());
        }
        effectiveConfiguration.addAll(yarnFlinkConf);
        // Generic Flink job configuration.
        Configuration configuration = applyToConfiguration(flinkRunRequest, flinkSubmitCommon.getJobJars());
        effectiveConfiguration.addAll(configuration);
        List<String> providedLibs = new ArrayList<>();
        // Flink's own lib dir must be provided, otherwise the deployment fails,
        // e.g. apps/realtime/flink/flinkserver/flink/1.13.2/lib
        providedLibs.add(flinkRunRequest.getWorkSpacseEnv().getFlinkLib());
        // NOTE(review): this is set on `configuration` AFTER it was merged into
        // effectiveConfiguration, so the value never reaches the effective config.
        // createYarnClusterDescriptor() later sets APPLICATION_LOG_CONFIG_FILE to a
        // concrete path anyway — confirm intent before changing.
        configuration.set(APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME);
        if (flinkRunRequest.getExecuteMode().equals(YarnDeploymentTarget.APPLICATION.getName())) {
            effectiveConfiguration.set(PipelineOptions.JARS, Collections.singletonList(flinkRunRequest.getJarFile()));
            ConfigOption<Integer> maxRetainedCheckpoints = CheckpointingOptions.MAX_RETAINED_CHECKPOINTS;
            effectiveConfiguration.set(maxRetainedCheckpoints, flinkSubmitCommon.getDefaultConfig().get(maxRetainedCheckpoints));

            // When Kerberos is enabled, fail fast if the current user has no valid credentials.
            try {
                UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
                log.debug("UserGroupInformation currentUser: " + currentUser);
                if (HadoopUtils.isKerberosSecurityEnabled(currentUser)) {
                    log.debug("kerberos Security is Enabled...");
                    Boolean useTicketCache = getDefaultFLinkConf(flinkRunRequest.getFlinkHome().concat("/conf")).get(SecurityOptions.KERBEROS_LOGIN_USETICKETCACHE);
                    if (!HadoopUtils.areKerberosCredentialsValid(currentUser, useTicketCache)) {
                        throw new RuntimeException("Hadoop security with Kerberos is enabled but the login user " + currentUser + "  does not have Kerberos credentials or delegation tokens!");
                    }
                }
            } catch (Exception ee) {
                log.error("获取hadoop用户出错：" + ee.getMessage(), ee);
                throw new UtilException("获取hadoop用户出错：" + ee.getMessage());
            }

            // Add shims and connector dependencies keyed by the major flink version (e.g. "1.13").
            String flinkMainVersion = flinkRunRequest.getFlinkVersion().substring(0, 4);

            if (!flinkRunRequest.getJobType().equals("flink")) {
                providedLibs.add(HdfsUtil.getDefaultFS(flinkRunRequest.getClusterInfo()).concat(APP_SHIMS).concat("/flink").concat(flinkMainVersion));
                providedLibs.add(HdfsUtil.getDefaultFS(flinkRunRequest.getClusterInfo()).concat(APP_CONNECTORS).concat("/flink").concat(flinkMainVersion));
            }

            // Add third-party dependencies uploaded for this specific app/job, if any.
            String hdfsJobLib = HdfsUtil.getDefaultFS(flinkRunRequest.getClusterInfo()).concat(UPLOAD_DEP_PATH)
                    .concat("/").concat(flinkRunRequest.getAppUid())
                    .concat("/")
                    .concat(flinkRunRequest.getJobUid());

            if (HdfsUtil.exists(flinkRunRequest.getClusterInfo(), hdfsJobLib)) {
                providedLibs.add(hdfsJobLib);
            }

        } else {
            providedLibs.add(flinkRunRequest.getWorkSpacseEnv().getAppJars());
        }
        if (!providedLibs.isEmpty()) {
            effectiveConfiguration.set(YarnConfigOptions.PROVIDED_LIB_DIRS, providedLibs);
        }
        // Program entry class.
        effectiveConfiguration.set(ApplicationConfiguration.APPLICATION_MAIN_CLASS, flinkRunRequest.getMainClass());

        // Program arguments; SQL jobs get the sql/jobName/exeModel/catalog arguments,
        // plus any user-supplied key/value pairs.
        List<String> args = new ArrayList<>();
        if (!flinkRunRequest.getJobType().equals("flink")) {
            if (StringUtils.isEmpty(flinkRunRequest.getSql())) {
                throw new IllegalArgumentException("执行的sql为空！");
            }
            args.add("--sql");
            args.add(flinkRunRequest.getSql());

            args.add("--jobName");
            args.add(flinkRunRequest.getAppName());

            args.add("--exeModel");
            args.add(flinkRunRequest.getTableModel());

            // Catalog setup: upload hive-site.xml to HDFS and point the catalog at it.
            if (flinkRunRequest.getFlinkCatalog() != null) {
                String hdfsHiveConf = UPLOAD_HIVE_CONF_PATH.concat("/")
                        .concat(flinkRunRequest.getAppUid())
                        .concat("/")
                        .concat(flinkRunRequest.getJobUid());

                if (HdfsUtil.exists(flinkRunRequest.getClusterInfo(), hdfsHiveConf)) {
                    HdfsUtil.delete(flinkRunRequest.getClusterInfo(), hdfsHiveConf);
                }
                HdfsUtil.mkdirs(flinkRunRequest.getClusterInfo(), hdfsHiveConf);
                HdfsUtil.upload(flinkRunRequest.getClusterInfo(), flinkRunRequest.getFlinkCatalog().getHiveConfDir()
                        .concat("/hive-site.xml"), hdfsHiveConf, false, true);
                flinkRunRequest.getFlinkCatalog().setHiveConfDir(HdfsUtil.getDefaultFS(flinkRunRequest.getClusterInfo())
                        .concat(hdfsHiveConf));
                args.add("--catalog");
                args.add(JSON.toJSONString(flinkRunRequest.getFlinkCatalog()));
            }
        }
        if (flinkRunRequest.getArgs() != null && !flinkRunRequest.getArgs().isEmpty()) {
            for (Map.Entry<String, String> map : flinkRunRequest.getArgs().entrySet()) {
                args.add("--" + map.getKey());
                args.add(map.getValue());
            }
        }
        if (!args.isEmpty()) {
            effectiveConfiguration.set(ApplicationConfiguration.APPLICATION_ARGS, args);
        }

        return effectiveConfiguration;
    }

    /**
     * Copies descriptor-level options (dist jar, queue, application name/type,
     * HA cluster id, node label) from the request onto {@code configuration}.
     * Always deploys detached with the conf dir set to {@code <flinkHome>/conf}.
     */
    private void applyDescriptorOptionToConfig(FlinkRunRequest flinkRunRequest, final Configuration configuration) {
        YarnConf yarnConf = flinkRunRequest.getYarnConf();
        // FIX: the original compared the String execute mode against the enum constant
        // YarnDeploymentTarget.APPLICATION (never equal, the branch was dead); compare
        // against its name, mirroring the check used elsewhere in this class.
        if (flinkRunRequest.getExecuteMode().equals(YarnDeploymentTarget.APPLICATION.getName())) {
            configuration.setString(YarnConfigOptions.FLINK_DIST_JAR, (new Path(flinkRunRequest.getWorkSpacseEnv().getFlinkDistJar())).toString());
        } else {
            // For non-application modes the dist jar is set on the cluster descriptor
            // via setLocalJarPath() in createYarnClusterDescriptor().
        }

        // YARN options.
        if (yarnConf != null) {
            if (StringUtils.isNotEmpty(yarnConf.getYarnqueue())) {
                configuration.setString(YarnConfigOptions.APPLICATION_QUEUE, yarnConf.getYarnqueue());
            }
            if (StringUtils.isNotEmpty(yarnConf.getYarnName())) {
                configuration.setString(YarnConfigOptions.APPLICATION_NAME, yarnConf.getYarnName());
            }
            if (StringUtils.isNotEmpty(yarnConf.getYarnapplicationType())) {
                configuration.setString(YarnConfigOptions.APPLICATION_TYPE, yarnConf.getYarnapplicationType());
            }
            if (StringUtils.isNotEmpty(yarnConf.getZookeeperNamespace())) {
                configuration.setString(HA_CLUSTER_ID, yarnConf.getZookeeperNamespace());
            } else if (StringUtils.isNotEmpty(yarnConf.getYarnzookeeperNamespace())) {
                configuration.setString(HA_CLUSTER_ID, yarnConf.getYarnzookeeperNamespace());
            }
            if (StringUtils.isNotEmpty(yarnConf.getNodeLable())) {
                configuration.setString(YarnConfigOptions.NODE_LABEL, yarnConf.getNodeLable());
            }
        }
        configuration.setBoolean(DeploymentOptions.ATTACHED, false);
        configuration.set(DeploymentOptionsInternal.CONF_DIR, flinkRunRequest.getFlinkHome().concat("/conf"));
    }

    /**
     * Builds the YARN run environment for a run request: creates the cluster
     * descriptor, materializes the request's logback configuration under
     * {@code <flinkHome>/conf}, and registers the files to ship to the containers.
     *
     * @param flinkConf       the effective Flink configuration (mutated: log config file is set)
     * @param flinkRunRequest the run request being submitted
     * @return the populated run environment
     */
    protected YarnRunEnv createYarnClusterDescriptor(Configuration flinkConf, FlinkRunRequest flinkRunRequest) {
        YarnRunEnv yarnRunEnv = new YarnRunEnv();
        yarnRunEnv.setYarnConfDir(flinkRunRequest.getClusterInfo().getHadoopConfDir());
        createyarnClusterDescriptor(flinkRunRequest.getClusterInfo(), flinkConf, yarnRunEnv);
        // Collect files to ship to the YARN containers.
        List<String> shipFiles = new ArrayList<>();

        // FIX: enum-vs-string compare (always unequal) shipped the plugins dir in every
        // mode; in application mode the dependencies come from provided-lib dirs instead.
        if (!flinkRunRequest.getExecuteMode().equals(YarnDeploymentTarget.APPLICATION.getName())) {
            shipFiles.add(flinkRunRequest.getFlinkHome().concat("/plugins"));
        }
        if (flinkRunRequest.getYarnConf() != null) {
            YarnConf yarnConf = flinkRunRequest.getYarnConf();
            if (yarnConf.getShipFiles() != null && !yarnConf.getShipFiles().isEmpty()) {
                shipFiles.addAll(yarnConf.getShipFiles());
            }
        }

        // Write the request's logback config locally and register it with the deployment.
        // FIX: done unconditionally (previously reachable only through the ship-files
        // branch, which always ran due to the comparison bug fixed above), with
        // try-with-resources-free Files.write in explicit UTF-8 — the original leaked
        // both streams on failure and used the platform default charset.
        String localLogFilePath = flinkRunRequest.getFlinkHome().concat("/conf/logback.xml");
        try {
            Files.write(new File(localLogFilePath).toPath(),
                    flinkRunRequest.getLogText().getBytes(StandardCharsets.UTF_8));
            flinkConf.set(APPLICATION_LOG_CONFIG_FILE, localLogFilePath);
        } catch (IOException e) {
            log.error("创建日志文件出错：" + e.getMessage(), e);
            throw new UtilException("创建日志文件出错：" + e.getMessage());
        }

        if (!shipFiles.isEmpty()) {
            List<File> files = new ArrayList<>();
            for (String shipFile : shipFiles) {
                files.add(new File(shipFile));
            }
            yarnRunEnv.getYarnClusterDescriptor().addShipFiles(files);
        }
        if (!flinkRunRequest.getExecuteMode().equals(YarnDeploymentTarget.APPLICATION.getName())) {
            yarnRunEnv.getYarnClusterDescriptor().setLocalJarPath(new Path(flinkRunRequest.getWorkSpacseEnv().getFlinkDistJar()));
        }
        return yarnRunEnv;
    }


    /**
     * Creates (or fills) a {@link YarnRunEnv} with a started {@link YarnClient} and a
     * {@link YarnClusterDescriptor} built from the given Flink configuration plus the
     * cluster's {@code yarn-site.xml}.
     *
     * @param clusterInfo the target cluster (supplies the Hadoop conf dir)
     * @param flinkConf   the effective Flink configuration
     * @param yarnRunEnv  environment to populate; a new one is created when {@code null}
     * @return the populated run environment
     * @throws IllegalArgumentException when no YARN conf dir is configured
     */
    public YarnRunEnv createyarnClusterDescriptor(ClusterInfo clusterInfo, Configuration flinkConf, YarnRunEnv yarnRunEnv) {
        if (yarnRunEnv == null) {
            yarnRunEnv = new YarnRunEnv();
        }
        yarnRunEnv.setYarnConfDir(clusterInfo.getHadoopConfDir());
        DefaultClusterClientServiceLoader clusterClientServiceLoader = new DefaultClusterClientServiceLoader();
        ClusterClientFactory<ApplicationId> clusterClientFactory = clusterClientServiceLoader.getClusterClientFactory(flinkConf);
        yarnRunEnv.setClusterClientFactory(clusterClientFactory);
        YarnClient yarnClient = YarnClient.createYarnClient();
        YarnConfiguration yarnConfiguration =
                Utils.getYarnAndHadoopConfiguration(flinkConf);
        if (StringUtils.isEmpty(yarnRunEnv.getYarnConfDir())) {
            throw new IllegalArgumentException("请配置yarn：YARN_CONF_DIR!");
        }
        yarnConfiguration.addResource(new Path(yarnRunEnv.getYarnConfDir().concat("/yarn-site.xml")));
        yarnClient.init(yarnConfiguration);
        yarnClient.start();
        // sharedYarnClient=false: the descriptor owns the client and closes it.
        YarnClusterDescriptor clusterDescriptor = new YarnClusterDescriptor(
                flinkConf,
                yarnConfiguration,
                yarnClient,
                YarnClientYarnClusterInformationRetriever.create(yarnClient),
                false);
        yarnRunEnv.setYarnClusterDescriptor(clusterDescriptor);
        return yarnRunEnv;
    }


    /**
     * Builds the submit response for a freshly deployed cluster: resolves the YARN
     * application id and, when the REST API already reports jobs, the first job's
     * id and state.
     *
     * @param clusterInfo       target cluster used to query the Flink REST API
     * @param clusterClient     client of the deployed cluster
     * @param clusterDescriptor descriptor of the deployed cluster (currently unused here;
     *                          kept so subclasses can override with the same signature)
     * @return the populated response
     * @throws UtilException when the cluster/job information cannot be retrieved
     */
    protected FlinkSubmitResponse makeRunResponse(ClusterInfo clusterInfo, ClusterClient<ApplicationId> clusterClient, YarnClusterDescriptor clusterDescriptor) {
        try {
            FlinkSubmitResponse flinkSubmitResponse = new FlinkSubmitResponse();
            ApplicationId clusterId = clusterClient.getClusterId();
            if (clusterId != null) {
                FlinkRestModel flinkRestModel = JobUtils.getJobInfo(clusterInfo, clusterId);
                if (flinkRestModel.getJobs() != null && !flinkRestModel.getJobs().isEmpty()) {
                    JobInfo jobInfo = flinkRestModel.getJobs().get(0);
                    flinkSubmitResponse.setState(jobInfo.getState());
                    flinkSubmitResponse.setJobId(jobInfo.getJid());
                }
                flinkSubmitResponse.setAppId(clusterId.toString());
                log.info("===============任务提交完成，YID: " + flinkSubmitResponse.getAppId()
                        + " ,JobId: " + flinkSubmitResponse.getJobId() + " " + DateUtil.date());
            }
            return flinkSubmitResponse;
        } catch (Exception ex) {
            log.error("提交失败：" + ex.getMessage(), ex);
            throw new UtilException("提交失败：" + ex.getMessage());
        }
    }


}

