/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


package com.sui.bigdata.flink.sql.launcher;

import avro.shaded.com.google.common.collect.Lists;
import com.alibaba.fastjson.JSONObject;
import com.sui.bigdata.flink.sql.core.Main;
import com.sui.bigdata.flink.sql.core.util.ConfigConstrant;
import com.sui.bigdata.flink.sql.core.util.ScanUdfUtils;
import com.sui.bigdata.flink.sql.core.util.SendMsgUtils;
import com.sui.bigdata.flink.sql.core.util.PluginUtil;
import org.apache.commons.io.Charsets;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.JobSubmissionResult;
import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import org.apache.flink.client.cli.CliArgsException;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.PackagedProgram;
import org.apache.flink.configuration.ConfigConstants;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.GlobalConfiguration;
import org.apache.hadoop.fs.*;
import org.apache.flink.runtime.jobgraph.JobStatus;
import org.apache.flink.yarn.AbstractYarnClusterDescriptor;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


import java.io.*;
import java.net.URLDecoder;
import java.util.*;

/**
 * Date: 2019/7/16
 *
 * @author chenyong
 */

public class LauncherMain {
    protected static final Logger logger = LoggerFactory.getLogger(LauncherMain.class);

    /** Relative path (under the Flink install dir) of the Flink configuration directory. */
    private static final String FLINK_CONF = "/conf";

    /** Prefix of checkpoint sub-directories created by Flink ("chk-&lt;checkpoint id&gt;"). */
    private static final String CHK = "chk";

    /**
     * Submits a Flink SQL job to YARN.
     *
     * @param sql           path/content of the SQL job definition (UDF declarations are appended)
     * @param name          job name
     * @param flinkPath     Flink installation directory
     * @param udfJarPath    path of the UDF jar scanned for function definitions
     * @param coreJarPath   path of the core job jar that is actually submitted
     * @param yarnconf      Hadoop/YARN configuration directory
     * @param confProp      URL-encoded JSON string with job properties (parallelism, queue, ...)
     * @param metaTable     meta-table identifier passed through to the option parser
     * @param savePointPath optional savepoint/checkpoint path to restore from; blank = fresh start
     * @return map with keys "appId", "webUrl", "jobId" on success, or "errorMsg" on failure
     * @throws Exception on unrecoverable setup errors (client creation, jar loading, ...)
     */
    public static Map<String, String> submitTask(String sql,
                                                 String name,
                                                 String flinkPath,
                                                 String udfJarPath,
                                                 String coreJarPath,
                                                 String yarnconf,
                                                 String confProp,
                                                 String metaTable,
                                                 String savePointPath) throws Exception {
        sql = sql + ScanUdfUtils.getUdfs(udfJarPath);
        LauncherOptionParser optionParser = new LauncherOptionParser(sql, name, flinkPath, udfJarPath, yarnconf, confProp, metaTable);
        Map<String, String> resultId = new HashMap<>();

        // Collect every argument except the ones the program itself does not need.
        List<String> argList = optionParser.getProgramExeArgList();
        String[] localArgs = argList.toArray(new String[0]);

        // Pre-validate the job locally before touching YARN, so obvious SQL/config
        // mistakes fail fast without allocating cluster resources.
        try {
            Main.perRun(localArgs);
        } catch (Exception e) {
            logger.error("submit fail： ", e);
            resultId.put("errorMsg", e.getMessage());
            return resultId;
        }

        // Package the core job jar for submission.
        File jarFile = new File(coreJarPath);
        PackagedProgram program = new PackagedProgram(jarFile, Lists.newArrayList(), localArgs);

        if (StringUtils.isNotBlank(savePointPath)) {
            logger.info(" submit job by savePoint :{} ", savePointPath);
            // allowNonRestoredState=true: start even if some savepoint state has no matching operator.
            program.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savePointPath, true));
        }

        ClusterClient clusterClient = ClusterClientFactory.createYarnClient(name, flinkPath, yarnconf, confProp, udfJarPath);
        try {
            JobSubmissionResult jobSubmissionResult = clusterClient.run(program, getJobPar(confProp));
            resultId.put("appId", String.valueOf(clusterClient.getClusterId()));
            resultId.put("webUrl", clusterClient.getWebInterfaceURL());
            resultId.put("jobId", jobSubmissionResult.getJobID().toString());
        } catch (Exception e) {
            // Submission failed after the YARN app was created: tear the app down.
            ClusterClientFactory.killApp(yarnconf, String.valueOf(clusterClient.getClusterId()));
            // Fix: e.getCause() may be null (NPE); fall back to the exception's own message.
            Throwable cause = e.getCause() != null ? e.getCause() : e;
            resultId.put("errorMsg", cause.getMessage());
            logger.error("submit fail： ", e);
            return resultId;
        } finally {
            // Fix: always release the client; previously it leaked on the failure path.
            clusterClient.shutdown();
        }
        return resultId;
    }

    /**
     * Cancels the given Flink job and kills its YARN application.
     *
     * @param jobId     Flink job id (hex string)
     * @param appId     YARN application id of the session hosting the job
     * @param flinkPath Flink installation directory (used to locate /conf)
     * @param yarnconf  Hadoop/YARN configuration directory
     * @throws Exception if the job cannot be cancelled; the YARN app is NOT killed in that case
     */
    public static void stop(String jobId,
                            String appId,
                            String flinkPath,
                            String yarnconf
    ) throws Exception {
        String flinkconf = flinkPath + FLINK_CONF;

        ClusterClient clusterClient = ClusterClientFactory.createYarnClient(flinkconf, yarnconf, appId);
        try {
            JobID jobID = parseJobId(jobId);
            clusterClient.cancel(jobID);
        } finally {
            // Fix: shut the client down even when cancel() throws (was leaked before).
            clusterClient.shutdown();
        }
        ClusterClientFactory.killApp(yarnconf, appId);
        logger.info("---cancel--sucesss--");
    }

    /**
     * Triggers a savepoint for the given job and returns its completed path.
     *
     * <p>The savepoint target directory is read from {@code state.savepoints.dir}
     * in the Flink configuration.
     *
     * @return the path of the completed savepoint
     * @throws Exception if triggering the savepoint fails
     */
    public static String savepoint(String jobId,
                                   String appId,
                                   String flinkPath,
                                   String yarnconf
    ) throws Exception {
        String flinkconf = flinkPath + FLINK_CONF;
        String savepointDirectory = GlobalConfiguration.loadConfiguration(flinkconf).getString("state.savepoints.dir", "");
        ClusterClient clusterClient = ClusterClientFactory.createYarnClient(flinkconf, yarnconf, appId);
        JobID jobID = parseJobId(jobId);

        Object savepointCompletePath;
        try {
            savepointCompletePath = clusterClient.triggerSavepoint(jobID, savepointDirectory).get();
        } finally {
            // Fix: shut the client down even when the savepoint fails (was leaked before).
            clusterClient.shutdown();
        }
        return savepointCompletePath.toString();
    }

    /**
     * Decodes the URL-encoded JSON property string into a {@link Properties} object.
     */
    public static Properties getProperties(String confProp) throws Exception {
        confProp = URLDecoder.decode(confProp, Charsets.UTF_8.toString());
        return PluginUtil.jsonStrToObject(confProp, Properties.class);
    }

    /**
     * Resolves the best restore path for a restart: an explicit user-supplied savepoint
     * path wins; otherwise the newest completed checkpoint under the job's checkpoint
     * directory is used.
     *
     * @return the restore path, or {@code null} when checkpointing is disabled or no
     *         usable checkpoint exists
     */
    public static String getSavepointPath(String flinkPath, String confProp, String name, String yarnConfPath, String jobId) throws Exception {
        String flinkconf = flinkPath + FLINK_CONF;
        YarnConfiguration yarnConf = ClusterClientFactory.loadYarnConfiguration(yarnConfPath);
        Properties confProperties = getProperties(confProp);
        // Checkpointing disabled: nothing to restore from.
        if (!StringUtils.isNotBlank(confProperties.getProperty(ConfigConstrant.FLINK_CHECKPOINT_INTERVAL_KEY))) {
            return null;
        }

        // A savepoint path explicitly supplied by the user takes precedence.
        if (StringUtils.isNotBlank(confProperties.getProperty(ConfigConstrant.FLINK_RESTART_SAVEPOINT_PATH))) {
            return confProperties.getProperty(ConfigConstrant.FLINK_RESTART_SAVEPOINT_PATH);
        }

        // Determine the checkpoint root directory for this job: either from the user
        // configuration, or from the cluster-wide default in the Flink configuration.
        String savePointRootDir;
        if (StringUtils.isNotBlank(confProperties.getProperty(ConfigConstrant.CHECKPOINTS_DIRECTORY_KEY))) {
            savePointRootDir = confProperties.getProperty(ConfigConstrant.CHECKPOINTS_DIRECTORY_KEY) + "/" + jobId;
        } else {
            savePointRootDir = GlobalConfiguration.loadConfiguration(flinkconf).getString(ConfigConstrant.CHECKPOINTS_DIRECTORY_KEY, "") + "/" + name + "/" + jobId;
        }

        logger.info(" savePointRootDir: " + savePointRootDir);
        Path path = new Path(savePointRootDir);
        FileSystem hdfs = FileSystem.get(yarnConf);
        if (!hdfs.exists(path)) {
            return null;
        }

        FileStatus[] files = hdfs.listStatus(path);

        // Fix: pick the checkpoint with the highest numeric id. The previous
        // lexicographic compareTo ranked "chk-9" above "chk-10".
        long latestId = -1L;
        String latestCheckPoint = "";
        for (FileStatus file : files) {
            String fileName = file.getPath().getName();
            logger.info(" fileName: " + fileName);
            if (!fileName.contains(CHK)) {
                continue;
            }
            long id = parseCheckpointId(fileName);
            if (id > latestId) {
                latestId = id;
                latestCheckPoint = fileName;
            }
        }

        String absoluteCheckPointPath = savePointRootDir + "/" + latestCheckPoint;

        // A checkpoint is only usable when its _metadata file was fully written.
        if (StringUtils.isNotBlank(latestCheckPoint) && hdfs.exists(new Path(absoluteCheckPointPath + "/_metadata"))) {
            return absoluteCheckPointPath;
        }
        return null;
    }

    /**
     * Extracts the numeric checkpoint id from a directory name like "chk-42".
     *
     * @return the id, or -1 when the name carries no parseable numeric suffix
     */
    private static long parseCheckpointId(String dirName) {
        int dash = dirName.lastIndexOf('-');
        if (dash < 0 || dash == dirName.length() - 1) {
            return -1L;
        }
        try {
            return Long.parseLong(dirName.substring(dash + 1));
        } catch (NumberFormatException e) {
            return -1L;
        }
    }

    /**
     * Restarts a job: resolves a restore point (fresh savepoint when {@code savepoint}
     * is true, newest checkpoint otherwise), submits the job from it, then stops the
     * old instance when the new submission succeeded and the old one is stoppable.
     *
     * @return the submission result of the new job instance (see {@link #submitTask})
     */
    public static Map<String, String> restart(String jobId,
                                              String appId,
                                              String sql,
                                              String name,
                                              String flinkPath,
                                              String udfJarPath,
                                              String coreJarPath,
                                              String yarnconf,
                                              String confProp,
                                              String metaTable,
                                              Boolean savepoint,
                                              String status) throws Exception {
        String savepointPath;
        logger.info(" {} restart.....", name);
        if (savepoint != null && savepoint) {
            savepointPath = savepoint(jobId, appId, flinkPath, yarnconf);
            logger.info(" {} trigger savepoint success ,the path is : {}", name, savepointPath);
        } else {
            savepointPath = getSavepointPath(flinkPath, confProp, name, yarnconf, jobId);
            logger.info(" {} get savepoint success ,the path is : {}", name, savepointPath);
        }

        Map<String, String> resultId = submitTask(sql, name, flinkPath, udfJarPath, coreJarPath, yarnconf, confProp, metaTable, savepointPath);
        // Only kill the old instance after the replacement was submitted successfully.
        if (null == resultId.get("errorMsg") && canStop(status)) {
            try {
                stop(jobId, appId, flinkPath, yarnconf);
            } catch (Exception e) {
                logger.warn("kill name failed ,maybe has been killed");
            }
        }
        return resultId;
    }

    /**
     * Parses a hex job-id string into a {@link JobID}.
     *
     * @throws CliArgsException when the string is missing or malformed
     */
    public static JobID parseJobId(String jobIdString) throws CliArgsException {
        if (jobIdString == null) {
            throw new CliArgsException("Missing JobId");
        }

        final JobID jobId;
        try {
            jobId = JobID.fromHexString(jobIdString);
        } catch (IllegalArgumentException e) {
            throw new CliArgsException(e.getMessage());
        }
        return jobId;
    }

    /**
     * Whether a job in the given YARN/Flink state may still be stopped.
     */
    private static Boolean canStop(String status) {
        if (status == null) {
            return Boolean.FALSE;
        }
        switch (status) {
            case "RUNNING":
            case "ACCEPT":
            case "RESTARTING":
                return Boolean.TRUE;
            default:
                return Boolean.FALSE;
        }
    }

    /**
     * Renders the stack trace of an exception as "\tat ..." lines (CRLF-terminated).
     */
    private static String getStackTrace(Exception e) {
        // Fix: StringBuilder instead of String concatenation in a loop.
        StringBuilder detailMsg = new StringBuilder();
        for (StackTraceElement s : e.getStackTrace()) {
            detailMsg.append("\tat ").append(s).append("\r\n");
        }
        return detailMsg.toString();
    }

    /**
     * Reads the job parallelism from the property JSON.
     *
     * @return the configured parallelism, or 1 when it is missing or "0"
     */
    private static Integer getJobPar(String confProp) throws Exception {
        // Reuse the shared decode+parse helper instead of duplicating it.
        Properties confProperties = getProperties(confProp);
        String parallelism = confProperties.getProperty("parallelism");
        // Fix: guard against a missing key (previously an NPE) and map "0" to the default of 1.
        if (parallelism == null || "0".equals(parallelism)) {
            return 1;
        }
        return Integer.parseInt(parallelism);
    }

    /**
     * Manual end-to-end smoke test: submit, savepoint, resubmit from the savepoint,
     * restart, then stop. Paths below point at a dev environment.
     */
    public static void main(String[] args) throws Exception {
        //test
//        String sql = "/data/flink/flinkstreamsql/risk_control_p1.txt";
//        String flinkPath = "/data/flink/flink-1.9.0";
//        String udfJarPath = "/data/flink/flinkstreamsql/flink-stream-sql-function-jar-with-dependencies.jar";
//        String coreJarPath = "/data/flink/flinkstreamsql/flink-stream-sql-jar-with-dependencies.jar";

        //dev
        String sql = "/home/testhadoop/flinkstreamsql/nodep/risk_control_p1.txt";
        String flinkPath = "/home/testhadoop/flink-1.9.0";
        String udfJarPath = "/home/testhadoop/flinkstreamsql/platform/udf/flink-stream-sql-function-jar-with-dependencies.jar";
        String coreJarPath = "/home/testhadoop/flinkstreamsql/platform/flink-stream-sql-jar-with-dependencies.jar";


        String name = "risk_control_p1_test";
        String yarnconf = "/etc/hadoop/conf";
        String confProp = "{\"parallelism\": 2,\"time.characteristic\": \"EventTime\",\"queue\":\"root.chenyong\"}";
        String metaTable = "1";

        /* Submit the job. */
        Map<String, String> resultId = submitTask(sql, name, flinkPath, udfJarPath, coreJarPath, yarnconf, confProp, metaTable, "");
        String jobId = resultId.get("jobId");
        String appId = resultId.get("appId");
        String webUrl = resultId.get("webUrl");
        System.out.println(" jobid:  " + jobId + ",appid  " + appId + " webUrl :" + webUrl);
        Thread.sleep(30000);

        /* Trigger a savepoint. */
        String savepointPath = savepoint(jobId, appId, flinkPath, yarnconf);
        System.out.println(" savepointpath : " + savepointPath);
        Thread.sleep(10000);

        /* Resubmit from the savepoint. */
        Map<String, String> resultId1 = submitTask(sql, name, flinkPath, udfJarPath, coreJarPath, yarnconf, confProp, metaTable, savepointPath);
        String jobId1 = resultId1.get("jobId");
        String appId1 = resultId1.get("appId");
        // Fix: was reading from the first submission's map (resutlId) by mistake.
        String webUrl1 = resultId1.get("webUrl");
        System.out.println(" jobid:  " + jobId1 + ",appid  " + appId1 + " webUrl :" + webUrl1);
        Thread.sleep(30000);


        /* Restart. */
        Map<String, String> resultId2 = restart(jobId1, appId1, sql, name, flinkPath, udfJarPath, coreJarPath, yarnconf, confProp, metaTable, true, "RUNNING");
        String jobId2 = resultId2.get("jobId");
        String appId2 = resultId2.get("appId");
        // Fix: was reading from the first submission's map (resutlId) by mistake.
        String webUrl2 = resultId2.get("webUrl");

        System.out.println(" jobid:  " + jobId2 + ",appid  " + appId2 + " webUrl :" + webUrl2);
        Thread.sleep(30000);

        /* Stop. */
        stop(jobId2, appId2, flinkPath, yarnconf);
        System.out.println(" stop sucess ");
    }

    /**
     * Renders an exception's full stack trace to a String.
     */
    public static String getExceptionAllinformation(Exception ex) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Fix: try-with-resources instead of manual close with a swallowed exception.
        try (PrintStream pout = new PrintStream(out)) {
            ex.printStackTrace(pout);
        }
        return new String(out.toByteArray());
    }

}
