package com.flink.streaming.core;


import com.flink.streaming.common.constant.SystemConstant;
import com.flink.streaming.common.enums.JobTypeEnum;
import com.flink.streaming.common.model.SqlCommandCall;
import com.flink.streaming.common.sql.SqlFileParser;
import com.flink.streaming.core.checkpoint.CheckPointParams;
import com.flink.streaming.core.checkpoint.FsCheckPoint;
import com.flink.streaming.core.execute.ExecuteSql;
import com.flink.streaming.core.model.JobRunParam;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.calcite.shaded.com.google.common.base.Preconditions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.StatementSet;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

/**
 * 1、平台提交任务，是使用的 Process pcs = Runtime.getRuntime().exec(command); 来执行
 *
 * 2、command 类似于，下面这个命令是 平台生成的 使用 yarn 提交 批任务
 *
 * /Users/gump/dreamware/flink-1.13.1/bin/flink run -yjm 1024m -ytm 1024m -p 1 -yqu default  -ynm flink@my_batch_job  -yd -m yarn-cluster  -c com.flink.streaming.core.JobApplication /Users/gump/study/source/github/flink-streaming-platform-web/lib/flink-streaming-core-1.3.0.RELEASE.jar -sql /Users/gump/study/source/github/flink-streaming-platform-web/sql/job_sql_3.sql  -type 2
 *
 * 3、大家可以去分析这个命令，其实就是要知道 Flink 的客户端在哪个目录 ，提交的jar 就是导入到lib下的 flink-streaming-core-1.3.0.RELEASE.jar
 *
 * 4、yarn 的url 貌似只是 日志那里有使用
 *
 * 5、com.flink.streaming.core.JobApplication 这个就是真正运行的JOB,在IDEA 里面也是可以执行的，配置一下参数就可以了，相信大家可以举一反三的
 *    我debug时用的参数参考： -sql /Users/gump/study/source/github/flink-streaming-platform-web/sql/job_sql_1.sql  -type 0
 *    这里有个细节：core pom中的Flink 依赖包是 provided 的，本地要debug job时可以 注释这个scope再刷新Maven
 *
 * 6、我们在任务中配置的SQL，会生成在项目的 /sql 目录下 ，也就是上面命令的 -sql 后的路径 -type 是告诉任务是流任务还是 批任务
 */
public class JobApplication {

    private static final Logger log = LoggerFactory.getLogger(JobApplication.class);

    /**
     * Entry point invoked by {@code flink run}. Parses the CLI arguments
     * ({@code -sql}, {@code -type}, checkpoint options), reads the SQL file,
     * builds the appropriate {@link TableEnvironment} (batch vs. streaming)
     * and submits all parsed statements as a single {@link StatementSet} job.
     */
    public static void main(String[] args) {

        try {
            Arrays.stream(args).forEach(arg -> log.info("{}", arg));

            JobRunParam jobRunParam = buildParam(args);

            List<String> sql = Files.readAllLines(Paths.get(jobRunParam.getSqlPath()));

            List<SqlCommandCall> sqlCommandCallList = SqlFileParser.fileToSql(sql);

            EnvironmentSettings settings;
            TableEnvironment tEnv;

            // Enum equals(null) is simply false, so no separate null-check is needed.
            if (JobTypeEnum.SQL_BATCH.equals(jobRunParam.getJobTypeEnum())) {
                log.info("[SQL_BATCH]本次任务是批任务");
                // Batch mode: plain TableEnvironment, no StreamExecutionEnvironment
                // and therefore no checkpoint configuration.
                settings = EnvironmentSettings.newInstance()
                        .useBlinkPlanner()
                        .inBatchMode()
                        .build();
                tEnv = TableEnvironment.create(settings);
            } else {
                log.info("[SQL_STREAMING]本次任务是流任务");
                // Default to streaming mode for backward compatibility with
                // earlier platform versions that had no -type argument.
                StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

                settings = EnvironmentSettings.newInstance()
                        .useBlinkPlanner()
                        .inStreamingMode()
                        .build();
                tEnv = StreamTableEnvironment.create(env, settings);
                // Apply the checkpoint settings (interval, backend path, ...) that
                // were parsed from the CLI parameters.
                FsCheckPoint.setCheckpoint(env, jobRunParam.getCheckPointParam());
            }

            StatementSet statementSet = tEnv.createStatementSet();

            ExecuteSql.exeSql(sqlCommandCallList, tEnv, statementSet);

            TableResult tableResult = statementSet.execute();

            // BUGFIX: Optional.get() throws NoSuchElementException when the job
            // client is absent, so the original "get() == null" guards could never
            // fire (Optional.get() never returns null). Traverse the Optional
            // instead so the intended RuntimeException is actually thrown.
            JobID jobID = tableResult.getJobClient()
                    .map(jobClient -> jobClient.getJobID())
                    .orElseThrow(() -> new RuntimeException("任务运行失败 没有获取到JobID"));

            // The web platform scrapes stdout for this keyword to learn the JobID.
            System.out.println(SystemConstant.QUERY_JOBID_KEY_WORD + jobID);

            log.info(SystemConstant.QUERY_JOBID_KEY_WORD + "{}", jobID);

        } catch (Exception e) {
            System.err.println("任务执行失败:" + e.getMessage());
            log.error("任务执行失败：", e);
        }

    }

    /**
     * Builds a {@link JobRunParam} from the raw command-line arguments.
     *
     * @param args raw CLI args; {@code -sql <path>} is mandatory, {@code -type}
     *             and checkpoint parameters are optional
     * @return the populated job parameters
     * @throws Exception if checkpoint parameter parsing fails
     */
    private static JobRunParam buildParam(String[] args) throws Exception {
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        String sqlPath = parameterTool.get("sql");
        Preconditions.checkNotNull(sqlPath, "-sql参数 不能为空");
        JobRunParam jobRunParam = new JobRunParam();
        jobRunParam.setSqlPath(sqlPath);
        jobRunParam.setCheckPointParam(CheckPointParams.buildCheckPointParam(parameterTool));
        String type = parameterTool.get("type");
        if (StringUtils.isNotEmpty(type)) {
            // parseInt avoids the needless Integer boxing of Integer.valueOf.
            jobRunParam.setJobTypeEnum(JobTypeEnum.getJobTypeEnum(Integer.parseInt(type)));
        }
        return jobRunParam;
    }

}
