package com.sh.data.engine.flink.client;

import org.apache.calcite.avatica.util.Casing;
import org.apache.calcite.avatica.util.Quoting;
import org.apache.calcite.config.Lex;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.parser.SqlParseException;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.sql.parser.impl.FlinkSqlParserImpl;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.LocalStreamEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Entry point that reads a Flink SQL script from a file, strips SQL comments, splits the
 * script into individual statements and executes each one on a {@link StreamTableEnvironment}.
 *
 * <p>Lines of the form {@code SET table.sql-dialect=hive|default} are interpreted locally to
 * switch the table environment's SQL dialect instead of being submitted to Flink.
 */
public class FlinkClient {

    private static final Logger LOGGER = LoggerFactory.getLogger(FlinkClient.class);

    /** Marker pair wrapping SQL fragments that must bypass the Calcite-based splitter. */
    private static final String ESCAPE_STR = "_HUFU_FLINK_SQL_ESCAPE_";
    private static final int ESCAPE_STR_LEN = ESCAPE_STR.length();

    public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("sqlFilePath", true, "sqlFilePath");
        options.addOption("addjar", true, "add jar");

        CommandLineParser commandLineParser = new DefaultParser();
        CommandLine commandLine = commandLineParser.parse(options, args);

        String sqlFilePath = commandLine.getOptionValue("sqlFilePath");

        String sql = StringUtils.EMPTY;
        if (StringUtils.isNotBlank(sqlFilePath)) {
            sql = readSqlFile(sqlFilePath);
        }
        LOGGER.info("flink sql:\n {}", sql);

        // Strip SQL comments before splitting.
        // BUGFIX: the block-comment pattern needs (?s) (DOTALL) or '.' will not cross newlines
        // and multi-line /* ... */ comments are left in place. The line-comment pattern now keeps
        // the newline so the text before the comment is not fused with the following line.
        // NOTE(review): these regexes also fire on "--" or "/*" inside string literals; acceptable
        // for trusted job scripts, but worth keeping in mind.
        sql =
            sql.replaceAll("--[^\r\n]*", "") // single-line comments (newline preserved)
                .replaceAll("(?s)/\\*.*?\\*/", "") // multi-line comments
                .trim();
        List<String> sqlList = splitSqlJar(sql);

        StreamExecutionEnvironment streamExecutionEnvironment = getStreamExeEnv();

        EnvironmentSettings environmentSettings =
            EnvironmentSettings.newInstance().inStreamingMode().build();
        StreamTableEnvironment streamTableEnvironment =
            StreamTableEnvironment.create(streamExecutionEnvironment, environmentSettings);
        configTableEnvironment(streamTableEnvironment);

        if (CollectionUtils.isNotEmpty(sqlList)) {
            for (String _sql : sqlList) {
                LOGGER.info("flink sql split :\n {}", _sql);

                // Dialect switches are handled locally, never submitted to Flink.
                if (_sql.startsWith("SET table.sql-dialect=hive")) {
                    streamTableEnvironment.getConfig().setSqlDialect(SqlDialect.HIVE);
                    continue;
                }

                if (_sql.startsWith("SET table.sql-dialect=default")) {
                    streamTableEnvironment.getConfig().setSqlDialect(SqlDialect.DEFAULT);
                    continue;
                }

                streamTableEnvironment.executeSql(_sql);
            }
        }
    }

    /**
     * Splits a multi-statement SQL string into single statements using the Flink/Calcite parser,
     * while passing fragments wrapped in {@link #ESCAPE_STR} markers through verbatim (minus a
     * trailing {@code ;}, which {@code executeSql} does not accept).
     *
     * @param sqlStr the raw SQL script; may be blank
     * @param maybeHaveEscapeContent whether the string may still contain escape-marker pairs
     * @return the individual statements, in order; empty list for blank input
     * @throws SqlParseException if the non-escaped portion is not valid Flink SQL
     */
    public static List<String> splitSql(String sqlStr, boolean maybeHaveEscapeContent)
        throws SqlParseException {
        if (StringUtils.isBlank(sqlStr)) {
            return Collections.emptyList();
        }

        List<String> result = new ArrayList<>();

        if (maybeHaveEscapeContent) {
            int escapeStrIndexLeft = sqlStr.indexOf(ESCAPE_STR);
            if (escapeStrIndexLeft != -1) {
                int escapeStrIndexRight = sqlStr.indexOf(ESCAPE_STR, escapeStrIndexLeft + ESCAPE_STR_LEN);
                if (escapeStrIndexRight != -1) { // a complete, matched marker pair exists
                    String escapeContentBefore = sqlStr.substring(0, escapeStrIndexLeft);
                    result.addAll(splitSql(escapeContentBefore, false));

                    String escapeContent =
                        sqlStr.substring(escapeStrIndexLeft + ESCAPE_STR_LEN, escapeStrIndexRight);
                    // executeSql() cannot handle a trailing ';' — strip it if present.
                    if (escapeContent.endsWith(";")) {
                        escapeContent = escapeContent.substring(0, escapeContent.length() - 1);
                    }
                    // BUGFIX: the fragment was previously dropped entirely unless it ended
                    // with ';' — it must be kept either way.
                    if (StringUtils.isNotBlank(escapeContent)) {
                        result.add(escapeContent);
                    }

                    String escapeContentAfter = sqlStr.substring(escapeStrIndexRight + ESCAPE_STR_LEN);
                    result.addAll(splitSql(escapeContentAfter, true));

                    return result;
                }
            }
        }

        // From here on the string is "clean": it contains no escape markers.
        SqlParser sqlParser = createSqlParser(sqlStr);
        SqlNodeList sqlNodeList = sqlParser.parseStmtList();
        if (sqlNodeList == null || CollectionUtils.isEmpty(sqlNodeList.getList())) {
            return Collections.emptyList();
        }

        result.addAll(
            sqlNodeList.getList().stream().map(SqlNode::toString).collect(Collectors.toList()));

        return result;
    }

    /**
     * Splits a SQL script on {@code ;} boundaries that are immediately followed by a
     * {@code CREATE} or {@code INSERT} keyword, assuming every statement ends with {@code ;}.
     *
     * @param sqlStr the raw SQL script; may be {@code null} or blank
     * @return trimmed statements without a trailing {@code ;}; empty list for blank input
     */
    public static List<String> splitSqlJar(String sqlStr) {
        if (sqlStr == null || sqlStr.trim().isEmpty()) {
            return Collections.emptyList();
        }

        // Split after a ';' only when the next token is CREATE/INSERT, so that ';' characters
        // inside other clauses do not break a statement apart.
        String[] sqlStatements = sqlStr.split("(?<=;)(?=\\s*(?i:(create|insert)))", -1);
        List<String> result = new ArrayList<>();

        for (String statement : sqlStatements) {
            String trimmedStatement = statement.trim();
            // Consistent with splitSql(): executeSql() cannot handle a trailing ';'.
            if (trimmedStatement.endsWith(";")) {
                trimmedStatement = trimmedStatement.substring(0, trimmedStatement.length() - 1).trim();
            }
            if (!trimmedStatement.isEmpty()) {
                result.add(trimmedStatement);
            }
        }

        return result;
    }

    /** Builds a Calcite parser configured with Flink's SQL grammar and MySQL-style lexing. */
    private static SqlParser createSqlParser(String sqlStr) {
        return SqlParser.create(
            sqlStr,
            SqlParser.config()
                // use flink parser to parse create table sql
                .withParserFactory(FlinkSqlParserImpl.FACTORY)
                .withLex(Lex.MYSQL)
                .withQuoting(Quoting.BACK_TICK)
                .withUnquotedCasing(Casing.UNCHANGED)
                .withQuotedCasing(Casing.UNCHANGED));
    }

    /**
     * Creates the stream execution environment, applying parallelism, buffer-timeout and
     * checkpoint settings taken from system properties.
     */
    private static StreamExecutionEnvironment getStreamExeEnv() {
        // getExecutionEnvironment() is static and declared on StreamExecutionEnvironment;
        // calling it through the declaring class avoids the misleading impression that a
        // LocalStreamEnvironment is being forced.
        StreamExecutionEnvironment streamExecutionEnvironment =
            StreamExecutionEnvironment.getExecutionEnvironment();
        streamExecutionEnvironment.setParallelism(getParallelismDefault());

        int maxParallelism = getMaxParallelism();
        if (maxParallelism > 0) {
            streamExecutionEnvironment.setMaxParallelism(maxParallelism);
        }

        int bufferTimeoutMillis = getBufferTimeoutMillis();
        if (bufferTimeoutMillis >= 0) {
            streamExecutionEnvironment.setBufferTimeout(bufferTimeoutMillis);
        }

        configCheckPoint(streamExecutionEnvironment);
        return streamExecutionEnvironment;
    }

    /** Applies the job's checkpointing policy to the given environment. */
    private static void configCheckPoint(StreamExecutionEnvironment streamExecEnv) {
        // Trigger a checkpoint every 60 s (checkpoint interval).
        streamExecEnv.enableCheckpointing(60000);

        // Use exactly-once mode (this is the default).
        streamExecEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Keep at least 30 s between the end of one checkpoint and the start of the next.
        streamExecEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(30000);

        // A checkpoint must complete within 10 minutes or it is discarded (checkpoint timeout).
        streamExecEnv.getCheckpointConfig().setCheckpointTimeout(600000);

        // Allow at most 3 checkpoints to be in flight at the same time.
        streamExecEnv.getCheckpointConfig().setMaxConcurrentCheckpoints(3);

        // Retain checkpoint data after the job is cancelled so the job can later be restored
        // from a specific checkpoint.
        streamExecEnv
            .getCheckpointConfig()
            .enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    }

    /** Applies table-API level options: drop rows violating NOT NULL, allow dynamic table options. */
    private static void configTableEnvironment(StreamTableEnvironment streamTableEnvironment) {
        Configuration configuration = streamTableEnvironment.getConfig().getConfiguration();
        configuration.setString(ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(), "drop");
        configuration.setString("table.dynamic-table-options.enabled", "true");
    }

    /** Reads the whole SQL script file as UTF-8 text. */
    private static String readSqlFile(String sqlFilePath) throws IOException {
        return FileUtils.readFileToString(new File(sqlFilePath), StandardCharsets.UTF_8);
    }

    /** Flink param: default operator parallelism ({@code parallelism.default}, default 1). */
    public static int getParallelismDefault() {
        return NumberUtils.toInt(System.getProperty("parallelism.default"), 1);
    }

    /** Flink param: max parallelism ({@code pipeline.max-parallelism}, -1 = unset). */
    public static int getMaxParallelism() {
        return NumberUtils.toInt(System.getProperty("pipeline.max-parallelism"), -1);
    }

    /** Custom param: network buffer timeout in ms ({@code env.buffer.timeout.millis}, -1 = unset). */
    public static int getBufferTimeoutMillis() {
        return NumberUtils.toInt(System.getProperty("env.buffer.timeout.millis"), -1);
    }
}
