/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */



package com.sui.bigdata.flink.sql.core;

import com.alibaba.fastjson.JSON;
import com.sui.bigdata.flink.sql.core.enums.ECacheType;
import com.sui.bigdata.flink.sql.core.exec.FlinkSqlExec;
import com.sui.bigdata.flink.sql.core.parser.*;
import com.sui.bigdata.flink.sql.core.side.SideSqlExec;
import com.sui.bigdata.flink.sql.core.side.SideTableInfo;
import com.sui.bigdata.flink.sql.core.sink.StreamSinkFactory;
import com.sui.bigdata.flink.sql.core.source.StreamSourceFactory;
import com.sui.bigdata.flink.sql.core.table.SourceTableInfo;
import com.sui.bigdata.flink.sql.core.table.TableInfo;
import com.sui.bigdata.flink.sql.core.table.TargetTableInfo;
import com.sui.bigdata.flink.sql.core.util.DealFailDataUtil;
import com.sui.bigdata.flink.sql.core.util.FlinkUtil;
import com.sui.bigdata.flink.sql.core.util.PluginUtil;
import com.sui.bigdata.flink.sql.core.watermarker.WaterMarkerAssigner;
import org.apache.calcite.config.Lex;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.io.Charsets;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.calcite.shaded.com.google.common.base.Preconditions;
import org.apache.flink.calcite.shaded.com.google.common.collect.Maps;
import org.apache.flink.calcite.shaded.com.google.common.collect.Sets;
import org.apache.flink.client.program.ContextEnvironment;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamContextEnvironment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.config.ExecutionConfigOptions;
import org.apache.flink.table.api.config.OptimizerConfigOptions;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.sinks.TableSink;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLDecoder;
import java.util.*;
import java.util.concurrent.TimeUnit;

/**
 * Date: 2019/07/10
 * @author yong_chen@sui.com
 */

public class Main {

    /** Name pattern for classpath entries registered as distributed-cache files. */
    private static final String CLASS_FILE_NAME_FMT = "class_path_%d";

    private static final Logger logger = LoggerFactory.getLogger(Main.class);

    // Calcite SQL parser configuration using MySQL lexical conventions.
    private static org.apache.calcite.sql.parser.SqlParser.Config config = org.apache.calcite.sql.parser.SqlParser
            .configBuilder()
            .setLex(Lex.MYSQL)
            .build();

    /**
     * Entry point: parses the command line, builds the streaming job topology and
     * executes it under the job name supplied via the {@code name} option.
     *
     * @param args raw command-line arguments (see {@link #getCommandLine(String[])})
     * @throws Exception if argument parsing, job construction or execution fails
     */
    public static void main(String[] args) throws Exception {
        CommandLine commandLine = getCommandLine(args);
        String name = commandLine.getOptionValue("name");
        run(commandLine).execute(name);
    }

    /**
     * Builds the job topology without executing it (dry-run / validation path).
     *
     * @param args raw command-line arguments (see {@link #getCommandLine(String[])})
     * @throws Exception if argument parsing or job construction fails
     */
    public static void perRun(String[] args) throws Exception {
        CommandLine commandLine = getCommandLine(args);
        run(commandLine);
    }

    /**
     * Declares the supported command-line options and parses {@code args} against them.
     *
     * @param args raw command-line arguments
     * @return the parsed command line
     * @throws Exception if the arguments cannot be parsed
     */
    public static CommandLine getCommandLine(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("sql", true, "sql config");
        options.addOption("name", true, "job name");
        options.addOption("confProp", true, "env properties");
        options.addOption("mode", true, "deploy mode");
        options.addOption("savePointPath", true, "Savepoint restore path");
        options.addOption("allowNonRestoredState", true, "Flag indicating whether non restored state is allowed if the savepoint");
        options.addOption("port", true, "local execute port");
        options.addOption("metaTable", true, "metaTable info");
        options.addOption("udfJarPath", true, "udf jar path");
        CommandLineParser parser = new DefaultParser();
        return parser.parse(options, args);
    }

    /**
     * Builds the complete streaming job from the parsed command line: creates the
     * execution and table environments, registers UDFs, sources, sinks and side
     * tables, then translates every tmp/insert/select statement of the SQL tree.
     *
     * @param cl parsed command line (see {@link #getCommandLine(String[])})
     * @return the configured {@link StreamExecutionEnvironment}, ready to execute
     * @throws Exception if any stage of job construction fails
     */
    public static StreamExecutionEnvironment run(CommandLine cl) throws Exception {
        String sql = cl.getOptionValue("sql");
        String name = cl.getOptionValue("name");
        String deployMode = cl.getOptionValue("mode");
        String confProp = cl.getOptionValue("confProp");
        String metaTable = cl.getOptionValue("metaTable");
        String udfJarPath = cl.getOptionValue("udfJarPath");

        Preconditions.checkNotNull(sql, "parameters of sql is required");
        // Fail fast with a clear message instead of an anonymous NPE inside URLDecoder below.
        Preconditions.checkNotNull(confProp, "parameters of confProp is required");

        // The sql option is either a path to a .txt/.sql file or a URL-encoded statement string.
        if (sql.endsWith("txt") || sql.endsWith("sql")) {
            sql = PluginUtil.readExecSql(sql);
        } else {
            sql = URLDecoder.decode(sql, Charsets.UTF_8.name());
        }

        DealFailDataUtil.jobName = name;
        confProp = URLDecoder.decode(confProp, Charsets.UTF_8.name());
        Properties confProperties = PluginUtil.jsonStrToObject(confProp, Properties.class);

        StreamExecutionEnvironment env = getStreamExeEnv(confProperties, deployMode);
        EnvironmentSettings environmentSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, environmentSettings);

        FlinkUtil.setTableEnvTTL(confProperties, tableEnv);
        FlinkUtil.setBlinkOptimizer(confProperties, tableEnv);
        SqlTree sqlTree = SqlParser.parseSql(sql, JSON.parseArray(metaTable, CreateTableParser.SqlParserResult.class));

        Map<String, SideTableInfo> sideTableMap = Maps.newHashMap();
        Map<String, Table> registerTableCache = Maps.newHashMap();

        // Register user-defined functions declared in the SQL tree.
        registerUDF(sqlTree, tableEnv, udfJarPath);
        // Register source/sink/side table schemas.
        registerTable(sqlTree, env, tableEnv, sideTableMap, registerTableCache, FlinkUtil.getEnvParallelism(confProperties));

        SideSqlExec sideSqlExec = new SideSqlExec();

        // Temporary (view-like) tables must be registered before the exec statements that use them.
        for (CreateTmpTableParser.SqlParserResult result : sqlTree.getTmpSqlList()) {
            sideSqlExec.registerTmpTable(result, sideTableMap, tableEnv, registerTableCache, FlinkUtil.getEnvParallelism(confProperties));
        }

        for (InsertSqlParser.SqlParseResult result : sqlTree.getExecSqlList()) {
            if (logger.isInfoEnabled()) {
                logger.info("exe-sql:\n{}", result.getExecSql());
            }

            if (result.getExecSql().toLowerCase().startsWith("insert")) {
                FlinkSqlExec.insertSqlExe(tableEnv, sqlTree, sideTableMap, registerTableCache, sideSqlExec, result, FlinkUtil.getEnvParallelism(confProperties));
            }

            if (result.getExecSql().toLowerCase().startsWith("select")) {
                FlinkSqlExec.selectSqlExe(tableEnv, confProperties, name, sideTableMap, registerTableCache, sideSqlExec, result, FlinkUtil.getEnvParallelism(confProperties));
            }
        }

        // Local mode: reset the classpath list and honour an explicit port if one was given.
        if (env instanceof MyLocalStreamEnvironment) {
            List<URL> urlList = new ArrayList<>();
            ((MyLocalStreamEnvironment) env).setClasspaths(urlList);
            if (cl.getOptionValue("port") != null) {
                // parseInt: the primitive is all that is needed; avoids Integer boxing.
                ((MyLocalStreamEnvironment) env).setPort(Integer.parseInt(cl.getOptionValue("port")));
            }
        }

        return env;
    }

    /**
     * Adds classpath entries to the remote execution context so plugin jars referenced
     * by the job are resolvable on the cluster without being shipped from the client.
     * Uses reflection because {@link StreamContextEnvironment} does not expose its
     * wrapped {@link ContextEnvironment}. No-op for non-remote environments.
     *
     * @param env          the execution environment (only {@link StreamContextEnvironment} is handled)
     * @param classPathSet URLs to append to the context classpath
     * @throws NoSuchFieldException   if the internal {@code ctx} field is missing (Flink version change)
     * @throws IllegalAccessException if the field cannot be made accessible
     */
    private static void addEnvClassPath(StreamExecutionEnvironment env, Set<URL> classPathSet) throws NoSuchFieldException, IllegalAccessException {
        if (env instanceof StreamContextEnvironment) {
            Field field = env.getClass().getDeclaredField("ctx");
            field.setAccessible(true);
            ContextEnvironment contextEnvironment = (ContextEnvironment) field.get(env);
            for (URL url : classPathSet) {
                contextEnvironment.getClasspaths().add(url);
            }
        }
    }

    /**
     * Registers every UDF declared in the SQL tree with the table environment.
     * Registration is best-effort: a failure for one function is logged and the
     * remaining functions are still registered.
     *
     * @param sqlTree    parsed SQL tree containing the function declarations
     * @param tableEnv   target table environment
     * @param udfJarPath path of the jar containing the UDF classes; currently unused —
     *                   UDF classes are expected to already be on the job classpath
     * @throws Exception kept for interface stability; individual registration failures are caught
     */
    private static void registerUDF(SqlTree sqlTree, StreamTableEnvironment tableEnv, String udfJarPath) throws Exception {
        List<CreateFuncParser.SqlParserResult> funcList = sqlTree.getFunctionList();
        for (CreateFuncParser.SqlParserResult funcInfo : funcList) {
            try {
                FlinkUtil.registerUDF(funcInfo.getType(), funcInfo.getClassName(), funcInfo.getName(),
                        tableEnv);
            } catch (Exception e) {
                // Best-effort by design; log at WARN with the full stack trace
                // (previously INFO with the stack trace lost via string concatenation).
                logger.warn(" register {} fail", funcInfo.getName(), e);
            }
        }
    }


    /**
     * Registers every table of the SQL tree with the environments:
     * sources become registered tables (with watermark/proctime fields appended),
     * targets become registered table sinks, and side tables are collected into
     * {@code sideTableMap} for the join executor.
     *
     * @param sqlTree            parsed SQL tree
     * @param env                stream execution environment
     * @param tableEnv           table environment
     * @param sideTableMap       out-param: side (dimension) tables keyed by name
     * @param registerTableCache out-param: registered source tables keyed by name
     * @param envParallelism     default parallelism applied to created sources
     * @throws Exception if a source/sink cannot be created or a table type is unknown
     */
    private static void registerTable(SqlTree sqlTree, StreamExecutionEnvironment env, StreamTableEnvironment tableEnv,
                                      Map<String, SideTableInfo> sideTableMap, Map<String, Table> registerTableCache, int envParallelism) throws Exception {
        // NOTE(review): classPathSet is never populated in this method, so the
        // addEnvClassPath call and the cached-file loop below are currently no-ops.
        // Confirm whether plugin jar URLs were meant to be collected per table.
        Set<URL> classPathSet = Sets.newHashSet();
        WaterMarkerAssigner waterMarkerAssigner = new WaterMarkerAssigner();
        for (TableInfo tableInfo : sqlTree.getTableInfoMap().values()) {

            if (tableInfo instanceof SourceTableInfo) {

                SourceTableInfo sourceTableInfo = (SourceTableInfo) tableInfo;
                Table table = StreamSourceFactory.getStreamSource(sourceTableInfo, env, tableEnv, envParallelism);
                tableEnv.registerTable(sourceTableInfo.getAdaptName(), table);
                // Note: parameter conversion functions cannot be used inside aggregate functions.
                // Functions arranged in the CREATE TABLE statement only need the adaptation sql.
                String adaptSql = sourceTableInfo.getAdaptSelectSql();
                Table adaptTable = adaptSql == null ? table : tableEnv.sqlQuery(adaptSql);

                // TODO: with the new DataType type system, find the proper API to wrap
                // the schema into a RowTypeInfo.
                RowTypeInfo typeInfo = new RowTypeInfo(adaptTable.getSchema().getFieldTypes(), adaptTable.getSchema().getFieldNames());

                DataStream adaptStream = tableEnv.toAppendStream(adaptTable, typeInfo);
                String fields = String.join(",", typeInfo.getFieldNames());

                // Append event-time (rowtime) or processing-time (proctime) attribute to the schema.
                if (waterMarkerAssigner.checkNeedAssignWaterMarker(sourceTableInfo)) {
                    adaptStream = waterMarkerAssigner.assignWaterMarker(adaptStream, typeInfo, sourceTableInfo);
                    fields += ".rowtime";
                } else {
                    fields += ",proctime.proctime";
                }

                Table regTable = tableEnv.fromDataStream(adaptStream, fields);
                tableEnv.registerTable(tableInfo.getName(), regTable);
                registerTableCache.put(tableInfo.getName(), regTable);
            } else if (tableInfo instanceof TargetTableInfo) {

                TableSink tableSink = StreamSinkFactory.getTableSink((TargetTableInfo) tableInfo);
                TypeInformation[] flinkTypes = FlinkUtil.transformTypes(tableInfo.getFieldClasses());
                tableEnv.registerTableSink(tableInfo.getName(), tableInfo.getFields(), flinkTypes, tableSink);
            } else if (tableInfo instanceof SideTableInfo) {

                // Side tables are resolved lazily by SideSqlExec; only collect them here.
                sideTableMap.put(tableInfo.getName(), (SideTableInfo) tableInfo);
            } else {
                throw new RuntimeException("not support table type:" + tableInfo.getType());
            }
        }

        // Load the plugin jars corresponding to the tables into the env classpath.
        addEnvClassPath(env, classPathSet);
        int i = 0;
        for (URL url : classPathSet) {
            String classFileName = String.format(CLASS_FILE_NAME_FMT, i);
            env.registerCachedFile(url.getPath(), classFileName, true);
            i++;
        }
    }

    /**
     * Creates and configures the execution environment: local vs. remote mode,
     * parallelism, global job parameters, buffer timeout, failure-rate restart
     * strategy, time characteristic, checkpointing and object reuse.
     *
     * @param confProperties job configuration properties
     * @param deployMode     deploy mode; {@code "local"} selects {@link MyLocalStreamEnvironment}
     * @return the configured environment
     * @throws IOException           propagated from checkpoint configuration
     * @throws NoSuchMethodException if the internal Configuration setter is missing (Flink version change)
     */
    private static StreamExecutionEnvironment getStreamExeEnv(Properties confProperties, String deployMode) throws IOException, NoSuchMethodException {
        StreamExecutionEnvironment env = !ClusterMode.local.name().equals(deployMode) ?
                StreamExecutionEnvironment.getExecutionEnvironment() :
                new MyLocalStreamEnvironment();
        if (FlinkUtil.getEnvParallelism(confProperties) > 0) {
            env.setParallelism(FlinkUtil.getEnvParallelism(confProperties));
        }

        // Copy all confProperties into the job's global parameters. setValueInternal is used
        // via reflection because Configuration has no public untyped setter.
        Configuration globalJobParameters = new Configuration();
        Method method = Configuration.class.getDeclaredMethod("setValueInternal", String.class, Object.class);
        method.setAccessible(true);

        confProperties.forEach((key, val) -> {
            try {
                method.invoke(globalJobParameters, key, val);
            } catch (IllegalAccessException | InvocationTargetException e) {
                // Best-effort per property; log instead of printStackTrace so the failure
                // is visible in the job logs.
                logger.error("failed to set global job parameter {}", key, e);
            }
        });

        ExecutionConfig exeConfig = env.getConfig();
        if (exeConfig.getGlobalJobParameters() == null) {
            exeConfig.setGlobalJobParameters(globalJobParameters);
        } else if (exeConfig.getGlobalJobParameters() instanceof Configuration) {
            ((Configuration) exeConfig.getGlobalJobParameters()).addAll(globalJobParameters);
        }

        if (FlinkUtil.getMaxEnvParallelism(confProperties) > 0) {
            env.setMaxParallelism(FlinkUtil.getMaxEnvParallelism(confProperties));
        }

        if (FlinkUtil.getBufferTimeoutMillis(confProperties) > 0) {
            env.setBufferTimeout(FlinkUtil.getBufferTimeoutMillis(confProperties));
        }

        // Failure-rate restart: at most N failures per interval (minutes), with a fixed delay (seconds).
        env.setRestartStrategy(RestartStrategies.failureRateRestart(
                FlinkUtil.getmaxFailuresRestartPerInterval(confProperties),
                Time.of(FlinkUtil.getfailuresRestartRateInterval(confProperties), TimeUnit.MINUTES),
                Time.of(FlinkUtil.getFailuresRestartDelay(confProperties), TimeUnit.SECONDS)
        ));

        FlinkUtil.setStreamTimeCharacteristic(env, confProperties);
        FlinkUtil.openCheckpoint(env, confProperties);

        env.getConfig().enableObjectReuse();

        return env;
    }
}
