package com.we.risk.registerorigin;

import com.alibaba.fastjson.JSONObject;
import com.we.flink.utils.WeKafkaPropertyReader;
import com.we.risk.registerorigin.table.*;
import com.we.risk.registerorigin.tag.TagUserAccount;
import com.we.risk.registerorigin.tag.TagUserSource;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import java.util.Properties;
import java.util.function.Supplier;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DimRiskRegOriginLabel {
    public static final String RELEASEPROP = "risk/registerorigin/prod.properties";
    public static final String KEY_BY = "uid";
    public static Logger LOG = LoggerFactory.getLogger(DimRiskRegOriginLabel.class);

    public static void main(String[] args) {
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        if (parameterTool.getNumberOfParameters() == 0) {
            supportedTables();
            return;
        }

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment envTable = StreamTableEnvironment.create(env);
        WeKafkaPropertyReader propReader = WeKafkaPropertyReader.init(RELEASEPROP);
        try {
            String kfkBootStrapServer = propReader.getTmpKfkBootStrapServer();
            String kfkTopic = propReader.getTmpKfkTopic();
            int kfkPartitions = propReader.getTmpKfkPartitions();

            env.setStateBackend(new RocksDBStateBackend(propReader.getRocksDBBackendUrl()));
            TableConfig tableConf = envTable.getConfig();
            Configuration configuration = new Configuration();
            configuration.setString("table.exec.sink.not-null-enforcer", "drop");
            tableConf.addConfiguration(configuration);
            /** checkpoint configure */
            CheckpointConfig ckConf = env.getCheckpointConfig();
            ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            ckConf.setCheckpointInterval(10 * 60 * 1000); // ms
            ckConf.setCheckpointTimeout(60 * 60 * 1000);
            ckConf.setMaxConcurrentCheckpoints(1);
            ckConf.setMinPauseBetweenCheckpoints(500);
            ckConf.enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
            ckConf.enableUnalignedCheckpoints();

            /** sink kafka property */
            Properties sinkProp = new Properties();
            sinkProp.setProperty("bootstrap.servers", kfkBootStrapServer);
            /** hive catlog configure */
            String hiveCatlogName = propReader.getHiveCatlogName();
            String hiveDBName = propReader.getHiveDBName();
            String hiveConfDir = propReader.getHiveConfDir();

            HiveCatalog hiveCatalog = new HiveCatalog(hiveCatlogName, hiveDBName, hiveConfDir);
            envTable.registerCatalog("hive", hiveCatalog);
            envTable.useCatalog("hive");

            /** sink to Kafka */
            FlinkKafkaProducer<String> userlabelKafkaProducer =
                    new FlinkKafkaProducer<String>(
                            kfkTopic,
                            new WeKafkaKeyedSerializationSchema(),
                            sinkProp,
                            java.util.Optional.of(new WeKafkaCustomPartitioner()));

            if (parameterTool.has("mkpassbackrcdpt22")
                    || parameterTool.has("part1")
                    || parameterTool.has("all")) {
                SingleOutputStreamOperator<String> mkPassBackRecordPart22FlatMapStream =
                        TableMkPassBackRecordPart22.getFlatMapStream(envTable, env);
                mkPassBackRecordPart22FlatMapStream
                        .addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }

            if (parameterTool.has("mkpassbackrcdpt27")
                    || parameterTool.has("part2")
                    || parameterTool.has("all")) {
                SingleOutputStreamOperator<String> mkPassBackRecordPart27FlatMapStream =
                        TableMkPassBackRecordPart27.getFlatMapStream(envTable, env);
                mkPassBackRecordPart27FlatMapStream
                        .addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }

            if (parameterTool.has("mkpassbackrcdpt28")
                    || parameterTool.has("part1")
                    || parameterTool.has("all")) {
                SingleOutputStreamOperator<String> mkPassBackRecordPart28FlatMapStream =
                        TableMkPassBackRecordPart28.getFlatMapStream(envTable, env);
                mkPassBackRecordPart28FlatMapStream
                        .addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }

            if (parameterTool.has("mkpassbackrcdpt29")
                    || parameterTool.has("part1")
                    || parameterTool.has("all")) {
                SingleOutputStreamOperator<String> mkPassBackRecordPart29FlatMapStream =
                        TableMkPassBackRecordPart29.getFlatMapStream(envTable, env);
                mkPassBackRecordPart29FlatMapStream
                        .addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }

            if (parameterTool.has("userSource")
                    || parameterTool.has("part1")
                    || parameterTool.has("all")) {
                SingleOutputStreamOperator<String> userSourceFlatMapStream =
                        TagUserSource.getFlatMapStream(envTable, env);
                userSourceFlatMapStream
                        .addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }

            if (parameterTool.has("userAccount")
                    || parameterTool.has("part2")
                    || parameterTool.has("all")) {
                SingleOutputStreamOperator<String> userAccountFlatMapStream =
                        TagUserAccount.getFlatMapStream(envTable, env);
                userAccountFlatMapStream
                        .addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }

            /** submit the application */
            env.execute(DimRiskRegOriginLabel.class.toString());
        } catch (Exception E) {
            LOG.error("Exception: " + E.getMessage());
            E.printStackTrace();
        }
    }

    public static void supportedTables() {
        System.out.println("no table params setted!! ");
        System.out.println(
                "-mkpassbackrcdpt22 \n"
                        + "-mkpassbackrcdpt27 \n"
                        + "-mkpassbackrcdpt28 \n"
                        + "-mkpassbackrcdpt29 \n"
                        + "-userSource \n"
                        + "-userAccount \n");
    }

    private static class WeKafkaKeyedSerializationSchema
            implements KeyedSerializationSchema<String> {
        @Override
        public byte[] serializeKey(String element) {
            JSONObject jsonObject = JSONObject.parseObject(element);
            Long uid = jsonObject.getLong(KEY_BY);
            return String.valueOf(uid).getBytes();
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes();
        }

        @Override
        public String getTargetTopic(String element) {
            return null;
        }
    }

    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {

        @Override
        public int partition(
                String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            int partition = Math.abs(new String(key).hashCode() % partitions.length);
            //            LOG.error("WeKafkaCustomPartitioner partitions: " + partitions.length + "
            // partition: " + partition + " key: " + new String(key));
            return partition;
        }
    }
}
