package com.we.flink.dcapiuserlabel;

import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

import com.alibaba.fastjson.JSONObject;
import com.we.flink.dcapiuserlabel.table.TableBaOprCmsOfflineUserLabel;
import com.we.flink.dcapiuserlabel.table.TableDcStatusRecord;
import com.we.flink.dcapiuserlabel.tag.TagDcAuthRecord;
import com.we.flink.dcapiuserlabel.tag.TagDcSubmitRecord;
import com.we.flink.dcapiuserlabel.tag.TagHfqMarketLoan;
import com.we.flink.dcapiuserlabel.tag.TagUserAction;
import com.we.flink.dcapiuserlabel.tag.telsale.TagAiTask;
import com.we.flink.utils.WeKafkaPropertyReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Flink streaming job that reads user-label source tables (selected via command-line
 * flags such as {@code -dcstatusrecord} or {@code -all}) through a Hive-backed Table
 * API catalog, and sinks the resulting JSON records to a single Kafka topic, keyed and
 * partitioned by the {@code uid} field of each record.
 */
public class DimDcapiUserLabel {
    /** Classpath location of the production property file read by {@link WeKafkaPropertyReader}. */
    public static final String RELEASEPROP = "dcapiuserLabel/dcapilabel_prod.properties";

    // Kept public for backward compatibility with any external readers; treat as read-only.
    public static final Logger LOG = LoggerFactory.getLogger(DimDcapiUserLabel.class);

    /**
     * Entry point. With no arguments, prints the supported table flags and exits;
     * otherwise builds the selected source streams and submits the job.
     *
     * @param args table-selection flags, e.g. {@code -useraction} or {@code -all}
     */
    public static void main(String[] args) {
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        if (parameterTool.getNumberOfParameters() == 0) {
            supportedTables();
            return;
        }
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment envTable = StreamTableEnvironment.create(env);
        WeKafkaPropertyReader propReader = WeKafkaPropertyReader.init(RELEASEPROP);
        try {
            String kfkBootStrapServer = propReader.getTmpKfkBootStrapServer();
            String kfkTopic = propReader.getTmpKfkTopic();
            int kfkPartitions = propReader.getTmpKfkPartitions();

            env.setStateBackend(new RocksDBStateBackend(propReader.getRocksDBBackendUrl()));
            configureCheckpointing(env.getCheckpointConfig());

            /** sink kafka property */
            Properties sinkProp = new Properties();
            sinkProp.setProperty("bootstrap.servers", kfkBootStrapServer);
            sinkProp.setProperty("acks", "all"); // wait for all in-sync replicas

            /** hive catalog configure (property keys keep the original "Catlog" spelling) */
            HiveCatalog hiveCatalog =
                    new HiveCatalog(
                            propReader.getHiveCatlogName(),
                            propReader.getHiveDBName(),
                            propReader.getHiveConfDir());
            envTable.registerCatalog("hive", hiveCatalog);
            envTable.useCatalog("hive");

            /** shared Kafka sink: keyed by uid, partitioned by WeKafkaCustomPartitioner */
            FlinkKafkaProducer<String> userLabelProducer =
                    new FlinkKafkaProducer<>(
                            kfkTopic,
                            new WeKafkaKeyedSerializationSchema(),
                            sinkProp,
                            java.util.Optional.of(new WeKafkaCustomPartitioner()));

            boolean all = parameterTool.has("all");

            /** attach one source stream per requested flag; "-all" enables every source */
            if (all || parameterTool.has("aitask")) {
                sinkTo(TagAiTask.getFlatMapStream(envTable, env), userLabelProducer, kfkPartitions);
            }
            if (all || parameterTool.has("baOprOfflineLabel")) {
                sinkTo(
                        TableBaOprCmsOfflineUserLabel.getFlatMapStream(envTable, env),
                        userLabelProducer,
                        kfkPartitions);
            }
            if (all || parameterTool.has("dcstatusrecord")) {
                sinkTo(
                        TableDcStatusRecord.getFlatMapStream(envTable, env),
                        userLabelProducer,
                        kfkPartitions);
            }
            if (all || parameterTool.has("dcauthrecord")) {
                sinkTo(
                        TagDcAuthRecord.getFlatMapStream(envTable, env),
                        userLabelProducer,
                        kfkPartitions);
            }
            if (all || parameterTool.has("dcsubmitrecord")) {
                sinkTo(
                        TagDcSubmitRecord.getFlatMapStream(envTable, env),
                        userLabelProducer,
                        kfkPartitions);
            }
            if (all || parameterTool.has("hfqmarketloan")) {
                sinkTo(
                        TagHfqMarketLoan.getFlatMapStream(envTable, env),
                        userLabelProducer,
                        kfkPartitions);
            }
            if (all || parameterTool.has("useraction")) {
                sinkTo(
                        TagUserAction.getFlatMapStream(envTable, env),
                        userLabelProducer,
                        kfkPartitions);
            }

            /** submit the application (getSimpleName: class.toString() prefixed "class ...") */
            env.execute(DimDcapiUserLabel.class.getSimpleName());
        } catch (Exception e) {
            // Log with the full stack trace; message-only logging plus printStackTrace()
            // loses the cause in the job logs.
            LOG.error("DimDcapiUserLabel job failed", e);
        }
    }

    /** Attaches the shared Kafka producer to {@code stream} with the given sink parallelism. */
    private static void sinkTo(
            SingleOutputStreamOperator<String> stream,
            FlinkKafkaProducer<String> producer,
            int parallelism) {
        stream.addSink(producer).setParallelism(parallelism);
    }

    /**
     * Applies the job's checkpoint policy: exactly-once, every 10 minutes, 1-hour timeout,
     * one concurrent checkpoint, checkpoints retained after cancellation for manual restore.
     */
    private static void configureCheckpointing(CheckpointConfig ckConf) {
        ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        ckConf.setCheckpointInterval(10 * 60 * 1000); // every 10 minutes (ms)
        ckConf.setCheckpointTimeout(60 * 60 * 1000); // abort a checkpoint after 1 hour
        ckConf.setMaxConcurrentCheckpoints(1);
        ckConf.setMinPauseBetweenCheckpoints(500);
        ckConf.enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        ckConf.enableUnalignedCheckpoints();
    }

    /** Serializes records as UTF-8, keying each Kafka message by the JSON {@code uid} field. */
    private static class WeKafkaKeyedSerializationSchema
            implements KeyedSerializationSchema<String> {
        @Override
        public byte[] serializeKey(String element) {
            // NOTE(review): a record without "uid" yields the literal key "null" — confirm
            // upstream always populates uid.
            Long uid = JSONObject.parseObject(element).getLong("uid");
            // Explicit charset: the no-arg getBytes() depends on the platform default.
            return String.valueOf(uid).getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public String getTargetTopic(String element) {
            return null; // use the producer's configured topic
        }
    }

    /** Routes each record to a partition derived from the hash of its key bytes. */
    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {

        @Override
        public int partition(
                String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            String keyText = new String(key, StandardCharsets.UTF_8);
            // |hashCode % n| < n, so Math.abs cannot see Integer.MIN_VALUE here.
            int partition = Math.abs(keyText.hashCode() % partitions.length);
            if (LOG.isDebugEnabled()) {
                // Was LOG.info inside an isDebugEnabled() guard; use debug + parameterized args.
                LOG.debug(
                        "partitions: {} partition: {} key: {}",
                        partitions.length,
                        partition,
                        keyText);
            }
            return partition;
        }
    }

    /** Prints the complete set of recognized table flags (shown when no args are given). */
    public static void supportedTables() {
        System.out.println("no table params set!! ");
        System.out.println(
                "-aitask \n"
                        + "-baOprOfflineLabel \n"
                        + "-dcstatusrecord \n"
                        + "-dcauthrecord \n"
                        + "-dcsubmitrecord \n"
                        + "-hfqmarketloan \n"
                        + "-useraction \n"
                        + "-all ");
    }
}
