package com.we.risk.behavior.cross;

import com.alibaba.fastjson.JSONObject;
import com.we.flink.utils.WeKafkaPropertyReader;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import java.util.Properties;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Flink job that reads loan-user records (streaming and/or Hive batch source,
 * selected via command-line flags) and sinks each record as a JSON string to a
 * Kafka topic, partitioned by the record's {@code user_key} field.
 */
public class GdmloanUserInfo {
    /** Classpath location of the production job/Kafka properties file. */
    public static final String RELEASEPROP = "risk/behavior/cross/kfk_gdm_cross_loan_user_info_prod.properties";

    public static Logger LOG = LoggerFactory.getLogger(GdmloanUserInfo.class);

    /**
     * Entry point. Flags: {@code -stream} enables the streaming source,
     * {@code -batch} enables the Hive batch source; with no arguments the
     * supported flags are printed and the program exits.
     *
     * @param args command-line arguments parsed by {@link ParameterTool}
     */
    public static void main(String[] args) {
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        if (parameterTool.getNumberOfParameters() == 0) {
            supportedTables();
            return;
        }
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment envTable = StreamTableEnvironment.create(env);
        WeKafkaPropertyReader propReader = WeKafkaPropertyReader.init(RELEASEPROP);
        try {
            String kfkBootStrapServer = propReader.getTmpKfkBootStrapServer();
            String kfkTopic = propReader.getTmpKfkTopic();
            int kfkPartitions = propReader.getTmpKfkPartitions();
            // RocksDB backend for large state; checkpoint URL comes from the properties file.
            env.setStateBackend(new RocksDBStateBackend(propReader.getRocksDBBackendUrl()));

            // Exactly-once checkpoints every 10 min with a 1 h timeout; checkpoints are
            // retained on cancellation so the job can be restored manually afterwards.
            CheckpointConfig ckConf = env.getCheckpointConfig();
            ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            ckConf.setCheckpointInterval(600_000L);
            ckConf.setCheckpointTimeout(3_600_000L);
            ckConf.setMaxConcurrentCheckpoints(1);
            ckConf.setMinPauseBetweenCheckpoints(500L);
            ckConf.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
            ckConf.enableUnalignedCheckpoints();

            Properties sinkProp = new Properties();
            sinkProp.setProperty("bootstrap.servers", kfkBootStrapServer);
            sinkProp.setProperty("acks", "all");

            // Register the CDH6 Hive catalog so the table sources below can resolve Hive tables.
            String hiveCatlogName = propReader.getHiveCatlogName();
            String hiveDBName = propReader.getHiveDBName();
            String hiveConfDir = propReader.getHiveConfDir();
            HiveCatalog hiveCatalog = new HiveCatalog(hiveCatlogName, hiveDBName, hiveConfDir);
            envTable.registerCatalog("cdh6_hive", hiveCatalog);
            envTable.useCatalog("cdh6_hive");

            // Diamond operator replaces the raw-typed constructor call (unchecked warning);
            // the custom partitioner routes records by the hash of their user_key.
            FlinkKafkaProducer<String> userlabelKafkaProducer = new FlinkKafkaProducer<>(
                    kfkTopic,
                    new WeKafkaKeyedSerializationSchema(),
                    sinkProp,
                    Optional.of(new WeKafkaCustomPartitioner()));
            if (parameterTool.has("stream")) {
                SingleOutputStreamOperator<String> user = TableFdmHfqLoanCenterRiskRecord.getFlatMapStream(envTable, env);
                user.addSink(userlabelKafkaProducer)
                        .setParallelism(1);
            }
            if (parameterTool.has("batch")) {
                SingleOutputStreamOperator<String> user = TableFdmHfqLoanCenterRiskRecordHive.getFlatMapStream(envTable, env);
                // One sink subtask per Kafka partition for the bulk (batch) load.
                user.addSink(userlabelKafkaProducer)
                        .setParallelism(kfkPartitions);
            }
            // getSimpleName() yields "GdmloanUserInfo" as the job name;
            // Class.toString() would have produced "class com.we.risk.behavior.cross.GdmloanUserInfo".
            env.execute(GdmloanUserInfo.class.getSimpleName());
        } catch (Exception e) {
            // Log the full stack trace through SLF4J instead of getMessage() + printStackTrace(),
            // which dropped the trace from the log file and wrote it to stderr only.
            LOG.error("GdmloanUserInfo job failed", e);
        }
    }

    /** Prints the supported command-line flags when the job is started without arguments. */
    public static void supportedTables() {
        System.out.println("no table params setted!! ");
        System.out.println("-all \n-batch \n-stream \n");
    }

    /**
     * Serializes each JSON record for Kafka: the key is the record's
     * {@code user_key} field, the value is the raw JSON string. UTF-8 is pinned
     * explicitly so the bytes do not depend on the JVM's platform default charset.
     */
    private static class WeKafkaKeyedSerializationSchema implements KeyedSerializationSchema<String> {
        private WeKafkaKeyedSerializationSchema() {}

        @Override
        public byte[] serializeKey(String element) {
            JSONObject jsonObject = JSONObject.parseObject(element);
            // NOTE(review): assumes every record carries a non-null "user_key" field;
            // a missing field returns a null key and would NPE in the partitioner — confirm upstream.
            String key = jsonObject.getString("user_key");
            return key.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public String getTargetTopic(String element) {
            // null: always use the topic configured on the producer.
            return null;
        }
    }

    /**
     * Routes each record to a Kafka partition by the hash of its key bytes, so
     * all records for the same user land in the same partition.
     */
    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {
        private WeKafkaCustomPartitioner() {}

        @Override
        public int partition(String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            // abs(h % n) is safe: |h % n| < n, so Integer.MIN_VALUE can never reach Math.abs.
            int index = Math.abs(new String(key, StandardCharsets.UTF_8).hashCode() % partitions.length);
            // Return the partition ID at the computed index, not the index itself —
            // the contract supplies an array of available partition IDs (identical
            // result when IDs are the usual contiguous 0..n-1, correct otherwise).
            return partitions[index];
        }
    }
}
