package com.we.risk.behavior.repay;

import com.alibaba.fastjson.JSONObject;
import com.we.flink.utils.WeKafkaPropertyReader;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.*;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Properties;
import java.util.Iterator;

import static com.we.risk.behavior.repay.CommonVar.*;

/**
 * Flink streaming job that joins loan repayment-plan records with a per-loan
 * "channel code" marker event.
 *
 * <p>Behavior per {@code LOANKEY}-keyed stream: until a record containing
 * {@code CHANNELCODE} arrives, plan records are buffered in keyed MapState
 * (24 h TTL). Once the marker is seen, the buffer is flushed downstream and
 * all subsequent records pass straight through. Output is written to Kafka,
 * keyed and partitioned by {@code ACCUSERKEY}.
 */
public class AdmRepayPlanLoan {
    /** JSON field used to key the input stream (state is scoped per loan). */
    public static final String INPUT_KEY_BY = LOANKEY;
    /** JSON field emitted as the Kafka record key on the sink side. */
    public static final String OUTPUT_KEY_BY = ACCUSERKEY;
    /** Classpath location of the production Kafka / RocksDB property file. */
    public static final String RELEASEPROP =
            "risk/behavior/repay/kfk_dim_repay_plan_loan_prod.properties";
    public static final Logger LOG = LoggerFactory.getLogger(AdmRepayPlanLoan.class);

    public static void main(String[] args) throws IOException {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        try {
            WeKafkaPropertyReader paramReader = WeKafkaPropertyReader.init(RELEASEPROP);

            /** RocksDB state backend (checkpoints go to the configured URL). */
            env.setStateBackend(new RocksDBStateBackend(paramReader.getRocksDBBackendUrl()));

            /** Checkpoint configuration. */
            CheckpointConfig ckConf = env.getCheckpointConfig();
            ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            ckConf.setCheckpointInterval(10 * 60 * 1000L); // every 10 minutes
            ckConf.setCheckpointTimeout(60 * 60 * 1000L); // abort after 1 hour
            ckConf.setMaxConcurrentCheckpoints(1);
            ckConf.setMinPauseBetweenCheckpoints(500);
            // Keep externalized checkpoints on cancel so the job can be restored manually.
            ckConf.enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

            ckConf.enableUnalignedCheckpoints();

            /** Kafka source. */
            String srcKafkaTopic = paramReader.getTmpKfkTopic();
            String srcKafkaBootStrapServer = paramReader.getTmpKfkBootStrapServer();
            String srcKfkGroupId = paramReader.getTmpKfkGroupId();
            String srcKfkOffset = paramReader.getTmpKfkOffset();

            Properties consumProp = new Properties();
            consumProp.setProperty("bootstrap.servers", srcKafkaBootStrapServer);
            consumProp.setProperty("group.id", srcKfkGroupId);
            consumProp.setProperty("auto.offset.reset", srcKfkOffset);
            consumProp.setProperty(
                    "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            consumProp.setProperty(
                    "value.deserializer",
                    "org.apache.kafka.common.serialization.StringDeserializer");

            FlinkKafkaConsumer<String> kfkSource =
                    new FlinkKafkaConsumer<String>(
                            srcKafkaTopic, new SimpleStringSchema(), consumProp);
            DataStreamSource<String> input = env.addSource(kfkSource);

            // Anonymous KeySelector (not a lambda) so Flink can extract the key type
            // without extra .returns(...) hints.
            SingleOutputStreamOperator<String> out = input.uid("AdmRepayPlanLoan-input")
                    .keyBy(new KeySelector<String, String>() {
                        @Override
                        public String getKey(String value) throws Exception {
                            JSONObject inputJson = JSONObject.parseObject(value);
                            return inputJson.getString(INPUT_KEY_BY);
                        }
                    }).flatMap(new AdmRpyPlanLoanRichFlatMapFunc());

            /** Kafka sink. */
            String sinkKfkTopic = paramReader.getKfkTopic();
            Properties sinkProp = new Properties();
            sinkProp.setProperty("bootstrap.servers", paramReader.getKfkBootStrapServer());
            sinkProp.setProperty("acks", "all");
            int sinkKfkPartitions = paramReader.getKfkPartitions();

            FlinkKafkaProducer<String> userLabelAllFlinkKafkaProducer =
                    new FlinkKafkaProducer<String>(
                            sinkKfkTopic,
                            new WeKafkaKeyedSerializationSchema(),
                            sinkProp,
                            java.util.Optional.of(new WeKafkaCustomPartitioner()));

            out.addSink(userLabelAllFlinkKafkaProducer).setParallelism(sinkKfkPartitions);

            env.execute(AdmRepayPlanLoan.class.getSimpleName());
        } catch (Exception e) {
            // FIX: log with the throwable so the full stack trace reaches the log
            // (was getMessage() + printStackTrace(), which loses it in log files).
            LOG.error("AdmRepayPlanLoan job failed", e);
        }
    }

    /**
     * Keyed buffer-then-passthrough function.
     *
     * <p>State per key: {@code vState} (null/0 = marker not yet seen, 1 = seen)
     * and {@code mapState}, the buffer of plan records awaiting the marker.
     * The buffer carries a 24 h TTL so abandoned keys do not grow RocksDB
     * forever. NOTE(review): {@code vState} itself has no TTL — after the
     * marker a key holds one small Integer indefinitely; confirm this is
     * acceptable for the key cardinality.
     */
    private static class AdmRpyPlanLoanRichFlatMapFunc extends RichFlatMapFunction<String, String> {
        // transient: state handles are re-created in open() on each task (re)start.
        private transient ValueState<Integer> vState;
        private transient MapState<String, String> mapState;

        @Override
        public void open(Configuration parameters) throws Exception {
            vState = getRuntimeContext()
                            .getState(new ValueStateDescriptor<Integer>("vState", Integer.class));

            StateTtlConfig ttlConfig =
                    StateTtlConfig.newBuilder(Time.hours(24))
                            .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
                            .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
                            // Check TTL during RocksDB compaction every 100k processed entries.
                            .cleanupInRocksdbCompactFilter(100000L)
                            .build();
            MapStateDescriptor<String, String> mapStateDesc =
                    new MapStateDescriptor<>("mapState", String.class, String.class);
            mapStateDesc.enableTimeToLive(ttlConfig);
            mapState = getRuntimeContext().getMapState(mapStateDesc);
        }

        @Override
        public void flatMap(String value, Collector<String> out) throws Exception {
            // FIX: autoboxing instead of deprecated new Integer(...).
            Integer stored = vState.value();
            int curState = (stored == null) ? 0 : stored;

            JSONObject inputJson = JSONObject.parseObject(value);
            if (inputJson.containsKey(CHANNELCODE)) {
                // Marker seen: switch to pass-through and flush the buffer.
                vState.update(1);
                if (!mapState.isEmpty()) {
                    for (String buffered : mapState.values()) {
                        out.collect(buffered);
                    }
                    // FIX: clear after flushing — previously a second CHANNELCODE
                    // record for the same key re-emitted every buffered record.
                    mapState.clear();
                }
            } else if (curState == 0) {
                // Marker not yet seen: buffer, deduplicated by composite key.
                String loanKey = inputJson.getString(LOANKEY);
                String accUserKey = inputJson.getString(ACCUSERKEY);
                String periods = inputJson.getString(PERIODS);
                mapState.put(accUserKey + loanKey + periods, value);
            } else {
                out.collect(value);
            }
        }
    }

    /** Serializes the output record: key = ACCUSERKEY field, value = raw JSON. */
    private static class WeKafkaKeyedSerializationSchema
            implements KeyedSerializationSchema<String> {
        @Override
        public byte[] serializeKey(String element) {
            JSONObject jsonObject = JSONObject.parseObject(element);
            String key = jsonObject.getString(OUTPUT_KEY_BY);
            // FIX: tolerate records missing the key field (was an NPE) and use an
            // explicit charset instead of the platform default.
            return key == null ? null : key.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public String getTargetTopic(String element) {
            // null: always use the topic configured on the producer.
            return null;
        }
    }

    /** Routes records to a partition by the hash of their key bytes. */
    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {

        @Override
        public int partition(
                String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            // FIX: a keyless record previously NPE'd here; send it to partition 0.
            if (key == null) {
                return partitions[0];
            }
            String keyStr = new String(key, StandardCharsets.UTF_8);
            // abs(hash % n) keeps the historical key -> partition mapping intact.
            int partition = Math.abs(keyStr.hashCode() % partitions.length);
            if (LOG.isDebugEnabled()) {
                // FIX: was LOG.info inside a debug guard; parameterized debug logging.
                LOG.debug(
                        " partitions: {} partition: {} key: {}",
                        partitions.length,
                        partition,
                        keyStr);
            }
            return partition;
        }
    }
}
