package com.we.risk.antirefund.refund;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
// NOTE(review): RE, ParameterTool, InputStream, Key, Map and Set appear unused —
// candidates for removal in a follow-up (com.sun.* internals may not compile on newer JDKs).
import com.sun.org.apache.regexp.internal.RE;
import com.we.flink.utils.WeKafkaPropertyReader;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;
import org.apache.flink.util.Collector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.Key;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

/**
 * Flink streaming job that merges partial refund events per user.
 *
 * <p>Reads JSON events from a Kafka topic, keys them by {@code uid}, accumulates the
 * {@code user_key} / {@code refund_times} fields in RocksDB-backed keyed state, and emits a
 * combined label record to a downstream Kafka topic once both pieces are known.
 */
public class AdmUserRefundLabel {
    /** Input JSON field holding the numeric user id; the stream is keyed on it. */
    public static final String INPUT_KEY_BY = "uid";
    /** Output JSON field (also used as the Kafka record key) identifying the user. */
    public static final String OUTPUT_KEY_BY = "user_key";
    /** JSON field carrying the user's refund counter. */
    public static final String REFUND_TIMES = "refund_times";
    /** Classpath location of the production Kafka / RocksDB configuration. */
    public static final String RELEASEPROP =
            "risk/antirefund/refund/adm_refund_kfk_prod.properties";
    public static final Logger LOG = LoggerFactory.getLogger(AdmUserRefundLabel.class);

    /**
     * Entry point: wires Kafka source -> keyed stateful flatMap -> Kafka sink and runs the job.
     *
     * @param args unused
     * @throws IOException declared for compatibility; failures are currently caught and logged
     */
    public static void main(String[] args) throws IOException {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        try {
            WeKafkaPropertyReader paramReader = WeKafkaPropertyReader.init(RELEASEPROP);

            // RocksDB keeps the per-user state off-heap so it can grow beyond memory.
            env.setStateBackend(new RocksDBStateBackend(paramReader.getRocksDBBackendUrl()));
            configureCheckpointing(env.getCheckpointConfig());

            DataStreamSource<String> input = env.addSource(buildKafkaSource(paramReader));

            SingleOutputStreamOperator<String> out =
                    input.uid("AdmUserRefundLabel-input")
                            .keyBy(
                                    // Anonymous class (not a lambda) so Flink can extract the
                                    // key type without an explicit returns() hint.
                                    new KeySelector<String, Long>() {
                                        @Override
                                        public Long getKey(String value) throws Exception {
                                            return JSONObject.parseObject(value)
                                                    .getLong(INPUT_KEY_BY);
                                        }
                                    })
                            .flatMap(new UserExpandRichFlatMapFunc());

            // One sink subtask per output partition.
            out.addSink(buildKafkaSink(paramReader))
                    .setParallelism(paramReader.getKfkPartitions());

            env.execute(AdmUserRefundLabel.class.getSimpleName());
        } catch (Exception e) {
            // Pass the throwable to the logger so the full stack trace and cause chain are
            // preserved (getMessage() + printStackTrace() lost both).
            LOG.error("AdmUserRefundLabel job failed", e);
        }
    }

    /** Applies the job-wide checkpoint policy: exactly-once every 10 min, retained on cancel. */
    private static void configureCheckpointing(CheckpointConfig ckConf) {
        ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        ckConf.setCheckpointInterval(10 * 60 * 1000); // ms
        ckConf.setCheckpointTimeout(60 * 60 * 1000); // generous timeout for large RocksDB state
        ckConf.setMaxConcurrentCheckpoints(1);
        ckConf.setMinPauseBetweenCheckpoints(500);
        // Retain externalized checkpoints so a cancelled job can be restored manually.
        ckConf.enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        ckConf.enableUnalignedCheckpoints();
    }

    /** Builds the Kafka consumer for the raw refund-event (source) topic. */
    private static FlinkKafkaConsumer<String> buildKafkaSource(WeKafkaPropertyReader paramReader) {
        Properties consumerProps = new Properties();
        consumerProps.setProperty("bootstrap.servers", paramReader.getTmpKfkBootStrapServer());
        consumerProps.setProperty("group.id", paramReader.getTmpKfkGroupId());
        consumerProps.setProperty("auto.offset.reset", paramReader.getTmpKfkOffset());
        consumerProps.setProperty(
                "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.setProperty(
                "value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");
        return new FlinkKafkaConsumer<String>(
                paramReader.getTmpKfkTopic(), new SimpleStringSchema(), consumerProps);
    }

    /** Builds the Kafka producer for the user-label output topic, keyed by user_key. */
    private static FlinkKafkaProducer<String> buildKafkaSink(WeKafkaPropertyReader paramReader) {
        Properties sinkProps = new Properties();
        sinkProps.setProperty("bootstrap.servers", paramReader.getKfkBootStrapServer());
        return new FlinkKafkaProducer<String>(
                paramReader.getKfkTopic(),
                new WeKafkaKeyedSerializationSchema(),
                sinkProps,
                java.util.Optional.of(new WeKafkaCustomPartitioner()));
    }

    /**
     * Keyed stateful function that merges partial updates (user_key and/or refund_times) per uid
     * and emits a combined label once both pieces are known.
     */
    private static class UserExpandRichFlatMapFunc extends RichFlatMapFunction<String, String> {
        // Latest merged RefundInfo for the current key (uid); null until the first event.
        ValueState<RefundInfo> expandLabelVState;

        @Override
        public void open(Configuration parameters) throws Exception {
            expandLabelVState =
                    getRuntimeContext()
                            .getState(
                                    new ValueStateDescriptor<RefundInfo>(
                                            "expandLabelVState", RefundInfo.class));
        }

        @Override
        public void flatMap(String value, Collector<String> out) throws Exception {
            JSONObject input = JSON.parseObject(value);
            long uid = input.getLongValue(INPUT_KEY_BY);

            // Start from stored state, or a fresh record on the first event for this uid.
            RefundInfo stored = expandLabelVState.value();
            RefundInfo curState = (stored == null ? RefundInfo.init(uid) : stored);

            // An event may carry either field (or both); merge whatever is present.
            if (input.containsKey(OUTPUT_KEY_BY)) {
                curState.setUser_key(input.getString(OUTPUT_KEY_BY));
            }
            if (input.containsKey(REFUND_TIMES)) {
                curState.setRefund_times(input.getIntValue(REFUND_TIMES));
            }

            // Emit only once both a non-empty user_key and a non-zero counter are known.
            int refundTimes = curState.getRefund_times();
            String userKey = curState.getUser_key();
            if (userKey != null && !userKey.isEmpty() && refundTimes != 0) {
                JSONObject outJson = new JSONObject();
                outJson.put(OUTPUT_KEY_BY, userKey);
                outJson.put(REFUND_TIMES, refundTimes);
                out.collect(outJson.toString());
            }

            expandLabelVState.update(curState);
        }
    }

    /** Serializes output records: key = user_key field, value = the raw JSON string (UTF-8). */
    private static class WeKafkaKeyedSerializationSchema
            implements KeyedSerializationSchema<String> {
        @Override
        public byte[] serializeKey(String element) {
            String key = JSONObject.parseObject(element).getString(OUTPUT_KEY_BY);
            // Explicit charset: bare getBytes() used the platform default, which can differ
            // between hosts. Null-safe: Kafka accepts a null key (previously this NPE'd).
            return key == null ? null : key.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public String getTargetTopic(String element) {
            return null; // always use the producer's configured default topic
        }
    }

    /** Hash-partitions records by key so all events of one user land on one partition. */
    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {

        @Override
        public int partition(
                String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            // Defensive: a null key previously threw NPE, an empty partition array divided by 0.
            if (key == null || partitions.length == 0) {
                return partitions.length == 0 ? 0 : partitions[0];
            }
            String keyString = new String(key, StandardCharsets.UTF_8);
            // floorMod is always non-negative; indexing into partitions[] honors the
            // FlinkKafkaPartitioner contract of returning an id from the supplied array
            // instead of assuming ids are the contiguous range 0..length-1.
            int idx = Math.floorMod(keyString.hashCode(), partitions.length);
            if (LOG.isDebugEnabled()) {
                // Parameterized debug (the old code logged at INFO inside a DEBUG guard).
                LOG.debug(
                        "partitions: {} partitionIdx: {} key: {}",
                        partitions.length,
                        idx,
                        keyString);
            }
            return partitions[idx];
        }
    }
}
