package demo.base.connect;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;

import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Demo Flink job: consumes two Kafka topics ({@link #KFKTPC} and {@link #KFKTPC1}),
 * flat-maps each stream through its domain helper ({@code OrderAddress} /
 * {@code OrderDetail}), and writes both resulting string streams to a single sink
 * topic ({@link #KFKTPCSINK}), keyed by the {@code orderId} field of the JSON payload
 * and partitioned by a hash of that key.
 */
public class FlinkConnectSourceDemo {
    /** Source topic carrying order-add events. */
    public static final String KFKTPC = "orderadd";
    /** Source topic carrying order-detail events. */
    public static final String KFKTPC1 = "orderdetail";
    /** Sink topic that both transformed streams are written to. */
    public static final String KFKTPCSINK = "ordersink";
    /** Comma-separated Kafka bootstrap servers. */
    public static final String KFKADD = "10.10.15.243:9092,10.10.15.224:9092,10.10.15.222:9092";

    /**
     * Builds and executes the streaming topology.
     *
     * @param args unused
     * @throws Exception if the job graph cannot be built or the job fails;
     *     propagating (instead of the previous catch-and-printStackTrace) lets the
     *     Flink client / launcher see and report the failure.
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties prop = new Properties();
        prop.setProperty("bootstrap.servers", KFKADD);
        prop.setProperty("group.id", "flkcncsrcdemo");
        prop.setProperty("auto.offset.reset", "latest");
        // NOTE(review): FlinkKafkaConsumer ignores key/value deserializer properties —
        // it always consumes raw bytes and applies the DeserializationSchema passed to
        // its constructor (SimpleStringSchema below). Kept for configuration parity.
        prop.setProperty(
                "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        prop.setProperty(
                "value.deserializer",
                "org.apache.kafka.common.serialization.StringDeserializer");

        // Order-add stream: topic -> flatMap via OrderAddress helper.
        FlinkKafkaConsumer<String> kfkSource1 =
                new FlinkKafkaConsumer<String>(KFKTPC, new SimpleStringSchema(), prop);
        DataStreamSource<String> addressSource = env.addSource(kfkSource1);
        SingleOutputStreamOperator<String> addressFlatMap =
                addressSource.flatMap(OrderAddress.getFlatMapFunction());

        // Order-detail stream: topic -> flatMap via OrderDetail helper.
        FlinkKafkaConsumer<String> kfkSource2 =
                new FlinkKafkaConsumer<String>(KFKTPC1, new SimpleStringSchema(), prop);
        DataStreamSource<String> detailSource = env.addSource(kfkSource2);
        SingleOutputStreamOperator<String> detailFlatMap =
                detailSource.flatMap(OrderDetail.getFlatMapFunction());

        Properties sinkProp = new Properties();
        sinkProp.setProperty("bootstrap.servers", KFKADD);

        // One producer definition reused by both sinks; Flink serializes a copy per
        // sink operator, so sharing the instance here is safe.
        FlinkKafkaProducer<String> userLabelAllFlinkKafkaProducer =
                new FlinkKafkaProducer<String>(
                        KFKTPCSINK,
                        new WeKafkaKeyedSerializationSchema(),
                        sinkProp,
                        java.util.Optional.of(new WeKafkaCustomPartitioner()));

        addressFlatMap.addSink(userLabelAllFlinkKafkaProducer).setParallelism(3);
        detailFlatMap.addSink(userLabelAllFlinkKafkaProducer).setParallelism(3);

        env.execute("Flink-Connect-Source-Test");
    }

    /**
     * Serializes outgoing records: key = the {@code orderId} field of the JSON
     * payload, value = the raw JSON string. Both encoded as UTF-8 so key hashing
     * (and thus partitioning) is platform-independent.
     */
    private static class WeKafkaKeyedSerializationSchema
            implements KeyedSerializationSchema<String> {
        @Override
        public byte[] serializeKey(String element) {
            JSONObject jsonObject = JSON.parseObject(element);
            Object orderId = jsonObject.get("orderId");
            // Fail with a descriptive message instead of an anonymous NPE when a
            // record arrives without an orderId field.
            if (orderId == null) {
                throw new IllegalArgumentException("record has no 'orderId' field: " + element);
            }
            // Explicit charset: getBytes() without one uses the platform default.
            return orderId.toString().getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public byte[] serializeValue(String element) {
            return element.getBytes(StandardCharsets.UTF_8);
        }

        @Override
        public String getTargetTopic(String element) {
            // null tells the producer to use its constructor-supplied default topic
            // (KFKTPCSINK).
            return null;
        }
    }

    /** Routes each record to a partition derived from the hash of its key bytes. */
    private static class WeKafkaCustomPartitioner extends FlinkKafkaPartitioner<String> {

        @Override
        public int partition(
                String record, byte[] key, byte[] value, String targetTopic, int[] partitions) {
            // Math.abs is safe here: |h % n| < n <= Integer.MAX_VALUE, so the abs of
            // the remainder cannot overflow. UTF-8 decode matches serializeKey's
            // encoding, keeping the hash stable across platforms.
            return Math.abs(
                    new String(key, StandardCharsets.UTF_8).hashCode() % partitions.length);
        }
    }
}
