package cn.jly.flink.source2sink.kafka;

import cn.jly.flink.entity.Metric;
import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.Encoder;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Demo of a custom Kafka serializer/deserializer for Flink.
 *
 * <p>Consumes JSON-encoded {@link Metric} records from the {@code metric_test} topic
 * using a custom {@link KafkaDeserializationSchema}, drops records that failed to
 * deserialize, and writes the remaining metrics as one JSON line per record to a
 * local directory via {@link StreamingFileSink}.
 *
 * @PackageName cn.jly.flink.source
 * @ClassName KafkaCustomSourceDemo
 * @Description custom Kafka serialization and deserialization schema
 * @Author 姬岚洋
 * @Date 2021/1/14 下午 3:28
 */
public class KafkaCustomSourceDemo {

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        final Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        // NOTE(review): "zookeeper.connect" is only read by the legacy Kafka 0.8 consumer;
        // the modern FlinkKafkaConsumer ignores it. Kept for reference only.
        properties.setProperty("zookeeper.connect", "node01:2181,node02:2181,node03:2181");
        properties.setProperty("group.id", "test");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // NOTE(review): this setting is effectively overridden by setStartFromEarliest() below,
        // which forces the consumer to ignore committed offsets and read from the beginning.
        properties.setProperty("auto.offset.reset", "latest");

        final FlinkKafkaConsumer<Tuple2<String, Metric>> kafkaConsumer = new FlinkKafkaConsumer<>(
                "metric_test",
                new CustomKafkaDeserializationSchema(),
                properties
        );
        // Start consuming from the earliest available offset.
        kafkaConsumer.setStartFromEarliest();

        env.addSource(kafkaConsumer)
                // Drop tuples whose Metric payload could not be deserialized (f1 == null),
                // forwarding only valid Metric objects downstream.
                .flatMap(new FlatMapFunction<Tuple2<String, Metric>, Metric>() {
                    @Override
                    public void flatMap(Tuple2<String, Metric> stringMetricTuple2, Collector<Metric> collector) throws Exception {
                        if (stringMetricTuple2.f1 == null) {
                            System.out.println("caught a null record, skipping");
                            return;
                        }
                        collector.collect(stringMetricTuple2.f1);
                    }
                })
                .addSink(
                        StreamingFileSink.forRowFormat(Path.fromLocalFile(new File("d:/test")), new Encoder<Metric>() {
                            @Override
                            public void encode(Metric metric, OutputStream outputStream) throws IOException {
                                // Write directly to the sink-managed stream. Wrapping it in a
                                // PrintStream (as before) buffers internally without a flush,
                                // silently swallows IOExceptions, and uses the platform default
                                // charset — rows could be lost or mis-encoded. One JSON object
                                // per line, UTF-8.
                                outputStream.write(JSON.toJSONString(metric).getBytes(StandardCharsets.UTF_8));
                                outputStream.write('\n');
                            }
                        }).build()
                );

        env.execute("KafkaCustomSourceDemo");
    }

    /**
     * Custom Flink serialization/deserialization schema for Kafka records.
     *
     * <p>Maps each Kafka record to a {@code Tuple2<topicName, Metric>}. Records whose
     * value is null or is not valid {@link Metric} JSON yield a tuple with a null
     * {@code f1}, which downstream operators are expected to filter out.
     */
    public static class CustomKafkaDeserializationSchema implements KafkaDeserializationSchema<Tuple2<String, Metric>> {

        /**
         * Whether {@code nextElement} marks the end of the stream. Always {@code false}
         * here, because messages should be consumed continuously.
         *
         * @param nextElement the element just deserialized
         * @return always {@code false}
         */
        @Override
        public boolean isEndOfStream(Tuple2<String, Metric> nextElement) {
            return false;
        }

        /**
         * Deserializes a Kafka record into a {@code Tuple2<kafkaTopicName, Metric>}.
         *
         * <p>Data arriving from Kafka may be null (tombstones) or malformed JSON. If
         * deserialization threw out of this method unhandled, the job would fail and
         * restart, so all parse failures are handled here to keep the job robust: a
         * tuple with a null {@code f1} is returned instead.
         *
         * @param record the raw Kafka record (key and value as byte arrays)
         * @return tuple of (topic name, parsed Metric) — Metric is null on bad input
         * @throws Exception never in practice; parse errors are caught internally
         */
        @Override
        public Tuple2<String, Metric> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception {
            final byte[] value = record.value();
            // Guard against Kafka tombstone / null values before touching the bytes;
            // previously this caused an NPE that was rescued only by the broad catch.
            if (value != null) {
                try {
                    // Decode explicitly as UTF-8 rather than the platform default charset.
                    final Metric metric = JSON.parseObject(new String(value, StandardCharsets.UTF_8), Metric.class);
                    return Tuple2.of(record.topic(), metric);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }

            return Tuple2.of(record.topic(), null);
        }

        /**
         * Tells Flink the produced data type to aid type inference.
         *
         * @return type information for {@code Tuple2<String, Metric>}
         */
        @Override
        public TypeInformation<Tuple2<String, Metric>> getProducedType() {
            return TypeInformation.of(new TypeHint<Tuple2<String, Metric>>() {});
        }
    }
}
