package com.chukun.flink.stream.kafka;


import com.chukun.flink.stream.bean.KafkaMess;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.junit.Test;

import java.util.ArrayList;

/**
 * @author chukun
 * @version 1.0.0
 * @description kafka的基本操作
 * @createTime 2022年06月04日 11:27:00
 */
public class KafkaSourceTemplate {

    /**
     * Consumes string messages from a Kafka topic and prints them to stdout.
     *
     * @throws Exception if the Flink job fails to execute
     */
    @Test
    public void testSourceForKafka() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Build the Kafka source. A value deserializer is mandatory —
        // KafkaSourceBuilder#build() rejects a source without one — so
        // SimpleStringSchema is set explicitly here (it was missing before,
        // which made this test fail at build() time).
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers("linux01:9092")
                .setGroupId("flink-kafka-group")
                .setTopics("flink-kafka")
                // consume starting from the latest offsets
                .setStartingOffsets(OffsetsInitializer.latest())
                // consume from the beginning of the topic:
                //.setStartingOffsets(OffsetsInitializer.earliest())
                // consume from a timestamp-based offset:
                //.setStartingOffsets(OffsetsInitializer.timestamp(System.currentTimeMillis() - 5000))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        DataStreamSource<String> kafkaDataStream =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka-source");

        kafkaDataStream.print("kafka-data-stream");

        env.execute("testSourceForKafka");
    }

    /**
     * Consumes {@link KafkaMess} records from Kafka with a custom watermark
     * generator and an event-time timestamp taken from the message itself.
     *
     * @throws Exception if the Flink job fails to execute
     */
    @Test
    public void testKafkaWithTimestampAndWatermarker() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Build the Kafka source with a project-specific value deserializer.
        KafkaSource<KafkaMess> kafkaSource = KafkaSource.<KafkaMess>builder()
                .setBootstrapServers("linux01:9092")
                .setGroupId("flink-kafka-group")
                .setTopics("flink-kafka-waterma")
                // consume starting from the latest offsets
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new KafkaMessageDeserializer())
                .build();

        // Attach the custom watermark generator and assign the event-time
        // timestamp from KafkaMess#getTime(). TypeInformation is given
        // explicitly because the lambda erases the generic type.
        DataStreamSource<KafkaMess> kafkaDataStream = env.fromSource(kafkaSource,
                WatermarkStrategy.forGenerator((ctx) -> new KafkaWatermarkGenerator())
                        .withTimestampAssigner((message, timestamp) -> message.getTime()),
                "kafka-source-with-watermarker", TypeInformation.of(KafkaMess.class));

        kafkaDataStream.print("kafka-data-stream-with-watermarker");

        env.execute("testKafkaWithTimestampAndWatermarker");
    }

    /**
     * Writes a small in-memory data set to a Kafka topic with
     * at-least-once delivery semantics.
     *
     * @throws Exception if the Flink job fails to execute
     */
    @Test
    public void testSinkToKafka() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Load the in-memory test data set.
        DataStreamSource<Tuple2<Integer, String>> sourceDataStream = buildSourceData(env);

        // Build the Kafka sink.
        KafkaSink<Tuple2<Integer, String>> kafkaSink = KafkaSink.<Tuple2<Integer, String>>builder()
                .setBootstrapServers("linux01:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.builder()
                                .setTopic("kafka-sink")
                                .setKafkaValueSerializer(KafkaValueSerializerSchema.class)
                                .build()
                )
                // at-least-once delivery semantics
                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();

        // Write the messages to Kafka.
        sourceDataStream.sinkTo(kafkaSink);

        env.execute("testSinkToKafka");
    }

    /**
     * Writes a small in-memory data set to a Kafka topic using a custom
     * message partitioner, with at-least-once delivery semantics.
     *
     * @throws Exception if the Flink job fails to execute
     */
    @Test
    public void testSinkToKafkaWithPartition() throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Load the in-memory test data set.
        DataStreamSource<Tuple2<Integer, String>> sourceDataStream = buildSourceData(env);

        // Build the Kafka sink.
        KafkaSink<Tuple2<Integer, String>> kafkaSink = KafkaSink.<Tuple2<Integer, String>>builder()
                .setBootstrapServers("linux01:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.builder()
                                .setTopic("kafka-sink")
                                .setKafkaValueSerializer(KafkaValueSerializerSchema.class)
                                // custom message partitioner
                                .setPartitioner(new KafkaMessagePartition())
                                .build()
                )
                // at-least-once delivery semantics
                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();

        // Write the messages to Kafka.
        sourceDataStream.sinkTo(kafkaSink);

        env.execute("testSinkToKafkaWithPartition");
    }

    /**
     * Builds the small in-memory fixture shared by both sink tests.
     *
     * @param env the streaming environment to create the source in
     * @return a bounded stream of (id, word) tuples
     */
    private DataStreamSource<Tuple2<Integer, String>> buildSourceData(StreamExecutionEnvironment env) {
        ArrayList<Tuple2<Integer, String>> data = new ArrayList<>();
        data.add(Tuple2.of(1, "spark"));
        data.add(Tuple2.of(2, "flink"));
        data.add(Tuple2.of(3, "stream"));
        data.add(Tuple2.of(4, "java"));
        return env.fromCollection(data);
    }
}
