package com.flink.sink.kafka;

import com.flink.entity.User;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.time.LocalDate;

/**
 * Reads string records from one Kafka topic, parses each comma-separated
 * record into a {@link User}, and writes the user's string form to another
 * Kafka topic.
 *
 * @author yanzhengwu
 * @create 2022-07-23 21:22
 */
public class SinkToKafka {
    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Run with 4 parallel subtasks. Note: this does NOT guarantee a global
        // element order across subtasks; order is only preserved per partition/subtask.
        env.setParallelism(4);

        // AT_LEAST_ONCE on the KafkaSink only provides its guarantee when
        // checkpointing is enabled (records are flushed on checkpoint).
        env.enableCheckpointing(5000L);

        // Consume string records from the input topic, starting at the earliest offset.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("192.168.46.8:8089")
                .setTopics("input-topic")
                .setGroupId("my-group")
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        DataStreamSource<String> kafkaSource =
                env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source");

        // Parse each CSV record into a User and re-serialize it as a string.
        // Expected layout: field0, field1, ISO date, integer, plus a fixed 2000L
        // — NOTE(review): field meanings depend on User's constructor; confirm there.
        DataStream<String> map = kafkaSource.map(data -> {
            String[] fields = data.split(",");
            return new User(
                    fields[0].trim(),
                    fields[1].trim(),
                    LocalDate.parse(fields[2].trim()),
                    Integer.parseInt(fields[3].trim()), // parseInt: avoids needless boxing vs. valueOf
                    2000L).toString();
        });

        // Flink 1.15+ builder-style sink; the old constructor-based
        // FlinkKafkaProducer API is deprecated.
        KafkaSink<String> sink = KafkaSink.<String>builder()
                .setBootstrapServers("192.168.46.8:8090")
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic("output-topic")
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build()
                )
                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();

        map.sinkTo(sink);

        // BUG FIX: the dataflow graph is built lazily; without execute() the
        // job is never submitted and nothing runs.
        env.execute("SinkToKafka");
    }
}
