package com.example.day04.controller;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demo Flink job: reads a text file line by line and writes each line to an
 * Apache Kafka topic with at-least-once delivery semantics.
 *
 * <p>NOTE(review): class name is misspelled ("Flie" should be "File"); kept
 * as-is so external references keep working — consider renaming separately.
 */
public class FlieDemo1 {
    /**
     * Job entry point.
     *
     * @param args optional; args[0] overrides the input file path
     *             (defaults to the original hard-coded path when absent)
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        // Input path is now configurable from the command line; the original
        // hard-coded path remains the default, so zero-arg behavior is unchanged.
        String inputPath = args.length > 0
                ? args[0]
                : "E:\\note\\flink-project\\flink-leaning\\input\\word.txt";

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Bounded file source that emits the file's contents one line per record.
        FileSource<String> source = FileSource
                .forRecordStreamFormat(new TextLineInputFormat(), new Path(inputPath))
                .build();

        DataStreamSource<String> stream = env.fromSource(
                source,
                WatermarkStrategy.noWatermarks(), // no event-time processing needed here
                "MySourceName");

        // Sink that writes every record to Apache Kafka as a plain UTF-8 string.
        KafkaSink<String> sink = KafkaSink.<String>builder()
                .setBootstrapServers("node3:9092") // Kafka broker address
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic("test-topic")
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build())
                // AT_LEAST_ONCE: each record is delivered at least once; duplicates are
                // possible on failure/recovery. Alternatives: AT_MOST_ONCE (may drop
                // records) and EXACTLY_ONCE (requires Kafka transactions + checkpointing).
                .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();

        stream.sinkTo(sink);
        env.execute("aaa");
    }
}
