package com.shujia.flink.kafka;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @author shujia
 */
/**
 * Demo job: reads lines from a local text file and writes them to the Kafka
 * topic {@code score} with at-least-once delivery semantics.
 *
 * <p>Requires a reachable Kafka cluster at master/node1/node2:9092 and the
 * input file {@code data/score.txt} relative to the working directory.
 */
public class Demo2KafkaSink {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // AT_LEAST_ONCE in KafkaSink is enforced by flushing pending records on
        // every checkpoint; without checkpointing enabled the configured
        // guarantee has no effect for an unbounded job. 5s interval.
        env.enableCheckpointing(5000);

        FileSource<String> fileSource = FileSource
                // Input format (one record per text line) and path to read from
                .forRecordStreamFormat(new TextLineInputFormat(), new Path("data/score.txt"))
                .build();

        // Build a stream from the file source; no event-time watermarks needed
        DataStream<String> scores = env.fromSource(fileSource, WatermarkStrategy.noWatermarks(), "file Source");


        // Sink that writes each record to Kafka
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                // Kafka broker list
                .setBootstrapServers("master:9092,node1:9092,node2:9092")
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        // Destination topic
                        .setTopic("score")
                        // Serialize each String record as UTF-8 bytes
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build()
                )
                // Delivery semantics: flush on checkpoint, may duplicate on failure
                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();

        // Attach the Kafka sink to the stream
        scores.sinkTo(kafkaSink);

        env.execute();

    }
}
