package com.wuwangfu.sink;

import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

/**
 * @Author jcshen
 * @Date 2023-02-22
 * @PackageName:com.wuwangfu.sink
 * @ClassName: DStreamFileSink
 * @Description:
 * @Version 1.0
 */
public class DStreamFileSink {

    /**
     * Reads newline-delimited text from a local socket and writes it to HDFS
     * as rolling row-format files via {@link StreamingFileSink}.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        // Identity used for HDFS access from this local job.
        System.setProperty("HADOOP_USER_NAME","hadoop");

        // Local environment with the Flink web UI enabled for inspection.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        // Checkpointing must be on: the sink only commits part files when a
        // checkpoint completes, so without it no output files would appear.
        env.enableCheckpointing(5000);

        // Source: text lines arriving on localhost:8888.
        DataStreamSource<String> socketLines = env.socketTextStream("localhost", 8888);

        // Roll to a new part file every 30 seconds, or once the current
        // file reaches 128 MB, whichever happens first.
        DefaultRollingPolicy<String, String> policy =
                DefaultRollingPolicy.builder()
                        .withRolloverInterval(30 * 1000L)
                        .withMaxPartSize(1024L * 1024L * 128L)
                        .build();

        // Sink: each record becomes one UTF-8 encoded line under the HDFS output path.
        StreamingFileSink<String> sink =
                StreamingFileSink
                        .forRowFormat(
                                new Path("hdfs://node03:8020/out"),
                                new SimpleStringEncoder<String>("UTF-8"))
                        .withRollingPolicy(policy)
                        .build();

        socketLines.addSink(sink);

        env.execute();
    }
}
