package com.it.sink;

import com.it.pojo.Event;
import com.it.operator.utils.SourceUtils;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.util.concurrent.TimeUnit;

/**
 * Writes stream data out to files.
 * 1) writeAsText / writeAsCsv: do not support multiple parallel writers on one file, so the sink
 *    parallelism must be set to 1, and they give no data-consistency guarantees.
 * 2) StreamingFileSink: supports parallel, fault-tolerant writes with a configurable rolling
 *    policy (as used below); part files are finalized on successful checkpoints.
 *
 * @author code1997
 */
public class FileSink {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
        // StreamingFileSink commits (finalizes) part files only on successful checkpoints.
        // Without checkpointing enabled, every part file stays in the in-progress/pending
        // state forever and is never made visible — so checkpointing must be on.
        executionEnvironment.enableCheckpointing(TimeUnit.SECONDS.toMillis(10));

        SingleOutputStreamOperator<Event> eventSource = SourceUtils.getEventSource(executionEnvironment);

        // Row-format sink: one UTF-8 text line per record. A part file is rolled (closed
        // and a new one opened) when any of these conditions is met:
        //   - the part file reaches 1 GiB,
        //   - it has been open for 15 minutes,
        //   - no records have arrived for 5 minutes.
        StreamingFileSink<String> fileSink = StreamingFileSink
                .<String>forRowFormat(new Path("data/chapter01/sink/file_sink"), new SimpleStringEncoder<>("UTF-8"))
                .withRollingPolicy(DefaultRollingPolicy.builder()
                        .withMaxPartSize(1024 * 1024 * 1024)
                        .withRolloverInterval(TimeUnit.MINUTES.toMillis(15))
                        .withInactivityInterval(TimeUnit.MINUTES.toMillis(5))
                        .build())
                .build();

        // Serialize each event to its string form before handing it to the row-format sink.
        eventSource.map(Event::toString).addSink(fileSink);
        executionEnvironment.execute();
    }
}
