package com.atguigu.day04;

import com.atguigu.bean.Event;
import com.atguigu.day03.Flink05_Source_Custom;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

/**
 * Demo: writing a DataStream to the local file system with {@link StreamingFileSink}.
 *
 * <p>Reads a continuous stream of {@code Event} records from the custom click source,
 * converts each record to its {@code toString()} form, and sinks the resulting strings
 * as row-encoded files under {@code ./output}. Part files are rolled when any one of
 * the configured size / time / inactivity thresholds is reached.
 */
public class Flink01_Sink_File {
    public static void main(String[] args) throws Exception {
        // 1. Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Single parallel instance so all records land in one part-file lineage (demo setting).
        env.setParallelism(1);

        // Source: unbounded stream of synthetic click events from the day-03 custom source.
        DataStreamSource<Event> clickStream =
                env.addSource(new Flink05_Source_Custom.ClickSource());

        // Each Event is serialized to a plain string before being written out.
        SingleOutputStreamOperator<String> lineStream = clickStream.map(Event::toString);

        // Rolling policy governing when the sink closes the current part file and opens a new one.
        DefaultRollingPolicy<String, String> rollingPolicy = DefaultRollingPolicy.builder()
                // Roll when the current part file exceeds 2 KB (tiny on purpose, for the demo).
                .withMaxPartSize(2 * 1024)
                // Roll at the latest every 15 minutes regardless of size.
                .withRolloverInterval(15 * 60 * 1000)
                // Roll after 5 minutes with no incoming records.
                .withInactivityInterval(5 * 60 * 1000)
                .build();

        // TODO 2. Build the file sink and attach it via addSink.
        StreamingFileSink<String> rowSink = StreamingFileSink
                .<String>forRowFormat(new Path("./output"), new SimpleStringEncoder<>())
                .withRollingPolicy(rollingPolicy)
                .build();

        lineStream.addSink(rowSink);

        env.execute();
    }
}
