package com.atguigu.Flink.datastream.sink;

import com.alibaba.fastjson2.JSON;
import com.atguigu.Flink.POJO.Event;
import com.atguigu.Flink.function.ClickSource;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.time.Duration;

/**
 * Demo job: writes a stream of {@code Event} records to row-format files via
 * Flink's {@code FileSink}, showing how to configure rolling policy, bucket
 * assignment, and output file naming.
 *
 * <p>Checkpointing is enabled because the FileSink only finalizes (commits)
 * in-progress part files on checkpoint completion; without it, files would
 * stay in the in-progress/pending state forever.
 */
public class Flink01_FileSink {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Enable checkpointing (5s interval) — required for FileSink to commit part files.
        env.enableCheckpointing(5000L);

        DataStreamSource<Event> ds = env.addSource(new ClickSource());

        // Sink the stream to files under the "output" directory, one row per record
        // (SimpleStringEncoder calls toString() on each Event).
        FileSink<Event> fileSink = FileSink.<Event>forRowFormat(new Path("output"), new SimpleStringEncoder<>())
                .withRollingPolicy( // when to close the current part file and start a new one
                        DefaultRollingPolicy.builder()
                                .withMaxPartSize(MemorySize.parse("10m"))       // roll when the part file reaches 10 MB
                                .withRolloverInterval(Duration.ofSeconds(10))   // roll at least every 10 seconds
                                .withInactivityInterval(Duration.ofSeconds(5))  // roll after 5 seconds with no incoming data
                                .build()
                )
                .withBucketCheckInterval(1000L) // how often (ms) the sink checks the rolling conditions
                .withOutputFileConfig( // prefix/suffix for generated part files, e.g. atguigu-....log
                        OutputFileConfig.builder()
                                .withPartPrefix("atguigu")
                                .withPartSuffix(".log")
                                .build()
                )
                .withBucketAssigner( // one bucket (subdirectory) per minute, named by this date pattern
                        new DateTimeBucketAssigner<>("yyyy-MM-dd HH-mm")
                )
                .build();
        ds.sinkTo(fileSink);

        try {
            env.execute();
        } catch (Exception e) {
            // Preserve the original cause so the failure reason is not lost.
            throw new RuntimeException(e);
        }
    }
}
