package com.atguigu.flink.datastream.sink;

import com.atguigu.flink.func.ClickSource;
import com.atguigu.flink.pojo.Event;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.time.Duration;

/**
 * Demonstrates writing a DataStream to files with the File connector's {@code FileSink}.
 * <p>
 * New sink API: {@code ds.sinkTo(Sink)} — used here.
 * Legacy sink API: {@code ds.addSink(SinkFunction)}.
 * <p>
 * The File connector provides two sides: {@code FileSource} (read) and {@code FileSink} (write).
 *
 * @author WEIYUNHUI
 * @date 2023/6/14 10:41
 */
public class Flink01_FileSink {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Checkpointing is required for FileSink: in-progress part files are only
        // committed (made visible) when a checkpoint completes.
        env.enableCheckpointing(5000);

        DataStreamSource<Event> ds = env.addSource(new ClickSource());

        // Row-format sink: every Event is encoded as one text line (via toString()).
        FileSink<Event> fileSink =
                FileSink.<Event>forRowFormat(new Path("output"), new SimpleStringEncoder<>())
                        .withRollingPolicy(
                                DefaultRollingPolicy.builder()
                                        .withMaxPartSize(MemorySize.parse("10m"))      // roll when a part file reaches 10 MB
                                        .withRolloverInterval(Duration.ofSeconds(10))  // roll at least every 10 seconds
                                        .withInactivityInterval(Duration.ofSeconds(5)) // roll after 5 seconds without new data
                                        .build()
                        )
                        .withBucketCheckInterval(1000L) // how often (ms) the rolling policy is re-evaluated
                        .withOutputFileConfig(
                                OutputFileConfig.builder()
                                        .withPartPrefix("atguigu-")
                                        .withPartSuffix(".log")
                                        .build()
                        )
                        // One bucket (sub-directory) per minute of processing time.
                        .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy-MM-dd HH-mm"))
                        .build();

        // New sink API; the legacy equivalent would be ds.addSink(SinkFunction).
        ds.sinkTo(fileSink);

        // Let any failure propagate directly from the entry point instead of
        // wrapping it in a RuntimeException, which only obscures the stack trace.
        env.execute();
    }
}
