package com.atguigu.flink.datastramapi.sink;

import com.atguigu.flink.function.MySourceFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

/**
 * Created by Smexy on 2023/2/25
 */
public class Demo2_FileSink
{
    /**
     * Demonstrates writing a stream to row-format files via {@link FileSink}.
     *
     * <p>FileSink must be used together with checkpointing: part files stay in
     * the in-progress state (xxx.inprogress) and are only finalized on
     * checkpoint completion. Without checkpointing enabled, files never close.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpoint every 2s — required so FileSink can commit (close) part files.
        env.enableCheckpointing(2000);
        env.setParallelism(2);

        /*  forRowFormat: writes row-oriented files (e.g. txt)
                basePath: target directory
                encoder:  serializes each record to bytes
            forBulkFormat: writes columnar files (orc, parquet) */

        // Keep the <String> type parameter on FileSink, the encoder and the
        // bucket assigner — raw types lose compile-time type checking.
        FileSink<String> fileSink = FileSink.<String>forRowFormat(new Path("d:/tmp"), new SimpleStringEncoder<String>())
            // Rolling policy: when to close the current part file and start a new one.
            .withRollingPolicy(DefaultRollingPolicy.builder()
                                                   // Roll by file size, default 128 MB:
                                                   //.withMaxPartSize(1024 * 1024 * 1024 * 1l)
                                                   // Roll by elapsed time, default 60s:
                                                   .withRolloverInterval(10000)
                                                   .build()
                // Roll after N seconds with no new writes, default 60s:
                //.withInactivityInterval()
            )
            // Bucket (sub-directory) records by date.
            .withBucketAssigner(new DateTimeBucketAssigner<String>("yyyy-MM-dd"))
            // How often buckets are inspected for time-based rolling; only
            // effective together with withRolloverInterval above.
            .withBucketCheckInterval(10000)
            // Part-file naming: prefix and suffix.
            .withOutputFileConfig(new OutputFileConfig("atguigu-", ".log"))
            .build();

        env.addSource(new MySourceFunction())
           // FileSink uses the new unified Sink API, hence sinkTo (not addSink).
           .sinkTo(fileSink);

        try {
            env.execute();
        } catch (Exception e) {
            // Do not swallow the failure: rethrow with the original cause so the
            // process exits non-zero and the stack trace is preserved.
            throw new RuntimeException("Flink job execution failed", e);
        }
    }
}
