package com.atguigu.flink.day04;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.connector.source.util.ratelimit.RateLimiterStrategy;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.datagen.source.DataGeneratorSource;
import org.apache.flink.connector.datagen.source.GeneratorFunction;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.time.Duration;
import java.time.ZoneId;

/**
 * @author Felix
 * @date 2023/12/4
 * Demonstrates the sink operator: writing a generated stream to files with {@link FileSink}
 * (row format, hourly buckets, size/time-based part-file rolling).
 */
public class Flink06_sink_file {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Enable checkpointing (every 5s). FileSink only commits (finalizes) part files
        // on checkpoint; without this, output files would remain "in-progress" forever.
        env.enableCheckpointing(5000L);

        // DataGen source: emits "数据：0", "数据：1", ... throttled to 1000 records/second.
        DataGeneratorSource<String> dataSource = new DataGeneratorSource<>(
            (GeneratorFunction<Long, String>) value -> "数据：" + value,
            Long.MAX_VALUE,
            RateLimiterStrategy.perSecond(1000),
            TypeInformation.of(String.class)
        );

        DataStreamSource<String> dataDS
            = env.fromSource(dataSource, WatermarkStrategy.noWatermarks(), "data_source");

        // Build the file sink: one String record per line, UTF-8 encoded.
        FileSink<String> fileSink = FileSink
            // Target output directory
            .forRowFormat(new Path("E:\\output"), new SimpleStringEncoder<String>("UTF-8"))
            // Part-file naming: atguigu-<subtask>-<counter>.log (builder is the
            // documented construction path for OutputFileConfig)
            .withOutputFileConfig(
                OutputFileConfig.builder()
                    .withPartPrefix("atguigu-")
                    .withPartSuffix(".log")
                    .build()
            )
            // Bucket (sub-directory) per hour, e.g. "2023-12-04 10"
            .withBucketAssigner(
                new DateTimeBucketAssigner<>("yyyy-MM-dd HH", ZoneId.systemDefault())
            )
            // Roll to a new part file when ANY of these conditions is met:
            .withRollingPolicy(
                DefaultRollingPolicy.builder()
                    // maximum age of an open part file
                    .withRolloverInterval(Duration.ofSeconds(10))
                    // roll if no records arrived for this long
                    .withInactivityInterval(Duration.ofSeconds(5))
                    // maximum part-file size (1 KiB)
                    .withMaxPartSize(new MemorySize(1024))
                    .build())
            .build();

        dataDS.sinkTo(fileSink);

        env.execute();
    }
}
