package com.com.zs21cp.SinkFunction;

import com.com.zs21cp.ToolBean.WaterSensor;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.api.connector.source.util.ratelimit.RateLimiterStrategy;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.datagen.source.DataGeneratorSource;
import org.apache.flink.connector.datagen.source.GeneratorFunction;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSinkHelper;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.time.Duration;
import java.time.ZoneId;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;


/**
 * @program: test_demo
 * @description:
 * @author: Arctic
 * @create: 2024-03-19 21:49
 */
/**
 * Demonstrates writing a generated stream of {@link WaterSensor} records to the local
 * file system with Flink's row-format {@code FileSink} (rolling part files, hourly buckets).
 *
 * <p>Requires the flink-connector-files dependency:
 * <pre>
 *   &lt;dependency&gt;
 *     &lt;groupId&gt;org.apache.flink&lt;/groupId&gt;
 *     &lt;artifactId&gt;flink-connector-files&lt;/artifactId&gt;
 *     &lt;version&gt;1.17.2&lt;/version&gt;
 *   &lt;/dependency&gt;
 * </pre>
 */
public class SinkFIle {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(2);
//        env.setDefaultSavepointDirectory("F:/tmp/checkpoint");
        // Checkpointing is required for FileSink to promote in-progress part files to finished.
        env.enableCheckpointing(2000, CheckpointingMode.EXACTLY_ONCE);

        // Generation logic: the framework feeds an increasing Long counter, we map it
        // to a WaterSensor. ThreadLocalRandom avoids allocating a Random per record,
        // and nextInt(10) yields 0..9 (the previous nextInt() % 10 could be negative).
        GeneratorFunction<Long, WaterSensor> generatorFunction =
            value -> new WaterSensor(
                "s" + value % 10,
                value % 10,
                ThreadLocalRandom.current().nextInt(10));

        DataGeneratorSource<WaterSensor> dataGeneratorSource = new DataGeneratorSource<>(
            // Record-producing function (driven by the source's Long counter).
            generatorFunction,
            // Total number of records to emit.
            Long.MAX_VALUE,
            // Emission rate: 10 records per second.
            RateLimiterStrategy.perSecond(10),
            // Type information for the produced records.
            Types.POJO(WaterSensor.class));

        // Unlike addSource, fromSource supports event-time semantics (watermark strategies).
        DataStreamSource<WaterSensor> sensorStream = env.fromSource(
            dataGeneratorSource,
            WatermarkStrategy.noWatermarks(),
            "test_demo",
            Types.POJO(WaterSensor.class));

        // Row-format sink: each record is written as its toString() encoded in UTF-8.
        // Typed to WaterSensor so it matches the stream's element type.
        FileSink<WaterSensor> fileSink = FileSink
            // Output directory and per-record encoder.
            .<WaterSensor>forRowFormat(new Path("f:/tmp"),
                new SimpleStringEncoder<>("UTF-8"))
            // Part-file naming: prefix and suffix.
            .withOutputFileConfig(
                OutputFileConfig.builder()
                    .withPartPrefix("a-")
                    .withPartSuffix(".log")
                    .build())
            // One bucket (sub-directory) per hour.
            .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy-MM-dd HH", ZoneId.systemDefault()))
            // Roll files every 10 seconds or once they reach 1 MiB.
            .withRollingPolicy(
                DefaultRollingPolicy.builder()
                    .withRolloverInterval(Duration.ofSeconds(10))
                    .withMaxPartSize(new MemorySize(1024 * 1024))
                    .build())
            .build();

        sensorStream.sinkTo(fileSink);

        env.execute();
    }
}

