package cn.itcast;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.flink.api.common.serialization.Encoder;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.api.functions.source.ParallelSourceFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.tukaani.xz.check.Check;

import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

public class StreamHDFSSink {

    /**
     * Continuously generates random {@link Order} records and streams them to HDFS
     * via a row-format {@link StreamingFileSink}, using time-based bucketing and a
     * combined time/inactivity/size rolling policy.
     */
    public static void main(String[] args) {
        // Write to HDFS as "root" regardless of the local OS user.
        System.setProperty("HADOOP_USER_NAME", "root");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Checkpointing is required by StreamingFileSink: part files only move from
        // in-progress/pending to finished on checkpoint completion.
        env.enableCheckpointing(5000);

        DataStreamSource<Order> dataStreamSource = env.addSource(new MyNoParallelSource());

        // Row-encoded sink: bucket per time window, parts roll on 5s age,
        // 2s inactivity, or 128 MB size.
        String path = "hdfs://node01:8020/test/streamingfilesink";
        OutputFileConfig outputFileConfig = OutputFileConfig.builder()
                .withPartPrefix("prefix")
                .withPartSuffix(".ext") // part-file name prefix/suffix
                .build();
        StreamingFileSink<Order> hdfsFileSink = StreamingFileSink
                .forRowFormat(new Path(path), new SimpleStringEncoder<Order>("UTF-8"))
                // Time-based bucket assigner; default is one bucket per hour.
                .withBucketAssigner(new DateTimeBucketAssigner<>())
                // Rolling policy: close the current part file when any limit is hit.
                .withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                // roll after the part file has been open for 5s
                                .withRolloverInterval(TimeUnit.SECONDS.toMillis(5))
                                // roll after 2s without new records
                                .withInactivityInterval(TimeUnit.SECONDS.toMillis(2))
                                // roll once the part file reaches 128 MB
                                .withMaxPartSize(1024 * 1024 * 128)
                                .build())
                .withOutputFileConfig(outputFileConfig)
                .build();

        dataStreamSource.addSink(hdfsFileSink);
        dataStreamSource.print();
        try {
            env.execute("StreamHDFSSink");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Non-parallel source that emits one random {@link Order} per second
     * until the job is cancelled.
     */
    private static class MyNoParallelSource implements SourceFunction<Order> {

        // volatile: cancel() runs on a different thread than run(), so the
        // flag must be visible across threads for the loop to terminate.
        private volatile boolean isRunning = true;

        @Override
        public void run(SourceContext<Order> sourceContext) throws Exception {
            Random random = new Random(); // create once, not per iteration
            while (isRunning) {
                String id = UUID.randomUUID().toString();
                String userId = String.valueOf(random.nextInt(3));
                int money = random.nextInt(100);
                long timestamp = System.currentTimeMillis();
                sourceContext.collect(new Order(id, userId, money, timestamp));
                // Throttle emission: the original tight loop produced records
                // as fast as the CPU allowed and flooded the sink.
                TimeUnit.SECONDS.sleep(1);
            }
        }

        @Override
        public void cancel() {
            isRunning = false;
        }
    }

    /** Order event: id, user id, amount, and creation timestamp (epoch millis). */
    @Data
    @AllArgsConstructor
    @NoArgsConstructor
    public static class Order {
        private String id;
        private String userId;
        private int money;
        private Long timestamp;
    }
}
