package com.itcast.flink.conncetors.hdfs;

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.fs.StringWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;

/**
 * @program: flink-app
 * @description: Flink job that writes socket text data into HDFS-style bucketed files
 * @author: zhanghz001
 * @create: 2021-07-23 10:13
 **/
public class ZhzHdfsSinkApplication {
    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Socket text source; the data is produced by SocketSourceApplication,
        // which must be running alongside this job.
        DataStreamSource<String> lines = env.socketTextStream("localhost", 9911, "\n");

        // Bucketing sink that writes the stream as plain-text files under
        // ./data/hdfs, with one bucket directory per minute.
        BucketingSink<String> hdfsSink = new BucketingSink<>("./data/hdfs");
        hdfsSink.setBucketer(new DateTimeBucketer<>("yyyy-MM-dd--HHmm"));
        hdfsSink.setWriter(new StringWriter<>());
        hdfsSink.setBatchSize(5 * 1024);                    // roll when a part file reaches 5 KB
        hdfsSink.setBatchRolloverInterval(5 * 1000);        // also roll to a new file every 5 seconds
        hdfsSink.setInactiveBucketCheckInterval(30 * 1000); // scan for inactive buckets every 30 seconds
        hdfsSink.setInactiveBucketThreshold(60 * 1000);     // close buckets that received no data for 60 seconds

        lines.addSink(hdfsSink).setParallelism(1);

        // Submit the job.
        env.execute("hdfs sink job");
    }
}
