package com.itcast.flink.conncetors.hdfs;

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.fs.StringWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;

/**
 * <p>Description: Flink streaming job that reads newline-delimited text from a
 * socket and writes it to time-bucketed files via {@code BucketingSink}.</p>
 *
 * @author
 * @version 1.0
 * <p>Copyright:Copyright(c)2020</p>
 * @date
 */
public class HdfsSinkApplication {

    /**
     * Entry point: consumes newline-delimited text from a socket and writes it
     * to time-bucketed files (one bucket per minute) using Flink's
     * {@link BucketingSink}.
     *
     * @param args optional overrides: {@code args[0]} = source host,
     *             {@code args[1]} = source port, {@code args[2]} = output path.
     *             Omitted arguments fall back to the original hard-coded values.
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        // Allow host/port/path to be supplied on the command line; defaults keep
        // the previous hard-coded behavior for backward compatibility.
        final String host = args.length > 0 ? args[0] : "192.168.23.128";
        final int port = args.length > 1 ? Integer.parseInt(args[1]) : 9911;
        final String outputPath = args.length > 2 ? args[2] : "d:/tmp/hdfs";

        // 1. Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Read the socket data source, splitting records on newlines.
        DataStreamSource<String> socketStr = env.socketTextStream(host, port, "\n");

        // 3. Configure the bucketing sink: buckets are derived from event
        //    processing time at minute granularity ("yyyy-MM-dd--HHmm").
        BucketingSink<String> sink = new BucketingSink<>(outputPath);
        sink.setBucketer(new DateTimeBucketer<>("yyyy-MM-dd--HHmm"));
        sink.setWriter(new StringWriter<>())
                .setBatchSize(5 * 1024)                    // roll a new part file once it reaches 5 KB...
                .setBatchRolloverInterval(5 * 1000)        // ...or after 5 seconds, whichever comes first
                .setInactiveBucketCheckInterval(30 * 1000) // scan for inactive buckets every 30 seconds
                .setInactiveBucketThreshold(60 * 1000);    // close buckets with no writes for 60 seconds

        // 4. Attach the sink with parallelism 1 so a single writer produces the files.
        socketStr.addSink(sink).setParallelism(1);

        // 5. Execute the job.
        env.execute("job");
    }

}
