package cn.jly.flink.connector;

import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Streaming File Sink
 * <p>
 * https://ci.apache.org/projects/flink/flink-docs-release-1.11/zh/dev/connectors/streamfile_sink.html
 * <p>
 * flink支持多种格式将数据写入HDFS，这里仅以行编码格式示例，更多请参考上面的网址
 *
 * @author lanyangji
 * @create 2020-09-03 20:35
 */
public class HdfsConnectorApp {
    /**
     * Monotonically increasing id generator for the sample {@code Person} records,
     * seeded at 1000 so generated ids start from 1001.
     */
    private static final AtomicLong ID_GENERATOR = new AtomicLong(1000);

    /**
     * Builds a small bounded stream of {@code Person} records and writes them to HDFS
     * using a row-encoded {@link StreamingFileSink}.
     *
     * @param args unused command-line arguments
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        // Identify as this HDFS user and point the Hadoop client at the local
        // winutils installation (needed when running from a Windows dev box).
        System.setProperty("HADOOP_USER_NAME", "lanyangji");
        System.setProperty("hadoop.home.dir", "D:\\software\\dev\\hadoop-2.8.1");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // StreamingFileSink only finalizes (publishes) part files on successful
        // checkpoints; without checkpointing enabled, every part file would stay
        // in the "in-progress" state forever and never become readable output.
        env.enableCheckpointing(TimeUnit.SECONDS.toMillis(10));

        // Four sample records with generated ids and random 5-char names.
        DataStreamSource<JdbcConnectorApp.Person> dataStreamSource = env.fromElements(
                new JdbcConnectorApp.Person(ID_GENERATOR.incrementAndGet(), UUID.randomUUID().toString().substring(0, 5)),
                new JdbcConnectorApp.Person(ID_GENERATOR.incrementAndGet(), UUID.randomUUID().toString().substring(0, 5)),
                new JdbcConnectorApp.Person(ID_GENERATOR.incrementAndGet(), UUID.randomUUID().toString().substring(0, 5)),
                new JdbcConnectorApp.Person(ID_GENERATOR.incrementAndGet(), UUID.randomUUID().toString().substring(0, 5))
        );

        // Configure the StreamingFileSink: row-encoded UTF-8 lines, rolling a new
        // part file every 15 minutes, after 5 minutes of inactivity, or at 1 GiB.
        StreamingFileSink<JdbcConnectorApp.Person> streamingFileSink =
                StreamingFileSink.<JdbcConnectorApp.Person>forRowFormat(
                        new Path("hdfs://hadoop101:9000/flink-sink/"),
                        new SimpleStringEncoder<>("UTF-8")
                )
                        .withRollingPolicy(
                                DefaultRollingPolicy.builder()
                                        .withRolloverInterval(TimeUnit.MINUTES.toMillis(15))
                                        .withInactivityInterval(TimeUnit.MINUTES.toMillis(5))
                                        .withMaxPartSize(1024 * 1024 * 1024)
                                        .build()
                        )
                        .build();

        // sink to HDFS
        dataStreamSource.addSink(streamingFileSink);

        env.execute("HdfsConnectorApp");
    }
}
