package streaming.api.sink;

import org.apache.commons.lang.SystemUtils;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.util.Collector;
import streaming.api.beans.SensorReading;
import utils.PropertiesReader;

import java.util.concurrent.TimeUnit;

/**
 * file -> file (HDFS)
 * Source: local text file [sensor1.txt]
 * Sink target: row-format file sink on HDFS (daily buckets)
 */
public class SinkTest3_file_2 {

    // Input file path (e.g. sensor1.txt), from configuration.
    private static final String fromPath = PropertiesReader.get("default.file.from.path");
    // HDFS base URL, e.g. "hdfs://host:port".
    private static final String HDFSRoot = PropertiesReader.get("default.file.hdfs.url");
    // Base directory on HDFS for sink output and checkpoints.
    private static final String toPathDir = PropertiesReader.get("default.file.hdfs.basePath");

    /**
     * Reads sensor records ("id,timestamp,temperature" per line) from a text file,
     * parses them, and writes them to a row-format file sink (HDFS path from config).
     *
     * @param args unused
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        env.enableCheckpointing(1000L);
        // Checkpoint to a local path on Windows dev machines, to HDFS otherwise.
        if (SystemUtils.IS_OS_WINDOWS) {
            env.setStateBackend(new FsStateBackend("file:///E:/testData/ckp"));
        } else {
            env.setStateBackend(new FsStateBackend(HDFSRoot + toPathDir + "/flink-checkpoint"));
        }

        DataStream<String> inputStream = env.readTextFile(fromPath);

        // Parse each CSV line into a SensorReading.
        // parseLong/parseDouble replace the deprecated Long/Double boxing constructors.
        DataStream<SensorReading> dataStream1 = inputStream.map(line -> {
            String[] fields = line.split(",");
            return new SensorReading(fields[0], Long.parseLong(fields[1]), Double.parseDouble(fields[2]));
        });

        // Project each line to an (id, temperature) pair.
        // Typed generic (was a raw DataStream) so downstream operators keep type safety.
        DataStream<Tuple2<String, Double>> dataStream2 = inputStream.flatMap(
                new FlatMapFunction<String, Tuple2<String, Double>>() {
                    @Override
                    public void flatMap(String s, Collector<Tuple2<String, Double>> out) throws Exception {
                        String[] fields = s.split(",");
                        out.collect(new Tuple2<>(fields[0], Double.valueOf(fields[2])));
                    }
                });

        System.out.println("URL：" + HDFSRoot + toPathDir);

        // Row-format file sink with rolling policy and daily bucketing.
        FileSink<String> sink = FileSink.forRowFormat(
                new Path(HDFSRoot + toPathDir),
                new SimpleStringEncoder<String>("UTF-8")
        ).withRollingPolicy(
                DefaultRollingPolicy.builder()
                        .withRolloverInterval(TimeUnit.MINUTES.toMillis(15)) // roll to a new part file every 15 minutes
                        .withInactivityInterval(TimeUnit.MINUTES.toMillis(5)) // ...or after 5 minutes without new data
                        .withMaxPartSize(1024 * 1024 * 1024L) // ...or once a part file reaches 1 GiB
                        .build()
        )
        .withBucketAssigner(new DateTimeBucketAssigner<>("yyyy-MM-dd")) // one bucket directory per day
        .build();

        // BUG FIX: the sink was built but never attached to a stream, so the job
        // had no sink operator and never wrote any output. Attach both streams
        // as their string representations.
        dataStream1.map(reading -> reading.toString()).sinkTo(sink);
        dataStream2.map(tuple -> tuple.toString()).sinkTo(sink);

        env.execute();
    }
}
