package com.pk.flink.sink;


import com.pk.flink.bean.Access;
import com.pk.flink.functions.sink.AccessConsoleSinkFunction;
import com.pk.flink.functions.source.AccecssMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.time.Duration;

/**
 * Demonstrates several Flink sink strategies for {@link Access} records parsed
 * from an access log: console output (via {@code print()}/{@code printToErr()}
 * and a custom {@code SinkFunction}) and file output (the legacy
 * {@code writeAsText} API and the row-format {@code StreamingFileSink} with a
 * rolling policy).
 *
 * @author pk
 */
public class FlinkSinkApp {

    /**
     * Entry point. Builds and executes the streaming job.
     *
     * @param args optional path overrides (all have defaults so a zero-arg run
     *             behaves as before):
     *             args[0] = input log file (default "data/access.log"),
     *             args[1] = legacy text-sink output path (default "out/access.log"),
     *             args[2] = StreamingFileSink output directory (default "out/access.log.sink")
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {
        // Allow the demo's hard-coded paths to be overridden from the command line.
        final String inputPath = args.length > 0 ? args[0] : "data/access.log";
        final String textOutputPath = args.length > 1 ? args[1] : "out/access.log";
        final String fileSinkPath = args.length > 2 ? args[2] : "out/access.log.sink";

        // Obtain the streaming execution environment and read the source file.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStreamSource<String> accessLogStreamSource = env.readTextFile(inputPath);

        // Console sink: parse each line into an Access record, then print to
        // stdout and stderr (parallelism 1 keeps the console output ordered).
        SingleOutputStreamOperator<Access> mapResults = accessLogStreamSource.map(new AccecssMapFunction());
        mapResults.print().setParallelism(1);
        mapResults.printToErr().setParallelism(1);

        // Custom SinkFunction that also writes records to the console.
        mapResults.addSink(new AccessConsoleSinkFunction());

        // File sink, option 1: the legacy writeAsText API
        // (deprecated in newer Flink versions — kept here for comparison).
        mapResults.writeAsText(textOutputPath, org.apache.flink.core.fs.FileSystem.WriteMode.OVERWRITE).setParallelism(4);

        // File sink, option 2: StreamingFileSink with a row format.
        // Part files roll every 15 minutes, after 5 seconds of inactivity,
        // or once a part reaches 1 MiB — whichever comes first.
        final StreamingFileSink<Access> fileSink = StreamingFileSink
                .forRowFormat(new Path(fileSinkPath), new SimpleStringEncoder<Access>("UTF-8"))
                .withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                .withRolloverInterval(Duration.ofMinutes(15))
                                .withInactivityInterval(Duration.ofSeconds(5))
                                .withMaxPartSize(MemorySize.ofMebiBytes(1))
                                .build())
                .build();

        mapResults.addSink(fileSink).setParallelism(4);

        // Name the job so it is identifiable in the Flink UI/logs.
        env.execute("FlinkSinkApp");
    }
}
