package com.pk.flink.sink;


import com.pk.flink.bean.Access;
import com.pk.flink.functions.sink.AccessConsoleSinkFunction;
import com.pk.flink.functions.source.AccecssMapFunction;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;

import java.nio.charset.StandardCharsets;
import java.time.Duration;

/**
 * 使用Flink 实现单词计数
 * @author pk
 */
/**
 * Flink streaming job that writes access-log records to a TCP socket sink.
 *
 * <p>Reads {@code data/access.log} as a bounded text source, maps each raw line
 * to an {@link Access} bean, and sends each record's {@code toString()} form to
 * {@code localhost:9999} via the socket sink.
 *
 * @author pk
 */
public class FlinkSocketSinkApp {

    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Bounded source: emits one String element per line of the log file.
        DataStreamSource<String> accessLogStreamSource = env.readTextFile("data/access.log");

        // Parse each raw log line into an Access bean.
        SingleOutputStreamOperator<Access> mapResults = accessLogStreamSource.map(new AccecssMapFunction());

        // Socket sink (legacy API): serialize each Access record and push it to
        // localhost:9999. A receiver (e.g. `nc -lk 9999`) must be listening when
        // the job starts, or the sink fails to connect.
        mapResults.writeToSocket("localhost", 9999, new SerializationSchema<Access>() {
            @Override
            public byte[] serialize(Access element) {
                // Explicit charset: the no-arg getBytes() uses the platform
                // default and can corrupt non-ASCII characters in the output.
                return element.toString().getBytes(StandardCharsets.UTF_8);
            }
        });
        // Trigger lazy execution of the assembled pipeline.
        env.execute();
    }

}
