package com.test.flink_sinks;

import com.test.beans.LogBean;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.parquet.ParquetWriterFactory;
import org.apache.flink.formats.parquet.avro.AvroParquetWriters;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;

/**
 * Flink streaming job: reads comma-separated log lines from a local socket,
 * parses each line into a {@link LogBean}, and writes the beans to disk as
 * Parquet files via the {@code FileSink} bulk format.
 *
 * <p>Expected input format per line: {@code name,log,desc,timeStamp} where
 * {@code timeStamp} is a long. Malformed lines are tolerated: the bean is
 * returned with whatever fields were parsed before the failure.
 */
public class F03SinkFile {

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        configuration.setInteger("rest.port", 8888);
        StreamExecutionEnvironment webUI = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration);
        // Bulk-format sinks only commit files on checkpoint, so checkpointing
        // must be enabled or no output would ever be finalized.
        webUI.enableCheckpointing(3000);

        DataStreamSource<String> data = webUI.socketTextStream("127.0.0.1", 9001);

        // Parse each CSV line into a LogBean. Parsing errors must not kill the
        // job, so they are caught per-record; the partially-filled bean is
        // still emitted (best-effort, matching the original behavior), but the
        // bad line is logged instead of being silently swallowed.
        DataStream<LogBean> ds = data.map((MapFunction<String, LogBean>) line -> {
            LogBean logBean = new LogBean();
            try {
                String[] arr = line.split(",");
                logBean.setName(arr[0]);
                logBean.setLog(arr[1]);
                logBean.setDesc(arr[2]);
                logBean.setTimeStamp(Long.parseLong(arr[3]));
            } catch (Exception e) {
                // Best-effort: keep the job alive on malformed input, but
                // leave a trace so data loss is diagnosable.
                System.err.println("Failed to parse log line: \"" + line + "\" - " + e);
            }
            return logBean;
        });

        // Parquet writer derived from LogBean via Avro reflection.
        Path path = new Path("data/sink2/");
        ParquetWriterFactory<LogBean> wf = AvroParquetWriters.forReflectRecord(LogBean.class);
        FileSink<LogBean> fileSink = FileSink.forBulkFormat(path, wf)
                // Time-based bucketing (default yyyy-MM-dd--HH directories).
                .withBucketAssigner(new DateTimeBucketAssigner<>())
                .withBucketCheckInterval(1000)
                .withOutputFileConfig(OutputFileConfig.builder()
                        .withPartPrefix("dow-")
                        .withPartSuffix(".par")
                        .build())
                // Bulk formats require rolling on checkpoint only.
                .withRollingPolicy(OnCheckpointRollingPolicy.build())
                .build();

        ds.sinkTo(fileSink);

        webUI.execute("测试输出");
    }
}
