package com.zlx.base;

import com.alibaba.fastjson.JSON;
import com.zlx.base.bean.EventLog;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.core.fs.Path;
import org.apache.flink.formats.avro.typeutils.GenericRecordAvroTypeInfo;
import org.apache.flink.formats.parquet.ParquetWriterFactory;
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;

/**
 * Demo: writing a JSON-serialized event stream to local files with the
 * {@link org.apache.flink.connector.file.sink.FileSink} (the successor of the
 * deprecated {@code org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink}).
 */
public class _09_StreamFlileSink_Demo0 {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Enable checkpointing (10 s interval, exactly-once): the FileSink only
        // finalizes (renames) in-progress part files on checkpoint completion,
        // so without checkpoints output files would stay pending forever.
        env.enableCheckpointing(10000, CheckpointingMode.EXACTLY_ONCE);
        // Fixed: original URI was "file:////Volumes/..." (four slashes); the
        // canonical local-filesystem form is "file:///<absolute-path>".
        env.getCheckpointConfig().setCheckpointStorage("file:///Volumes/D/tmp/flink/ckpt");

        env.setParallelism(5);

        // Custom source emitting EventLog beans (both declared elsewhere in this project).
        DataStreamSource<EventLog> source = env.addSource(new MySourceFunction());

        /*
         * EventLog fields, for reference when reading the JSON output:
         *     private Long guid;
         *     private String sessionId;
         *     private String eventId;
         *     private Long timeStamp;
         *     private Map<String,String> eventInfo;
         */

        // Row-format sink: one UTF-8 encoded JSON string per line.
        FileSink<String> fileSink = FileSink
                // Output directory and per-record row encoder.
                .forRowFormat(new Path("/Volumes/D/tmp/flink/data_demo0"), new SimpleStringEncoder<String>("utf-8"))
                // Roll the part file every 10 s, or once it reaches 1 MiB.
                .withRollingPolicy(DefaultRollingPolicy.builder().withRolloverInterval(10000).withMaxPartSize(1024 * 1024).build())
                .build();

        // Serialize each EventLog to a JSON string and write it to the sink.
        SingleOutputStreamOperator<String> result = source.map(JSON::toJSONString);
        result.sinkTo(fileSink);

        // Fixed: job name previously said "Demo1" although this class is Demo0.
        env.execute("_09_StreamFlileSink_Demo0");

    }
}
