package com.ruyuan.event.streaming.sink;
import com.ruyuan.event.streaming.pojo.EventClientLog;
import com.ruyuan.event.streaming.pojo.EventJoinLog;
import com.ruyuan.event.streaming.pojo.EventServerLog;
import com.ruyuan.event.streaming.schema.EventClientLogSchema;
import com.ruyuan.event.streaming.schema.EventServerLogSchema;
import com.ruyuan.event.streaming.utils.Constants;
import com.ruyuan.event.streaming.utils.FlinkKafkaUtils;
import com.twitter.chill.protobuf.ProtobufSerializer;
import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializer;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * Writing to HDFS with the {@code FileSink} connector available in Flink 1.12+
 * (the modern replacement for the legacy {@code BucketingSink}).
 *
 * <p>NOTE(review): this class currently contains no executable code — only the
 * usage sketch below. All of the file-level imports (BucketingSink, KafkaSource,
 * ProtobufSerializer, etc.) are unused until an implementation is added; they
 * presumably anticipate a Kafka-to-HDFS pipeline — confirm before relying on them.
 * */
public class DataSinkToHDFSByFileSink {

    /**
     * Usage sketch for a row-format {@code FileSink} writing UTF-8 strings:
     *
     * <pre>{@code
     * DataStream<String> input = ...;
     *
     * final FileSink<String> sink = FileSink
     *     .forRowFormat(new Path(outputPath), new SimpleStringEncoder<String>("UTF-8"))
     *     .withRollingPolicy(
     *         DefaultRollingPolicy.builder()
     *             .withRolloverInterval(Duration.ofSeconds(10))   // roll part file every 10s
     *             .withInactivityInterval(Duration.ofSeconds(10)) // roll after 10s with no writes
     *             .withMaxPartSize(MemorySize.ofMebiBytes(1))     // roll once a part reaches 1 MiB
     *             .build())
     *     .build();
     * input.sinkTo(sink);
     * }</pre>
     * */
}