package com.ruyuan.event.streaming.sink;
import com.ruyuan.event.streaming.pojo.EventClientLog;
import com.ruyuan.event.streaming.pojo.EventJoinLog;
import com.ruyuan.event.streaming.pojo.EventServerLog;
import com.ruyuan.event.streaming.schema.EventClientLogSchema;
import com.ruyuan.event.streaming.schema.EventServerLogSchema;
import com.ruyuan.event.streaming.utils.Constants;
import com.ruyuan.event.streaming.utils.FlinkKafkaUtils;
import com.twitter.chill.protobuf.ProtobufSerializer;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.fs.bucketing.Bucketer;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;

/**
 * Lambda-architecture offline leg: mirror the event-log Kafka topics into Hive-readable
 * HDFS files so the realtime pipeline's output can be validated offline.
 *
 * Requirement: land data partitioned by hour, e.g.
 *      /user/hive/warehouse/json/event_server_log/2022-02-01:13
 *      /user/hive/warehouse/json/event_server_log/2022-02-01:14
 *      /user/hive/warehouse/json/event_server_log/2022-02-01:15
 *
 *      /user/hive/warehouse/json/event_client_log/2022-02-01:13
 *      /user/hive/warehouse/json/event_client_log/2022-02-01:14
 *      /user/hive/warehouse/json/event_client_log/2022-02-01:15
 */
public class DataSinkToHDFS {
    private static final String KAFKA_EVENT_SERVER_LOG = Constants.KAFKA_EVENT_SERVER_LOG;
    private static final String KAFKA_EVENT_CLIENT_LOG = Constants.KAFKA_EVENT_CLIENT_LOG;
    // NOTE(review): BROKERS is never read in this class — confirm whether it can be removed
    // or is kept deliberately for parity with sibling jobs.
    private static final String BROKERS = Constants.BROKERS;
    // Date pattern handed to the event-time bucketers: literal "dt=" + yyyyMMdd day,
    // then literal "hour" + "=" + HH hour-of-day.
    private static final String PARTITION_FORMAT = "'dt='yyyyMMdd/'hour'=HH";
    private static final String HDFS_LOG_DIR = Constants.HDFS_LOG_DIR;

    /**
     * Applies the shared rolling/visibility policy to a bucketing sink.
     *
     * @param sink the sink to configure; element type is irrelevant here, so a wildcard
     *             keeps the method raw-type free
     */
    public static void config(BucketingSink<?> sink) {
        sink.setUseTruncate(false);                         // disable HDFS truncate on recovery
        sink.setBatchSize(1024 * 1024 * 256L);              // roll the file at 256 MB
        sink.setBatchRolloverInterval(30 * 60 * 1000L);     // ... or after 30 minutes
        sink.setInactiveBucketThreshold(3 * 60 * 1000L);    // 3 min without writes: in-progress -> pending
        sink.setInactiveBucketCheckInterval(30 * 1000L);    // probe bucket inactivity every 30 s
        sink.setInProgressSuffix(".in-progress");           // files still being written are not readable
        sink.setPendingSuffix(".pending");
    }

    /**
     * Attaches a configured {@link BucketingSink} to the given stream, writing under
     * {@code HDFS_LOG_DIR + "json/" + topic} and bucketing by the supplied bucketer
     * (event-time partitioning).
     *
     * @param streamSource stream of log records to persist
     * @param topic        Kafka topic name, reused as the HDFS subdirectory name
     * @param bucketer     maps each record to its event-time bucket path
     * @param <T>          record type flowing through the stream
     */
    public static <T> void buildSink(DataStreamSource<T> streamSource, String topic, Bucketer<T> bucketer) {
        BucketingSink<T> sink = new BucketingSink<>(HDFS_LOG_DIR + "json/" + topic);
        // Partition output files by the record's event time.
        sink.setBucketer(bucketer);
        sink.setWriter(new ProtoBufToStringWriter<>());
        config(sink);
        streamSource.addSink(sink);
    }

    public static void main(String[] args) throws Exception {
        String groupId = "DataSinkToHDFS";
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(60000);
        env.setParallelism(2);
        // Tolerate individual checkpoint failures instead of failing the job.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        // NOTE(review): forcing Avro while also registering protobuf Kryo serializers looks
        // contradictory — confirm which serialization path is actually intended.
        env.getConfig().enableForceAvro();
        env.getConfig().registerTypeWithKryoSerializer(EventServerLog.class, ProtobufSerializer.class);
        env.getConfig().registerTypeWithKryoSerializer(EventClientLog.class, ProtobufSerializer.class);
        env.getConfig().registerTypeWithKryoSerializer(EventJoinLog.class, ProtobufSerializer.class);
        DataStreamSource<EventServerLog> serverLogSource =
                env.addSource(FlinkKafkaUtils.getKafkaEventServerSource(KAFKA_EVENT_SERVER_LOG, new EventServerLogSchema(), groupId));
        DataStreamSource<EventClientLog> clientLogSource =
                env.addSource(FlinkKafkaUtils.getKafkaEventClientSource(KAFKA_EVENT_CLIENT_LOG, new EventClientLogSchema(), groupId));
        buildSink(serverLogSource, KAFKA_EVENT_SERVER_LOG, new EventServerLogByEventTimeBucket(PARTITION_FORMAT));
        buildSink(clientLogSource, KAFKA_EVENT_CLIENT_LOG, new EventClientLogByEventTimePartition(PARTITION_FORMAT));
        env.execute("DataSinkToHDFS");
    }
}