package com.maverick;

import com.maverick.util.JsonTimeFieldBucketAssigner;
import com.maverick.util.KafkaUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.connector.file.sink.FileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider;

import java.util.Date;
import java.util.Locale;
import java.util.concurrent.TimeUnit;

public class Kafka2HDFS {

    /**
     * Flink streaming job that reads string records from a Kafka topic and writes
     * them to time-bucketed row-format files on HDFS via {@link FileSink}.
     *
     * <p>Required CLI arguments:
     * <ul>
     *   <li>{@code --checkpointDataUri} — URI for the filesystem state backend</li>
     *   <li>{@code --topic}             — Kafka source topic</li>
     *   <li>{@code --sinkPath}          — HDFS output directory</li>
     * </ul>
     *
     * <p>Optional arguments (with defaults): {@code --parallelism} (4),
     * {@code --checkpointInteval} (600), {@code --checkPointMode} (at-least-once unless
     * {@code exactly_once}), {@code --checkpointTimeout} seconds (60), {@code --hadoopUserName}
     * ({@code hdfs}), {@code --group}, {@code --kafkaServer}, {@code --timeField},
     * {@code --dateSchema} ({@code yyyy/MM/dd/HH}), {@code --rolloverInterval} seconds (900),
     * {@code --inactivityInterval} seconds (300), {@code --maxPartSize} bytes (1 GiB).
     *
     * @param args CLI arguments parsed with {@link ParameterTool}
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {

        ParameterTool params = ParameterTool.fromArgs(args);

        // Fail fast on missing required arguments. The previous `assert` checks are
        // disabled unless the JVM runs with -ea, which would let a null slip through
        // and fail later inside Flink with an unhelpful NPE.
        String checkpointDataUri = requireParam(params, "checkpointDataUri");

        int parallelism = params.getInt("parallelism", 4);
        // NOTE(review): enableCheckpointing() takes MILLISECONDS, so the default of 600
        // means a checkpoint every 0.6 s — almost certainly intended as seconds (compare
        // checkpointTimeout below, which IS converted from seconds). Kept as-is for
        // backward compatibility with existing deployments; confirm the intended unit.
        long checkpointInteval = params.getLong("checkpointInteval", 600);
        CheckpointingMode checkpointingMode =
                params.get("checkPointMode", "default").toLowerCase(Locale.ROOT).equals("exactly_once") ?
                        CheckpointingMode.EXACTLY_ONCE : CheckpointingMode.AT_LEAST_ONCE;
        // Supplied in seconds on the CLI; Flink expects milliseconds.
        long checkpointTimeout = params.getLong("checkpointTimeout", 60L) * 1000L;
        String hadoopUserName = params.get("hadoopUserName", "hdfs");

        String topic = requireParam(params, "topic");
        // Default consumer group is unique per launch so a fresh run starts from the
        // configured offset-reset position rather than a stale committed offset.
        String group = params.get("group", "kafka2hdfs-" + System.currentTimeMillis());
        String kafkaServer = params.get("kafkaServer", "192.168.10.102:9092,192.168.10.103:9092,192.168.10.104:9092");
        // Name of the JSON field used to derive the bucket path; empty means the
        // assigner's fallback behavior applies.
        String timeField = params.get("timeField", "");
        String dateSchema = params.get("dateSchema", "yyyy/MM/dd/HH");
        String sinkPath = requireParam(params, "sinkPath");

        // Rolling policy knobs: intervals in seconds (converted below), size in bytes.
        long rolloverInterval = params.getLong("rolloverInterval", 15 * 60L);
        long inactivityInterval = params.getLong("inactivityInterval", 5 * 60L);
        long maxPartSize = params.getLong("maxPartSize", 1024 * 1024 * 1024L);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);
        env.enableCheckpointing(checkpointInteval, checkpointingMode);
        env.getCheckpointConfig().setCheckpointTimeout(checkpointTimeout);
        // Keep externalized checkpoints on cancellation so the job can be resumed manually.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setRestartStrategy(RestartStrategies.noRestart());
        env.setStateBackend(new FsStateBackend(checkpointDataUri));
        // Impersonate the given HDFS user for all Hadoop filesystem access.
        System.setProperty("HADOOP_USER_NAME", hadoopUserName);
        // NOTE(review): HADOOP_HOME is being set to an HTTP URL containing a literal
        // "{HADOOP_HOME}" placeholder — this is not a valid Hadoop home directory and
        // looks like a leftover template value; verify whether it is needed at all.
        System.setProperty("HADOOP_HOME", "http://192.168.10.102/{HADOOP_HOME}");

        DataStreamSource<String> sourceStream = KafkaUtil.getKafkaSourceStream(env, topic, group, kafkaServer);

        // Buckets records into directories derived from a timestamp field in the JSON payload.
        BucketAssigner<String, String> assigner =
                new JsonTimeFieldBucketAssigner(timeField, dateSchema);

        FileSink<String> fileSink = FileSink
                .forRowFormat(
                        new Path(sinkPath),
                        new SimpleStringEncoder<String>("utf-8")
                )
                .withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                .withRolloverInterval(TimeUnit.SECONDS.toMillis(rolloverInterval))
                                .withInactivityInterval(TimeUnit.SECONDS.toMillis(inactivityInterval))
                                .withMaxPartSize(maxPartSize)
                                .build()
                )
                .withBucketAssigner(assigner)
                .build();

        sourceStream.sinkTo(fileSink).uid("sink2hdfs").name("sink2hdfs");

        env.execute();
    }

    /**
     * Returns the value of a required CLI parameter, failing fast with a clear
     * message when it is absent.
     *
     * @param params parsed CLI arguments
     * @param key    parameter name
     * @return the non-null parameter value
     * @throws IllegalArgumentException if the parameter was not supplied
     */
    private static String requireParam(ParameterTool params, String key) {
        String value = params.get(key);
        if (value == null) {
            throw new IllegalArgumentException("Missing required argument: --" + key);
        }
        return value;
    }
}