/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.asp.bigdata.flink.kafka2hdfs.task;

import com.asp.bigdata.flink.kafka2hdfs.gz.GzipBulkStringWriterFactory;
import com.asp.bigdata.flink.kafka2hdfs.pojo.Event;
import com.asp.bigdata.flink.kafka2hdfs.utils.CsvBucketAssigner;
import com.asp.bigdata.flink.kafka2hdfs.utils.CustomUtils;
import com.asp.bigdata.flink.kafka2hdfs.utils.DealUtils;
import com.asp.bigdata.flink.kafka2hdfs.utils.JsonTypeBucketAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.io.FileInputStream;
import java.time.Duration;
import java.util.Properties;

/**
 * 目前数据源kafka集群版本： kafka_2.11-0.10.1.1
 */
public class kafka2hdfsUnion {

    /**
     * Entry point for the Kafka-to-HDFS streaming job.
     *
     * <p>Reads a job configuration properties file (path given as {@code args[0]}),
     * consumes a single Kafka topic, parses records into {@link Event}s according to
     * the configured {@code date.type}, assigns event-time watermarks, and writes
     * gzip-compressed part files to HDFS via a {@link StreamingFileSink} that rolls
     * on every checkpoint (exactly-once with the checkpointing config below).
     *
     * @param args args[0] = path to the job configuration properties file
     * @throws Exception on configuration load failure or job execution failure
     */
    public static void main(String[] args) throws Exception {

        if (args.length < 1) {
            throw new IllegalArgumentException(
                    "Usage: kafka2hdfsUnion <job-config.properties>");
        }

        // Load job configuration; try-with-resources ensures the stream is closed
        // (the original leaked the FileInputStream).
        Properties jobConfig = new Properties();
        try (FileInputStream in = new FileInputStream(args[0])) {
            jobConfig.load(in);
        }

        // Set up the streaming execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.enableCheckpointing(Long.parseLong(jobConfig.getProperty("checkpoint.interval")));
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
                Integer.parseInt(jobConfig.getProperty("fixed-delay.attempts")),
                Integer.parseInt(jobConfig.getProperty("fixed-delay.delay"))));

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Retain externalized checkpoints when the job is cancelled (e.g. via yarn kill),
        // so the job can be restored from the last checkpoint after cancellation.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        checkpointConfig.setTolerableCheckpointFailureNumber(10);

        // Kafka consumer properties; security settings are optional.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", jobConfig.getProperty("bootstrap.servers"));
        properties.setProperty("group.id", jobConfig.getProperty("group.id"));
        if (jobConfig.containsKey("security.protocol")) {
            properties.setProperty("security.protocol", jobConfig.getProperty("security.protocol"));
        }
        if (jobConfig.containsKey("sasl.mechanism")) {
            properties.setProperty("sasl.mechanism", jobConfig.getProperty("sasl.mechanism"));
        }
        // properties.setProperty("sasl.kerberos.service.name", "kafka");

        // Consume a single topic. (FlinkKafkaConsumer also accepts a List<String>
        // of topics if multi-topic consumption is needed later.)
        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>(jobConfig.getProperty("topic"), new SimpleStringSchema(), properties);

        /*
         * Starting offset strategy:
         *  - if "offsetStartFromTimestamp" is configured, start from that timestamp;
         *  - otherwise start from the consumer group's committed offsets (the default,
         *    which is why group.id is mandatory above).
         */
        String offsetStartFromTimestamp = jobConfig.getProperty("offsetStartFromTimestamp");
        if (offsetStartFromTimestamp != null && !offsetStartFromTimestamp.isEmpty()) {
            consumer.setStartFromTimestamp(Long.parseLong(offsetStartFromTimestamp));
        } else {
            consumer.setStartFromGroupOffsets();
        }

        /*
         * Offsets are stored both in Flink state and committed back to Kafka
         * (__consumer_offsets) on checkpoint completion. Consequently, restarting
         * the job with the same consumer group will NOT re-read from the beginning
         * even when state is discarded.
         */
        consumer.setCommitOffsetsOnCheckpoints(true);
        DataStreamSource<String> streamSource = env.addSource(consumer, "Kafka");

        // Parsed output stream and the bucket (partition/table) assigner for the sink.
        SingleOutputStreamOperator<Event> operatorWithWM;
        BucketAssigner<Event, String> bucketAssigner;

        // Choose parsing strategy and bucketing by configured input format.
        // NOTE(review): the config key "date.type" looks like a typo for "data.type",
        // but it is a runtime key read from deployed config files — do not rename
        // without coordinating a config migration.
        switch (jobConfig.getProperty("date.type")) {
            case "json":
                operatorWithWM = new DealUtils().dealJson(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            case "json2csv":
                operatorWithWM = new DealUtils().dealJson2Csv(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            case "jsonMessage":
                operatorWithWM = new DealUtils().dealJsonMessage(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            case "umchysdk":
                operatorWithWM = new CustomUtils().dealUmchysdk(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            case "umcrhsdk":
                operatorWithWM = new CustomUtils().dealUmcrhsdk(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            case "openapilog":
                operatorWithWM = new CustomUtils().dealOpenapilog(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            case "rcsMsg":
                operatorWithWM = new CustomUtils().dealRcsMsg(streamSource, jobConfig);
                bucketAssigner = new JsonTypeBucketAssigner(jobConfig);
                break;
            default:
                // Any unrecognized (or missing) type falls back to CSV handling.
                operatorWithWM = new DealUtils().dealCsv(streamSource, jobConfig);
                bucketAssigner = new CsvBucketAssigner(jobConfig);
        }

        // Event-time watermarks: bounded out-of-orderness of 5 minutes, with sources
        // marked idle after 1 minute without data so they don't hold back watermarks.
        operatorWithWM = operatorWithWM
                .returns(TypeInformation.of(Event.class)) // register the POJO type with the runtime
                .assignTimestampsAndWatermarks(WatermarkStrategy
                        .<Event>forBoundedOutOfOrderness(Duration.ofMinutes(5))
                        .withTimestampAssigner((event, timestamp) -> event.getTime()) // event-time column
                        .withIdleness(Duration.ofMinutes(1))
                );

        // Append ".gz" to part-file names (files are gzip-compressed by the writer).
        OutputFileConfig config = OutputFileConfig.builder().withPartSuffix(".gz").build();

        final StreamingFileSink<Event> hdfsSink = StreamingFileSink
                .forBulkFormat(
                        new Path(jobConfig.getProperty("path")),
                        new GzipBulkStringWriterFactory<Event>())
                .withBucketAssigner(bucketAssigner)                   // bucketing: one directory per logical table/partition
                .withRollingPolicy(OnCheckpointRollingPolicy.build()) // roll part files on every checkpoint
                .withOutputFileConfig(config)
                .build();

        operatorWithWM.addSink(hdfsSink).name("hdfs");

        // Execute the job under the configured application name.
        env.execute(jobConfig.getProperty("app.name"));
    }
}
