package com.chengyanan.main;

import com.chengyanan.conf.ConfigurationManager;
import com.chengyanan.conf.KafkaProperties;
import com.chengyanan.elasticsearch.ElasticSearchUtils;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerForUser;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import java.util.HashMap;
import java.util.Map;

/**
 * Application entry point: consumes messages from a Kafka topic and indexes
 * each one into Elasticsearch.
 *
 * @author Chengyanan
 * @since 2019/8/29
 */
public class AppMain {

    /** Target Elasticsearch index for the consumed messages. */
    private static final String esIndex = ConfigurationManager.getProperty("es_index");
    /** Number of Kafka partitions; also used as the operator parallelism. */
    private static final int numOfPartition = ConfigurationManager.getInteger("numOfPartition");
    /** Kafka topic to consume from. */
    private static final String topic = KafkaProperties.getTopic();

    /**
     * Builds and runs the Flink job: Kafka source -> map that writes each
     * record into Elasticsearch.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        HashMap<KafkaTopicPartition, Long> offsetMap = initOffset();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.disableOperatorChaining();
        // Create the Kafka consumer.
        FlinkKafkaConsumerForUser<String> consumer011 =
                new FlinkKafkaConsumerForUser<>(topic, new SimpleStringSchema(), KafkaProperties.getKafkaProperties());
        // Start from the recovered offsets. For any partition whose offset is null
        // (manually reset), Flink falls back to that partition's last committed
        // offset; if nothing was ever committed, the `auto.offset.reset` config applies.
        consumer011.setStartFromSpecificOffsets(offsetMap);
        DataStreamSource<String> dataStreamSource = env.addSource(consumer011).setParallelism(numOfPartition);
        // NOTE(review): map() is used purely for its side effect (indexing into ES)
        // and always emits null; a dedicated SinkFunction / Elasticsearch sink would
        // be the idiomatic operator here — kept as-is to avoid a new Flink dependency.
        dataStreamSource.disableChaining()
                .map((MapFunction<String, Object>) s -> {
                    ElasticSearchUtils.indexForJson(s, esIndex);
                    return null;
                }).setParallelism(numOfPartition);
        env.execute();
    }

    /**
     * Queries Elasticsearch for the largest offset previously indexed per
     * partition and builds the start-offset map for
     * {@link FlinkKafkaConsumerBase#setStartFromSpecificOffsets(Map)}.
     * <p>
     * On the first start-up no offset has been recorded yet; a {@code null}
     * value tells Flink to fall back to the committed offset /
     * {@code auto.offset.reset} — see
     * {@link FlinkKafkaConsumerBase#open(Configuration)} (around line 552).
     *
     * @return map from partition to start offset; {@code null} values mark
     *         partitions with no previously recorded offset
     */
    private static HashMap<KafkaTopicPartition, Long> initOffset() {
        // Look up the largest offset stored in ES before each start-up.
        HashMap<Integer, Long> map = ElasticSearchUtils.QueryLargestOffset(esIndex, numOfPartition);
        HashMap<KafkaTopicPartition, Long> offsetMap = new HashMap<>();
        for (Map.Entry<Integer, Long> entry : map.entrySet()) {
            Long value = entry.getValue();
            // Guard against null before unboxing; negative values are sentinels
            // for "no offset recorded" and are normalized to null as well.
            if (value == null || value < 0) {
                value = null;
            }
            offsetMap.put(new KafkaTopicPartition(topic, entry.getKey()), value);
        }
        return offsetMap;
    }
}
