package com.example.demo.kafka;

import com.example.demo.JsonMapper;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkBase;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.http.HttpHost;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;

import java.util.*;

/**
 * Flink streaming job: consumes JSON messages from a Kafka topic, extracts the
 * {@code TransCode} and {@code TraceNo} fields, and bulk-indexes them into an
 * Elasticsearch 7 index.
 *
 * <p>NOTE(review): the class name says "WriteIntoKafka" but the job actually
 * <em>reads</em> from Kafka and writes to Elasticsearch — consider renaming
 * (kept as-is here to avoid breaking external references).
 */
public class WriteIntoKafka {
    public static void main(String[] args) throws Exception {
        // Create the Flink streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka *consumer* configuration (group.id is a consumer property).
        Properties consumerProperties = new Properties();
        consumerProperties.setProperty("bootstrap.servers", "182.119.185.228:9092,182.119.185.229:9092,182.119.185.230:9092");
        consumerProperties.setProperty("group.id", "test_table_group");
        FlinkKafkaConsumer<String> kafkaSource = new FlinkKafkaConsumer<>("MOBS-MSAT-KAFKA-CSTLOG-TOPIC", new SimpleStringSchema(), consumerProperties);

        // Parse each JSON record and keep only the two fields we index.
        DataStream<Tuple2<Object, Object>> messageStream = env.addSource(kafkaSource)
                .map(new MapFunction<String, Tuple2<Object, Object>>() {
                    @Override
                    public Tuple2<Object, Object> map(String s) throws Exception {
                        Map<String, Object> map = JsonMapper.getMap(s);
                        Tuple2<Object, Object> tp = new Tuple2<>();
                        tp.f0 = map.get("TransCode");
                        tp.f1 = map.get("TraceNo");
                        return tp;
                    }
                });

        // Elasticsearch cluster endpoint(s).
        List<HttpHost> httpHosts = new ArrayList<>(1);
        httpHosts.add(new HttpHost("182.119.88.168", 9701));

        ElasticsearchSink.Builder<Tuple2<Object, Object>> esSinkBuilder = new ElasticsearchSink.Builder<>(
                httpHosts,
                (ElasticsearchSinkFunction<Tuple2<Object, Object>>) (tp, runtimeContext, requestIndexer) -> {
                    Map<String, Object> jsonMap = new HashMap<>(2);
                    jsonMap.put("uid", tp.f0);
                    jsonMap.put("name", tp.f1);
                    IndexRequest ir = Requests.indexRequest().index("es-index").source(jsonMap);
                    requestIndexer.add(ir);
                }
        );
        // Maximum number of actions to buffer before flushing.
        // (flush size) * (number of parallel sinks) should not exceed the ES
        // bulk queue size (default 200).
        esSinkBuilder.setBulkFlushMaxActions(200);
        // Maximum size of data (in megabytes) to buffer before flushing.
        esSinkBuilder.setBulkFlushMaxSizeMb(20);
        // Bulk flush interval in ms. When set, it is enforced strictly and
        // overrides the two batching settings above — intentionally disabled.
        // esSinkBuilder.setBulkFlushInterval(6000);
        // Enable retry on bulk-request failure.
        esSinkBuilder.setBulkFlushBackoff(true);
        // Backoff strategy: EXPONENTIAL (delay grows exponentially between
        // retries) or CONSTANT (fixed delay between retries).
        esSinkBuilder.setBulkFlushBackoffType(ElasticsearchSinkBase.FlushBackoffType.EXPONENTIAL);
        // Initial delay between retries, in milliseconds.
        esSinkBuilder.setBulkFlushBackoffDelay(6000);
        // Maximum number of retries on failure.
        esSinkBuilder.setBulkFlushBackoffRetries(5);

        ElasticsearchSink<Tuple2<Object, Object>> esSink = esSinkBuilder.build();
        messageStream.addSink(esSink).setParallelism(1);

        // Also echo the parsed tuples to stdout for observability.
        messageStream.print();

        env.execute("kafka-to-elasticsearch sink");
    }
}