package com.aurora.advanced;

import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink;
import org.apache.http.HttpHost;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.UUID;


/**
 * Advanced demo of the Flink Elasticsearch connector: generates a synthetic
 * trade feed (one JSON record per second) and streams it into Elasticsearch
 * through a fully configured {@link ElasticsearchSink}.
 *
 * @author 浅夏的猫
 * @version 1.0.0
 * @date 2024-02-11 22:06:45
 */
public class ElasticsearchSinkStreamJobAdvancedDemo {

    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchSinkStreamJobAdvancedDemo.class);

    /**
     * Day formatter (yyyyMMdd) used to build the daily index suffix.
     * DateTimeFormatter is immutable and thread-safe, so it is cached once
     * instead of being rebuilt for every record.
     */
    private static final DateTimeFormatter DAY_FORMATTER = DateTimeFormatter.ofPattern("yyyyMMdd");

    public static void main(String[] args) {

        try {

            // Load sink configuration from the properties file.
            ElasticsearchEntity elasticsearchEntity = paramsInit();

            // Resolve the Elasticsearch cluster nodes.
            List<HttpHost> httpHosts = esClusterHttpHostHandler(elasticsearchEntity);

            // Function that turns each incoming JSON record into an index request.
            ElasticsearchSinkFunction<JSONObject> esSinkFunction = new CustomElasticsearchSinkFunction(elasticsearchEntity);

            // Builder for the ElasticsearchSink operator.
            ElasticsearchSink.Builder<JSONObject> esSinkBuilder = new ElasticsearchSink.Builder<>(httpHosts, esSinkFunction);

            // Apply bulk-flush / backoff / REST-client settings.
            esBuilderHandler(esSinkBuilder, elasticsearchEntity);

            // Materialize the sink operator.
            ElasticsearchSink<JSONObject> esSink = esSinkBuilder.build();

            // Custom source simulating production trade traffic as JSON records.
            SourceFunction<JSONObject> dataSource = new SourceFunction<JSONObject>() {

                // Cooperative shutdown flag. Flink invokes cancel() from a
                // different thread than run(), so the loop must observe the
                // change — hence volatile. (Previously cancel() was a no-op
                // and the job could never be cancelled cleanly.)
                private volatile boolean running = true;

                // Single RNG instance instead of allocating a new Random per record.
                private final Random random = new Random();

                @Override
                public void run(SourceContext<JSONObject> sourceContext) throws Exception {
                    while (running) {
                        // Trade serial number
                        String tradeId = UUID.randomUUID().toString();
                        // Trade timestamp (epoch millis)
                        long timeStamp = System.currentTimeMillis();
                        // Trade amount
                        long tradeAmount = random.nextInt(100);
                        // Trade name
                        String tradeName = "支付宝转账";

                        JSONObject dataObj = new JSONObject();
                        dataObj.put("transId", tradeId);
                        dataObj.put("timeStamp", timeStamp);
                        dataObj.put("tradeTime", dateUtil(timeStamp));
                        dataObj.put("tradeAmount", tradeAmount);
                        dataObj.put("tradeName", tradeName);

                        // Emit one trade per second to mimic production load.
                        Thread.sleep(1000);
                        logger.info("交易接入,原始报文={}", dataObj.toJSONString());
                        sourceContext.collect(dataObj);
                    }
                }

                @Override
                public void cancel() {
                    // Stop the generation loop on job cancellation.
                    running = false;
                }
            };

            // Create the streaming execution environment.
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            // Wire the custom source into the topology.
            DataStreamSource<JSONObject> dataStreamSource = env.addSource(dataSource);
            // Attach the Elasticsearch sink.
            dataStreamSource.addSink(esSink);
            // Submit and run the job.
            env.execute();
        } catch (Exception e) {
            // Route failures through SLF4J (with the cause preserved) instead
            // of printStackTrace(), so they reach the configured appenders.
            logger.error("Elasticsearch sink job failed", e);
        }

    }

    /**
     * Reads the Elasticsearch connector configuration from a properties file
     * and maps every {@code es.cluster.*} key onto an {@link ElasticsearchEntity}.
     *
     * @return {@code ElasticsearchEntity } populated configuration holder
     * @throws IOException if the properties file cannot be read
     */
    private static ElasticsearchEntity paramsInit() throws IOException {
        // NOTE(review): hard-coded absolute Windows path — not portable.
        // TODO: take the path from the command line (ParameterTool.fromArgs)
        // or load the file from the classpath.
        String propertiesFilePath = "E:\\project\\aurora_dev\\aurora_flink_connector_elasticsearch\\src\\main\\resources\\application.properties";
        ParameterTool paramsMap = ParameterTool.fromPropertiesFile(propertiesFilePath);

        ElasticsearchEntity elasticsearchEntity = new ElasticsearchEntity();
        // Cluster topology.
        elasticsearchEntity.setHosts(paramsMap.get("es.cluster.hosts"));
        elasticsearchEntity.setPort(paramsMap.getInt("es.cluster.port"));
        elasticsearchEntity.setScheme(paramsMap.get("es.cluster.scheme"));
        elasticsearchEntity.setType(paramsMap.get("es.cluster.type"));
        elasticsearchEntity.setIndexPrefix(paramsMap.get("es.cluster.indexPrefix"));
        // Bulk-flush thresholds and backoff behaviour.
        elasticsearchEntity.setBulkFlushMaxActions(paramsMap.getInt("es.cluster.bulkFlushMaxActions"));
        elasticsearchEntity.setBulkFlushMaxSizeMb(paramsMap.getInt("es.cluster.bulkFlushMaxSizeMb"));
        elasticsearchEntity.setBulkFlushInterval(paramsMap.getInt("es.cluster.bulkFlushInterval"));
        elasticsearchEntity.setBulkFlushBackoff(paramsMap.getBoolean("es.cluster.bulkFlushBackoff"));
        elasticsearchEntity.setBulkFlushBackoffDelay(paramsMap.getInt("es.cluster.bulkFlushBackoffDelay"));
        elasticsearchEntity.setBulkFlushBackoffRetries(paramsMap.getInt("es.cluster.bulkFlushBackoffRetries"));
        // REST client / HTTP request settings.
        elasticsearchEntity.setConnectTimeout(paramsMap.getInt("es.cluster.connectTimeout"));
        elasticsearchEntity.setSocketTimeout(paramsMap.getInt("es.cluster.socketTimeout"));
        elasticsearchEntity.setConnectionRequestTimeout(paramsMap.getInt("es.cluster.connectionRequestTimeout"));
        elasticsearchEntity.setRedirectsEnabled(paramsMap.getBoolean("es.cluster.redirectsEnabled"));
        elasticsearchEntity.setMaxRedirects(paramsMap.getInt("es.cluster.maxRedirects"));
        elasticsearchEntity.setAuthenticationEnabled(paramsMap.getBoolean("es.cluster.authenticationEnabled"));
        elasticsearchEntity.setCircularRedirectsAllowed(paramsMap.getBoolean("es.cluster.circularRedirectsAllowed"));
        elasticsearchEntity.setExpectContinueEnabled(paramsMap.getBoolean("es.cluster.expectContinueEnabled"));
        elasticsearchEntity.setContentCompressionEnabled(paramsMap.getBoolean("es.cluster.contentCompressionEnabled"));
        elasticsearchEntity.setNormalizeUri(paramsMap.getBoolean("es.cluster.normalizeUri"));

        return elasticsearchEntity;
    }

    /**
     * Formats an epoch-millis timestamp as a {@code yyyyMMdd} day string
     * in the system default time zone (used as the daily index suffix).
     *
     * @param timestamp epoch milliseconds
     * @return {@code String } the day in yyyyMMdd form
     */
    private static String dateUtil(long timestamp) {
        // ofEpochMilli keeps the millisecond precision; the yyyyMMdd output is
        // identical to the previous divide-by-1000 / ofEpochSecond round trip.
        LocalDateTime dateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(timestamp), ZoneId.systemDefault());
        return DAY_FORMATTER.format(dateTime);
    }

    /**
     * Applies all Elasticsearch sink tuning parameters to the builder.
     *
     * @param esSinkBuilder       the sink builder to configure
     * @param elasticsearchEntity configuration holder read from the properties file
     */
    private static void esBuilderHandler(ElasticsearchSink.Builder<JSONObject> esSinkBuilder, ElasticsearchEntity elasticsearchEntity) {
        // Max number of buffered actions before a bulk request is flushed.
        // Setting this to 1 effectively disables batching.
        esSinkBuilder.setBulkFlushMaxActions(elasticsearchEntity.getBulkFlushMaxActions());

        // Max buffered data size (MB) before a bulk request is flushed.
        esSinkBuilder.setBulkFlushMaxSizeMb(elasticsearchEntity.getBulkFlushMaxSizeMb());

        // Flush interval: a bulk request is sent after this period even if
        // neither the action-count nor the size threshold has been reached.
        esSinkBuilder.setBulkFlushInterval(elasticsearchEntity.getBulkFlushInterval());

        // Enable the retry-backoff strategy on bulk failures; the delay and
        // retry settings below only take effect when this is true.
        esSinkBuilder.setBulkFlushBackoff(elasticsearchEntity.getBulkFlushBackoff());

        // Delay before retrying a failed bulk request.
        esSinkBuilder.setBulkFlushBackoffDelay(elasticsearchEntity.getBulkFlushBackoffDelay());

        // Maximum number of retries for a failed bulk request.
        esSinkBuilder.setBulkFlushBackoffRetries(elasticsearchEntity.getBulkFlushBackoffRetries());

        // Custom strategy for requests that still fail after all retries.
        esSinkBuilder.setFailureHandler(new CustomActionRequestFailureHandler());

        // Custom factory controlling how the Elasticsearch REST client is built
        // (timeouts, auth, redirects, ...).
        esSinkBuilder.setRestClientFactory(new CustomRestClientFactory(elasticsearchEntity));
    }

    /**
     * Builds the HTTP host list for the Elasticsearch cluster from the
     * comma-separated host string, sharing one port and scheme across nodes.
     *
     * @param elasticsearchEntity configuration holder with hosts/port/scheme
     * @return {@code List<HttpHost> } one entry per cluster node
     */
    private static List<HttpHost> esClusterHttpHostHandler(ElasticsearchEntity elasticsearchEntity) {
        List<HttpHost> httpHosts = new ArrayList<>();
        for (String node : elasticsearchEntity.getHosts().split(",")) {
            httpHosts.add(new HttpHost(node, elasticsearchEntity.getPort(), elasticsearchEntity.getScheme()));
        }
        return httpHosts;
    }
}
