package org.bigwinner.elasticsearch.utils;

import cn.hutool.core.map.MapUtil;
import org.apache.http.HttpHost;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * Utility class for building a shared {@link RestHighLevelClient} and a
 * {@link BulkProcessor} configured from the {@code elasticsearch.*} properties
 * resolved via {@code ConfigFileUtil}.
 *
 * @author: IT大狮兄
 * @date: 2021/8/11 上午8:56
 * @version: 1.0.0
 */
public class ElasticSearchUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(ElasticSearchUtil.class);
    // volatile so the double-checked lazy init below is safe under the JMM
    private static volatile RestHighLevelClient restHighLevelClient = null;
    // Comma-separated "host:port" pairs, e.g. "node1:9200,node2:9200"
    private final static String ES_SERVERS = ConfigFileUtil.getValue("elasticsearch.servers");

    /**
     * Returns the process-wide {@link RestHighLevelClient}, creating it lazily
     * on first use (thread-safe, double-checked locking).
     *
     * @return the shared client, or {@code null} if initialization failed
     *         (e.g. a malformed entry in {@code elasticsearch.servers})
     */
    public static RestHighLevelClient getRestHighLevelClient() {
        if (restHighLevelClient == null) {
            synchronized (ElasticSearchUtil.class) {
                // Re-check inside the lock so only one thread builds the client
                if (restHighLevelClient == null) {
                    try {
                        String[] hostsArr = ES_SERVERS.split(",");
                        List<HttpHost> hosts = new ArrayList<>(hostsArr.length);
                        for (String host : hostsArr) {
                            String[] parts = host.split(":");
                            hosts.add(new HttpHost(parts[0].trim(),
                                    Integer.parseInt(parts[1].trim()), "http"));
                        }
                        RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0]));
                        restHighLevelClient = new RestHighLevelClient(builder);
                    } catch (NumberFormatException | ArrayIndexOutOfBoundsException e) {
                        // A host entry missing ":" or with a non-numeric port lands here;
                        // callers receive null in that case.
                        LOGGER.error("es client init failed! servers={}", ES_SERVERS, e);
                    }
                }
            }
        }
        return restHighLevelClient;
    }

    /**
     * Parses an {@code elasticsearch.*} config value as an int. The value is
     * read as a double first so entries like "5.0" still resolve.
     *
     * @param key property key passed to {@code ConfigFileUtil.getValue}
     * @return the truncated integer value
     */
    private static int intConfig(String key) {
        return (int) Double.parseDouble(ConfigFileUtil.getValue(key));
    }

    /**
     * Builds a {@link BulkProcessor} around the given client, with flush
     * thresholds, concurrency and backoff taken from configuration.
     *
     * @param client the client used to execute the async bulk requests
     * @return a ready-to-use bulk processor
     */
    public static BulkProcessor initBulkProcessor(RestHighLevelClient client) {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override
            // Called before each BulkRequest executes; exposes the number of actions about to run.
            public void beforeBulk(long executionId, BulkRequest request) {
                LOGGER.info("Executing bulk [{}] with {} requests", executionId, request.numberOfActions());
            }

            @Override
            // Called after each BulkRequest executes; the response reports per-item failures.
            public void afterBulk(long executionId,
                                  BulkRequest request,
                                  BulkResponse response) {
                if (response.hasFailures()) {
                    LOGGER.warn("Bulk [{}] executed with failures, reason: {}", executionId, response.buildFailureMessage());
                } else {
                    LOGGER.debug("Bulk [{}] completed in {} milliseconds", executionId, response.getTook().getMillis());
                    LOGGER.info("{} data bulk success", request.numberOfActions());
                }
            }

            @Override
            // Called when the whole BulkRequest fails (e.g. connection error).
            public void afterBulk(long executionId, BulkRequest bulkRequest, Throwable failure) {
                // Throwable must be the last argument WITHOUT a placeholder,
                // otherwise SLF4J formats it inline and drops the stack trace.
                LOGGER.error("{} data bulk failed", bulkRequest.numberOfActions(), failure);
            }
        };

        return BulkProcessor.builder(
                (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener),
                listener)
                // Flush after this many accumulated actions (e.g. 10000).
                .setBulkActions(intConfig("elasticsearch.bulkActionsNum"))
                // Flush once the accumulated request reaches this size in MB (e.g. 5 MB).
                .setBulkSize(new ByteSizeValue(intConfig("elasticsearch.bulkByteSize"), ByteSizeUnit.MB))
                // Flush on a fixed interval regardless of volume (seconds).
                .setFlushInterval(TimeValue.timeValueSeconds(intConfig("elasticsearch.flushIntervalTime")))
                // 0 = fully synchronous; N = allow N concurrent flushes while accumulating.
                .setConcurrentRequests(intConfig("elasticsearch.concurrentRequests"))
                // Exponential backoff on rejected executions: initial delay (ms) and max retries.
                // To disable, pass BackoffPolicy.noBackoff().
                .setBackoffPolicy(
                        BackoffPolicy.exponentialBackoff(
                                TimeValue.timeValueMillis(intConfig("elasticsearch.backoffPolicyTime")),
                                intConfig("elasticsearch.backoffPolicyNum")))
                .build();
    }
}
