package com.john.elasticsearch.config;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.PropertySource;

import java.util.function.BiConsumer;

@Configuration
@PropertySource(value = {"classpath:application-elasticsearch.properties"})
public class ESBulkProcessorConfig {
    // Logger name "sys-elasticsearch" presumably maps to a dedicated appender in the
    // logging configuration — kept as-is; renaming it would reroute these log lines.
    private static final Logger logger = LoggerFactory.getLogger("sys-elasticsearch");

    /** Flush once this many requests have accumulated (e.g. 10000). */
    @Value(value = "${bulk.bulkActions}")
    private Integer bulkActions;

    /** Flush every this many seconds (e.g. 30). */
    @Value(value = "${bulk.flushInterval}")
    private Integer flushInterval;

    /**
     * Flush once the accumulated bulk payload reaches this many <b>megabytes</b>.
     * NOTE(review): the original comment said 1GB, but the unit applied below is
     * {@code ByteSizeUnit.MB} — the property value is interpreted as MB.
     */
    @Value(value = "${bulk.bulkSize}")
    private Integer bulkSize;

    /**
     * 0 = synchronous (a single bulk request at a time);
     * 1 = one concurrent bulk request may execute while a new bulk is accumulating.
     */
    @Value(value = "${bulk.concurrentRequests}")
    private Integer concurrentRequests;

    /** Retry policy: initial backoff delay in milliseconds (e.g. 100). */
    @Value(value = "${bulk.initialDelay}")
    private Integer initialDelay;

    /** Retry policy: maximum number of retries; the delay grows exponentially between attempts. */
    @Value(value = "${bulk.maxNumberOfRetries}")
    private Integer maxNumberOfRetries;

    @Autowired
    private RestHighLevelClient restHighLevelClient;

    /**
     * Builds the shared {@link BulkProcessor} bean that batches indexing requests and
     * flushes them asynchronously through the injected {@link RestHighLevelClient}.
     *
     * @return a configured, ready-to-use {@link BulkProcessor}
     */
    @Bean
    public BulkProcessor bulkProcessor(){

        BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer =
                (request, bulkListener) -> restHighLevelClient.bulkAsync(request, RequestOptions.DEFAULT, bulkListener);

        BulkProcessor bulkProcessor =  BulkProcessor.builder(bulkConsumer, new BulkProcessor.Listener() {
            @Override
            public void beforeBulk(long executionId, BulkRequest request) {
                // Hook invoked before each bulk flush; intentionally empty for now.
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
                // FIX: a BulkResponse delivered without a Throwable can still contain
                // per-item failures — check hasFailures() instead of unconditionally
                // logging success, otherwise partial failures are silently swallowed.
                if (response.hasFailures()) {
                    logger.error("同步数据至ES失败，共计{}条数据: {}",
                            request.numberOfActions(), response.buildFailureMessage());
                } else {
                    // FIX: parameterized SLF4J logging instead of string concatenation.
                    logger.info("同步数据至ES成功共计{}条数据", request.numberOfActions());
                }
            }

            @Override
            public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
                // FIX: pass the Throwable as the last logger argument so the full stack
                // trace is captured (previously only getMessage() was logged and the
                // exception was printed to stderr, bypassing the logging framework).
                logger.error("同步数据至ES失败，共计{}条数据", request.numberOfActions(), failure);
            }
        })
        .setBulkActions(bulkActions)
        // Unit is MB — see the note on the bulkSize field.
        .setBulkSize(new ByteSizeValue(bulkSize, ByteSizeUnit.MB))
        .setFlushInterval(TimeValue.timeValueSeconds(flushInterval))
        .setConcurrentRequests(concurrentRequests)
        // Retry policy for bulks rejected with EsRejectedExecutionException (cluster
        // temporarily short on resources): exponential backoff starting at initialDelay
        // ms, up to maxNumberOfRetries attempts. Use BackoffPolicy.noBackoff() to disable.
        .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(initialDelay), maxNumberOfRetries))
        .build();
        return bulkProcessor;
    }

}