package demo.db.elasticSearch.highClient;

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * <h1>Java High Level REST Client</h1> Introduced in 6.0.0, it exposes Elasticsearch
 * requests and responses through an object-oriented Java API. Every API supports both
 * synchronous and asynchronous invocation: synchronous methods return a result object
 * directly, while asynchronous methods carry an {@code async} suffix and report their
 * result through a listener parameter.
 *
 * Compatibility note: requires Java 1.8 and the Elasticsearch core project; use a
 * client version that matches the server-side Elasticsearch version.
 *
 * @author hanjy
 *
 */
public class BatchDemo {

    public static final String host = "172.17.16.165";
    public static final int port = 9200;

    // SLF4J loggers are thread-safe; a single static final instance is the convention.
    private static final Logger logger = LoggerFactory.getLogger(BatchDemo.class);

    /**
     * Demonstrates bulk-indexing documents: mixing index/update/delete actions in a
     * single {@link BulkRequest}, inspecting the per-item results, and driving the
     * same kind of operations through a {@link BulkProcessor} configured with flush
     * and back-off settings. Conceptually similar to batch-inserting rows into a
     * database table, where each row corresponds to one document.
     */
    @Test
    public void bulkDemo() {
        try (RestHighLevelClient client = RestHighLevelClientDemo.getClient()) {

            // 1. Build one bulk request mixing index, delete and update actions.
            BulkRequest request = new BulkRequest();
            request.add(new IndexRequest("mess", "_doc", "5").source(XContentType.JSON, "field", "foo"));
            request.add(new IndexRequest("mess", "_doc", "6").source(XContentType.JSON, "field", "bar"));
            request.add(new IndexRequest("mess", "_doc", "7").source(XContentType.JSON, "field", "baz"));

            request.add(new DeleteRequest("mess", "_doc", "3"));
            request.add(new UpdateRequest("mess", "_doc", "2").doc(XContentType.JSON, "other", "test"));
            request.add(new IndexRequest("mess", "_doc", "4").source(XContentType.JSON, "field", "baz"));

            // 2. Optional settings, left commented for reference:
            // request.timeout(TimeValue.timeValueMinutes(2));
            // request.timeout("2m");
            // request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
            // request.setRefreshPolicy("wait_for");
            // Number of shard copies that must be active before proceeding with
            // the index/update/delete operations:
            // request.waitForActiveShards(ActiveShardCount.ALL);
            // request.waitForActiveShards(2);

            // 3. Execute the request synchronously. bulk(...) never returns null;
            // transport-level problems surface as exceptions instead.
            BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);

            // 4. Walk the per-item responses. A failed item does NOT fail the whole
            // bulk call, so every item must be checked individually. (For a quick
            // aggregate check without iterating, use bulkResponse.hasFailures().)
            for (BulkItemResponse bulkItemResponse : bulkResponse) {
                if (bulkItemResponse.isFailed()) {
                    BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
                    logger.warn("Bulk item [{}] on index [{}] failed: {}", bulkItemResponse.getItemId(),
                            failure.getIndex(), failure.getMessage());
                    continue;
                }

                DocWriteResponse itemResponse = bulkItemResponse.getResponse();
                if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.INDEX
                        || bulkItemResponse.getOpType() == DocWriteRequest.OpType.CREATE) {
                    IndexResponse indexResponse = (IndexResponse) itemResponse;
                    // TODO handle a successful index/create

                } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.UPDATE) {
                    UpdateResponse updateResponse = (UpdateResponse) itemResponse;
                    // TODO handle a successful update

                } else if (bulkItemResponse.getOpType() == DocWriteRequest.OpType.DELETE) {
                    DeleteResponse deleteResponse = (DeleteResponse) itemResponse;
                    // TODO handle a successful delete
                }
            }

            // Asynchronous alternative: bulkAsync(...) returns immediately and
            // reports the outcome through this listener.
            ActionListener<BulkResponse> listener = new ActionListener<BulkResponse>() {
                @Override
                public void onResponse(BulkResponse response) {
                    // TODO handle the asynchronous bulk response
                }

                @Override
                public void onFailure(Exception e) {
                    logger.error("Asynchronous bulk request failed", e);
                }
            };
            // client.bulkAsync(request, RequestOptions.DEFAULT, listener);

            // BulkProcessor: accumulates add()-ed requests and flushes them
            // automatically according to the thresholds configured below.
            BulkProcessor.Listener bulkProcessorListener = new BulkProcessor.Listener() {
                @Override
                public void beforeBulk(long executionId, BulkRequest bulkRequest) {
                    // Called before each flush; exposes how many operations are
                    // about to be executed within the BulkRequest.
                    logger.debug("Executing bulk [{}] with {} requests", executionId, bulkRequest.numberOfActions());
                }

                @Override
                public void afterBulk(long executionId, BulkRequest bulkRequest, BulkResponse response) {
                    // Called after each flush; the response may still contain
                    // per-item failures.
                    if (response.hasFailures()) {
                        logger.warn("Bulk [{}] executed with failures", executionId);
                    } else {
                        logger.debug("Bulk [{}] completed in {} milliseconds", executionId,
                                response.getTook().getMillis());
                    }
                }

                @Override
                public void afterBulk(long executionId, BulkRequest bulkRequest, Throwable failure) {
                    // Called when the whole flush failed (e.g. connection refused).
                    logger.error("Failed to execute bulk", failure);
                }
            };

            BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer = (req, bulkListener) -> client
                    .bulkAsync(req, RequestOptions.DEFAULT, bulkListener);

            BulkProcessor.Builder builder = BulkProcessor.builder(bulkConsumer, bulkProcessorListener);
            // Flush once 500 actions have been added (default 1000, -1 disables).
            builder.setBulkActions(500);
            // Flush once pending actions reach 1 MB (default 5 MB, -1 disables).
            builder.setBulkSize(new ByteSizeValue(1L, ByteSizeUnit.MB));
            // Number of concurrent flush requests allowed (default 1; 0 means a
            // single request executed synchronously).
            builder.setConcurrentRequests(0);
            // Flush any pending request after 10 s regardless of size
            // (defaults to not set).
            builder.setFlushInterval(TimeValue.timeValueSeconds(10L));
            // Constant back-off: wait 1 s between retries, up to 3 retries. See
            // also BackoffPolicy.noBackoff() and BackoffPolicy.exponentialBackoff().
            builder.setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1L), 3));

            // Build the processor FROM THE CONFIGURED BUILDER. (Building a second,
            // default processor here would silently ignore all the settings above.)
            BulkProcessor bulkProcessor = builder.build();

            // Once the BulkProcessor is created, requests can simply be added to
            // it; flushing happens automatically.
            IndexRequest one = new IndexRequest("posts", "doc", "1").source(XContentType.JSON, "title",
                    "In which order are my Elasticsearch queries executed?");
            IndexRequest two = new IndexRequest("posts", "doc", "2").source(XContentType.JSON, "title",
                    "Current status and upcoming changes in Elasticsearch");
            IndexRequest three = new IndexRequest("posts", "doc", "3").source(XContentType.JSON, "title",
                    "The Future of Federated Search in Elasticsearch");

            bulkProcessor.add(one);
            bulkProcessor.add(two);
            bulkProcessor.add(three);

            try {
                // awaitClose() flushes the pending requests, forbids new ones, and
                // waits until in-flight requests finish or the timeout elapses.
                // close() is the non-waiting alternative; use one OR the other.
                boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
                if (!terminated) {
                    logger.warn("Bulk processor did not terminate within 30 seconds");
                }
            } catch (InterruptedException e1) {
                // Restore the interrupt flag so callers can observe the interruption.
                Thread.currentThread().interrupt();
                logger.error("Interrupted while waiting for the bulk processor to close", e1);
            }
        } catch (IOException e) {
            logger.error("Bulk demo failed", e);
        }
    }

}
