package com.lazy.es.es_demo.index;

import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;

import com.lazy.es.common.AbstractEsClient;
import com.lazy.es.common.LatchTask;

import static org.elasticsearch.index.query.QueryBuilders.*;

/**
 * Demo that stress-inserts documents into Elasticsearch using concurrent
 * bulk requests driven by a fixed thread pool.
 *
 * <p>Each worker task sends one bulk request of {@code 10000} documents into
 * index {@code twitter}, type {@code tweet}, with ids drawn from a shared
 * atomic sequence. Call {@link #destroy()} to release the client and pool.
 */
public class IndexBulkDemo extends AbstractEsClient {
    /** Monotonically increasing document-id sequence shared by all workers. */
    private static final AtomicInteger seq = new AtomicInteger(0);

    /** Worker pool issuing bulk requests; shut down in {@link #destroy()}. */
    final ExecutorService executorService;

    /**
     * Creates a demo client for the given cluster.
     *
     * @param clusterName Elasticsearch cluster name passed to the transport client
     * @param poolCount   number of threads issuing bulk requests concurrently
     */
    public IndexBulkDemo(String clusterName, int poolCount) {
        super(clusterName);
        executorService = Executors.newFixedThreadPool(poolCount);
    }

    /**
     * Submits {@code batchNum} bulk-insert tasks to the pool and blocks until
     * all of them complete, then logs the elapsed wall-clock time.
     *
     * @param batchNum number of bulk requests to run
     */
    public void batchInsert(int batchNum) {
        long startTime = System.currentTimeMillis();

        CountDownLatch latch = new CountDownLatch(batchNum);
        for (int i = 0; i < batchNum; i++) {
            executorService.submit(new LatchTask(latch, new LatchTask.LatchWorker() {

                        @Override
                        public void work() {
                            bulkAction(10000);
                        }
                    },
                    i));
        }

        try {
            latch.await();
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing it, so callers
            // and the pool can still observe the cancellation request.
            Thread.currentThread().interrupt();
            logger.warn("batchInsert interrupted while waiting for tasks", e);
        }
        // The pool is intentionally left running so batchInsert() can be
        // invoked repeatedly; destroy() performs the orderly shutdown
        // (shutdown() lets previously submitted tasks finish first).
        long endTime = System.currentTimeMillis();
        logger.info("insert {} takes time  {}  ", batchNum, (endTime - startTime));
    }

    /**
     * Sends a single bulk request containing {@code batchLimit} index
     * operations, one document per operation, with ids from {@link #seq}.
     *
     * @param batchLimit number of documents to put into the bulk request
     */
    public void bulkAction(int batchLimit) {
        BulkRequestBuilder bulkRequest = client.prepareBulk();

        // Fixed off-by-one: the original loop ran from 1 to batchLimit - 1 and
        // therefore indexed one document fewer than requested per bulk.
        for (int i = 0; i < batchLimit; i++) {
            bulkRequest.add(client.prepareIndex("twitter", "tweet", Integer.toString(seq.incrementAndGet()))
                    .setSource(getTestData())
                    );
        }

        BulkResponse bulkResponse = bulkRequest.get();
        if (bulkResponse.hasFailures()) {
            // Surface the per-item failure details rather than a bare marker.
            logger.warn("bulk had failures: {}", bulkResponse.buildFailureMessage());
        } else {
            logger.info("result = {}", bulkResponse.getHeaders());
        }
    }

    /**
     * Builds the small sample document indexed for every tweet.
     *
     * @return a fresh mutable map with {@code user}, {@code postDate} and
     *         {@code message} fields
     */
    public Map<String, Object> getTestData() {
        Map<String, Object> json = new HashMap<String, Object>();
        json.put("user", "kimchy");
        json.put("postDate", new Date());
        json.put("message", "trying out Elasticsearch");
        return json;
    }

    /** Closes the transport client, then shuts down the worker pool. */
    @Override
    public void destroy() {
        super.destroy();
        executorService.shutdown();
    }

    public static void main(String[] args) {
        int poolCount = 4;

        IndexBulkDemo demo = new IndexBulkDemo("my-es", poolCount);
        demo.addTransportAddress("192.168.1.60", 9300);

        long startTime = System.currentTimeMillis();
        try {
            demo.batchInsert(100);
        } finally {
            // Always release the client and thread pool, even if a bulk fails;
            // otherwise the non-daemon pool threads keep the JVM alive.
            demo.destroy();
        }

        long endTime = System.currentTimeMillis();
        logger.info("all take time {} ms", (endTime - startTime));
    }

}
