package esdemo;

import static org.elasticsearch.common.xcontent.XContentFactory.*;

import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * CRUD examples for the Elasticsearch Java {@code TransportClient} API:
 * index, get, delete, update, upsert, multi-get, bulk and {@code BulkProcessor}.
 * <p>
 * NOTE(review): every test talks to a live Elasticsearch node expected on
 * localhost:9300 — these are integration examples, not isolated unit tests.
 */
public class ApisCURD {
	/** Connected in {@link #init()}, released in {@link #tearDown()}. */
	private TransportClient client;

	/** Connects a fresh transport client to localhost:9300 before each test. */
	@Before
	public void init() throws UnknownHostException {
		client = TransportClient.builder().build()
				.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
	}

	/**
	 * Closes the client after each test. Previously the client was never
	 * closed, leaking transport connections and thread pools across tests.
	 */
	@After
	public void tearDown() {
		if (client != null) {
			client.close();
		}
	}

	/**
	 * Demonstrates four ways to build a document source: a raw JSON string,
	 * a {@link Map}, a third-party mapper (shown commented out), and the
	 * Elasticsearch {@link XContentBuilder} helper.
	 *
	 * @throws IOException if the XContentBuilder fails
	 */
	@SuppressWarnings("unused")
	@Test
	public void index01() throws IOException {
		// 1. Raw JSON string.
		String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\","
				+ "\"message\":\"trying out Elasticsearch\"" + "}";

		// 2. A Map — serialized to JSON by the client.
		Map<String, Object> json2 = new HashMap<String, Object>();
		json2.put("user", "kimchy");
		json2.put("postDate", new Date());
		json2.put("message", "trying out Elasticsearch");

		// 3. A third-party JSON library such as Jackson:
		// ObjectMapper mapper = new ObjectMapper(); // create once, reuse
		// byte[] json = mapper.writeValueAsBytes(yourbeaninstance);

		// 4. The Elasticsearch XContentBuilder helper.
		XContentBuilder builder = XContentFactory.jsonBuilder().startObject().field("user", "kimchy")
				.field("postDate", new Date()).field("message", "trying out Elasticsearch").endObject();
		String json4 = builder.string();
	}

	/**
	 * Indexes a document into twitter/tweet with id 1.
	 *
	 * @throws IOException if building the document source fails
	 */
	@Test
	public void insert() throws IOException {
		IndexResponse response = client.prepareIndex("twitter", "tweet", "1")
				.setSource(XContentFactory.jsonBuilder().startObject().field("user", "kimchy")
						.field("postDate", new Date()).field("message", "trying out Elasticsearch").endObject())
				.get();
		System.out.println(response);
	}

	//////////////// GET API ////////////////

	/**
	 * Fetches a document by id from twitter/tweet.
	 *
	 * @throws UnknownHostException never thrown here; kept for signature compatibility
	 */
	@Test
	public void getById() throws UnknownHostException {
		GetResponse response = client.prepareGet("twitter", "tweet", "1").get();
		System.out.println(response.getSourceAsMap());
	}

	/**
	 * Same as {@link #getById()}, but with {@code setOperationThreaded(false)}
	 * so the operation runs on the calling thread (the default is true,
	 * i.e. execute on a different thread).
	 *
	 * @throws UnknownHostException never thrown here; kept for signature compatibility
	 */
	@Test
	public void getByIdOperateOneThread() throws UnknownHostException {
		GetResponse response = client.prepareGet("twitter", "tweet", "1").setOperationThreaded(false).get();
		System.out.println(response.getSourceAsMap());
	}

	////////////// DELETE //////////////////

	/**
	 * Deletes document twitter/tweet/1.
	 *
	 * @throws UnknownHostException never thrown here; kept for signature compatibility
	 */
	@Test
	public void delById() throws UnknownHostException {
		DeleteResponse response = client.prepareDelete("twitter", "tweet", "1").get();
		System.out.println(response);
	}

	/**
	 * Deletes twitter/tweet/1 on the calling thread. The
	 * {@code setOperationThreaded(false)} call was commented out before,
	 * which made the method identical to {@link #delById()} despite its name;
	 * restored so the example matches its intent.
	 *
	 * @throws UnknownHostException never thrown here; kept for signature compatibility
	 */
	@Test
	public void delByIdRunOneThread() throws UnknownHostException {
		DeleteResponse response = client.prepareDelete("twitter", "tweet", "1")
				.setOperationThreaded(false)
				.get();
		System.out.println(response);
	}

	//////////// UPDATE //////////////

	/**
	 * Partially updates twitter/tweet/1 via an explicit {@link UpdateRequest}.
	 *
	 * @throws IOException          if building the partial document fails
	 * @throws InterruptedException if the future wait is interrupted
	 * @throws ExecutionException   if the update fails on the cluster
	 */
	@Test
	public void updateWithUpdateRequest() throws IOException, InterruptedException, ExecutionException {
		UpdateRequest updateRequest = new UpdateRequest();
		updateRequest.index("twitter");
		updateRequest.type("tweet");
		updateRequest.id("1");
		updateRequest.doc(XContentFactory.jsonBuilder().startObject().field("user", "male").endObject());
		UpdateResponse response = client.update(updateRequest).get();
		System.out.println(response);
	}

	/** Updates twitter/tweet/1 with an inline script that rewrites the user field. */
	@Test
	public void updateWithPrepareUpdateByScript() {
		client.prepareUpdate("twitter", "tweet", "1")
				.setScript(new Script("ctx._source.user = \"male\"", ScriptType.INLINE, null, null)).get();
	}

	/**
	 * Updates twitter/tweet/1 by merging a partial document.
	 *
	 * @throws IOException if building the partial document fails
	 */
	@Test
	public void updateWithPrepareUpdateByDoc() throws IOException {
		client.prepareUpdate("twitter", "tweet", "1")
				.setDoc(XContentFactory.jsonBuilder().startObject().field("user2", "male2").endObject()).get();
	}

	/**
	 * Upsert: if the document does not exist, the content of the upsert
	 * (index request) element is used to index the fresh doc; otherwise the
	 * partial doc is merged into the existing one.
	 *
	 * @throws IOException          if building either document fails
	 * @throws InterruptedException if the future wait is interrupted
	 * @throws ExecutionException   if the update fails on the cluster
	 */
	@Test
	public void upsert() throws IOException, InterruptedException, ExecutionException {
		IndexRequest indexRequest = new IndexRequest("index", "type", "1")
				.source(jsonBuilder().startObject().field("name2", "Joe Smith").field("gender2", "male").endObject());
		UpdateRequest updateRequest = new UpdateRequest("index", "type", "1")
				.doc(jsonBuilder().startObject().field("gender", "male2").endObject()).upsert(indexRequest);
		client.update(updateRequest).get();
	}

	////////////// Multi Get //////////////////

	/**
	 * Fetches several documents in one round trip and prints the sources of
	 * those that exist. Failed items are skipped: {@code getResponse()}
	 * returns null for them, which previously caused an NPE.
	 */
	@Test
	public void multiGet() {
		MultiGetResponse multiGetItemResponses = client.prepareMultiGet().add("twitter", "tweet", "1")
				.add("twitter", "tweet", "2", "3", "4").add("index", "type", "1").get();

		for (MultiGetItemResponse itemResponse : multiGetItemResponses.getResponses()) {
			// getResponse() is null when the item failed — guard before use.
			if (itemResponse.isFailed()) {
				continue;
			}
			GetResponse response = itemResponse.getResponse();
			if (response.isExists()) {
				String json = response.getSourceAsString();
				System.out.println(json);
			}
		}
	}

	////////////////// bulk API //////////////////////

	/**
	 * Sends two index operations in a single bulk request and reports whether
	 * any item failed.
	 *
	 * @throws IOException if building a document source fails
	 */
	@Test
	public void bulkApi() throws IOException {
		BulkRequestBuilder bulkRequest = client.prepareBulk();

		// Either use client#prepare* builders, or Requests# to directly build
		// index/delete requests.
		bulkRequest.add(client.prepareIndex("twitter", "tweet", "1")
				.setSource(jsonBuilder().startObject().field("user", "kimchy").field("postDate", new Date())
						.field("message", "trying out Elasticsearch").endObject()));

		bulkRequest.add(client.prepareIndex("twitter", "tweet", "2").setSource(jsonBuilder().startObject()
				.field("user", "kimchy").field("postDate", new Date()).field("message", "another post").endObject()));

		BulkResponse bulkResponse = bulkRequest.get();
		if (bulkResponse.hasFailures()) {
			// Process failures by iterating through each bulk response item.
			System.out.println("bulkResponse.hasFailures");
		}
	}

	/**
	 * The BulkProcessor class offers a simple interface to flush bulk
	 * operations automatically based on the number or size of requests, or
	 * after a given period.
	 * <p>
	 * <B>By default, BulkProcessor:</B><br>
	 * sets bulkActions to 1000<br>
	 * sets bulkSize to 5mb<br>
	 * does not set flushInterval<br>
	 * sets concurrentRequests to 1<br>
	 * sets backoffPolicy to an exponential backoff with 8 retries and a start
	 * delay of 50ms. The total wait time is roughly 5.1 seconds.
	 * </p>
	 *
	 * @throws IOException          if building a document source fails
	 * @throws InterruptedException if awaitClose is interrupted
	 */
	@Test
	public void bulkProcessor() throws IOException, InterruptedException {
		BulkProcessor bulkProcessor = BulkProcessor.builder(
				// Add your elasticsearch client.
				client,
				// Listener invoked around each flushed bulk request.
				new BulkProcessor.Listener() {
					// Called just before bulk is executed. You can for example
					// see the numberOfActions with request.numberOfActions().
					@Override
					public void beforeBulk(long executionId, BulkRequest request) {
						System.out.println("beforeBulk --> executionId : " + executionId);
						System.out.println("beforeBulk --> request : " + request);
					}

					// Called after bulk execution. You can for example check if
					// there were some failing requests with response.hasFailures().
					@Override
					public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
						System.out.println("afterBulk --> executionId : " + executionId);
						System.out.println("afterBulk --> request : " + request);
						System.out.println("afterBulk --> response : " + response);
					}

					// Called when the bulk failed and raised a Throwable.
					@Override
					public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
						System.out.println("afterBulk --> executionId : " + executionId);
						System.out.println("afterBulk --> request : " + request);
						System.out.println("afterBulk --> failure : " + failure);
					}

				})
				// We want to execute the bulk every 10 000 requests.
				.setBulkActions(10000)
				// We want to flush the bulk every 1gb.
				.setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
				// We want to flush the bulk every 5 seconds whatever the number
				// of requests.
				.setFlushInterval(TimeValue.timeValueSeconds(5))
				// Set the number of concurrent requests. A value of 0 means
				// that only a single request will be allowed to be executed. A
				// value of 1 means 1 concurrent request is allowed to be
				// executed while accumulating new bulk requests.
				.setConcurrentRequests(1)
				// Set a custom backoff policy which will initially wait for
				// 100ms, increase exponentially and retries up to three times.
				// A retry is attempted whenever one or more bulk item requests
				// have failed with an EsRejectedExecutionException which
				// indicates that there were too little compute resources
				// available for processing the request. To disable backoff,
				// pass BackoffPolicy.noBackoff().
				.setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3)).build();

		bulkProcessor.add(
				new IndexRequest("twitter", "tweet", "1").source(jsonBuilder().startObject().field("user", "kimchy2")
						.field("postDate", new Date()).field("message", "trying out Elasticsearch2").endObject()));
		bulkProcessor.add(new DeleteRequest("twitter", "tweet", "2"));

		// Queue a handful of extra index requests so a flush has work to do.
		for (int i = 0; i < 10; i++) {
			String id = String.valueOf(i + 3);
			bulkProcessor.add(new IndexRequest("twitter", "tweet", id)
					.source(jsonBuilder().startObject().field("user", "kimchy2 " + id)
							.field("postDate", new Date()).field("message", "trying out Elasticsearch2 " + id)
							.endObject()));
		}

		// When all documents are loaded to the BulkProcessor it can be closed
		// by using awaitClose or close methods:
		bulkProcessor.awaitClose(10, TimeUnit.MINUTES);
		//
		// bulkProcessor.close();
		//
		// Both methods flush any remaining documents and disable all other
		// scheduled flushes if they were scheduled by setting flushInterval. If
		// concurrent requests were enabled the awaitClose method waits for up
		// to the specified timeout for all bulk requests to complete then
		// returns true; if the specified waiting time elapses before all bulk
		// requests complete, false is returned. The close method doesn't wait
		// for any remaining bulk requests to complete and exits immediately.
	}

}
